-rw-r--r--  ObsoleteFiles.inc | 8
-rw-r--r--  contrib/llvm/CMakeLists.txt | 2
-rw-r--r--  contrib/llvm/Makefile | 6
-rw-r--r--  contrib/llvm/Makefile.config.in | 14
-rw-r--r--  contrib/llvm/Makefile.rules | 108
-rw-r--r--  contrib/llvm/autoconf/configure.ac | 7
-rw-r--r--  contrib/llvm/autoconf/m4/link_options.m4 | 23
-rw-r--r--  contrib/llvm/bindings/ada/llvm/llvm.ads | 6
-rwxr-xr-x  contrib/llvm/configure | 203
-rw-r--r--  contrib/llvm/include/llvm-c/Core.h | 3
-rw-r--r--  contrib/llvm/include/llvm-c/Target.h | 3
-rw-r--r--  contrib/llvm/include/llvm-c/lto.h | 2
-rw-r--r--  contrib/llvm/include/llvm/ADT/APFloat.h | 8
-rw-r--r--  contrib/llvm/include/llvm/ADT/APInt.h | 6
-rw-r--r--  contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h | 75
-rw-r--r--  contrib/llvm/include/llvm/ADT/DenseMap.h | 1
-rw-r--r--  contrib/llvm/include/llvm/ADT/EquivalenceClasses.h | 2
-rw-r--r--  contrib/llvm/include/llvm/ADT/FoldingSet.h | 83
-rw-r--r--  contrib/llvm/include/llvm/ADT/ImmutableIntervalMap.h | 12
-rw-r--r--  contrib/llvm/include/llvm/ADT/PostOrderIterator.h | 17
-rw-r--r--  contrib/llvm/include/llvm/ADT/SetVector.h | 8
-rw-r--r--  contrib/llvm/include/llvm/ADT/SmallPtrSet.h | 49
-rw-r--r--  contrib/llvm/include/llvm/ADT/SmallVector.h | 160
-rw-r--r--  contrib/llvm/include/llvm/ADT/Statistic.h | 4
-rw-r--r--  contrib/llvm/include/llvm/ADT/Triple.h | 7
-rw-r--r--  contrib/llvm/include/llvm/ADT/ValueMap.h | 6
-rw-r--r--  contrib/llvm/include/llvm/ADT/ilist.h | 1
-rw-r--r--  contrib/llvm/include/llvm/AbstractTypeUser.h | 3
-rw-r--r--  contrib/llvm/include/llvm/Analysis/AliasAnalysis.h | 21
-rw-r--r--  contrib/llvm/include/llvm/Analysis/CFGPrinter.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Analysis/CaptureTracking.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Analysis/CodeMetrics.h | 72
-rw-r--r--  contrib/llvm/include/llvm/Analysis/DebugInfo.h | 9
-rw-r--r--  contrib/llvm/include/llvm/Analysis/DominatorInternals.h | 18
-rw-r--r--  contrib/llvm/include/llvm/Analysis/Dominators.h | 22
-rw-r--r--  contrib/llvm/include/llvm/Analysis/InlineCost.h | 45
-rw-r--r--  contrib/llvm/include/llvm/Analysis/IntervalIterator.h | 22
-rw-r--r--  contrib/llvm/include/llvm/Analysis/Loads.h | 51
-rw-r--r--  contrib/llvm/include/llvm/Analysis/LoopInfo.h | 59
-rw-r--r--  contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ScalarEvolution.h | 8
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h | 15
-rw-r--r--  contrib/llvm/include/llvm/Analysis/ValueTracking.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Bitcode/ReaderWriter.h | 3
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/AsmPrinter.h | 142
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/CallingConvLower.h | 9
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/FastISel.h | 64
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h (renamed from contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h) | 24
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/GCMetadata.h | 96
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h | 28
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h | 7
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h | 1
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LiveInterval.h | 101
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h | 21
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h | 21
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h | 12
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineFunctionPass.h | 9
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineInstr.h | 30
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h | 2
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h | 4
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h | 2
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineOperand.h | 16
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h | 15
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/Passes.h | 15
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/PostRAHazardRecognizer.h | 94
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h | 4
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/RegisterCoalescer.h | 90
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h | 11
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/RuntimeLibcalls.h | 34
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SelectionDAG.h | 39
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h | 15
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 133
-rw-r--r--  contrib/llvm/include/llvm/CodeGen/SlotIndexes.h | 61
-rw-r--r--  contrib/llvm/include/llvm/Config/config.h.in | 3
-rw-r--r--  contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h | 4
-rw-r--r--  contrib/llvm/include/llvm/GlobalValue.h | 17
-rw-r--r--  contrib/llvm/include/llvm/InlineAsm.h | 3
-rw-r--r--  contrib/llvm/include/llvm/InstrTypes.h | 10
-rw-r--r--  contrib/llvm/include/llvm/Instructions.h | 77
-rw-r--r--  contrib/llvm/include/llvm/IntrinsicInst.h | 64
-rw-r--r--  contrib/llvm/include/llvm/Intrinsics.td | 6
-rw-r--r--  contrib/llvm/include/llvm/LinkAllPasses.h | 1
-rw-r--r--  contrib/llvm/include/llvm/MC/MCAssembler.h | 4
-rw-r--r--  contrib/llvm/include/llvm/MC/MCContext.h | 22
-rw-r--r--  contrib/llvm/include/llvm/MC/MCDirectives.h | 3
-rw-r--r--  contrib/llvm/include/llvm/MC/MCObjectStreamer.h | 56
-rw-r--r--  contrib/llvm/include/llvm/MC/MCObjectWriter.h | 2
-rw-r--r--  contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h | 6
-rw-r--r--  contrib/llvm/include/llvm/MC/MCParser/AsmParser.h | 44
-rw-r--r--  contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h | 7
-rw-r--r--  contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h | 32
-rw-r--r--  contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h | 66
-rw-r--r--  contrib/llvm/include/llvm/MC/MCSection.h | 6
-rw-r--r--  contrib/llvm/include/llvm/MC/MCSectionCOFF.h | 52
-rw-r--r--  contrib/llvm/include/llvm/MC/MCStreamer.h | 10
-rw-r--r--  contrib/llvm/include/llvm/MC/SectionKind.h | 64
-rw-r--r--  contrib/llvm/include/llvm/Module.h | 29
-rw-r--r--  contrib/llvm/include/llvm/Pass.h | 27
-rw-r--r--  contrib/llvm/include/llvm/PassAnalysisSupport.h | 13
-rw-r--r--  contrib/llvm/include/llvm/PassManagers.h | 5
-rw-r--r--  contrib/llvm/include/llvm/PassSupport.h | 8
-rw-r--r--  contrib/llvm/include/llvm/Support/CFG.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Support/COFF.h | 217
-rw-r--r--  contrib/llvm/include/llvm/Support/CallSite.h | 12
-rw-r--r--  contrib/llvm/include/llvm/Support/Dwarf.h | 87
-rw-r--r--  contrib/llvm/include/llvm/Support/ELF.h | 264
-rw-r--r--  contrib/llvm/include/llvm/Support/IRBuilder.h | 113
-rw-r--r--  contrib/llvm/include/llvm/Support/IRReader.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Support/MemoryBuffer.h | 31
-rw-r--r--  contrib/llvm/include/llvm/Support/Regex.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Support/StringPool.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Support/Timer.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Support/raw_ostream.h | 4
-rw-r--r--  contrib/llvm/include/llvm/SymbolTableListTraits.h | 5
-rw-r--r--  contrib/llvm/include/llvm/System/DataTypes.h.cmake | 52
-rw-r--r--  contrib/llvm/include/llvm/System/Path.h | 8
-rw-r--r--  contrib/llvm/include/llvm/Target/Target.td | 28
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetAsmParser.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetCallingConv.h | 142
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetInstrDesc.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetInstrInfo.h | 151
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetInstrItineraries.h | 3
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetLowering.h | 282
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetOpcodes.h | 48
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetOptions.h | 2
-rw-r--r--  contrib/llvm/include/llvm/Target/TargetRegisterInfo.h | 60
-rw-r--r--  contrib/llvm/include/llvm/Transforms/IPO.h | 5
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h | 18
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h | 4
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/Cloning.h | 30
-rw-r--r--  contrib/llvm/include/llvm/Transforms/Utils/Local.h | 11
-rw-r--r--  contrib/llvm/include/llvm/Type.h | 6
-rw-r--r--  contrib/llvm/include/llvm/Use.h | 1
-rw-r--r--  contrib/llvm/include/llvm/Value.h | 8
-rw-r--r--  contrib/llvm/lib/Analysis/AliasAnalysis.cpp | 4
-rw-r--r--  contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp | 1
-rw-r--r--  contrib/llvm/lib/Analysis/AliasDebugger.cpp | 12
-rw-r--r--  contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp | 145
-rw-r--r--  contrib/llvm/lib/Analysis/CMakeLists.txt | 1
-rw-r--r--  contrib/llvm/lib/Analysis/ConstantFolding.cpp | 18
-rw-r--r--  contrib/llvm/lib/Analysis/DebugInfo.cpp | 80
-rw-r--r--  contrib/llvm/lib/Analysis/DomPrinter.cpp | 4
-rw-r--r--  contrib/llvm/lib/Analysis/IPA/CallGraph.cpp | 8
-rw-r--r--  contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp | 38
-rw-r--r--  contrib/llvm/lib/Analysis/InlineCost.cpp | 27
-rw-r--r--  contrib/llvm/lib/Analysis/InstructionSimplify.cpp | 48
-rw-r--r--  contrib/llvm/lib/Analysis/Lint.cpp | 239
-rw-r--r--  contrib/llvm/lib/Analysis/Loads.cpp | 235
-rw-r--r--  contrib/llvm/lib/Analysis/LoopInfo.cpp | 7
-rw-r--r--  contrib/llvm/lib/Analysis/MemoryBuiltins.cpp | 22
-rw-r--r--  contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp | 29
-rw-r--r--  contrib/llvm/lib/Analysis/PostDominators.cpp | 5
-rw-r--r--  contrib/llvm/lib/Analysis/ProfileInfo.cpp | 16
-rw-r--r--  contrib/llvm/lib/Analysis/ScalarEvolution.cpp | 325
-rw-r--r--  contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp | 40
-rw-r--r--  contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp | 135
-rw-r--r--  contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp | 15
-rw-r--r--  contrib/llvm/lib/Analysis/ValueTracking.cpp | 4
-rw-r--r--  contrib/llvm/lib/Archive/ArchiveWriter.cpp | 12
-rw-r--r--  contrib/llvm/lib/AsmParser/LLLexer.cpp | 1
-rw-r--r--  contrib/llvm/lib/AsmParser/LLParser.cpp | 136
-rw-r--r--  contrib/llvm/lib/AsmParser/LLParser.h | 1
-rw-r--r--  contrib/llvm/lib/AsmParser/LLToken.h | 6
-rw-r--r--  contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 26
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp | 33
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp | 13
-rw-r--r--  contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h | 7
-rw-r--r--  contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp | 83
-rw-r--r--  contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h | 1
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 96
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp | 18
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 246
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h | 18
-rw-r--r--  contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/BranchFolding.cpp | 68
-rw-r--r--  contrib/llvm/lib/CodeGen/BranchFolding.h | 5
-rw-r--r--  contrib/llvm/lib/CodeGen/CMakeLists.txt | 7
-rw-r--r--  contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/CallingConvLower.cpp (renamed from contrib/llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp) | 14
-rw-r--r--  contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp | 158
-rw-r--r--  contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h | 5
-rw-r--r--  contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp | 110
-rw-r--r--  contrib/llvm/lib/CodeGen/ELFCodeEmitter.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/ExactHazardRecognizer.h | 86
-rw-r--r--  contrib/llvm/lib/CodeGen/GCStrategy.cpp | 6
-rw-r--r--  contrib/llvm/lib/CodeGen/IfConversion.cpp | 400
-rw-r--r--  contrib/llvm/lib/CodeGen/InlineSpiller.cpp | 408
-rw-r--r--  contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp | 51
-rw-r--r--  contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp | 15
-rw-r--r--  contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveInterval.cpp | 83
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp | 236
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/LiveVariables.cpp | 9
-rw-r--r--  contrib/llvm/lib/CodeGen/LowerSubregs.cpp | 217
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp | 129
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineCSE.cpp | 46
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineDominators.cpp | 1
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineFunction.cpp | 16
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineInstr.cpp | 113
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineLICM.cpp | 121
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp | 23
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp | 130
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineSink.cpp | 102
-rw-r--r--  contrib/llvm/lib/CodeGen/MachineVerifier.cpp | 3
-rw-r--r--  contrib/llvm/lib/CodeGen/OptimizeExts.cpp | 24
-rw-r--r--  contrib/llvm/lib/CodeGen/OptimizePHIs.cpp | 5
-rw-r--r--  contrib/llvm/lib/CodeGen/PBQP/HeuristicSolver.h | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/PBQP/Heuristics/Briggs.h | 5
-rw-r--r--  contrib/llvm/lib/CodeGen/PHIElimination.cpp | 63
-rw-r--r--  contrib/llvm/lib/CodeGen/Passes.cpp | 26
-rw-r--r--  contrib/llvm/lib/CodeGen/PostRAHazardRecognizer.cpp (renamed from contrib/llvm/lib/CodeGen/ExactHazardRecognizer.cpp) | 26
-rw-r--r--  contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp | 51
-rw-r--r--  contrib/llvm/lib/CodeGen/PreAllocSplitting.cpp | 89
-rw-r--r--  contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp | 69
-rw-r--r--  contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp | 65
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocFast.cpp | 226
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp | 33
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocLocal.cpp | 1254
-rw-r--r--  contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp | 25
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp | 156
-rw-r--r--  contrib/llvm/lib/CodeGen/RegisterScavenging.cpp | 33
-rw-r--r--  contrib/llvm/lib/CodeGen/ScheduleDAG.cpp | 37
-rw-r--r--  contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp | 14
-rw-r--r--  contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h | 5
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/CMakeLists.txt | 1
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 267
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp | 327
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp | 63
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 133
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 335
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp | 40
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 150
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp | 72
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp | 8
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 140
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp | 17
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp | 241
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h | 5
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 158
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 1257
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h | 12
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 421
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp | 2
-rw-r--r--  contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp | 218
-rw-r--r--  contrib/llvm/lib/CodeGen/ShadowStackGC.cpp | 14
-rw-r--r--  contrib/llvm/lib/CodeGen/SimpleHazardRecognizer.h | 89
-rw-r--r--  contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp | 1790
-rw-r--r--  contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.h | 84
-rw-r--r--  contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp | 114
-rw-r--r--  contrib/llvm/lib/CodeGen/SlotIndexes.cpp | 4
-rw-r--r--  contrib/llvm/lib/CodeGen/Spiller.cpp | 209
-rw-r--r--  contrib/llvm/lib/CodeGen/Spiller.h | 18
-rw-r--r--  contrib/llvm/lib/CodeGen/StackProtector.cpp | 16
-rw-r--r--  contrib/llvm/lib/CodeGen/StackSlotColoring.cpp | 14
-rw-r--r--  contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp | 22
-rw-r--r--  contrib/llvm/lib/CodeGen/TailDuplication.cpp | 18
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp | 182
-rw-r--r--  contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp | 112
-rw-r--r--  contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp | 303
-rw-r--r--  contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp | 110
-rw-r--r--  contrib/llvm/lib/CompilerDriver/Tool.cpp | 3
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp | 2
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp | 2
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp | 54
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JIT.h | 34
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp | 10
-rw-r--r--  contrib/llvm/lib/Linker/LinkItems.cpp | 29
-rw-r--r--  contrib/llvm/lib/MC/CMakeLists.txt | 3
-rw-r--r--  contrib/llvm/lib/MC/MCAsmStreamer.cpp | 24
-rw-r--r--  contrib/llvm/lib/MC/MCAssembler.cpp | 18
-rw-r--r--  contrib/llvm/lib/MC/MCContext.cpp | 11
-rw-r--r--  contrib/llvm/lib/MC/MCExpr.cpp | 6
-rw-r--r--  contrib/llvm/lib/MC/MCMachOStreamer.cpp | 161
-rw-r--r--  contrib/llvm/lib/MC/MCObjectStreamer.cpp | 39
-rw-r--r--  contrib/llvm/lib/MC/MCParser/AsmLexer.cpp | 7
-rw-r--r--  contrib/llvm/lib/MC/MCParser/AsmParser.cpp | 788
-rw-r--r--  contrib/llvm/lib/MC/MCParser/CMakeLists.txt | 3
-rw-r--r--  contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp | 758
-rw-r--r--  contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp | 68
-rw-r--r--  contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp | 6
-rw-r--r--  contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp | 6
-rw-r--r--  contrib/llvm/lib/MC/MCParser/MCAsmParserExtension.cpp | 21
-rw-r--r--  contrib/llvm/lib/MC/MCSectionCOFF.cpp | 14
-rw-r--r--  contrib/llvm/lib/MC/MachObjectWriter.cpp | 61
-rw-r--r--  contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp | 71
-rw-r--r--  contrib/llvm/lib/MC/WinCOFFStreamer.cpp | 198
-rw-r--r--  contrib/llvm/lib/Support/APFloat.cpp | 9
-rw-r--r--  contrib/llvm/lib/Support/APInt.cpp | 6
-rw-r--r--  contrib/llvm/lib/Support/CMakeLists.txt | 1
-rw-r--r--  contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp | 357
-rw-r--r--  contrib/llvm/lib/Support/DeltaAlgorithm.cpp | 4
-rw-r--r--  contrib/llvm/lib/Support/Dwarf.cpp | 511
-rw-r--r--  contrib/llvm/lib/Support/FileUtilities.cpp | 26
-rw-r--r--  contrib/llvm/lib/Support/FoldingSet.cpp | 20
-rw-r--r--  contrib/llvm/lib/Support/MemoryBuffer.cpp | 180
-rw-r--r--  contrib/llvm/lib/Support/PrettyStackTrace.cpp | 23
-rw-r--r--  contrib/llvm/lib/Support/Regex.cpp | 4
-rw-r--r--  contrib/llvm/lib/Support/SmallPtrSet.cpp | 9
-rw-r--r--  contrib/llvm/lib/Support/SmallVector.cpp | 21
-rw-r--r--  contrib/llvm/lib/Support/StringPool.cpp | 2
-rw-r--r--  contrib/llvm/lib/Support/Timer.cpp | 10
-rw-r--r--  contrib/llvm/lib/Support/Triple.cpp | 5
-rw-r--r--  contrib/llvm/lib/Support/raw_ostream.cpp | 8
-rw-r--r--  contrib/llvm/lib/System/Disassembler.cpp | 16
-rw-r--r--  contrib/llvm/lib/System/Path.cpp | 29
-rw-r--r--  contrib/llvm/lib/System/Unix/Path.inc | 23
-rw-r--r--  contrib/llvm/lib/System/Unix/Program.inc | 5
-rw-r--r--  contrib/llvm/lib/System/Unix/Signals.inc | 17
-rw-r--r--  contrib/llvm/lib/System/Win32/Path.inc | 6
-rw-r--r--  contrib/llvm/lib/System/Win32/Signals.inc | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARM.h | 4
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARM.td | 4
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMAddressingModes.h | 65
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 629
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h | 87
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp | 191
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h | 24
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp | 174
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp | 58
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h | 1
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp | 10
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp | 551
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp | 1225
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMISelLowering.h | 52
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrFormats.td | 44
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrInfo.h | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrInfo.td | 169
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrNEON.td | 224
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrThumb.td | 23
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td | 100
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMInstrVFP.td | 8
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMJITInfo.h | 3
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 194
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h | 12
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td | 87
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMScheduleA8.td | 84
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMScheduleA9.td | 364
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMScheduleV6.td | 2
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMSubtarget.h | 4
-rw-r--r--  contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp | 30
-rw-r--r--  contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp | 37
-rw-r--r--  contrib/llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp | 55
-rw-r--r--  contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp | 26
-rw-r--r--  contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h | 5
-rw-r--r--  contrib/llvm/lib/Target/ARM/CMakeLists.txt | 1
-rw-r--r--  contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp | 113
-rw-r--r--  contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.h | 8
-rw-r--r--  contrib/llvm/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h | 42
-rw-r--r--  contrib/llvm/lib/Target/ARM/NEONMoveFix.cpp | 4
-rw-r--r--  contrib/llvm/lib/Target/ARM/NEONPreAllocPass.cpp | 51
-rw-r--r--  contrib/llvm/lib/Target/ARM/README.txt | 67
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp | 125
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h | 24
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp | 17
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h | 6
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.cpp | 53
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.h | 40
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp | 160
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp | 192
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h | 39
-rw-r--r--  contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp | 16
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp | 39
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h | 2
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaInstrFormats.td | 4
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.cpp | 109
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.h | 27
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td | 34
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaMCAsmInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp | 14
-rw-r--r--  contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h | 3
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp | 4
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp | 13
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h | 2
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.cpp | 108
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.h | 15
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td | 12
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinMCAsmInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp | 30
-rw-r--r--  contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h | 6
-rw-r--r--  contrib/llvm/lib/Target/CBackend/CBackend.cpp | 50
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUCallingConv.td | 82
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUFrameInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp | 15
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp | 126
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h | 3
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp | 131
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h | 33
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPUNodes.td | 2
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp | 57
-rw-r--r--  contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h | 13
-rw-r--r--  contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp | 3341
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/AsmPrinter/MBlazeAsmPrinter.cpp | 5
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp | 88
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp | 65
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h | 25
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp | 16
-rw-r--r--  contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h | 3
-rw-r--r--  contrib/llvm/lib/Target/MSIL/MSILWriter.cpp | 26
-rw-r--r--  contrib/llvm/lib/Target/MSIL/MSILWriter.h | 2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp | 3
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp | 43
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h | 3
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp | 39
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h | 12
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td | 455
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430MCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430MCAsmInfo.h | 5
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp | 46
-rw-r--r--  contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h | 3
-rw-r--r--  contrib/llvm/lib/Target/Mangler.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/Mips/AsmPrinter/MipsAsmPrinter.cpp | 5
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp | 55
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsISelLowering.h | 2
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp | 213
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsInstrInfo.h | 25
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsInstrInfo.td | 2
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsMCAsmInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp | 35
-rw-r--r--  contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h | 3
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.cpp | 46
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.h | 4
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.cpp | 37
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.h | 13
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.td | 18
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.h | 2
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16MemSelOpt.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp | 24
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h | 4
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.cpp | 7
-rw-r--r--  contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.h | 4
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 124
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h | 9
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp | 184
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h | 30
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp | 166
-rw-r--r--  contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h | 3
-rw-r--r--  contrib/llvm/lib/Target/README.txt | 134
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp | 61
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcISelLowering.h | 2
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp | 98
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h | 25
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td | 2
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcMCAsmInfo.cpp | 3
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcMCAsmInfo.h | 5
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp | 7
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h | 3
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/AsmPrinter/SystemZAsmPrinter.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp | 4
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp | 29
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h | 3
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td | 4
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp | 96
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h | 12
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td | 17
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.h | 2
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp | 16
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h | 3
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td | 10
-rw-r--r--  contrib/llvm/lib/Target/TargetInstrInfo.cpp | 4
-rw-r--r--  contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/TargetMachine.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/TargetRegisterInfo.cpp | 12
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp | 85
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp | 111
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp | 21
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h | 11
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp | 20
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp | 23
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h | 15
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp | 72
-rw-r--r--  contrib/llvm/lib/Target/X86/Disassembler/CMakeLists.txt | 4
-rw-r--r--  contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp | 15
-rw-r--r--  contrib/llvm/lib/Target/X86/README-SSE.txt | 184
-rw-r--r--  contrib/llvm/lib/Target/X86/README-X86-64.txt | 249
-rw-r--r--  contrib/llvm/lib/Target/X86/README.txt | 103
-rw-r--r--  contrib/llvm/lib/Target/X86/X86.h | 4
-rw-r--r--  contrib/llvm/lib/Target/X86/X86AsmBackend.cpp | 8
-rw-r--r--  contrib/llvm/lib/Target/X86/X86COFF.h | 95
-rw-r--r--  contrib/llvm/lib/Target/X86/X86CallingConv.td | 12
-rw-r--r--  contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp | 20
-rw-r--r--  contrib/llvm/lib/Target/X86/X86FastISel.cpp | 442
-rw-r--r--  contrib/llvm/lib/Target/X86/X86FixupKinds.h | 1
-rw-r--r--  contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp | 111
-rw-r--r--  contrib/llvm/lib/Target/X86/X86FloatingPointRegKill.cpp | 17
-rw-r--r--  contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 196
-rw-r--r--  contrib/llvm/lib/Target/X86/X86ISelLowering.cpp | 964
-rw-r--r--  contrib/llvm/lib/Target/X86/X86ISelLowering.h | 44
-rw-r--r--  contrib/llvm/lib/Target/X86/X86Instr64bit.td | 238
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrBuilder.h | 39
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrFPStack.td | 16
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrFormats.td | 93
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td | 336
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrInfo.cpp | 642
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrInfo.h | 170
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrInfo.td | 369
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrMMX.td | 14
-rw-r--r--  contrib/llvm/lib/Target/X86/X86InstrSSE.td | 6351
-rw-r--r--  contrib/llvm/lib/Target/X86/X86MCCodeEmitter.cpp | 546
-rw-r--r--  contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp | 117
-rw-r--r--  contrib/llvm/lib/Target/X86/X86RegisterInfo.h | 6
-rw-r--r--  contrib/llvm/lib/Target/X86/X86RegisterInfo.td | 32
-rw-r--r--  contrib/llvm/lib/Target/X86/X86Subtarget.cpp | 69
-rw-r--r--  contrib/llvm/lib/Target/X86/X86Subtarget.h | 47
-rw-r--r--  contrib/llvm/lib/Target/X86/X86TargetMachine.cpp | 8
-rw-r--r--  contrib/llvm/lib/Target/XCore/AsmPrinter/XCoreAsmPrinter.cpp | 1
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp | 61
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreISelLowering.h | 8
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp | 70
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h | 15
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td | 2
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreMCAsmInfo.cpp | 2
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreMCAsmInfo.h | 5
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp | 14
-rw-r--r--  contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h | 3
-rw-r--r--  contrib/llvm/lib/Transforms/Hello/Hello.cpp | 4
-rw-r--r--  contrib/llvm/lib/Transforms/Hello/Hello.exports | 0
-rw-r--r--  contrib/llvm/lib/Transforms/Hello/Makefile | 8
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp | 15
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp | 7
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp | 51
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp | 7
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/Inliner.cpp | 4
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp | 18
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp | 8
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp | 10
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/PartialSpecialization.cpp | 48
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp | 133
-rw-r--r--  contrib/llvm/lib/Transforms/IPO/StructRetPromotion.cpp | 10
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombine.h | 3
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 45
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp | 156
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp | 15
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp | 37
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 35
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp | 5
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp | 59
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp | 2
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp | 16
-rw-r--r--  contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 76
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp | 8
-rw-r--r--  contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp | 12
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/ABCD.cpp | 6
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/ADCE.cpp | 2
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp | 8
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp | 59
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/GVN.cpp | 61
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp | 16
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp | 73
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp | 6
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp | 28
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp | 4
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp | 154
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp | 28
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp | 4
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp | 5
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp | 53
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp | 3
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp | 213
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/TailDuplication.cpp | 5
-rw-r--r--  contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp | 96
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp | 21
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp | 118
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp | 23
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp | 67
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp | 100
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/CloneLoop.cpp | 33
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/CloneModule.cpp | 56
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp | 22
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp | 23
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/LCSSA.cpp | 7
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/Local.cpp | 118
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp | 49
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp | 20
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp | 140
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp | 16
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 86
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp | 10
-rw-r--r--  contrib/llvm/lib/Transforms/Utils/ValueMapper.h | 4
-rw-r--r--  contrib/llvm/lib/VMCore/AsmWriter.cpp | 47
-rw-r--r--  contrib/llvm/lib/VMCore/AutoUpgrade.cpp | 48
-rw-r--r--  contrib/llvm/lib/VMCore/ConstantFold.cpp | 13
-rw-r--r--  contrib/llvm/lib/VMCore/Core.cpp | 18
-rw-r--r--  contrib/llvm/lib/VMCore/Instruction.cpp | 20
-rw-r--r--  contrib/llvm/lib/VMCore/Instructions.cpp | 65
-rw-r--r--  contrib/llvm/lib/VMCore/IntrinsicInst.cpp | 6
-rw-r--r--  contrib/llvm/lib/VMCore/Metadata.cpp | 4
-rw-r--r--  contrib/llvm/lib/VMCore/Module.cpp | 9
-rw-r--r--  contrib/llvm/lib/VMCore/Pass.cpp | 45
-rw-r--r--  contrib/llvm/lib/VMCore/PassManager.cpp | 5
-rw-r--r--  contrib/llvm/lib/VMCore/Value.cpp | 10
-rw-r--r--  contrib/llvm/lib/VMCore/Verifier.cpp | 44
-rw-r--r--  contrib/llvm/tools/Makefile | 5
-rw-r--r--  contrib/llvm/tools/bugpoint/BugDriver.h | 6
-rw-r--r--  contrib/llvm/tools/bugpoint/CrashDebugger.cpp | 31
-rw-r--r--  contrib/llvm/tools/bugpoint/ExtractFunction.cpp | 28
-rw-r--r--  contrib/llvm/tools/bugpoint/ListReducer.h | 4
-rw-r--r--  contrib/llvm/tools/bugpoint/Miscompilation.cpp | 48
-rw-r--r--  contrib/llvm/tools/bugpoint/ToolRunner.h | 2
-rw-r--r--  contrib/llvm/tools/clang/CMakeLists.txt | 35
-rw-r--r--  contrib/llvm/tools/clang/Makefile | 57
-rw-r--r--  contrib/llvm/tools/clang/NOTES.txt | 3
-rw-r--r--  contrib/llvm/tools/clang/README.txt | 2
-rw-r--r--  contrib/llvm/tools/clang/include/Makefile | 4
-rw-r--r--  contrib/llvm/tools/clang/include/clang-c/Index.h | 21
-rw-r--r--  contrib/llvm/tools/clang/include/clang-c/Makefile | 4
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ASTContext.h | 121
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Attr.h | 229
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/CMakeLists.txt | 15
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h | 3
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Decl.h | 243
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclBase.h | 64
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h | 206
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h | 7
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h | 42
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h | 310
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h | 17
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Expr.h | 148
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h | 220
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h | 103
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Makefile | 20
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h | 1913
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h | 5
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Stmt.h | 75
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h | 1
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h | 26
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TemplateName.h | 11
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Type.h | 231
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h | 126
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TypeLocBuilder.h | 9
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def | 1
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/UsuallyTinyPtrVector.h | 11
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PrintfFormatString.h | 214
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h | 1
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h | 62
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Attr.td | 382
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h | 31
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Builtins.def | 2
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def | 8
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def | 111
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/CMakeLists.txt | 12
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td | 70
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h | 56
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td | 1
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td | 8
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td | 15
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td | 5
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td | 19
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td | 188
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/FileManager.h | 8
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h | 6
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h | 47
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Makefile | 41
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h | 12
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h | 50
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td (renamed from contrib/llvm/tools/clang/include/clang/AST/StmtNodes.td) | 11
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h | 43
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h | 8
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Version.h | 10
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Version.inc.in | 6
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td | 341
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/AnalysisConsumer.h | 35
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/BugReporter/BugReporter.h | 1
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/FrontendActions.h | 29
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathDiagnosticClients.h (renamed from contrib/llvm/tools/clang/include/clang/Frontend/PathDiagnosticClients.h) | 4
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Checker.h | 30
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerVisitor.h | 9
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Environment.h | 2
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ExplodedGraph.h | 10
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRCoreEngine.h | 18
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRExprEngine.h | 31
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRState.h | 27
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSubEngine.h | 28
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRTransferFuncs.h | 2
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/MemRegion.h | 77
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SVals.h | 4
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SValuator.h | 8
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Store.h | 31
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SymbolManager.h | 43
-rw-r--r--  contrib/llvm/tools/clang/include/clang/CodeGen/BackendUtil.h | 37
-rw-r--r--  contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h (renamed from contrib/llvm/tools/clang/include/clang/Frontend/CodeGenAction.h) | 9
-rw-r--r--  contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h | 3
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Action.h | 13
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Arg.h | 181
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/ArgList.h | 99
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td | 24
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Compilation.h | 13
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Driver.h | 14
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h | 2
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Makefile | 4
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/OptTable.h | 3
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Option.h | 49
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Options.td | 110
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h | 14
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Types.def | 10
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Types.h | 4
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h | 16
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h (renamed from contrib/llvm/tools/clang/include/clang/Frontend/AnalysisConsumer.h) | 18
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h (renamed from contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenOptions.h) | 16
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h | 7
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h | 4
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h | 5
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h | 67
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h | 54
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h | 23
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/FrontendPluginRegistry.h | 2
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/PCHBitCodes.h | 59
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/PCHDeserializationListener.h | 36
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/PCHReader.h | 194
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/PCHWriter.h | 62
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h | 9
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/TypeXML.def | 5
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/Utils.h | 6
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/CallGraph.h | 4
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/Entity.h | 4
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/Indexer.h | 7
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/TranslationUnit.h | 2
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h | 12
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/Pragma.h | 33
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h | 17
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/Token.h | 8
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Makefile | 4
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Parse/Action.h | 305
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Parse/AttributeList.h | 1
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Parse/DeclSpec.h | 10
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Parse/Parser.h | 14
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Parse/Template.h | 2
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h | 45
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h (renamed from contrib/llvm/tools/clang/include/clang/Frontend/FixItRewriter.h) | 6
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h | 69
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/RewriteRope.h | 1
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h | 17
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h | 31
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h | 24
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTContext.cpp | 569
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp | 93
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp | 9
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/CMakeLists.txt | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp | 38
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Decl.cpp | 161
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclBase.cpp | 282
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp | 124
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp | 23
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp | 47
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp | 119
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Expr.cpp | 429
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp | 101
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp | 471
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp | 89
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Makefile | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp | 972
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Stmt.cpp | 132
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp | 34
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp | 13
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp | 27
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/TemplateName.cpp | 11
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Type.cpp | 133
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp | 51
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CFG.cpp | 29
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CMakeLists.txt | 3
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp | 22
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/Makefile | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp | 673
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/CMakeLists.txt | 3
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp | 19
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/FileManager.cpp | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Makefile | 9
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp | 9
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Targets.cpp | 174
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/AnalysisConsumer.cpp (renamed from contrib/llvm/tools/clang/lib/Frontend/AnalysisConsumer.cpp) | 37
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp | 5
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp | 124
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp | 93
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h | 3
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp | 99
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp | 15
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp | 201
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt | 30
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CStringChecker.cpp | 525
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp | 21
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/Environment.cpp | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp | 18
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/FrontendActions.cpp | 21
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp | 177
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp | 3
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h | 3
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/GRState.cpp | 31
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/HTMLDiagnostics.cpp (renamed from contrib/llvm/tools/clang/lib/Frontend/HTMLDiagnostics.cpp) | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/IdempotentOperationChecker.cpp | 454
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp | 50
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/Makefile | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp | 256
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp | 179
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp | 8
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp | 13
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/PlistDiagnostics.cpp (renamed from contrib/llvm/tools/clang/lib/Frontend/PlistDiagnostics.cpp) | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp | 301
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp | 339
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/ReturnStackAddressChecker.cpp | 125
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/SVals.cpp | 10
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/SValuator.cpp | 8
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp | 166
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h | 26
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp | 619
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/StackAddrLeakChecker.cpp | 204
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/Store.cpp | 14
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/StreamChecker.cpp | 287
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp | 38
-rw-r--r--  contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp | 49
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h | 12
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp | 339
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp | 74
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h | 7
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h | 8
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp | 924
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp | 77
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h | 1
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp | 482
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.h | 8
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp | 156
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp | 49
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp | 186
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp | 195
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp | 1656
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGException.h | 428
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp | 449
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp | 118
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp | 111
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp | 101
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp | 102
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp | 505
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp | 29
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp | 377
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp | 833
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h | 10
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp | 22
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp | 275
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp | 144
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp | 427
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h | 17
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp | 348
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp | 801
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h | 703
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp | 370
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h | 60
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp | 82
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h | 34
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h | 10
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Makefile | 9
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp | 442
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/Mangle.h | 12
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp | 1191
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp | 392
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Action.cpp | 5
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Arg.cpp | 189
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ArgList.cpp | 111
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/CMakeLists.txt | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Compilation.cpp | 19
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Driver.cpp | 135
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp | 58
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Makefile | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/OptTable.cpp | 13
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Option.cpp | 106
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp | 7
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp | 203
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChains.h | 18
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Tools.cpp | 537
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Tools.h | 52
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Types.cpp | 18
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp | 21
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp | 7
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp | 18
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/BoostConAction.cpp | 10
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CMakeLists.txt | 12
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CodeGenAction.cpp | 593
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp | 32
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp | 235
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp | 54
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp | 96
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/GeneratePCH.cpp | 23
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp | 46
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp | 37
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/Makefile | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/PCHReader.cpp | 621
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/PCHReaderDecl.cpp | 609
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/PCHReaderStmt.cpp | 1064
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/PCHWriter.cpp | 517
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/PCHWriterDecl.cpp | 423
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/PCHWriterStmt.cpp | 488
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/PrintParserCallbacks.cpp | 3
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp | 63
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp | 63
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/CMakeLists.txt | 13
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/Makefile | 21
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/altivec.h | 5317
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/arm_neon.td | 341
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/emmintrin.h | 7
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/smmintrin.h | 16
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stddef.h | 7
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stdint.h | 4
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/xmmintrin.h | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/CallGraph.cpp | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/Entity.cpp | 77
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/EntityImpl.h | 1
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/Indexer.cpp | 21
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/Makefile | 12
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Lexer.cpp | 56
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp | 17
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Makefile | 8
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp | 12
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Pragma.cpp | 209
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp | 10
-rwxr-xr-x  contrib/llvm/tools/clang/lib/Makefile | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/AttributeList.cpp | 1
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/CMakeLists.txt | 2
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/DeclSpec.cpp | 55
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/Makefile | 6
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp | 52
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp | 123
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp | 186
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp | 89
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp144
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp4
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp144
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParsePragma.h16
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp35
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp134
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/Parser.cpp114
-rw-r--r--contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h17
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/CMakeLists.txt6
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp (renamed from contrib/llvm/tools/clang/lib/Frontend/FixItRewriter.cpp)2
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp106
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp (renamed from contrib/llvm/tools/clang/lib/Frontend/HTMLPrint.cpp)2
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Makefile6
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp (renamed from contrib/llvm/tools/clang/lib/Frontend/RewriteMacros.cpp)2
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp (renamed from contrib/llvm/tools/clang/lib/Frontend/RewriteObjC.cpp)47
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp2
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp (renamed from contrib/llvm/tools/clang/lib/Frontend/RewriteTest.cpp)2
-rw-r--r--contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp20
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/CMakeLists.txt3
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp70
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/Lookup.h5
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/Makefile6
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/Sema.cpp45
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/Sema.h260
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp30
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp13
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp80
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp6
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp458
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp1033
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp320
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp114
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp1385
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp52
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp9
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp722
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp260
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp14
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp199
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaInit.h10
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp446
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp105
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp667
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp170
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp663
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplate.h31
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp12
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp29
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp87
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/SemaType.cpp372
-rw-r--r--contrib/llvm/tools/clang/lib/Sema/TreeTransform.h297
-rw-r--r--contrib/llvm/tools/clang/runtime/Makefile (renamed from contrib/llvm/tools/clang/lib/Runtime/Makefile)11
-rw-r--r--contrib/llvm/tools/clang/tools/Makefile8
-rw-r--r--contrib/llvm/tools/clang/tools/c-index-test/Makefile7
-rw-r--r--contrib/llvm/tools/clang/tools/c-index-test/c-index-test.c47
-rw-r--r--contrib/llvm/tools/clang/tools/driver/CMakeLists.txt1
-rw-r--r--contrib/llvm/tools/clang/tools/driver/Makefile13
-rw-r--r--contrib/llvm/tools/clang/tools/driver/cc1_main.cpp20
-rw-r--r--contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp9
-rw-r--r--contrib/llvm/tools/clang/tools/libclang/CIndex.cpp41
-rw-r--r--contrib/llvm/tools/clang/tools/libclang/CIndexCodeCompletion.cpp5
-rw-r--r--contrib/llvm/tools/clang/tools/libclang/CIndexer.cpp1
-rw-r--r--contrib/llvm/tools/clang/tools/libclang/CMakeLists.txt3
-rw-r--r--contrib/llvm/tools/clang/tools/libclang/CXCursor.cpp8
-rw-r--r--contrib/llvm/tools/clang/tools/libclang/CXSourceLocation.h7
-rw-r--r--contrib/llvm/tools/clang/tools/libclang/CXTypes.cpp39
-rw-r--r--contrib/llvm/tools/clang/tools/libclang/Makefile11
-rw-r--r--contrib/llvm/tools/clang/tools/libclang/libclang.darwin.exports3
-rw-r--r--contrib/llvm/tools/clang/tools/libclang/libclang.exports4
-rwxr-xr-xcontrib/llvm/tools/clang/tools/scan-build/ccc-analyzer3
-rwxr-xr-xcontrib/llvm/tools/clang/utils/FuzzTest340
-rwxr-xr-xcontrib/llvm/tools/clang/utils/TestUtils/pch-test.pl2
-rw-r--r--contrib/llvm/tools/edis/EDDisassembler.cpp2
-rw-r--r--contrib/llvm/tools/edis/Makefile2
-rw-r--r--contrib/llvm/tools/gold/gold-plugin.cpp155
-rw-r--r--contrib/llvm/tools/llc/llc.cpp12
-rw-r--r--contrib/llvm/tools/llvm-extract/llvm-extract.cpp1
-rw-r--r--contrib/llvm/tools/llvm-link/llvm-link.cpp20
-rw-r--r--contrib/llvm/tools/llvm-mc/Makefile1
-rw-r--r--contrib/llvm/tools/llvm-mc/llvm-mc.cpp5
-rw-r--r--contrib/llvm/tools/llvm-nm/llvm-nm.cpp3
-rw-r--r--contrib/llvm/tools/llvmc/plugins/Base/Base.td.in12
-rw-r--r--contrib/llvm/tools/lto/LTOCodeGenerator.cpp10
-rw-r--r--contrib/llvm/tools/opt/GraphPrinters.cpp2
-rw-r--r--contrib/llvm/tools/opt/PrintSCC.cpp2
-rw-r--r--contrib/llvm/tools/opt/opt.cpp48
-rw-r--r--contrib/llvm/utils/FileUpdate/FileUpdate.cpp1
-rwxr-xr-xcontrib/llvm/utils/NewNightlyTest.pl4
-rw-r--r--contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp9
-rw-r--r--contrib/llvm/utils/TableGen/ARMDecoderEmitter.h2
-rw-r--r--contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp4
-rw-r--r--contrib/llvm/utils/TableGen/CMakeLists.txt4
-rw-r--r--contrib/llvm/utils/TableGen/ClangASTNodesEmitter.cpp116
-rw-r--r--contrib/llvm/utils/TableGen/ClangASTNodesEmitter.h58
-rw-r--r--contrib/llvm/utils/TableGen/ClangAttrEmitter.cpp84
-rw-r--r--contrib/llvm/utils/TableGen/ClangAttrEmitter.h49
-rw-r--r--contrib/llvm/utils/TableGen/CodeEmitterGen.cpp45
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenInstruction.cpp11
-rw-r--r--contrib/llvm/utils/TableGen/CodeGenTarget.cpp77
-rw-r--r--contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp5
-rw-r--r--contrib/llvm/utils/TableGen/EDEmitter.cpp13
-rw-r--r--contrib/llvm/utils/TableGen/FastISelEmitter.cpp40
-rw-r--r--contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp5
-rw-r--r--contrib/llvm/utils/TableGen/NeonEmitter.cpp1202
-rw-r--r--contrib/llvm/utils/TableGen/NeonEmitter.h122
-rw-r--r--contrib/llvm/utils/TableGen/Record.cpp29
-rw-r--r--contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp134
-rw-r--r--contrib/llvm/utils/TableGen/TGParser.cpp165
-rw-r--r--contrib/llvm/utils/TableGen/TGParser.h11
-rw-r--r--contrib/llvm/utils/TableGen/TableGen.cpp115
-rw-r--r--contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp19
-rw-r--r--contrib/llvm/utils/TableGen/X86RecognizableInstr.h2
-rw-r--r--contrib/llvm/utils/buildit/GNUmakefile4
-rwxr-xr-xcontrib/llvm/utils/buildit/build_llvm73
-rw-r--r--contrib/llvm/utils/count/count.c6
-rw-r--r--contrib/llvm/utils/lit/lit/TestRunner.py40
-rw-r--r--contrib/llvm/utils/unittest/UnitTestMain/Makefile8
-rw-r--r--contrib/llvm/utils/unittest/googletest/Makefile7
-rw-r--r--contrib/llvm/utils/unittest/googletest/README.LLVM10
-rw-r--r--contrib/llvm/utils/unittest/googletest/gtest-death-test.cc849
-rw-r--r--contrib/llvm/utils/unittest/googletest/gtest-filepath.cc197
-rw-r--r--contrib/llvm/utils/unittest/googletest/gtest-port.cc541
-rw-r--r--contrib/llvm/utils/unittest/googletest/gtest-test-part.cc48
-rw-r--r--contrib/llvm/utils/unittest/googletest/gtest-typed-test.cc17
-rw-r--r--contrib/llvm/utils/unittest/googletest/gtest.cc2403
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/gtest-death-test.h84
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/gtest-message.h18
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/gtest-param-test.h23
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/gtest-spi.h43
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/gtest-test-part.h65
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/gtest-typed-test.h14
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/gtest.h1097
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-death-test-internal.h96
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-filepath.h22
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal-inl.h1015
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal.h181
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-linked_ptr.h2
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util-generated.h310
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util.h68
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-port.h1212
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-string.h165
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-tuple.h968
-rw-r--r--contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-type-util.h18
-rw-r--r--etc/mtree/BSD.include.dist2
-rw-r--r--lib/clang/Makefile1
-rw-r--r--lib/clang/clang.build.mk38
-rw-r--r--lib/clang/include/Makefile9
-rw-r--r--lib/clang/include/clang/AST/Attrs.inc2
-rw-r--r--lib/clang/include/clang/AST/DeclNodes.inc2
-rw-r--r--lib/clang/include/clang/Basic/AttrList.inc2
-rw-r--r--lib/clang/include/clang/Basic/Version.inc10
-rw-r--r--lib/clang/include/clang/Basic/arm_neon.inc2
-rw-r--r--lib/clang/include/llvm/Config/AsmParsers.def28
-rw-r--r--lib/clang/include/llvm/Config/AsmPrinters.def30
-rw-r--r--lib/clang/include/llvm/Config/Disassemblers.def27
-rw-r--r--lib/clang/include/llvm/Config/Targets.def29
-rw-r--r--lib/clang/libclanganalysis/Makefile14
-rw-r--r--lib/clang/libclangast/Makefile54
-rw-r--r--lib/clang/libclangbasic/Makefile32
-rw-r--r--lib/clang/libclangchecker/Makefile107
-rw-r--r--lib/clang/libclangcodegen/Makefile51
-rw-r--r--lib/clang/libclangdriver/Makefile29
-rw-r--r--lib/clang/libclangfrontend/Makefile60
-rw-r--r--lib/clang/libclanglex/Makefile30
-rw-r--r--lib/clang/libclangparse/Makefile23
-rw-r--r--lib/clang/libclangrewrite/Makefile18
-rw-r--r--lib/clang/libclangsema/Makefile55
-rw-r--r--lib/clang/libllvmanalysis/Makefile61
-rw-r--r--lib/clang/libllvmarmasmparser/Makefile7
-rw-r--r--lib/clang/libllvmarmasmprinter/Makefile8
-rw-r--r--lib/clang/libllvmarmcodegen/Makefile50
-rw-r--r--lib/clang/libllvmarminfo/Makefile3
-rw-r--r--lib/clang/libllvmasmparser/Makefile10
-rw-r--r--lib/clang/libllvmasmprinter/Makefile9
-rw-r--r--lib/clang/libllvmbitreader/Makefile3
-rw-r--r--lib/clang/libllvmbitwriter/Makefile4
-rw-r--r--lib/clang/libllvmcodegen/Makefile108
-rw-r--r--lib/clang/libllvmcore/Makefile41
-rw-r--r--lib/clang/libllvminstcombine/Makefile18
-rw-r--r--lib/clang/libllvmipa/Makefile4
-rw-r--r--lib/clang/libllvmipo/Makefile29
-rw-r--r--lib/clang/libllvmmc/Makefile35
-rw-r--r--lib/clang/libllvmmcparser/Makefile8
-rw-r--r--lib/clang/libllvmmipsasmprinter/Makefile4
-rw-r--r--lib/clang/libllvmmipscodegen/Makefile29
-rw-r--r--lib/clang/libllvmmipsinfo/Makefile3
-rw-r--r--lib/clang/libllvmpowerpcasmprinter/Makefile4
-rw-r--r--lib/clang/libllvmpowerpccodegen/Makefile31
-rw-r--r--lib/clang/libllvmpowerpcinfo/Makefile3
-rw-r--r--lib/clang/libllvmscalaropts/Makefile38
-rw-r--r--lib/clang/libllvmselectiondag/Makefile27
-rw-r--r--lib/clang/libllvmsupport/Makefile56
-rw-r--r--lib/clang/libllvmsystem/Makefile23
-rw-r--r--lib/clang/libllvmtarget/Makefile17
-rw-r--r--lib/clang/libllvmtransformutils/Makefile33
-rw-r--r--lib/clang/libllvmx86asmparser/Makefile7
-rw-r--r--lib/clang/libllvmx86asmprinter/Makefile14
-rw-r--r--lib/clang/libllvmx86codegen/Makefile38
-rw-r--r--lib/clang/libllvmx86info/Makefile3
-rw-r--r--usr.bin/clang/clang/Makefile70
-rw-r--r--usr.bin/clang/tblgen/Makefile49
1153 files changed, 76398 insertions, 39682 deletions
diff --git a/ObsoleteFiles.inc b/ObsoleteFiles.inc
index 990792a..b49e65b 100644
--- a/ObsoleteFiles.inc
+++ b/ObsoleteFiles.inc
@@ -14,6 +14,14 @@
# The file is partitioned: OLD_FILES first, then OLD_LIBS and OLD_DIRS last.
#
+# 20100720: new clang import which bumps version from 2.0 to 2.8
+OLD_FILES+=usr/include/clang/2.0/emmintrin.h
+OLD_FILES+=usr/include/clang/2.0/mm_malloc.h
+OLD_FILES+=usr/include/clang/2.0/mmintrin.h
+OLD_FILES+=usr/include/clang/2.0/pmmintrin.h
+OLD_FILES+=usr/include/clang/2.0/tmmintrin.h
+OLD_FILES+=usr/include/clang/2.0/xmmintrin.h
+OLD_DIRS+=usr/include/clang/2.0
# 20100706: removed pc-sysinstall's detect-vmware.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-vmware.sh
# 20100701: [powerpc] removed <machine/intr.h>
diff --git a/contrib/llvm/CMakeLists.txt b/contrib/llvm/CMakeLists.txt
index 9d0180b..d4f2221 100644
--- a/contrib/llvm/CMakeLists.txt
+++ b/contrib/llvm/CMakeLists.txt
@@ -4,7 +4,7 @@ project(LLVM)
cmake_minimum_required(VERSION 2.6.1)
set(PACKAGE_NAME llvm)
-set(PACKAGE_VERSION 2.7svn)
+set(PACKAGE_VERSION 2.8svn)
set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
set(PACKAGE_BUGREPORT "llvmbugs@cs.uiuc.edu")
diff --git a/contrib/llvm/Makefile b/contrib/llvm/Makefile
index 670c174..d42f887 100644
--- a/contrib/llvm/Makefile
+++ b/contrib/llvm/Makefile
@@ -64,7 +64,7 @@ endif
ifeq ($(MAKECMDGOALS),install-clang)
DIRS := tools/clang/tools/driver tools/clang/lib/Headers \
- tools/clang/lib/Runtime tools/clang/docs
+ tools/clang/runtime tools/clang/docs
OPTIONAL_DIRS :=
NO_INSTALL = 1
endif
@@ -180,8 +180,8 @@ $(FilesToConfigPATH) : $(LLVM_OBJ_ROOT)/% : $(LLVM_SRC_ROOT)/%.in
# that it gets executed last.
ifneq ($(BUILD_DIRS_ONLY),1)
all::
- $(Echo) '*****' Completed $(BuildMode)$(AssertMode) Build
-ifeq ($(BuildMode),Debug)
+ $(Echo) '*****' Completed $(BuildMode) Build
+ifneq ($(ENABLE_OPTIMIZED),1)
$(Echo) '*****' Note: Debug build can be 10 times slower than an
$(Echo) '*****' optimized build. Use 'make ENABLE_OPTIMIZED=1' to
$(Echo) '*****' make an optimized build. Alternatively you can
diff --git a/contrib/llvm/Makefile.config.in b/contrib/llvm/Makefile.config.in
index ec11bb3..1d54b31 100644
--- a/contrib/llvm/Makefile.config.in
+++ b/contrib/llvm/Makefile.config.in
@@ -222,8 +222,8 @@ RDYNAMIC := @RDYNAMIC@
# When ENABLE_PROFILING is enabled, profile instrumentation is done
# and output is put into the "<Flavor>+Profile" directories, where
-# <Flavor> is either Debug or Release depending on how other builkd
-# flags are set.. Otherwise, output is put in the <Flavor>
+# <Flavor> is either Debug or Release depending on how other build
+# flags are set. Otherwise, output is put in the <Flavor>
# directories.
#ENABLE_PROFILING = 1
@ENABLE_PROFILING@
@@ -320,12 +320,6 @@ endif
# Location of the plugin header file for gold.
BINUTILS_INCDIR := @BINUTILS_INCDIR@
-C_INCLUDE_DIRS := @C_INCLUDE_DIRS@
-CXX_INCLUDE_ROOT := @CXX_INCLUDE_ROOT@
-CXX_INCLUDE_ARCH := @CXX_INCLUDE_ARCH@
-CXX_INCLUDE_32BIT_DIR = @CXX_INCLUDE_32BIT_DIR@
-CXX_INCLUDE_64BIT_DIR = @CXX_INCLUDE_64BIT_DIR@
-
# When ENABLE_LLVMC_DYNAMIC is enabled, LLVMC will link libCompilerDriver
# dynamically. This is needed to make dynamic plugins work on some targets
# (Windows).
@@ -344,5 +338,5 @@ NO_MISSING_FIELD_INITIALIZERS = @NO_MISSING_FIELD_INITIALIZERS@
NO_VARIADIC_MACROS = @NO_VARIADIC_MACROS@
# Flags supported by the linker.
-# bfd ld / gold -retain-symbols-file file
-HAVE_LINK_RETAIN_SYMBOLS_FILE = @HAVE_LINK_RETAIN_SYMBOLS_FILE@
+# bfd ld / gold --version-script=file
+HAVE_LINK_VERSION_SCRIPT = @HAVE_LINK_VERSION_SCRIPT@
diff --git a/contrib/llvm/Makefile.rules b/contrib/llvm/Makefile.rules
index 4085881..12582f6 100644
--- a/contrib/llvm/Makefile.rules
+++ b/contrib/llvm/Makefile.rules
@@ -42,7 +42,7 @@ VPATH=$(PROJ_SRC_DIR)
# Reset the list of suffixes we know how to build.
#--------------------------------------------------------------------
.SUFFIXES:
-.SUFFIXES: .c .cpp .cc .h .hpp .o .a .bc .td .ps .dot .ll
+.SUFFIXES: .c .cpp .cc .h .hpp .o .a .bc .td .ps .dot .ll .m .mm
.SUFFIXES: $(SHLIBEXT) $(SUFFIXES)
#--------------------------------------------------------------------
@@ -398,12 +398,11 @@ endif
# If DISABLE_ASSERTIONS=1 is specified (make command line or configured),
# then disable assertions by defining the appropriate preprocessor symbols.
-ifdef DISABLE_ASSERTIONS
- # Indicate that assertions are turned off using a minus sign
- BuildMode := $(BuildMode)-Asserts
- CPP.Defines += -DNDEBUG
-else
+ifndef DISABLE_ASSERTIONS
+ BuildMode := $(BuildMode)+Asserts
CPP.Defines += -D_DEBUG
+else
+ CPP.Defines += -DNDEBUG
endif
# If ENABLE_EXPENSIVE_CHECKS=1 is specified (make command line or
@@ -633,7 +632,12 @@ ifdef TOOLNAME
endif
endif
endif
+else
+ifneq ($(DARWIN_MAJVERS),4)
+ LD.Flags += $(RPATH) -Wl,@executable_path/../lib
endif
+endif
+
#----------------------------------------------------------
# Options To Invoke Tools
@@ -807,7 +811,8 @@ SubDirs += $(DIRS)
ifneq ($(PROJ_SRC_ROOT),$(PROJ_OBJ_ROOT))
$(RecursiveTargets)::
$(Verb) for dir in $(DIRS); do \
- if [ ! -f $$dir/Makefile ]; then \
+ if ([ ! -f $$dir/Makefile ] || \
+ command test $$dir/Makefile -ot $(PROJ_SRC_DIR)/$$dir/Makefile ); then \
$(MKDIR) $$dir; \
$(CP) $(PROJ_SRC_DIR)/$$dir/Makefile $$dir/Makefile; \
fi; \
@@ -829,7 +834,8 @@ endif
ifdef EXPERIMENTAL_DIRS
$(RecursiveTargets)::
$(Verb) for dir in $(EXPERIMENTAL_DIRS); do \
- if [ ! -f $$dir/Makefile ]; then \
+ if ([ ! -f $$dir/Makefile ] || \
+ command test $$dir/Makefile -ot $(PROJ_SRC_DIR)/$$dir/Makefile ); then \
$(MKDIR) $$dir; \
$(CP) $(PROJ_SRC_DIR)/$$dir/Makefile $$dir/Makefile; \
fi; \
@@ -863,7 +869,9 @@ unitcheck:: $(addsuffix /.makeunitcheck,$(PARALLEL_DIRS))
ParallelTargets := $(foreach T,$(RecursiveTargets),%/.make$(T))
$(ParallelTargets) :
- $(Verb) if [ ! -f $(@D)/Makefile ]; then \
+ $(Verb) if ([ ! -f $(@D)/Makefile ] || \
+ command test $(@D)/Makefile -ot \
+ $(PROJ_SRC_DIR)/$(@D)/Makefile ); then \
$(MKDIR) $(@D); \
$(CP) $(PROJ_SRC_DIR)/$(@D)/Makefile $(@D)/Makefile; \
fi; \
@@ -882,7 +890,8 @@ ifneq ($(PROJ_SRC_ROOT),$(PROJ_OBJ_ROOT))
$(RecursiveTargets)::
$(Verb) for dir in $(OPTIONAL_DIRS); do \
if [ -d $(PROJ_SRC_DIR)/$$dir ]; then\
- if [ ! -f $$dir/Makefile ]; then \
+ if ([ ! -f $$dir/Makefile ] || \
+ command test $$dir/Makefile -ot $(PROJ_SRC_DIR)/$$dir/Makefile ); then \
$(MKDIR) $$dir; \
$(CP) $(PROJ_SRC_DIR)/$$dir/Makefile $$dir/Makefile; \
fi; \
@@ -930,7 +939,7 @@ endif
endif
###############################################################################
-# Set up variables for building libararies
+# Set up variables for building libraries
###############################################################################
#---------------------------------------------------------
@@ -986,12 +995,25 @@ ifeq ($(HOST_OS),Darwin)
# Darwin convention prefixes symbols with underscores.
NativeExportsFile := $(ObjDir)/$(notdir $(EXPORTED_SYMBOL_FILE)).sed
$(NativeExportsFile): $(EXPORTED_SYMBOL_FILE) $(ObjDir)/.dir
- $(Verb) sed -e 's/[[:<:]]/_/' < $< > $@
+ $(Verb) sed -e 's/^/_/' < $< > $@
+clean-local::
+ -$(Verb) $(RM) -f $(NativeExportsFile)
+else
+ifeq ($(HAVE_LINK_VERSION_SCRIPT),1)
+# Gold and BFD ld require a version script rather than a plain list.
+NativeExportsFile := $(ObjDir)/$(notdir $(EXPORTED_SYMBOL_FILE)).map
+$(NativeExportsFile): $(EXPORTED_SYMBOL_FILE) $(ObjDir)/.dir
+ $(Verb) echo "{" > $@
+ $(Verb) grep -q "\<" $< && echo " global:" >> $@ || :
+ $(Verb) sed -e 's/$$/;/' -e 's/^/ /' < $< >> $@
+ $(Verb) echo " local: *;" >> $@
+ $(Verb) echo "};" >> $@
clean-local::
-$(Verb) $(RM) -f $(NativeExportsFile)
else
NativeExportsFile := $(EXPORTED_SYMBOL_FILE)
endif
+endif
# Now add the linker command-line options to use the native export file.
@@ -1000,8 +1022,8 @@ LLVMLibsOptions += -Wl,-exported_symbols_list,$(NativeExportsFile)
endif
# gold, bfd ld, etc.
-ifeq ($(HAVE_LINK_RETAIN_SYMBOLS_FILE),1)
-LLVMLibsOptions += -Wl,-retain-symbols-file,$(NativeExportsFile)
+ifeq ($(HAVE_LINK_VERSION_SCRIPT),1)
+LLVMLibsOptions += -Wl,--version-script,$(NativeExportsFile)
endif
endif
@@ -1113,7 +1135,7 @@ $(LibName.SO): $(ObjectsO) $(ProjLibsPaths) $(LLVMLibsPaths) $(LibDir)/.dir
$(ProjLibsOptions) $(LLVMLibsOptions) $(LIBS)
else
$(LibName.SO): $(ObjectsO) $(LibDir)/.dir
- $(Echo) Linking $(BuildMode) Shared Library $(LIBRARYNAME)$(SHLIBEXT)
+ $(Echo) Linking $(BuildMode) Shared Library $(basename $@)
$(Verb) $(Link) $(SharedLinkOptions) -o $@ $(ObjectsO)
endif
@@ -1425,6 +1447,11 @@ $(ObjDir)/%.o: %.cpp $(ObjDir)/.dir $(BUILT_SOURCES) $(PROJ_SRC_DIR)/Makefile
$(Verb) if $(Compile.CXX) $(DEPEND_OPTIONS) $< -o $(ObjDir)/$*.o ; \
$(DEPEND_MOVEFILE)
+$(ObjDir)/%.o: %.mm $(ObjDir)/.dir $(BUILT_SOURCES) $(PROJ_SRC_DIR)/Makefile
+ $(Echo) "Compiling $*.mm for $(BuildMode) build" $(PIC_FLAG)
+ $(Verb) if $(Compile.CXX) $(DEPEND_OPTIONS) $< -o $(ObjDir)/$*.o ; \
+ $(DEPEND_MOVEFILE)
+
$(ObjDir)/%.o: %.cc $(ObjDir)/.dir $(BUILT_SOURCES) $(PROJ_SRC_DIR)/Makefile
$(Echo) "Compiling $*.cc for $(BuildMode) build" $(PIC_FLAG)
$(Verb) if $(Compile.CXX) $(DEPEND_OPTIONS) $< -o $(ObjDir)/$*.o ; \
@@ -1435,6 +1462,11 @@ $(ObjDir)/%.o: %.c $(ObjDir)/.dir $(BUILT_SOURCES) $(PROJ_SRC_DIR)/Makefile
$(Verb) if $(Compile.C) $(DEPEND_OPTIONS) $< -o $(ObjDir)/$*.o ; \
$(DEPEND_MOVEFILE)
+$(ObjDir)/%.o: %.m $(ObjDir)/.dir $(BUILT_SOURCES) $(PROJ_SRC_DIR)/Makefile
+ $(Echo) "Compiling $*.m for $(BuildMode) build" $(PIC_FLAG)
+ $(Verb) if $(Compile.C) $(DEPEND_OPTIONS) $< -o $(ObjDir)/$*.o ; \
+ $(DEPEND_MOVEFILE)
+
#---------------------------------------------------------
# Create .bc files in the ObjDir directory from .cpp .cc and .c files...
#---------------------------------------------------------
@@ -1453,6 +1485,12 @@ $(ObjDir)/%.ll: %.cpp $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCXX)
$< -o $(ObjDir)/$*.ll -S -emit-llvm ; \
$(BC_DEPEND_MOVEFILE)
+$(ObjDir)/%.ll: %.mm $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCXX)
+ $(Echo) "Compiling $*.mm for $(BuildMode) build (bytecode)"
+ $(Verb) if $(BCCompile.CXX) $(BC_DEPEND_OPTIONS) \
+ $< -o $(ObjDir)/$*.ll -S -emit-llvm ; \
+ $(BC_DEPEND_MOVEFILE)
+
$(ObjDir)/%.ll: %.cc $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCXX)
$(Echo) "Compiling $*.cc for $(BuildMode) build (bytecode)"
$(Verb) if $(BCCompile.CXX) $(BC_DEPEND_OPTIONS) \
@@ -1465,6 +1503,12 @@ $(ObjDir)/%.ll: %.c $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCC)
$< -o $(ObjDir)/$*.ll -S -emit-llvm ; \
$(BC_DEPEND_MOVEFILE)
+$(ObjDir)/%.ll: %.m $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCC)
+ $(Echo) "Compiling $*.m for $(BuildMode) build (bytecode)"
+ $(Verb) if $(BCCompile.C) $(BC_DEPEND_OPTIONS) \
+ $< -o $(ObjDir)/$*.ll -S -emit-llvm ; \
+ $(BC_DEPEND_MOVEFILE)
+
# Provide alternate rule sets if dependencies are disabled
else
@@ -1472,6 +1516,10 @@ $(ObjDir)/%.o: %.cpp $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.cpp for $(BuildMode) build" $(PIC_FLAG)
$(Compile.CXX) $< -o $@
+$(ObjDir)/%.o: %.mm $(ObjDir)/.dir $(BUILT_SOURCES)
+ $(Echo) "Compiling $*.mm for $(BuildMode) build" $(PIC_FLAG)
+ $(Compile.CXX) $< -o $@
+
$(ObjDir)/%.o: %.cc $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.cc for $(BuildMode) build" $(PIC_FLAG)
$(Compile.CXX) $< -o $@
@@ -1480,10 +1528,18 @@ $(ObjDir)/%.o: %.c $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.c for $(BuildMode) build" $(PIC_FLAG)
$(Compile.C) $< -o $@
+$(ObjDir)/%.o: %.m $(ObjDir)/.dir $(BUILT_SOURCES)
+ $(Echo) "Compiling $*.m for $(BuildMode) build" $(PIC_FLAG)
+ $(Compile.C) $< -o $@
+
$(ObjDir)/%.ll: %.cpp $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCXX)
$(Echo) "Compiling $*.cpp for $(BuildMode) build (bytecode)"
$(BCCompile.CXX) $< -o $@ -S -emit-llvm
+$(ObjDir)/%.ll: %.mm $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCXX)
+ $(Echo) "Compiling $*.mm for $(BuildMode) build (bytecode)"
+ $(BCCompile.CXX) $< -o $@ -S -emit-llvm
+
$(ObjDir)/%.ll: %.cc $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCXX)
$(Echo) "Compiling $*.cc for $(BuildMode) build (bytecode)"
$(BCCompile.CXX) $< -o $@ -S -emit-llvm
@@ -1492,6 +1548,10 @@ $(ObjDir)/%.ll: %.c $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCC)
$(Echo) "Compiling $*.c for $(BuildMode) build (bytecode)"
$(BCCompile.C) $< -o $@ -S -emit-llvm
+$(ObjDir)/%.ll: %.m $(ObjDir)/.dir $(BUILT_SOURCES) $(LLVMCC)
+ $(Echo) "Compiling $*.m for $(BuildMode) build (bytecode)"
+ $(BCCompile.C) $< -o $@ -S -emit-llvm
+
endif
@@ -1500,6 +1560,10 @@ $(BuildMode)/%.ii: %.cpp $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.cpp for $(BuildMode) build to .ii file"
$(Verb) $(Preprocess.CXX) $< -o $@
+$(BuildMode)/%.ii: %.mm $(ObjDir)/.dir $(BUILT_SOURCES)
+ $(Echo) "Compiling $*.mm for $(BuildMode) build to .ii file"
+ $(Verb) $(Preprocess.CXX) $< -o $@
+
$(BuildMode)/%.ii: %.cc $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.cc for $(BuildMode) build to .ii file"
$(Verb) $(Preprocess.CXX) $< -o $@
@@ -1508,11 +1572,19 @@ $(BuildMode)/%.i: %.c $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.c for $(BuildMode) build to .i file"
$(Verb) $(Preprocess.C) $< -o $@
+$(BuildMode)/%.i: %.m $(ObjDir)/.dir $(BUILT_SOURCES)
+ $(Echo) "Compiling $*.m for $(BuildMode) build to .i file"
+ $(Verb) $(Preprocess.C) $< -o $@
+
$(ObjDir)/%.s: %.cpp $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.cpp to asm for $(BuildMode) build" $(PIC_FLAG)
$(Compile.CXX) $< -o $@ -S
+$(ObjDir)/%.s: %.mm $(ObjDir)/.dir $(BUILT_SOURCES)
+ $(Echo) "Compiling $*.mm to asm for $(BuildMode) build" $(PIC_FLAG)
+ $(Compile.CXX) $< -o $@ -S
+
$(ObjDir)/%.s: %.cc $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.cc to asm for $(BuildMode) build" $(PIC_FLAG)
$(Compile.CXX) $< -o $@ -S
@@ -1521,6 +1593,10 @@ $(ObjDir)/%.s: %.c $(ObjDir)/.dir $(BUILT_SOURCES)
$(Echo) "Compiling $*.c to asm for $(BuildMode) build" $(PIC_FLAG)
$(Compile.C) $< -o $@ -S
+$(ObjDir)/%.s: %.m $(ObjDir)/.dir $(BUILT_SOURCES)
+ $(Echo) "Compiling $*.m to asm for $(BuildMode) build" $(PIC_FLAG)
+ $(Compile.C) $< -o $@ -S
+
# make the C and C++ compilers strip debug info out of bytecode libraries.
ifdef DEBUG_RUNTIME
@@ -1733,7 +1809,7 @@ ifndef DISABLE_AUTO_DEPENDENCIES
ifndef IS_CLEANING_TARGET
# Get the list of dependency files
-DependSourceFiles := $(basename $(filter %.cpp %.c %.cc, $(Sources)))
+DependSourceFiles := $(basename $(filter %.cpp %.c %.cc %.m %.mm, $(Sources)))
DependFiles := $(DependSourceFiles:%=$(PROJ_OBJ_DIR)/$(BuildMode)/%.d)
# Include bitcode dependency files if using bitcode libraries
diff --git a/contrib/llvm/autoconf/configure.ac b/contrib/llvm/autoconf/configure.ac
index 8487d94..be320cf 100644
--- a/contrib/llvm/autoconf/configure.ac
+++ b/contrib/llvm/autoconf/configure.ac
@@ -1039,8 +1039,8 @@ AC_LINK_USE_R
dnl Determine whether the linker supports the -export-dynamic option.
AC_LINK_EXPORT_DYNAMIC
-dnl Determine whether the linker supports the -retain-symbols-file option.
-AC_LINK_RETAIN_SYMBOLS_FILE
+dnl Determine whether the linker supports the --version-script option.
+AC_LINK_VERSION_SCRIPT
dnl Check for libtool and the library that has dlopen function (which must come
dnl before the AC_PROG_LIBTOOL check in order to enable dlopening libraries with
@@ -1284,6 +1284,9 @@ if test "$llvm_cv_enable_libffi" = "yes" ; then
AC_CHECK_HEADERS([ffi.h ffi/ffi.h])
fi
+dnl Try to find Darwin specific crash reporting library.
+AC_CHECK_HEADERS([CrashReporterClient.h])
+
dnl===-----------------------------------------------------------------------===
dnl===
dnl=== SECTION 7: Check for types and structures
diff --git a/contrib/llvm/autoconf/m4/link_options.m4 b/contrib/llvm/autoconf/m4/link_options.m4
index 697abab..b48710c 100644
--- a/contrib/llvm/autoconf/m4/link_options.m4
+++ b/contrib/llvm/autoconf/m4/link_options.m4
@@ -40,14 +40,14 @@ if test "$llvm_cv_link_use_export_dynamic" = yes ; then
])
#
-# Determine if the system can handle the -retain-symbols-file option being
+# Determine if the system can handle the --version-script option being
# passed to the linker.
#
# This macro is specific to LLVM.
#
-AC_DEFUN([AC_LINK_RETAIN_SYMBOLS_FILE],
-[AC_CACHE_CHECK([for compiler -Wl,-retain-symbols-file option],
- [llvm_cv_link_use_retain_symbols_file],
+AC_DEFUN([AC_LINK_VERSION_SCRIPT],
+[AC_CACHE_CHECK([for compiler -Wl,--version-script option],
+ [llvm_cv_link_use_version_script],
[ AC_LANG_PUSH([C])
oldcflags="$CFLAGS"
@@ -67,18 +67,21 @@ AC_DEFUN([AC_LINK_RETAIN_SYMBOLS_FILE],
(umask 077 && mkdir "$tmp")
} || exit $?
- echo "main" > "$tmp/exports"
+ echo "{" > "$tmp/export.map"
+ echo " global: main;" >> "$tmp/export.map"
+ echo " local: *;" >> "$tmp/export.map"
+ echo "};" >> "$tmp/export.map"
- CFLAGS="$CFLAGS -Wl,-retain-symbols-file=$tmp/exports"
+ CFLAGS="$CFLAGS -Wl,--version-script=$tmp/export.map"
AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],
- [llvm_cv_link_use_retain_symbols_file=yes],[llvm_cv_link_use_retain_symbols_file=no])
- rm "$tmp/exports"
+ [llvm_cv_link_use_version_script=yes],[llvm_cv_link_use_version_script=no])
+ rm "$tmp/export.map"
rmdir "$tmp"
CFLAGS="$oldcflags"
AC_LANG_POP([C])
])
-if test "$llvm_cv_link_use_retain_symbols_file" = yes ; then
- AC_SUBST(HAVE_LINK_RETAIN_SYMBOLS_FILE,1)
+if test "$llvm_cv_link_use_version_script" = yes ; then
+ AC_SUBST(HAVE_LINK_VERSION_SCRIPT,1)
fi
])
diff --git a/contrib/llvm/bindings/ada/llvm/llvm.ads b/contrib/llvm/bindings/ada/llvm/llvm.ads
index d9820f1..ce74e67 100644
--- a/contrib/llvm/bindings/ada/llvm/llvm.ads
+++ b/contrib/llvm/bindings/ada/llvm/llvm.ads
@@ -316,7 +316,8 @@ package llvm is
LLVMExternalWeakLinkage,
LLVMGhostLinkage,
LLVMCommonLinkage,
- LLVMLinkerPrivateLinkage);
+ LLVMLinkerPrivateLinkage,
+ LLVMLinkerPrivateWeakLinkage);
for LLVMLinkage use
(LLVMExternalLinkage => 0,
@@ -333,7 +334,8 @@ package llvm is
LLVMExternalWeakLinkage => 11,
LLVMGhostLinkage => 12,
LLVMCommonLinkage => 13,
- LLVMLinkerPrivateLinkage => 14);
+ LLVMLinkerPrivateLinkage => 14,
+ LLVMLinkerPrivateWeakLinkage => 15);
pragma Convention (C, LLVMLinkage);
diff --git a/contrib/llvm/configure b/contrib/llvm/configure
index 755746f..dc1b5b3 100755
--- a/contrib/llvm/configure
+++ b/contrib/llvm/configure
@@ -752,7 +752,7 @@ OCAMLOPT
OCAMLDEP
OCAMLDOC
GAS
-HAVE_LINK_RETAIN_SYMBOLS_FILE
+HAVE_LINK_VERSION_SCRIPT
INSTALL_LTDL_TRUE
INSTALL_LTDL_FALSE
CONVENIENCE_LTDL_TRUE
@@ -8905,9 +8905,9 @@ _ACEOF
fi
-{ echo "$as_me:$LINENO: checking for compiler -Wl,-retain-symbols-file option" >&5
-echo $ECHO_N "checking for compiler -Wl,-retain-symbols-file option... $ECHO_C" >&6; }
-if test "${llvm_cv_link_use_retain_symbols_file+set}" = set; then
+{ echo "$as_me:$LINENO: checking for compiler -Wl,--version-script option" >&5
+echo $ECHO_N "checking for compiler -Wl,--version-script option... $ECHO_C" >&6; }
+if test "${llvm_cv_link_use_version_script+set}" = set; then
echo $ECHO_N "(cached) $ECHO_C" >&6
else
ac_ext=c
@@ -8934,9 +8934,12 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
(umask 077 && mkdir "$tmp")
} || exit $?
- echo "main" > "$tmp/exports"
+ echo "{" > "$tmp/export.map"
+ echo " global: main;" >> "$tmp/export.map"
+ echo " local: *;" >> "$tmp/export.map"
+ echo "};" >> "$tmp/export.map"
- CFLAGS="$CFLAGS -Wl,-retain-symbols-file=$tmp/exports"
+ CFLAGS="$CFLAGS -Wl,--version-script=$tmp/export.map"
cat >conftest.$ac_ext <<_ACEOF
/* confdefs.h. */
_ACEOF
@@ -8986,17 +8989,17 @@ eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
ac_status=$?
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); }; }; then
- llvm_cv_link_use_retain_symbols_file=yes
+ llvm_cv_link_use_version_script=yes
else
echo "$as_me: failed program was:" >&5
sed 's/^/| /' conftest.$ac_ext >&5
- llvm_cv_link_use_retain_symbols_file=no
+ llvm_cv_link_use_version_script=no
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
- rm "$tmp/exports"
+ rm "$tmp/export.map"
rmdir "$tmp"
CFLAGS="$oldcflags"
ac_ext=c
@@ -9007,10 +9010,10 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
fi
-{ echo "$as_me:$LINENO: result: $llvm_cv_link_use_retain_symbols_file" >&5
-echo "${ECHO_T}$llvm_cv_link_use_retain_symbols_file" >&6; }
-if test "$llvm_cv_link_use_retain_symbols_file" = yes ; then
- HAVE_LINK_RETAIN_SYMBOLS_FILE=1
+{ echo "$as_me:$LINENO: result: $llvm_cv_link_use_version_script" >&5
+echo "${ECHO_T}$llvm_cv_link_use_version_script" >&6; }
+if test "$llvm_cv_link_use_version_script" = yes ; then
+ HAVE_LINK_VERSION_SCRIPT=1
fi
@@ -11384,7 +11387,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<EOF
-#line 11387 "configure"
+#line 11390 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -16947,6 +16950,176 @@ done
fi
+for ac_header in CrashReporterClient.h
+do
+as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ { echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+fi
+ac_res=`eval echo '${'$as_ac_Header'}'`
+ { echo "$as_me:$LINENO: result: $ac_res" >&5
+echo "${ECHO_T}$ac_res" >&6; }
+else
+ # Is the header compilable?
+{ echo "$as_me:$LINENO: checking $ac_header usability" >&5
+echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_compile") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } &&
+ { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; } &&
+ { ac_try='test -s conftest.$ac_objext'
+ { (case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_try") 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); }; }; then
+ ac_header_compiler=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_compiler=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+echo "${ECHO_T}$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ echo "$as_me:$LINENO: checking $ac_header presence" >&5
+echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h. */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h. */
+#include <$ac_header>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+ *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+ *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+ (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+ ac_status=$?
+ grep -v '^ *+' conftest.er1 >conftest.err
+ rm -f conftest.er1
+ cat conftest.err >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } >/dev/null; then
+ if test -s conftest.err; then
+ ac_cpp_err=$ac_c_preproc_warn_flag
+ ac_cpp_err=$ac_cpp_err$ac_c_werror_flag
+ else
+ ac_cpp_err=
+ fi
+else
+ ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+ ac_header_preproc=yes
+else
+ echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+ ac_header_preproc=no
+fi
+
+rm -f conftest.err conftest.$ac_ext
+{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+echo "${ECHO_T}$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
+ yes:no: )
+ { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
+ ac_header_preproc=yes
+ ;;
+ no:yes:* )
+ { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5
+echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
+echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5
+echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+ { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
+echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
+ ( cat <<\_ASBOX
+## ----------------------------------- ##
+## Report this to llvmbugs@cs.uiuc.edu ##
+## ----------------------------------- ##
+_ASBOX
+ ) | sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+{ echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+ echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+ eval "$as_ac_Header=\$ac_header_preproc"
+fi
+ac_res=`eval echo '${'$as_ac_Header'}'`
+ { echo "$as_me:$LINENO: result: $ac_res" >&5
+echo "${ECHO_T}$ac_res" >&6; }
+
+fi
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+ cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+
{ echo "$as_me:$LINENO: checking for HUGE_VAL sanity" >&5
@@ -21297,7 +21470,7 @@ OCAMLOPT!$OCAMLOPT$ac_delim
OCAMLDEP!$OCAMLDEP$ac_delim
OCAMLDOC!$OCAMLDOC$ac_delim
GAS!$GAS$ac_delim
-HAVE_LINK_RETAIN_SYMBOLS_FILE!$HAVE_LINK_RETAIN_SYMBOLS_FILE$ac_delim
+HAVE_LINK_VERSION_SCRIPT!$HAVE_LINK_VERSION_SCRIPT$ac_delim
INSTALL_LTDL_TRUE!$INSTALL_LTDL_TRUE$ac_delim
INSTALL_LTDL_FALSE!$INSTALL_LTDL_FALSE$ac_delim
CONVENIENCE_LTDL_TRUE!$CONVENIENCE_LTDL_TRUE$ac_delim
diff --git a/contrib/llvm/include/llvm-c/Core.h b/contrib/llvm/include/llvm-c/Core.h
index d665c89..117f2d6 100644
--- a/contrib/llvm/include/llvm-c/Core.h
+++ b/contrib/llvm/include/llvm-c/Core.h
@@ -226,7 +226,8 @@ typedef enum {
LLVMExternalWeakLinkage,/**< ExternalWeak linkage description */
LLVMGhostLinkage, /**< Obsolete */
LLVMCommonLinkage, /**< Tentative definitions */
- LLVMLinkerPrivateLinkage /**< Like Private, but linker removes. */
+ LLVMLinkerPrivateLinkage, /**< Like Private, but linker removes. */
+ LLVMLinkerPrivateWeakLinkage /**< Like LinkerPrivate, but is weak. */
} LLVMLinkage;
typedef enum {
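
For reference, the new enumerator is consumed through the ordinary C API setter. A minimal sketch, assuming the standard llvm-c calls LLVMAddGlobal, LLVMInt32Type and LLVMSetLinkage (declared elsewhere in llvm-c/Core.h); the helper name is illustrative:

    #include "llvm-c/Core.h"

    /* Tag a fresh global with the new linkage; M is created by the caller. */
    static void mark_linker_private_weak(LLVMModuleRef M) {
      LLVMValueRef G = LLVMAddGlobal(M, LLVMInt32Type(), "g");
      LLVMSetLinkage(G, LLVMLinkerPrivateWeakLinkage);
    }
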
diff --git a/contrib/llvm/include/llvm-c/Target.h b/contrib/llvm/include/llvm-c/Target.h
index 2948fc7..b1b9f36 100644
--- a/contrib/llvm/include/llvm-c/Target.h
+++ b/contrib/llvm/include/llvm-c/Target.h
@@ -32,7 +32,8 @@ typedef struct LLVMOpaqueTargetData *LLVMTargetDataRef;
typedef struct LLVMStructLayout *LLVMStructLayoutRef;
/* Declare all of the target-initialization functions that are available. */
-#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##TargetInfo(void);
+#define LLVM_TARGET(TargetName) \
+ void LLVMInitialize##TargetName##TargetInfo(void);
#include "llvm/Config/Targets.def"
#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
diff --git a/contrib/llvm/include/llvm-c/lto.h b/contrib/llvm/include/llvm-c/lto.h
index 7cafcb2..93f3760 100644
--- a/contrib/llvm/include/llvm-c/lto.h
+++ b/contrib/llvm/include/llvm-c/lto.h
@@ -102,7 +102,7 @@ lto_module_is_object_file_in_memory(const void* mem, size_t length);
*/
extern bool
lto_module_is_object_file_in_memory_for_target(const void* mem, size_t length,
- const char* target_triple_prefix);
+ const char* target_triple_prefix);
/**
diff --git a/contrib/llvm/include/llvm/ADT/APFloat.h b/contrib/llvm/include/llvm/ADT/APFloat.h
index 3cccc81..dfe4e0f 100644
--- a/contrib/llvm/include/llvm/ADT/APFloat.h
+++ b/contrib/llvm/include/llvm/ADT/APFloat.h
@@ -179,7 +179,7 @@ namespace llvm {
// Constructors.
APFloat(const fltSemantics &); // Default construct to 0.0
- APFloat(const fltSemantics &, const StringRef &);
+ APFloat(const fltSemantics &, StringRef);
APFloat(const fltSemantics &, integerPart);
APFloat(const fltSemantics &, fltCategory, bool negative);
APFloat(const fltSemantics &, uninitializedTag);
@@ -282,7 +282,7 @@ namespace llvm {
bool, roundingMode);
opStatus convertFromZeroExtendedInteger(const integerPart *, unsigned int,
bool, roundingMode);
- opStatus convertFromString(const StringRef&, roundingMode);
+ opStatus convertFromString(StringRef, roundingMode);
APInt bitcastToAPInt() const;
double convertToDouble() const;
float convertToFloat() const;
@@ -386,8 +386,8 @@ namespace llvm {
roundingMode, bool *) const;
opStatus convertFromUnsignedParts(const integerPart *, unsigned int,
roundingMode);
- opStatus convertFromHexadecimalString(const StringRef&, roundingMode);
- opStatus convertFromDecimalString (const StringRef&, roundingMode);
+ opStatus convertFromHexadecimalString(StringRef, roundingMode);
+ opStatus convertFromDecimalString(StringRef, roundingMode);
char *convertNormalToHexString(char *, unsigned int, bool,
roundingMode) const;
opStatus roundSignificandWithExponent(const integerPart *, unsigned int,
diff --git a/contrib/llvm/include/llvm/ADT/APInt.h b/contrib/llvm/include/llvm/ADT/APInt.h
index ec76fbd..59e023b 100644
--- a/contrib/llvm/include/llvm/ADT/APInt.h
+++ b/contrib/llvm/include/llvm/ADT/APInt.h
@@ -162,7 +162,7 @@ class APInt {
///
/// @param radix 2, 8, 10, or 16
/// @brief Convert a char array into an APInt
- void fromString(unsigned numBits, const StringRef &str, uint8_t radix);
+ void fromString(unsigned numBits, StringRef str, uint8_t radix);
/// This is used by the toString method to divide by the radix. It simply
/// provides a more convenient form of divide for internal use since KnuthDiv
@@ -248,7 +248,7 @@ public:
/// @param str the string to be interpreted
/// @param radix the radix to use for the conversion
/// @brief Construct an APInt from a string representation.
- APInt(unsigned numBits, const StringRef &str, uint8_t radix);
+ APInt(unsigned numBits, StringRef str, uint8_t radix);
/// Simply makes *this a copy of that.
/// @brief Copy Constructor.
@@ -1153,7 +1153,7 @@ public:
/// This method determines how many bits are required to hold the APInt
/// equivalent of the string given by \arg str.
/// @brief Get bits required for string value.
- static unsigned getBitsNeeded(const StringRef& str, uint8_t radix);
+ static unsigned getBitsNeeded(StringRef str, uint8_t radix);
/// countLeadingZeros - This function is an APInt version of the
/// countLeadingZeros_{32,64} functions in MathExtras.h. It counts the number
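
The switch from const StringRef& to plain StringRef is safe because StringRef is only a pointer/length pair; passing it by value costs the same as passing a reference and drops one indirection. A minimal usage sketch against the signatures above (helper name illustrative):

    #include "llvm/ADT/APInt.h"
    #include "llvm/ADT/StringRef.h"

    static void apint_from_string_example() {
      // String literals convert implicitly to StringRef.
      llvm::APInt A(16, llvm::StringRef("1234"), 10);       // radix 10
      unsigned Bits = llvm::APInt::getBitsNeeded("ff", 16); // radix 16
      (void)A; (void)Bits;
    }
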
diff --git a/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h b/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h
new file mode 100644
index 0000000..99ed15c
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h
@@ -0,0 +1,75 @@
+//===--- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DAGDELTAALGORITHM_H
+#define LLVM_ADT_DAGDELTAALGORITHM_H
+
+#include <vector>
+#include <set>
+
+namespace llvm {
+
+/// DAGDeltaAlgorithm - Implements a "delta debugging" algorithm for minimizing
+/// directed acyclic graphs using a predicate function.
+///
+/// The result of the algorithm is a subset of the input change set which is
+/// guaranteed to satisfy the predicate, assuming that the input set did. For
+/// well-formed predicates, the result set is guaranteed to be such that
+/// removing any single element not required by the dependencies on the other
+/// elements would falsify the predicate.
+///
+/// The DAG should be used to represent dependencies in the changes which are
+/// likely to hold across the predicate function. That is, for a particular
+/// changeset S and predicate P:
+///
+/// P(S) => P(S union pred(S))
+///
+/// The minimization algorithm uses this dependency information to attempt to
+/// eagerly prune large subsets of changes. As with \see DeltaAlgorithm, the DAG
+/// is not required to satisfy this property, but the algorithm will run
+/// substantially fewer tests with appropriate dependencies. \see DeltaAlgorithm
+/// for more information on the properties which the predicate function itself
+/// should satisfy.
+class DAGDeltaAlgorithm {
+public:
+ typedef unsigned change_ty;
+ typedef std::pair<change_ty, change_ty> edge_ty;
+
+ // FIXME: Use a decent data structure.
+ typedef std::set<change_ty> changeset_ty;
+ typedef std::vector<changeset_ty> changesetlist_ty;
+
+public:
+ virtual ~DAGDeltaAlgorithm() {}
+
+ /// Run - Minimize the DAG formed by the \arg Changes vertices and the \arg
+ /// Dependencies edges by executing \see ExecuteOneTest() on subsets of
+ /// changes and returning the smallest set which still satisfies the test
+ /// predicate and the input \arg Dependencies.
+ ///
+ /// \param Changes The list of changes.
+ ///
+ /// \param Dependencies The list of dependencies amongst changes. For each
+ /// (x,y) in \arg Dependencies, both x and y must be in \arg Changes. The
+ /// minimization algorithm guarantees that for each tested changed set S, x
+ /// \in S implies y \in S. It is an error to have cyclic dependencies.
+ changeset_ty Run(const changeset_ty &Changes,
+ const std::vector<edge_ty> &Dependencies);
+
+ /// UpdatedSearchState - Callback used when the search state changes.
+ virtual void UpdatedSearchState(const changeset_ty &Changes,
+ const changesetlist_ty &Sets,
+ const changeset_ty &Required) {}
+
+ /// ExecuteOneTest - Execute a single test predicate on the change set \arg S.
+ virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
+};
+
+} // end namespace llvm
+
+#endif
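
A minimal sketch of how this interface is driven, using only the members declared above; the subclass and its predicate are hypothetical (a real client would run an external test from ExecuteOneTest):

    #include "llvm/ADT/DAGDeltaAlgorithm.h"
    #include <utility>
    #include <vector>

    namespace {
    class ToyDelta : public llvm::DAGDeltaAlgorithm {
    public:
      // Pretend the behaviour of interest needs changes 1 and 2 to survive.
      virtual bool ExecuteOneTest(const changeset_ty &S) {
        return S.count(1) && S.count(2);
      }
    };
    } // end anonymous namespace

    void toy_delta_example() {
      ToyDelta D;
      llvm::DAGDeltaAlgorithm::changeset_ty Changes;
      for (unsigned i = 1; i <= 4; ++i)
        Changes.insert(i);
      std::vector<llvm::DAGDeltaAlgorithm::edge_ty> Deps;
      Deps.push_back(std::make_pair(2u, 1u)); // 2 in S implies 1 in S
      llvm::DAGDeltaAlgorithm::changeset_ty Min = D.Run(Changes, Deps);
      // Expect Min == {1, 2}: dropping either element falsifies the predicate.
      (void)Min;
    }
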
diff --git a/contrib/llvm/include/llvm/ADT/DenseMap.h b/contrib/llvm/include/llvm/ADT/DenseMap.h
index 5c99473..c53e255 100644
--- a/contrib/llvm/include/llvm/ADT/DenseMap.h
+++ b/contrib/llvm/include/llvm/ADT/DenseMap.h
@@ -22,6 +22,7 @@
#include <new>
#include <utility>
#include <cassert>
+#include <cstddef>
#include <cstring>
namespace llvm {
diff --git a/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h b/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h
index 91a1429..07a5edf 100644
--- a/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h
+++ b/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h
@@ -169,7 +169,7 @@ public:
/// getOrInsertLeaderValue - Return the leader for the specified value that is
/// in the set. If the member is not in the set, it is inserted, then
/// returned.
- const ElemTy &getOrInsertLeaderValue(const ElemTy &V) const {
+ const ElemTy &getOrInsertLeaderValue(const ElemTy &V) {
member_iterator MI = findLeader(insert(V));
assert(MI != member_end() && "Value is not in the set!");
return *MI;
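
Dropping the stray const is the right fix: the method may insert V before returning its leader, so it genuinely mutates the container. A small sketch (helper name illustrative; unionSets is the class's existing merge operation):

    #include "llvm/ADT/EquivalenceClasses.h"

    static void leader_example() {
      llvm::EquivalenceClasses<int> EC;
      EC.unionSets(1, 2);  // 1 and 2 now share one leader
      // 3 is not present yet, so this inserts a singleton class first;
      // that is exactly why the method cannot be const.
      const int &L = EC.getOrInsertLeaderValue(3);
      (void)L;
    }
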
diff --git a/contrib/llvm/include/llvm/ADT/FoldingSet.h b/contrib/llvm/include/llvm/ADT/FoldingSet.h
index e8979bb..fc8490a 100644
--- a/contrib/llvm/include/llvm/ADT/FoldingSet.h
+++ b/contrib/llvm/include/llvm/ADT/FoldingSet.h
@@ -166,6 +166,14 @@ public:
/// FindNodeOrInsertPos.
void InsertNode(Node *N, void *InsertPos);
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set.
+ void InsertNode(Node *N) {
+ Node *Inserted = GetOrInsertNode(N);
+ (void)Inserted;
+ assert(Inserted == N && "Node already inserted!");
+ }
+
/// size - Returns the number of nodes in the folding set.
unsigned size() const { return NumNodes; }
@@ -196,6 +204,10 @@ protected:
template<typename T> struct FoldingSetTrait {
static inline void Profile(const T& X, FoldingSetNodeID& ID) { X.Profile(ID);}
static inline void Profile(T& X, FoldingSetNodeID& ID) { X.Profile(ID); }
+ template <typename Ctx>
+ static inline void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
+ X.Profile(ID, Context);
+ }
};
//===--------------------------------------------------------------------===//
@@ -322,6 +334,77 @@ public:
};
//===----------------------------------------------------------------------===//
+/// ContextualFoldingSet - This template class is a further refinement
+/// of FoldingSet which provides a context argument when calling
+/// Profile on its nodes. Currently, that argument is fixed at
+/// initialization time.
+///
+/// T must be a subclass of FoldingSetNode and implement a Profile
+/// function with signature
+/// void Profile(llvm::FoldingSetNodeID &, Ctx);
+template <class T, class Ctx>
+class ContextualFoldingSet : public FoldingSetImpl {
+ // Unfortunately, this can't derive from FoldingSet<T> because the
+ // construction vtable for FoldingSet<T> requires
+ // FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
+ // requires a single-argument T::Profile().
+
+private:
+ Ctx Context;
+
+ /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
+ /// way to convert nodes into a unique specifier.
+ virtual void GetNodeProfile(FoldingSetNodeID &ID,
+ FoldingSetImpl::Node *N) const {
+ T *TN = static_cast<T *>(N);
+
+ // We must use explicit template arguments in case Ctx is a
+ // reference type.
+ FoldingSetTrait<T>::template Profile<Ctx>(*TN, ID, Context);
+ }
+
+public:
+ explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
+ : FoldingSetImpl(Log2InitSize), Context(Context)
+ {}
+
+ Ctx getContext() const { return Context; }
+
+
+ typedef FoldingSetIterator<T> iterator;
+ iterator begin() { return iterator(Buckets); }
+ iterator end() { return iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetIterator<const T> const_iterator;
+ const_iterator begin() const { return const_iterator(Buckets); }
+ const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetBucketIterator<T> bucket_iterator;
+
+ bucket_iterator bucket_begin(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
+ }
+
+ bucket_iterator bucket_end(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
+ }
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N'
+ /// and return it instead.
+ T *GetOrInsertNode(Node *N) {
+ return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
+ }
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it
+ /// exists, return it. If not, return the insertion token that will
+ /// make insertion faster.
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+ return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
+ }
+};
+
+//===----------------------------------------------------------------------===//
/// FoldingSetIteratorImpl - This is the common iterator support shared by all
/// folding sets, which knows how to walk the folding set hash table.
class FoldingSetIteratorImpl {
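
A minimal sketch of a node type usable with the new container. WidthNode and its masked profile are hypothetical, but the two-argument Profile is exactly the shape ContextualFoldingSet requires, and it is reached through the templated FoldingSetTrait<T>::Profile added above:

    #include "llvm/ADT/FoldingSet.h"

    struct WidthNode : public llvm::FoldingSetNode {
      unsigned Value;
      explicit WidthNode(unsigned V) : Value(V) {}
      // Two-argument Profile; the context narrows which bits are significant.
      void Profile(llvm::FoldingSetNodeID &ID, unsigned Width) {
        ID.AddInteger(Value & ((1u << Width) - 1));
      }
    };

    void contextual_example() {
      // Every node in this set is profiled with the fixed context '8'.
      llvm::ContextualFoldingSet<WidthNode, unsigned> Set(8);
      WidthNode *N = new WidthNode(42);
      WidthNode *Canonical = Set.GetOrInsertNode(N); // N itself on first insert
      (void)Canonical;
    }
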
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableIntervalMap.h b/contrib/llvm/include/llvm/ADT/ImmutableIntervalMap.h
index f33fb1e..7aa3155 100644
--- a/contrib/llvm/include/llvm/ADT/ImmutableIntervalMap.h
+++ b/contrib/llvm/include/llvm/ADT/ImmutableIntervalMap.h
@@ -125,9 +125,11 @@ private:
key_type_ref KCurrent = ImutInfo::KeyOfValue(this->Value(T));
if (ImutInfo::isLess(K, KCurrent))
- return this->Balance(Add_internal(V, this->Left(T)), this->Value(T), this->Right(T));
+ return this->Balance(Add_internal(V, this->Left(T)), this->Value(T),
+ this->Right(T));
else
- return this->Balance(this->Left(T), this->Value(T), Add_internal(V, this->Right(T)));
+ return this->Balance(this->Left(T), this->Value(T),
+ Add_internal(V, this->Right(T)));
}
// Remove all overlaps from T.
@@ -150,9 +152,11 @@ private:
// If current key does not overlap the inserted key.
if (CurrentK.getStart() > K.getEnd())
- return this->Balance(RemoveOverlap(this->Left(T), K, Changed), this->Value(T), this->Right(T));
+ return this->Balance(RemoveOverlap(this->Left(T), K, Changed),
+ this->Value(T), this->Right(T));
else if (CurrentK.getEnd() < K.getStart())
- return this->Balance(this->Left(T), this->Value(T), RemoveOverlap(this->Right(T), K, Changed));
+ return this->Balance(this->Left(T), this->Value(T),
+ RemoveOverlap(this->Right(T), K, Changed));
// Current key overlaps with the inserted key.
// Remove the current key.
diff --git a/contrib/llvm/include/llvm/ADT/PostOrderIterator.h b/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
index 8315bc9..47e5b2b 100644
--- a/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
+++ b/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
@@ -19,7 +19,6 @@
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <set>
-#include <stack>
#include <vector>
namespace llvm {
@@ -52,21 +51,21 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
// VisitStack - Used to maintain the ordering. Top = current block
// First element is basic block pointer, second is the 'next child' to visit
- std::stack<std::pair<NodeType *, ChildItTy> > VisitStack;
+ std::vector<std::pair<NodeType *, ChildItTy> > VisitStack;
void traverseChild() {
- while (VisitStack.top().second != GT::child_end(VisitStack.top().first)) {
- NodeType *BB = *VisitStack.top().second++;
+ while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
+ NodeType *BB = *VisitStack.back().second++;
if (!this->Visited.count(BB)) { // If the block is not visited...
this->Visited.insert(BB);
- VisitStack.push(std::make_pair(BB, GT::child_begin(BB)));
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
}
}
}
inline po_iterator(NodeType *BB) {
this->Visited.insert(BB);
- VisitStack.push(std::make_pair(BB, GT::child_begin(BB)));
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
inline po_iterator() {} // End is when stack is empty.
@@ -75,7 +74,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
po_iterator_storage<SetType, ExtStorage>(S) {
if(!S.count(BB)) {
this->Visited.insert(BB);
- VisitStack.push(std::make_pair(BB, GT::child_begin(BB)));
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
}
@@ -102,7 +101,7 @@ public:
inline bool operator!=(const _Self& x) const { return !operator==(x); }
inline pointer operator*() const {
- return VisitStack.top().first;
+ return VisitStack.back().first;
}
// This is a nonstandard operator-> that dereferences the pointer an extra
@@ -112,7 +111,7 @@ public:
inline NodeType *operator->() const { return operator*(); }
inline _Self& operator++() { // Preincrement
- VisitStack.pop();
+ VisitStack.pop_back();
if (!VisitStack.empty())
traverseChild();
return *this;
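
Editor's note: the std::stack to std::vector switch preserves LIFO behavior (push_back/pop_back/back stand in for push/pop/top) while dropping the std::deque that std::stack drags in by default. Usage is unchanged; a small sketch, assuming the GraphTraits specializations provided by llvm/Support/CFG.h:

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Function.h"
#include "llvm/Support/CFG.h"
using namespace llvm;

// Visit the blocks of F so that every block is seen after its successors.
void visitPostOrder(Function &F) {
  BasicBlock *Entry = &F.getEntryBlock();
  for (po_iterator<BasicBlock*> I = po_begin(Entry), E = po_end(Entry);
       I != E; ++I)
    (void)*I; // process the current block here
}
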
diff --git a/contrib/llvm/include/llvm/ADT/SetVector.h b/contrib/llvm/include/llvm/ADT/SetVector.h
index fab133a..bf8286c 100644
--- a/contrib/llvm/include/llvm/ADT/SetVector.h
+++ b/contrib/llvm/include/llvm/ADT/SetVector.h
@@ -143,6 +143,14 @@ public:
vector_.pop_back();
}
+ bool operator==(const SetVector &that) const {
+ return vector_ == that.vector_;
+ }
+
+ bool operator!=(const SetVector &that) const {
+ return vector_ != that.vector_;
+ }
+
private:
set_type set_; ///< The set.
vector_type vector_; ///< The vector.
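
Editor's note: the new operators compare vector_ only, so they are order-sensitive: two SetVectors holding the same elements inserted in different orders compare unequal. A quick illustration (values are arbitrary):

#include "llvm/ADT/SetVector.h"

void demoSetVector() {
  llvm::SetVector<int> A, B;
  A.insert(1); A.insert(2);
  B.insert(2); B.insert(1);
  bool Same = (A == B); // false: same elements, different insertion order
  (void)Same;
}
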
diff --git a/contrib/llvm/include/llvm/ADT/SmallPtrSet.h b/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
index ef08125..424bdba 100644
--- a/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
+++ b/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
@@ -46,8 +46,10 @@ class SmallPtrSetIteratorImpl;
class SmallPtrSetImpl {
friend class SmallPtrSetIteratorImpl;
protected:
- /// CurArray - This is the current set of buckets. If it points to
- /// SmallArray, then the set is in 'small mode'.
+ /// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
+ const void **SmallArray;
+ /// CurArray - This is the current set of buckets. If equal to SmallArray,
+ /// then the set is in 'small mode'.
const void **CurArray;
/// CurArraySize - The allocated size of CurArray, always a power of two.
/// Note that CurArray points to an array that has CurArraySize+1 elements in
@@ -57,15 +59,13 @@ protected:
// If small, this is # elts allocated consecutively
unsigned NumElements;
unsigned NumTombstones;
- const void *SmallArray[1]; // Must be last ivar.
// Helper to copy construct a SmallPtrSet.
- SmallPtrSetImpl(const SmallPtrSetImpl& that);
- explicit SmallPtrSetImpl(unsigned SmallSize) {
+ SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl& that);
+ explicit SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize) :
+ SmallArray(SmallStorage), CurArray(SmallStorage), CurArraySize(SmallSize) {
assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
"Initial size must be a power of two!");
- CurArray = &SmallArray[0];
- CurArraySize = SmallSize;
// The end pointer, always valid, is set to a valid element to help the
// iterator.
CurArray[SmallSize] = 0;
@@ -123,7 +123,7 @@ protected:
}
private:
- bool isSmall() const { return CurArray == &SmallArray[0]; }
+ bool isSmall() const { return CurArray == SmallArray; }
unsigned Hash(const void *Ptr) const {
return static_cast<unsigned>(((uintptr_t)Ptr >> 4) & (CurArraySize-1));
@@ -199,29 +199,29 @@ public:
}
};
-/// NextPowerOfTwo - This is a helper template that rounds N up to the next
-/// power of two.
+/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
+/// power of two (which means N itself if N is already a power of two).
template<unsigned N>
-struct NextPowerOfTwo;
+struct RoundUpToPowerOfTwo;
-/// NextPowerOfTwoH - If N is not a power of two, increase it. This is a helper
-/// template used to implement NextPowerOfTwo.
+/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it. This is a
+/// helper template used to implement RoundUpToPowerOfTwo.
template<unsigned N, bool isPowerTwo>
-struct NextPowerOfTwoH {
+struct RoundUpToPowerOfTwoH {
enum { Val = N };
};
template<unsigned N>
-struct NextPowerOfTwoH<N, false> {
+struct RoundUpToPowerOfTwoH<N, false> {
enum {
// We could just use NextVal = N+1, but this converges faster. N|(N-1) sets
// the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
- Val = NextPowerOfTwo<(N|(N-1)) + 1>::Val
+ Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
};
};
template<unsigned N>
-struct NextPowerOfTwo {
- enum { Val = NextPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
+struct RoundUpToPowerOfTwo {
+ enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
};
@@ -232,16 +232,17 @@ struct NextPowerOfTwo {
template<class PtrType, unsigned SmallSize>
class SmallPtrSet : public SmallPtrSetImpl {
// Make sure that SmallSize is a power of two, round up if not.
- enum { SmallSizePowTwo = NextPowerOfTwo<SmallSize>::Val };
- void *SmallArray[SmallSizePowTwo];
+ enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
+ /// SmallStorage - Fixed size storage used in 'small mode'. The extra element
+ /// ensures that the end iterator actually points to valid memory.
+ const void *SmallStorage[SmallSizePowTwo+1];
typedef PointerLikeTypeTraits<PtrType> PtrTraits;
public:
- SmallPtrSet() : SmallPtrSetImpl(NextPowerOfTwo<SmallSizePowTwo>::Val) {}
- SmallPtrSet(const SmallPtrSet &that) : SmallPtrSetImpl(that) {}
+ SmallPtrSet() : SmallPtrSetImpl(SmallStorage, SmallSizePowTwo) {}
+ SmallPtrSet(const SmallPtrSet &that) : SmallPtrSetImpl(SmallStorage, that) {}
template<typename It>
- SmallPtrSet(It I, It E)
- : SmallPtrSetImpl(NextPowerOfTwo<SmallSizePowTwo>::Val) {
+ SmallPtrSet(It I, It E) : SmallPtrSetImpl(SmallStorage, SmallSizePowTwo) {
insert(I, E);
}
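
Editor's note: the rename makes the template's fixed point explicit, a power of two maps to itself and everything else rounds up. The era-appropriate negative-array-size trick can check this at compile time; the names below are illustrative:

#include "llvm/ADT/SmallPtrSet.h"

// Compile-time checks: 8 stays 8, 5 rounds up to 8.
char CheckPow2[llvm::RoundUpToPowerOfTwo<8>::Val == 8 ? 1 : -1];
char CheckRound[llvm::RoundUpToPowerOfTwo<5>::Val == 8 ? 1 : -1];

// A SmallPtrSet<int*, 5> therefore carries 8+1 inline slots: 8 buckets plus
// the extra element that keeps the end iterator pointing at valid memory.
llvm::SmallPtrSet<int*, 5> Pointers;
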
diff --git a/contrib/llvm/include/llvm/ADT/SmallVector.h b/contrib/llvm/include/llvm/ADT/SmallVector.h
index 18c8619..fa61d20 100644
--- a/contrib/llvm/include/llvm/ADT/SmallVector.h
+++ b/contrib/llvm/include/llvm/ADT/SmallVector.h
@@ -17,6 +17,8 @@
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
+#include <cstddef>
+#include <cstdlib>
#include <cstring>
#include <memory>
@@ -70,35 +72,35 @@ protected:
#endif
} FirstEl;
// Space after 'FirstEl' is clobbered, do not add any instance vars after it.
-
+
protected:
SmallVectorBase(size_t Size)
: BeginX(&FirstEl), EndX(&FirstEl), CapacityX((char*)&FirstEl+Size) {}
-
+
/// isSmall - Return true if this is a smallvector which has not had dynamic
/// memory allocated for it.
bool isSmall() const {
return BeginX == static_cast<const void*>(&FirstEl);
}
-
+
/// size_in_bytes - This returns size()*sizeof(T).
size_t size_in_bytes() const {
return size_t((char*)EndX - (char*)BeginX);
}
-
+
/// capacity_in_bytes - This returns capacity()*sizeof(T).
size_t capacity_in_bytes() const {
return size_t((char*)CapacityX - (char*)BeginX);
}
-
+
/// grow_pod - This is an implementation of the grow() method which only works
/// on POD-like datatypes and is out of line to reduce code duplication.
void grow_pod(size_t MinSizeInBytes, size_t TSize);
-
+
public:
bool empty() const { return BeginX == EndX; }
};
-
+
template <typename T>
class SmallVectorTemplateCommon : public SmallVectorBase {
@@ -106,21 +108,21 @@ protected:
void setEnd(T *P) { this->EndX = P; }
public:
SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(Size) {}
-
+
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T value_type;
typedef T *iterator;
typedef const T *const_iterator;
-
+
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
-
+
typedef T &reference;
typedef const T &const_reference;
typedef T *pointer;
typedef const T *const_pointer;
-
+
// forward iterator creation methods.
iterator begin() { return (iterator)this->BeginX; }
const_iterator begin() const { return (const_iterator)this->BeginX; }
@@ -130,7 +132,7 @@ protected:
iterator capacity_ptr() { return (iterator)this->CapacityX; }
const_iterator capacity_ptr() const { return (const_iterator)this->CapacityX;}
public:
-
+
// reverse iterator creation methods.
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
@@ -139,16 +141,16 @@ public:
size_type size() const { return end()-begin(); }
size_type max_size() const { return size_type(-1) / sizeof(T); }
-
+
/// capacity - Return the total number of elements in the currently allocated
/// buffer.
size_t capacity() const { return capacity_ptr() - begin(); }
-
+
/// data - Return a pointer to the vector's buffer, even if empty().
pointer data() { return pointer(begin()); }
/// data - Return a pointer to the vector's buffer, even if empty().
const_pointer data() const { return const_pointer(begin()); }
-
+
reference operator[](unsigned idx) {
assert(begin() + idx < end());
return begin()[idx];
@@ -172,7 +174,7 @@ public:
return end()[-1];
}
};
-
+
/// SmallVectorTemplateBase<isPodLike = false> - This is where we put method
/// implementations that are designed to work with non-POD-like T's.
template <typename T, bool isPodLike>
@@ -186,14 +188,14 @@ public:
E->~T();
}
}
-
+
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(I, E, Dest);
}
-
+
/// grow - double the size of the allocated memory, guaranteeing space for at
/// least one more element or MinSize if specified.
void grow(size_t MinSize = 0);
@@ -207,34 +209,34 @@ void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
size_t NewCapacity = 2*CurCapacity;
if (NewCapacity < MinSize)
NewCapacity = MinSize;
- T *NewElts = static_cast<T*>(operator new(NewCapacity*sizeof(T)));
-
+ T *NewElts = static_cast<T*>(malloc(NewCapacity*sizeof(T)));
+
// Copy the elements over.
this->uninitialized_copy(this->begin(), this->end(), NewElts);
-
+
// Destroy the original elements.
destroy_range(this->begin(), this->end());
-
+
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
- operator delete(this->begin());
-
+ free(this->begin());
+
this->setEnd(NewElts+CurSize);
this->BeginX = NewElts;
this->CapacityX = this->begin()+NewCapacity;
}
-
-
+
+
/// SmallVectorTemplateBase<isPodLike = true> - This is where we put method
/// implementations that are designed to work with POD-like T's.
template <typename T>
class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
public:
SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
-
+
// No need to do a destroy loop for POD's.
static void destroy_range(T *, T *) {}
-
+
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
@@ -259,33 +261,35 @@ public:
this->grow_pod(MinSize*sizeof(T), sizeof(T));
}
};
-
-
+
+
/// SmallVectorImpl - This class consists of common code factored out of the
/// SmallVector class to reduce code duplication based on the SmallVector 'N'
/// template parameter.
template <typename T>
class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
typedef SmallVectorTemplateBase<T, isPodLike<T>::value > SuperClass;
+
+ SmallVectorImpl(const SmallVectorImpl&); // DISABLED.
public:
typedef typename SuperClass::iterator iterator;
typedef typename SuperClass::size_type size_type;
-
+
// Default ctor - Initialize to empty.
explicit SmallVectorImpl(unsigned N)
: SmallVectorTemplateBase<T, isPodLike<T>::value>(N*sizeof(T)) {
}
-
+
~SmallVectorImpl() {
// Destroy the constructed elements in the vector.
this->destroy_range(this->begin(), this->end());
-
+
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
- operator delete(this->begin());
+ free(this->begin());
}
-
-
+
+
void clear() {
this->destroy_range(this->begin(), this->end());
this->EndX = this->BeginX;
@@ -319,7 +323,7 @@ public:
if (this->capacity() < N)
this->grow(N);
}
-
+
void push_back(const T &Elt) {
if (this->EndX < this->CapacityX) {
Retry:
@@ -330,21 +334,21 @@ public:
this->grow();
goto Retry;
}
-
+
void pop_back() {
this->setEnd(this->end()-1);
this->end()->~T();
}
-
+
T pop_back_val() {
T Result = this->back();
pop_back();
return Result;
}
-
-
+
+
void swap(SmallVectorImpl &RHS);
-
+
/// append - Add the specified range to the end of the SmallVector.
///
template<typename in_iter>
@@ -353,26 +357,26 @@ public:
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
-
+
// Copy the new elements over.
// TODO: Need compile-time dispatch on whether in_iter is a random access
// iterator to use the fast uninitialized_copy.
std::uninitialized_copy(in_start, in_end, this->end());
this->setEnd(this->end() + NumInputs);
}
-
+
/// append - Add the specified range to the end of the SmallVector.
///
void append(size_type NumInputs, const T &Elt) {
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
this->grow(this->size()+NumInputs);
-
+
// Copy the new elements over.
std::uninitialized_fill_n(this->end(), NumInputs, Elt);
this->setEnd(this->end() + NumInputs);
}
-
+
void assign(unsigned NumElts, const T &Elt) {
clear();
if (this->capacity() < NumElts)
@@ -380,7 +384,7 @@ public:
this->setEnd(this->begin()+NumElts);
construct_range(this->begin(), this->end(), Elt);
}
-
+
iterator erase(iterator I) {
iterator N = I;
// Shift all elts down one.
@@ -389,7 +393,7 @@ public:
pop_back();
return(N);
}
-
+
iterator erase(iterator S, iterator E) {
iterator N = S;
// Shift all elts down.
@@ -399,13 +403,13 @@ public:
this->setEnd(I);
return(N);
}
-
+
iterator insert(iterator I, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
push_back(Elt);
return this->end()-1;
}
-
+
if (this->EndX < this->CapacityX) {
Retry:
new (this->end()) T(this->back());
@@ -420,22 +424,22 @@ public:
I = this->begin()+EltNo;
goto Retry;
}
-
+
iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
append(NumToInsert, Elt);
return this->end()-1;
}
-
+
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
-
+
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
-
+
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
-
+
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
@@ -443,48 +447,48 @@ public:
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(this->end()-NumToInsert, this->end());
-
+
// Copy the existing elements that get replaced.
std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
-
+
std::fill_n(I, NumToInsert, Elt);
return I;
}
-
+
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
-
+
// Copy over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
-
+
// Replace the overwritten part.
std::fill_n(I, NumOverwritten, Elt);
-
+
// Insert the non-overwritten middle part.
std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
return I;
}
-
+
template<typename ItTy>
iterator insert(iterator I, ItTy From, ItTy To) {
if (I == this->end()) { // Important special case for empty vector.
append(From, To);
return this->end()-1;
}
-
+
size_t NumToInsert = std::distance(From, To);
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
-
+
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
-
+
// Uninvalidate the iterator.
I = this->begin()+InsertElt;
-
+
// If there are more elements between the insertion point and the end of the
// range than there are being inserted, we can use a simple approach to
// insertion. Since we already reserved space, we know that this won't
@@ -492,37 +496,37 @@ public:
if (size_t(this->end()-I) >= NumToInsert) {
T *OldEnd = this->end();
append(this->end()-NumToInsert, this->end());
-
+
// Copy the existing elements that get replaced.
std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
-
+
std::copy(From, To, I);
return I;
}
-
+
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
-
+
// Copy over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
-
+
// Replace the overwritten part.
for (; NumOverwritten > 0; --NumOverwritten) {
*I = *From;
++I; ++From;
}
-
+
// Insert the non-overwritten middle part.
this->uninitialized_copy(From, To, OldEnd);
return I;
}
-
+
const SmallVectorImpl
&operator=(const SmallVectorImpl &RHS);
-
+
bool operator==(const SmallVectorImpl &RHS) const {
if (this->size() != RHS.size()) return false;
return std::equal(this->begin(), this->end(), RHS.begin());
@@ -530,12 +534,12 @@ public:
bool operator!=(const SmallVectorImpl &RHS) const {
return !(*this == RHS);
}
-
+
bool operator<(const SmallVectorImpl &RHS) const {
return std::lexicographical_compare(this->begin(), this->end(),
RHS.begin(), RHS.end());
}
-
+
/// set_size - Set the array size to \arg N, which the current array must have
/// enough capacity for.
///
@@ -549,14 +553,14 @@ public:
assert(N <= this->capacity());
this->setEnd(this->begin() + N);
}
-
+
private:
static void construct_range(T *S, T *E, const T &Elt) {
for (; S != E; ++S)
new (S) T(Elt);
}
};
-
+
template <typename T>
void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
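
Editor's note: substituting malloc/free for operator new/delete in grow() and the destructor is behavior-preserving for users, though presumably it clears the way for the out-of-line grow_pod to use realloc; that motivation is inferred, not stated in the diff. What callers should already know still holds: growth relocates the elements and invalidates pointers into the vector, as this sketch shows:

#include "llvm/ADT/SmallVector.h"

void demoGrow() {
  llvm::SmallVector<int, 4> V;
  for (int i = 0; i != 4; ++i)
    V.push_back(i);              // still within the inline storage
  int *First = &V[0];
  V.push_back(4);                // exceeds inline capacity: grow() copies
                                 // the elements into a malloc'd buffer
  bool Moved = (First != &V[0]); // true once the vector has spilled
  (void)Moved;
}
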
diff --git a/contrib/llvm/include/llvm/ADT/Statistic.h b/contrib/llvm/include/llvm/ADT/Statistic.h
index c593c58..3a1319f 100644
--- a/contrib/llvm/include/llvm/ADT/Statistic.h
+++ b/contrib/llvm/include/llvm/ADT/Statistic.h
@@ -56,6 +56,10 @@ public:
}
const Statistic &operator++() {
+ // FIXME: This function and all those that follow carefully use an
+ // atomic operation to update the value safely in the presence of
+ // concurrent accesses, but not to read the return value, so the
+ // return value is not thread safe.
sys::AtomicIncrement(&Value);
return init();
}
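
Editor's note: the FIXME describes the window between the atomic increment and the plain read that produces the return value. Restated in standard C++ terms rather than this codebase's sys::Atomic API, the safe pattern derives the result from the atomic operation itself:

#include <atomic>

std::atomic<unsigned> Counter(0);

// Racy, analogous to the Statistic operators: the increment is atomic, but
// the read that follows may observe other threads' updates.
unsigned incrementRacy() {
  Counter.fetch_add(1);
  return Counter.load();
}

// Safe: the return value comes from the fetch-and-add itself.
unsigned incrementSafe() {
  return Counter.fetch_add(1) + 1;
}
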
diff --git a/contrib/llvm/include/llvm/ADT/Triple.h b/contrib/llvm/include/llvm/ADT/Triple.h
index be31ea0..feade6a 100644
--- a/contrib/llvm/include/llvm/ADT/Triple.h
+++ b/contrib/llvm/include/llvm/ADT/Triple.h
@@ -100,7 +100,8 @@ public:
Psp,
Solaris,
Win32,
- Haiku
+ Haiku,
+ Minix
};
private:
@@ -242,8 +243,8 @@ public:
/// environment components with a single string.
void setOSAndEnvironmentName(StringRef Str);
- /// getArchNameForAssembler - Get an architecture name that is understood by the
- /// target assembler.
+ /// getArchNameForAssembler - Get an architecture name that is understood by
+ /// the target assembler.
const char *getArchNameForAssembler();
/// @}
diff --git a/contrib/llvm/include/llvm/ADT/ValueMap.h b/contrib/llvm/include/llvm/ADT/ValueMap.h
index 6f57fe8..9e30bd4 100644
--- a/contrib/llvm/include/llvm/ADT/ValueMap.h
+++ b/contrib/llvm/include/llvm/ADT/ValueMap.h
@@ -59,16 +59,16 @@ struct ValueMapConfig {
struct ExtraData {};
template<typename ExtraDataT>
- static void onRAUW(const ExtraDataT &Data, KeyT Old, KeyT New) {}
+ static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {}
template<typename ExtraDataT>
- static void onDelete(const ExtraDataT &Data, KeyT Old) {}
+ static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {}
/// Returns a mutex that should be acquired around any changes to the map.
/// This is only acquired from the CallbackVH (and held around calls to onRAUW
/// and onDelete) and not inside other ValueMap methods. NULL means that no
/// mutex is necessary.
template<typename ExtraDataT>
- static sys::Mutex *getMutex(const ExtraDataT &Data) { return NULL; }
+ static sys::Mutex *getMutex(const ExtraDataT &/*Data*/) { return NULL; }
};
/// See the file comment.
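
Editor's note: commenting out the parameter names silences unused-parameter warnings in the default no-op hooks while keeping the signatures documented. A sketch of a config that overrides one hook; LoggingConfig is an invented name:

#include "llvm/ADT/ValueMap.h"

// React when a key Value is replaced via replaceAllUsesWith.
struct LoggingConfig : llvm::ValueMapConfig<llvm::Value*> {
  template<typename ExtraDataT>
  static void onRAUW(const ExtraDataT & /*Data*/,
                     llvm::Value *Old, llvm::Value *New) {
    (void)Old; (void)New; // e.g. record the remapping somewhere
  }
};

typedef llvm::ValueMap<llvm::Value*, int, LoggingConfig> RemappedCounts;
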
diff --git a/contrib/llvm/include/llvm/ADT/ilist.h b/contrib/llvm/include/llvm/ADT/ilist.h
index e4d26dd..9479d00 100644
--- a/contrib/llvm/include/llvm/ADT/ilist.h
+++ b/contrib/llvm/include/llvm/ADT/ilist.h
@@ -39,6 +39,7 @@
#define LLVM_ADT_ILIST_H
#include <cassert>
+#include <cstddef>
#include <iterator>
namespace llvm {
diff --git a/contrib/llvm/include/llvm/AbstractTypeUser.h b/contrib/llvm/include/llvm/AbstractTypeUser.h
index b6cceb4..81f5c5c 100644
--- a/contrib/llvm/include/llvm/AbstractTypeUser.h
+++ b/contrib/llvm/include/llvm/AbstractTypeUser.h
@@ -146,6 +146,7 @@ class PATypeHolder {
mutable const Type *Ty;
void destroy();
public:
+ PATypeHolder() : Ty(0) {}
PATypeHolder(const Type *ty) : Ty(ty) {
addRef();
}
@@ -153,7 +154,7 @@ public:
addRef();
}
- ~PATypeHolder() { if (Ty) dropRef(); }
+ ~PATypeHolder() { dropRef(); }
operator Type *() const { return get(); }
Type *get() const;
diff --git a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
index 9f41135..e611a35 100644
--- a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -165,27 +165,6 @@ public:
/// ModRefInfo - Whether the pointer is loaded or stored to/from.
///
ModRefResult ModRefInfo;
-
- /// AccessType - Specific fine-grained access information for the argument.
- /// If none of these classifications is general enough, the
- /// getModRefBehavior method should not return AccessesArguments*. If a
- /// record is not returned for a particular argument, the argument is never
- /// dead and never dereferenced.
- enum AccessType {
- /// ScalarAccess - The pointer is dereferenced.
- ///
- ScalarAccess,
-
- /// ArrayAccess - The pointer is indexed through as an array of elements.
- ///
- ArrayAccess,
-
- /// ElementAccess ?? P->F only?
-
- /// CallsThrough - Indirect calls are made through the specified function
- /// pointer.
- CallsThrough
- };
};
/// getModRefBehavior - Return the behavior when calling the given call site.
diff --git a/contrib/llvm/include/llvm/Analysis/CFGPrinter.h b/contrib/llvm/include/llvm/Analysis/CFGPrinter.h
index 6ad2e5a..ac8f596 100644
--- a/contrib/llvm/include/llvm/Analysis/CFGPrinter.h
+++ b/contrib/llvm/include/llvm/Analysis/CFGPrinter.h
@@ -1,4 +1,4 @@
-//===-- CFGPrinter.h - CFG printer external interface ------------*- C++ -*-===//
+//===-- CFGPrinter.h - CFG printer external interface -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -43,8 +43,8 @@ struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
return OS.str();
}
- static std::string getCompleteNodeLabel(const BasicBlock *Node,
- const Function *Graph) {
+ static std::string getCompleteNodeLabel(const BasicBlock *Node,
+ const Function *Graph) {
std::string Str;
raw_string_ostream OS(Str);
diff --git a/contrib/llvm/include/llvm/Analysis/CaptureTracking.h b/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
index 493ecf5..b3390f4 100644
--- a/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
+++ b/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
@@ -21,9 +21,9 @@ namespace llvm {
/// by the enclosing function (which is required to exist). This routine can
/// be expensive, so consider caching the results. The boolean ReturnCaptures
/// specifies whether returning the value (or part of it) from the function
- /// counts as capturing it or not. The boolean StoreCaptures specified whether
- /// storing the value (or part of it) into memory anywhere automatically
- /// counts as capturing it or not.
+ /// counts as capturing it or not. The boolean StoreCaptures specifies
+ /// whether storing the value (or part of it) into memory anywhere
+ /// automatically counts as capturing it or not.
bool PointerMayBeCaptured(const Value *V,
bool ReturnCaptures,
bool StoreCaptures);
diff --git a/contrib/llvm/include/llvm/Analysis/CodeMetrics.h b/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
new file mode 100644
index 0000000..58096f1
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
@@ -0,0 +1,72 @@
+//===- CodeMetrics.h - Measures the weight of a function --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements various weight measurements for a function, helping
+// the Inliner and PartialSpecialization decide whether to duplicate its
+// contents.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CODEMETRICS_H
+#define LLVM_ANALYSIS_CODEMETRICS_H
+
+namespace llvm {
+ // CodeMetrics - Calculate size and a few similar metrics for a set of
+ // basic blocks.
+ struct CodeMetrics {
+ /// NeverInline - True if this callee should never be inlined into a
+ /// caller.
+ // bool NeverInline;
+
+ // True if this function contains a call to setjmp or _setjmp
+ bool callsSetJmp;
+
+ // True if this function calls itself
+ bool isRecursive;
+
+ // True if this function contains one or more indirect branches
+ bool containsIndirectBr;
+
+ /// usesDynamicAlloca - True if this function calls alloca (in the C sense).
+ bool usesDynamicAlloca;
+
+ /// NumInsts, NumBlocks - Keep track of how large each function is, which
+ /// is used to estimate the code size cost of inlining it.
+ unsigned NumInsts, NumBlocks;
+
+ /// NumBBInsts - Keeps track of basic block code size estimates.
+ DenseMap<const BasicBlock *, unsigned> NumBBInsts;
+
+ /// NumCalls - Keep track of the number of calls to 'big' functions.
+ unsigned NumCalls;
+
+ /// NumVectorInsts - Keep track of how many instructions produce vector
+ /// values. The inliner is being more aggressive with inlining vector
+ /// kernels.
+ unsigned NumVectorInsts;
+
+ /// NumRets - Keep track of how many Ret instructions the block contains.
+ unsigned NumRets;
+
+ CodeMetrics() : callsSetJmp(false), isRecursive(false),
+ containsIndirectBr(false), usesDynamicAlloca(false),
+ NumInsts(0), NumBlocks(0), NumCalls(0), NumVectorInsts(0),
+ NumRets(0) {}
+
+ /// analyzeBasicBlock - Add information about the specified basic block
+ /// to the current structure.
+ void analyzeBasicBlock(const BasicBlock *BB);
+
+ /// analyzeFunction - Add information about the specified function
+ /// to the current structure.
+ void analyzeFunction(Function *F);
+ };
+}
+
+#endif
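
Editor's note: with the struct hoisted out of InlineCost.h, any client can gather these measurements directly. A usage sketch follows; it includes DenseMap.h and BasicBlock.h first, since the new header uses both without including them:

#include "llvm/ADT/DenseMap.h"
#include "llvm/BasicBlock.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Function.h"

// Sketch: refuse to duplicate functions that are risky or too large.
bool cheapEnoughToDuplicate(llvm::Function *F, unsigned MaxInsts) {
  llvm::CodeMetrics Metrics;
  Metrics.analyzeFunction(F);
  if (Metrics.callsSetJmp || Metrics.containsIndirectBr)
    return false;
  return Metrics.NumInsts <= MaxInsts;
}
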
diff --git a/contrib/llvm/include/llvm/Analysis/DebugInfo.h b/contrib/llvm/include/llvm/Analysis/DebugInfo.h
index 473b127..a85b6bc 100644
--- a/contrib/llvm/include/llvm/Analysis/DebugInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/DebugInfo.h
@@ -56,6 +56,7 @@ namespace llvm {
}
GlobalVariable *getGlobalVariableField(unsigned Elt) const;
+ Function *getFunctionField(unsigned Elt) const;
public:
explicit DIDescriptor() : DbgNode(0) {}
@@ -409,6 +410,8 @@ namespace llvm {
/// describes - Return true if this subprogram provides debugging
/// information for the function F.
bool describes(const Function *F);
+
+ Function *getFunction() const { return getFunctionField(16); }
};
/// DIGlobalVariable - This is a wrapper for a global variable.
@@ -577,7 +580,8 @@ namespace llvm {
unsigned RunTimeVer = 0);
/// CreateFile - Create a new descriptor for the specified file.
- DIFile CreateFile(StringRef Filename, StringRef Directory, DICompileUnit CU);
+ DIFile CreateFile(StringRef Filename, StringRef Directory,
+ DICompileUnit CU);
/// CreateEnumerator - Create a single enumerator value.
DIEnumerator CreateEnumerator(StringRef Name, uint64_t Val);
@@ -658,7 +662,8 @@ namespace llvm {
unsigned VIndex = 0,
DIType = DIType(),
bool isArtificial = 0,
- bool isOptimized = false);
+ bool isOptimized = false,
+ Function *Fn = 0);
/// CreateSubprogramDefinition - Create new subprogram descriptor for the
/// given declaration.
diff --git a/contrib/llvm/include/llvm/Analysis/DominatorInternals.h b/contrib/llvm/include/llvm/Analysis/DominatorInternals.h
index 8cea96d..0419688 100644
--- a/contrib/llvm/include/llvm/Analysis/DominatorInternals.h
+++ b/contrib/llvm/include/llvm/Analysis/DominatorInternals.h
@@ -152,8 +152,9 @@ void Compress(DominatorTreeBase<typename GraphT::NodeType>& DT,
}
template<class GraphT>
-typename GraphT::NodeType* Eval(DominatorTreeBase<typename GraphT::NodeType>& DT,
- typename GraphT::NodeType *V) {
+typename GraphT::NodeType*
+Eval(DominatorTreeBase<typename GraphT::NodeType>& DT,
+ typename GraphT::NodeType *V) {
typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInfo =
DT.Info[V];
#if !BALANCE_IDOM_TREE
@@ -265,14 +266,17 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
// initialize the semi dominator to point to the parent node
WInfo.Semi = WInfo.Parent;
- for (typename GraphTraits<Inverse<NodeT> >::ChildIteratorType CI =
- GraphTraits<Inverse<NodeT> >::child_begin(W),
- E = GraphTraits<Inverse<NodeT> >::child_end(W); CI != E; ++CI)
- if (DT.Info.count(*CI)) { // Only if this predecessor is reachable!
- unsigned SemiU = DT.Info[Eval<GraphT>(DT, *CI)].Semi;
+ typedef GraphTraits<Inverse<NodeT> > InvTraits;
+ for (typename InvTraits::ChildIteratorType CI =
+ InvTraits::child_begin(W),
+ E = InvTraits::child_end(W); CI != E; ++CI) {
+ typename InvTraits::NodeType *N = *CI;
+ if (DT.Info.count(N)) { // Only if this predecessor is reachable!
+ unsigned SemiU = DT.Info[Eval<GraphT>(DT, N)].Semi;
if (SemiU < WInfo.Semi)
WInfo.Semi = SemiU;
}
+ }
DT.Info[DT.Vertex[WInfo.Semi]].Bucket.push_back(W);
diff --git a/contrib/llvm/include/llvm/Analysis/Dominators.h b/contrib/llvm/include/llvm/Analysis/Dominators.h
index f810310..1979d3f 100644
--- a/contrib/llvm/include/llvm/Analysis/Dominators.h
+++ b/contrib/llvm/include/llvm/Analysis/Dominators.h
@@ -246,22 +246,25 @@ protected:
typename GraphT::NodeType* NewBBSucc = *GraphT::child_begin(NewBB);
std::vector<typename GraphT::NodeType*> PredBlocks;
- for (typename GraphTraits<Inverse<N> >::ChildIteratorType PI =
- GraphTraits<Inverse<N> >::child_begin(NewBB),
- PE = GraphTraits<Inverse<N> >::child_end(NewBB); PI != PE; ++PI)
+ typedef GraphTraits<Inverse<N> > InvTraits;
+ for (typename InvTraits::ChildIteratorType PI =
+ InvTraits::child_begin(NewBB),
+ PE = InvTraits::child_end(NewBB); PI != PE; ++PI)
PredBlocks.push_back(*PI);
- assert(!PredBlocks.empty() && "No predblocks??");
+ assert(!PredBlocks.empty() && "No predblocks?");
bool NewBBDominatesNewBBSucc = true;
- for (typename GraphTraits<Inverse<N> >::ChildIteratorType PI =
- GraphTraits<Inverse<N> >::child_begin(NewBBSucc),
- E = GraphTraits<Inverse<N> >::child_end(NewBBSucc); PI != E; ++PI)
- if (*PI != NewBB && !DT.dominates(NewBBSucc, *PI) &&
- DT.isReachableFromEntry(*PI)) {
+ for (typename InvTraits::ChildIteratorType PI =
+ InvTraits::child_begin(NewBBSucc),
+ E = InvTraits::child_end(NewBBSucc); PI != E; ++PI) {
+ typename InvTraits::NodeType *ND = *PI;
+ if (ND != NewBB && !DT.dominates(NewBBSucc, ND) &&
+ DT.isReachableFromEntry(ND)) {
NewBBDominatesNewBBSucc = false;
break;
}
+ }
// Find NewBB's immediate dominator and create new dominator tree node for
// NewBB.
@@ -704,7 +707,6 @@ public:
}
~DominatorTree() {
- DT->releaseMemory();
delete DT;
}
diff --git a/contrib/llvm/include/llvm/Analysis/InlineCost.h b/contrib/llvm/include/llvm/Analysis/InlineCost.h
index cac7cfe..462bddd 100644
--- a/contrib/llvm/include/llvm/Analysis/InlineCost.h
+++ b/contrib/llvm/include/llvm/Analysis/InlineCost.h
@@ -19,6 +19,7 @@
#include <vector>
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ValueMap.h"
+#include "llvm/Analysis/CodeMetrics.h"
namespace llvm {
@@ -29,46 +30,6 @@ namespace llvm {
template<class PtrType, unsigned SmallSize>
class SmallPtrSet;
- // CodeMetrics - Calculate size and a few similar metrics for a set of
- // basic blocks.
- struct CodeMetrics {
- /// NeverInline - True if this callee should never be inlined into a
- /// caller.
- bool NeverInline;
-
- /// usesDynamicAlloca - True if this function calls alloca (in the C sense).
- bool usesDynamicAlloca;
-
- /// NumInsts, NumBlocks - Keep track of how large each function is, which
- /// is used to estimate the code size cost of inlining it.
- unsigned NumInsts, NumBlocks;
-
- /// NumBBInsts - Keeps track of basic block code size estimates.
- DenseMap<const BasicBlock *, unsigned> NumBBInsts;
-
- /// NumCalls - Keep track of the number of calls to 'big' functions.
- unsigned NumCalls;
-
- /// NumVectorInsts - Keep track of how many instructions produce vector
- /// values. The inliner is being more aggressive with inlining vector
- /// kernels.
- unsigned NumVectorInsts;
-
- /// NumRets - Keep track of how many Ret instructions the block contains.
- unsigned NumRets;
-
- CodeMetrics() : NeverInline(false), usesDynamicAlloca(false), NumInsts(0),
- NumBlocks(0), NumCalls(0), NumVectorInsts(0), NumRets(0) {}
-
- /// analyzeBasicBlock - Add information about the specified basic block
- /// to the current structure.
- void analyzeBasicBlock(const BasicBlock *BB);
-
- /// analyzeFunction - Add information about the specified function
- /// to the current structure.
- void analyzeFunction(Function *F);
- };
-
namespace InlineConstants {
// Various magic constants used to adjust heuristics.
const int InstrCost = 5;
@@ -163,6 +124,10 @@ namespace llvm {
/// analyzeFunction - Add information about the specified function
/// to the current structure.
void analyzeFunction(Function *F);
+
+ /// NeverInline - Returns true if the function should never be
+ /// inlined into any caller.
+ bool NeverInline();
};
// The Function* for a function can be changed (by ArgumentPromotion);
diff --git a/contrib/llvm/include/llvm/Analysis/IntervalIterator.h b/contrib/llvm/include/llvm/Analysis/IntervalIterator.h
index d842840..82b3294 100644
--- a/contrib/llvm/include/llvm/Analysis/IntervalIterator.h
+++ b/contrib/llvm/include/llvm/Analysis/IntervalIterator.h
@@ -36,9 +36,9 @@
#include "llvm/Analysis/IntervalPartition.h"
#include "llvm/Function.h"
#include "llvm/Support/CFG.h"
-#include <stack>
-#include <set>
#include <algorithm>
+#include <set>
+#include <vector>
namespace llvm {
@@ -88,7 +88,7 @@ inline void addNodeToInterval(Interval *Int, Interval *I) {
template<class NodeTy, class OrigContainer_t, class GT = GraphTraits<NodeTy*>,
class IGT = GraphTraits<Inverse<NodeTy*> > >
class IntervalIterator {
- std::stack<std::pair<Interval*, typename Interval::succ_iterator> > IntStack;
+ std::vector<std::pair<Interval*, typename Interval::succ_iterator> > IntStack;
std::set<BasicBlock*> Visited;
OrigContainer_t *OrigContainer;
bool IOwnMem; // If True, delete intervals when done with them
@@ -116,15 +116,15 @@ public:
if (IOwnMem)
while (!IntStack.empty()) {
delete operator*();
- IntStack.pop();
+ IntStack.pop_back();
}
}
inline bool operator==(const _Self& x) const { return IntStack == x.IntStack;}
inline bool operator!=(const _Self& x) const { return !operator==(x); }
- inline const Interval *operator*() const { return IntStack.top().first; }
- inline Interval *operator*() { return IntStack.top().first; }
+ inline const Interval *operator*() const { return IntStack.back().first; }
+ inline Interval *operator*() { return IntStack.back().first; }
inline const Interval *operator->() const { return operator*(); }
inline Interval *operator->() { return operator*(); }
@@ -133,8 +133,8 @@ public:
do {
// All of the intervals on the stack have been visited. Try visiting
// their successors now.
- Interval::succ_iterator &SuccIt = IntStack.top().second,
- EndIt = succ_end(IntStack.top().first);
+ Interval::succ_iterator &SuccIt = IntStack.back().second,
+ EndIt = succ_end(IntStack.back().first);
while (SuccIt != EndIt) { // Loop over all interval succs
bool Done = ProcessInterval(getSourceGraphNode(OrigContainer, *SuccIt));
++SuccIt; // Increment iterator
@@ -142,10 +142,10 @@ public:
}
// Free interval memory... if necessary
- if (IOwnMem) delete IntStack.top().first;
+ if (IOwnMem) delete IntStack.back().first;
// We ran out of successors for this interval... pop off the stack
- IntStack.pop();
+ IntStack.pop_back();
} while (!IntStack.empty());
return *this;
@@ -175,7 +175,7 @@ private:
E = GT::child_end(Node); I != E; ++I)
ProcessNode(Int, getSourceGraphNode(OrigContainer, *I));
- IntStack.push(std::make_pair(Int, succ_begin(Int)));
+ IntStack.push_back(std::make_pair(Int, succ_begin(Int)));
return true;
}
diff --git a/contrib/llvm/include/llvm/Analysis/Loads.h b/contrib/llvm/include/llvm/Analysis/Loads.h
new file mode 100644
index 0000000..1574262
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/Loads.h
@@ -0,0 +1,51 @@
+//===- Loads.h - Local load analysis --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares simple local analyses for load instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOADS_H
+#define LLVM_ANALYSIS_LOADS_H
+
+#include "llvm/BasicBlock.h"
+
+namespace llvm {
+
+class AliasAnalysis;
+class TargetData;
+
+/// isSafeToLoadUnconditionally - Return true if we know that executing a load
+/// from this value cannot trap. If it is not obviously safe to load from the
+/// specified pointer, we do a quick local scan of the basic block containing
+/// ScanFrom, to determine if the address is already accessed.
+bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
+ unsigned Align, const TargetData *TD = 0);
+
+/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
+/// the instruction before ScanFrom) checking to see if we have the value at
+/// the memory address *Ptr locally available within a small number of
+/// instructions. If the value is available, return it.
+///
+/// If not, return the iterator for the last validated instruction that the
+/// value would be live through. If we scanned the entire block and didn't
+/// find something that invalidates *Ptr or provides it, ScanFrom would be
+/// left at begin() and this returns null.
+///
+/// MaxInstsToScan specifies the maximum instructions to scan in the block.
+/// If it is set to 0, it will scan the whole block. You can also optionally
+/// specify an alias analysis implementation, which makes this more precise.
+Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
+ BasicBlock::iterator &ScanFrom,
+ unsigned MaxInstsToScan = 6,
+ AliasAnalysis *AA = 0);
+
+}
+
+#endif
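
Editor's note: a sketch of the second entry point in its typical role, replacing a load with a value already known to be in a register; the helper name is invented:

#include "llvm/Analysis/Loads.h"
#include "llvm/Instructions.h"

// If the loaded value is locally available, return it; else return null.
llvm::Value *simplifyLoad(llvm::LoadInst *LI) {
  llvm::BasicBlock::iterator ScanFrom = LI;
  return llvm::FindAvailableLoadedValue(LI->getPointerOperand(),
                                        LI->getParent(), ScanFrom);
}
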
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfo.h b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
index 2babc25..9455fd8 100644
--- a/contrib/llvm/include/llvm/Analysis/LoopInfo.h
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
@@ -256,6 +256,27 @@ public:
///
BlockT *getLoopPreheader() const {
// Keep track of nodes outside the loop branching to the header...
+ BlockT *Out = getLoopPredecessor();
+ if (!Out) return 0;
+
+ // Make sure there is only one exit out of the preheader.
+ typedef GraphTraits<BlockT*> BlockTraits;
+ typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
+ ++SI;
+ if (SI != BlockTraits::child_end(Out))
+ return 0; // Multiple exits from the block, must not be a preheader.
+
+ // The predecessor has exactly one successor, so it is a preheader.
+ return Out;
+ }
+
+ /// getLoopPredecessor - If the given loop's header has exactly one unique
+ /// predecessor outside the loop, return it. Otherwise return null.
+/// This is less strict than the loop "preheader" concept, which requires
+ /// the predecessor to have exactly one successor.
+ ///
+ BlockT *getLoopPredecessor() const {
+ // Keep track of nodes outside the loop branching to the header...
BlockT *Out = 0;
// Loop over the predecessors of the header node...
@@ -264,22 +285,17 @@ public:
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header),
- PE = InvBlockTraits::child_end(Header); PI != PE; ++PI)
- if (!contains(*PI)) { // If the block is not in the loop...
- if (Out && Out != *PI)
+ PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (!contains(N)) { // If the block is not in the loop...
+ if (Out && Out != N)
return 0; // Multiple predecessors outside the loop
- Out = *PI;
+ Out = N;
}
+ }
// A loop header must have at least one predecessor from outside the loop.
assert(Out && "Header of loop has no predecessors from outside loop?");
- typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
- ++SI;
- if (SI != BlockTraits::child_end(Out))
- return 0; // Multiple exits from the block, must not be a preheader.
-
- // If there is exactly one preheader, return it. If there was zero, then
- // Out is still null.
return Out;
}
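
Editor's note: the refactoring splits the old search in two. getLoopPredecessor asks only for a unique out-of-loop predecessor, while getLoopPreheader additionally requires that block to have a single successor. A guarded loop shows the difference; the CFG fragment below is illustrative:

#include "llvm/Analysis/LoopInfo.h"

//   guard:  br i1 %c, label %header, label %exit
//
// %guard is the only predecessor of %header from outside the loop, but it
// has two successors, so it is not a preheader.
void demoLoop(llvm::Loop *L) {
  llvm::BasicBlock *Pred = L->getLoopPredecessor(); // %guard
  llvm::BasicBlock *Pre  = L->getLoopPreheader();   // null
  // Whenever Pre is non-null, Pred == Pre.
  (void)Pred; (void)Pre;
}
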
@@ -293,11 +309,13 @@ public:
typename InvBlockTraits::ChildIteratorType PE =
InvBlockTraits::child_end(Header);
BlockT *Latch = 0;
- for (; PI != PE; ++PI)
- if (contains(*PI)) {
+ for (; PI != PE; ++PI) {
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (contains(N)) {
if (Latch) return 0;
- Latch = *PI;
+ Latch = N;
}
+ }
return Latch;
}
@@ -409,10 +427,11 @@ public:
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(BB), PE = InvBlockTraits::child_end(BB);
PI != PE; ++PI) {
- if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), *PI))
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), N))
HasInsideLoopPreds = true;
else
- OutsideLoopPreds.push_back(*PI);
+ OutsideLoopPreds.push_back(N);
}
if (BB == getHeader()) {
@@ -743,9 +762,11 @@ public:
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType I =
InvBlockTraits::child_begin(BB), E = InvBlockTraits::child_end(BB);
- I != E; ++I)
- if (DT.dominates(BB, *I)) // If BB dominates its predecessor...
- TodoStack.push_back(*I);
+ I != E; ++I) {
+ typename InvBlockTraits::NodeType *N = *I;
+ if (DT.dominates(BB, N)) // If BB dominates its predecessor...
+ TodoStack.push_back(N);
+ }
if (TodoStack.empty()) return 0; // No backedges to this block...
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
index a7f42c9..a4f9162 100644
--- a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -72,8 +72,8 @@ Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
// free Call Utility Functions.
//
-/// isFreeCall - Returns true if the value is a call to the builtin free()
-bool isFreeCall(const Value *I);
+/// isFreeCall - Returns non-null if the value is a call to the builtin free()
+const CallInst *isFreeCall(const Value *I);
} // End llvm namespace
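
Editor's note: returning the CallInst rather than a bool lets callers reach the call's operands without a second dyn_cast. A sketch, assuming getArgOperand is available in this snapshot of the API:

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Instructions.h"

// If I is a call to the builtin free(), return the pointer it frees.
const llvm::Value *getFreedPointer(const llvm::Value *I) {
  if (const llvm::CallInst *CI = llvm::isFreeCall(I))
    return CI->getArgOperand(0);
  return 0;
}
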
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
index d3a8d8f..8da3af0 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -343,10 +343,6 @@ namespace llvm {
BackedgeTakenInfo HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
const Loop *L, bool isSigned);
- /// getLoopPredecessor - If the given loop's header has exactly one unique
- /// predecessor outside the loop, return it. Otherwise return null.
- BasicBlock *getLoopPredecessor(const Loop *L);
-
/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
/// (which may not be an immediate predecessor) which has exactly one
/// successor from which BB is reachable, or null if no such block is
@@ -530,10 +526,6 @@ namespace llvm {
/// widening.
const SCEV *getTruncateOrNoop(const SCEV *V, const Type *Ty);
- /// getIntegerSCEV - Given a SCEVable type, create a constant for the
- /// specified signed integer value and return a SCEV for the constant.
- const SCEV *getIntegerSCEV(int64_t Val, const Type *Ty);
-
/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
index baf6946..9501555 100644
--- a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -32,6 +32,7 @@ namespace llvm {
std::map<std::pair<const SCEV *, Instruction *>, AssertingVH<Value> >
InsertedExpressions;
std::set<Value*> InsertedValues;
+ std::set<Value*> InsertedPostIncValues;
/// PostIncLoops - Addrecs referring to any of the given loops are expanded
/// in post-inc mode. For example, expanding {1,+,1}<L> in post-inc mode
@@ -102,6 +103,10 @@ namespace llvm {
/// clearPostInc - Disable all post-inc expansion.
void clearPostInc() {
PostIncLoops.clear();
+
+ // When we change the post-inc loop set, cached expansions may no
+ // longer be valid.
+ InsertedPostIncValues.clear();
}
/// disableCanonicalMode - Disable the behavior of expanding expressions in
@@ -123,6 +128,14 @@ namespace llvm {
/// of work to avoid inserting an obviously redundant operation.
Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS);
+ /// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
+ /// reusing an existing cast if a suitable one exists, moving an existing
+ /// cast if a suitable one exists but isn't in the right place, or
+ /// creating a new one.
+ Value *ReuseOrCreateCast(Value *V, const Type *Ty,
+ Instruction::CastOps Op,
+ BasicBlock::iterator IP);
+
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to
/// share the casts.
@@ -146,7 +159,7 @@ namespace llvm {
/// inserted by the code rewriter. If so, the client should not modify the
/// instruction.
bool isInsertedInstruction(Instruction *I) const {
- return InsertedValues.count(I);
+ return InsertedValues.count(I) || InsertedPostIncValues.count(I);
}
Value *visitConstant(const SCEVConstant *S) {
diff --git a/contrib/llvm/include/llvm/Analysis/ValueTracking.h b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
index d580897..b9634f0 100644
--- a/contrib/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
@@ -97,7 +97,7 @@ namespace llvm {
- /// FindScalarValue - Given an aggregrate and an sequence of indices, see if
+ /// FindInsertedValue - Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if
/// it were inserted directly into the aggregate.
///
diff --git a/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h b/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
index 45eb801..a186964 100644
--- a/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
+++ b/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
@@ -40,7 +40,8 @@ namespace llvm {
std::string *ErrMsg = 0);
/// WriteBitcodeToFile - Write the specified module to the specified
- /// raw output stream.
+ /// raw output stream. For streams where it matters, the given stream
+ /// should be in "binary" mode.
void WriteBitcodeToFile(const Module *M, raw_ostream &Out);
/// WriteBitcodeToStream - Write the specified module to the specified
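
Editor's note: the clarified comment matters on hosts that distinguish text and binary streams, where newline translation would corrupt the bitcode. A sketch, assuming the raw_fd_ostream flags of this era:

#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

// Open the output in binary mode, as the revised comment asks.
void writeModule(const llvm::Module *M) {
  std::string Err;
  llvm::raw_fd_ostream Out("out.bc", Err, llvm::raw_fd_ostream::F_Binary);
  if (Err.empty())
    llvm::WriteBitcodeToFile(M, Out);
}
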
diff --git a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
index 243ddbb..7ca6c62 100644
--- a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -64,7 +64,7 @@ namespace llvm {
/// Target machine description.
///
TargetMachine &TM;
-
+
/// Target Asm Printer information.
///
const MCAsmInfo *MAI;
@@ -73,13 +73,13 @@ namespace llvm {
/// streaming. This owns all of the global MC-related objects for the
/// generated translation unit.
MCContext &OutContext;
-
+
/// OutStreamer - This is the MCStreamer object for the file we are
/// generating. This contains the transient state for the current
/// translation unit that we are generating (such as the current section
/// etc).
MCStreamer &OutStreamer;
-
+
/// The current machine function.
const MachineFunction *MF;
@@ -94,30 +94,30 @@ namespace llvm {
/// beginning of each call to runOnMachineFunction().
///
MCSymbol *CurrentFnSym;
-
+
private:
// GCMetadataPrinters - The garbage collection metadata printer table.
void *GCMetadataPrinters; // Really a DenseMap.
-
+
/// VerboseAsm - Emit comments in assembly output if this is true.
///
bool VerboseAsm;
static char ID;
-
+
/// If VerboseAsm is set, a pointer to the loop info for this
/// function.
MachineLoopInfo *LI;
/// DD - If the target supports dwarf debug info, this pointer is non-null.
DwarfDebug *DD;
-
+
/// DE - If the target supports dwarf exception info, this pointer is
/// non-null.
DwarfException *DE;
-
+
protected:
explicit AsmPrinter(TargetMachine &TM, MCStreamer &Streamer);
-
+
public:
virtual ~AsmPrinter();
@@ -128,7 +128,7 @@ namespace llvm {
/// getFunctionNumber - Return a unique ID for the current function.
///
unsigned getFunctionNumber() const;
-
+
/// getObjFileLowering - Return information about object file lowering.
const TargetLoweringObjectFile &getObjFileLowering() const;
@@ -137,16 +137,16 @@ namespace llvm {
/// getCurrentSection() - Return the current section we are emitting to.
const MCSection *getCurrentSection() const;
-
-
+
+
//===------------------------------------------------------------------===//
// MachineFunctionPass Implementation.
//===------------------------------------------------------------------===//
-
+
/// getAnalysisUsage - Record analysis usage.
- ///
+ ///
void getAnalysisUsage(AnalysisUsage &AU) const;
-
+
/// doInitialization - Set up the AsmPrinter when we are working on a new
/// module. If your pass overrides this, it must make sure to explicitly
/// call this implementation.
@@ -155,7 +155,7 @@ namespace llvm {
/// doFinalization - Shut down the asmprinter. If you override this in your
/// pass, you must make sure to call it explicitly.
bool doFinalization(Module &M);
-
+
/// runOnMachineFunction - Emit the specified function out to the
/// OutStreamer.
virtual bool runOnMachineFunction(MachineFunction &MF) {
@@ -163,20 +163,20 @@ namespace llvm {
EmitFunctionHeader();
EmitFunctionBody();
return false;
- }
-
+ }
+
//===------------------------------------------------------------------===//
// Coarse grained IR lowering routines.
//===------------------------------------------------------------------===//
-
+
/// SetupMachineFunction - This should be called when a new MachineFunction
/// is being processed from runOnMachineFunction.
void SetupMachineFunction(MachineFunction &MF);
-
+
/// EmitFunctionHeader - This method emits the header for the current
/// function.
void EmitFunctionHeader();
-
+
/// EmitFunctionBody - This method emits the body and trailer for a
/// function.
void EmitFunctionBody();
@@ -187,15 +187,15 @@ namespace llvm {
/// the code generator.
///
virtual void EmitConstantPool();
-
- /// EmitJumpTableInfo - Print assembly representations of the jump tables
- /// used by the current function to the current output stream.
+
+ /// EmitJumpTableInfo - Print assembly representations of the jump tables
+ /// used by the current function to the current output stream.
///
void EmitJumpTableInfo();
-
+
/// EmitGlobalVariable - Emit the specified global variable to the .s file.
virtual void EmitGlobalVariable(const GlobalVariable *GV);
-
+
/// EmitSpecialLLVMGlobal - Check to see if the specified global is a
/// special global used by LLVM. If so, emit it and return true, otherwise
/// do nothing and return false.
@@ -208,54 +208,54 @@ namespace llvm {
/// if required for correctness.
///
void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0) const;
-
+
/// EmitBasicBlockStart - This method prints the label for the specified
/// MachineBasicBlock, an alignment (if present) and a comment describing
/// it if appropriate.
void EmitBasicBlockStart(const MachineBasicBlock *MBB) const;
-
+
/// EmitGlobalConstant - Print a general LLVM constant to the .s file.
void EmitGlobalConstant(const Constant *CV, unsigned AddrSpace = 0);
-
-
+
+
//===------------------------------------------------------------------===//
// Overridable Hooks
//===------------------------------------------------------------------===//
-
+
// Targets can, or in the case of EmitInstruction, must implement these to
// customize output.
-
+
/// EmitStartOfAsmFile - This virtual method can be overridden by targets
/// that want to emit something at the start of their file.
virtual void EmitStartOfAsmFile(Module &) {}
-
+
/// EmitEndOfAsmFile - This virtual method can be overridden by targets that
/// want to emit something at the end of their file.
virtual void EmitEndOfAsmFile(Module &) {}
-
+
/// EmitFunctionBodyStart - Targets can override this to emit stuff before
/// the first basic block in the function.
virtual void EmitFunctionBodyStart() {}
-
+
/// EmitFunctionBodyEnd - Targets can override this to emit stuff after
/// the last basic block in the function.
virtual void EmitFunctionBodyEnd() {}
-
+
/// EmitInstruction - Targets should implement this to emit instructions.
virtual void EmitInstruction(const MachineInstr *) {
assert(0 && "EmitInstruction not implemented");
}
-
+
virtual void EmitFunctionEntryLabel();
-
+
virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
-
+
/// isBlockOnlyReachableByFallthrough - Return true if the basic block has
/// exactly one predecessor and the control transfer mechanism between
/// the predecessor and this block is a fall-through.
virtual bool
isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
-
+
//===------------------------------------------------------------------===//
// Symbol Lowering Routines.
//===------------------------------------------------------------------===//
@@ -264,23 +264,23 @@ namespace llvm {
/// GetTempSymbol - Return the MCSymbol corresponding to the assembler
/// temporary label with the specified stem and unique ID.
MCSymbol *GetTempSymbol(StringRef Name, unsigned ID) const;
-
+
/// GetTempSymbol - Return an assembler temporary label with the specified
/// stem.
MCSymbol *GetTempSymbol(StringRef Name) const;
-
-
+
+
/// GetSymbolWithGlobalValueBase - Return the MCSymbol for a symbol with
/// global value name as its base, with the specified suffix, and where the
/// symbol is forced to have private linkage if ForcePrivate is true.
MCSymbol *GetSymbolWithGlobalValueBase(const GlobalValue *GV,
StringRef Suffix,
bool ForcePrivate = true) const;
-
+
/// GetExternalSymbolSymbol - Return the MCSymbol for the specified
/// ExternalSymbol.
MCSymbol *GetExternalSymbolSymbol(StringRef Sym) const;
-
+
/// GetCPISymbol - Return the symbol for the specified constant pool entry.
MCSymbol *GetCPISymbol(unsigned CPID) const;
@@ -302,42 +302,42 @@ namespace llvm {
public:
  /// printOffset - This is just a convenient handler for printing offsets.
void printOffset(int64_t Offset, raw_ostream &OS) const;
-
+
/// EmitInt8 - Emit a byte directive and value.
///
void EmitInt8(int Value) const;
-
+
/// EmitInt16 - Emit a short directive and value.
///
void EmitInt16(int Value) const;
-
+
/// EmitInt32 - Emit a long directive and value.
///
void EmitInt32(int Value) const;
-
+
/// EmitLabelDifference - Emit something like ".long Hi-Lo" where the size
/// in bytes of the directive is specified by Size and Hi/Lo specify the
/// labels. This implicitly uses .set if it is available.
void EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
unsigned Size) const;
-
- /// EmitLabelOffsetDifference - Emit something like ".long Hi+Offset-Lo"
+
+ /// EmitLabelOffsetDifference - Emit something like ".long Hi+Offset-Lo"
/// where the size in bytes of the directive is specified by Size and Hi/Lo
/// specify the labels. This implicitly uses .set if it is available.
void EmitLabelOffsetDifference(const MCSymbol *Hi, uint64_t Offset,
const MCSymbol *Lo, unsigned Size) const;
-
+
//===------------------------------------------------------------------===//
// Dwarf Emission Helper Routines
//===------------------------------------------------------------------===//
-
+
/// EmitSLEB128 - emit the specified signed leb128 value.
void EmitSLEB128(int Value, const char *Desc = 0) const;
-
+
/// EmitULEB128 - emit the specified unsigned leb128 value.
void EmitULEB128(unsigned Value, const char *Desc = 0,
unsigned PadTo = 0) const;
-
+
/// EmitCFAByte - Emit a .byte 42 directive for a DW_CFA_xxx value.
void EmitCFAByte(unsigned Val) const;
@@ -346,15 +346,15 @@ namespace llvm {
/// describing the encoding. Desc is a string saying what the encoding is
/// specifying (e.g. "LSDA").
void EmitEncodingByte(unsigned Val, const char *Desc = 0) const;
-
+
/// GetSizeOfEncodedValue - Return the size of the encoding in bytes.
unsigned GetSizeOfEncodedValue(unsigned Encoding) const;
-
+
/// EmitReference - Emit a reference to a label with a specified encoding.
///
void EmitReference(const MCSymbol *Sym, unsigned Encoding) const;
void EmitReference(const GlobalValue *GV, unsigned Encoding) const;
-
+
/// EmitSectionOffset - Emit the 4-byte offset of Label from the start of
/// its section. This can be done with a special directive if the target
/// supports it (e.g. cygwin) or by emitting it as an offset from a label at
@@ -372,20 +372,20 @@ namespace llvm {
//===------------------------------------------------------------------===//
// Dwarf Lowering Routines
//===------------------------------------------------------------------===//
-
+
/// EmitFrameMoves - Emit frame instructions to describe the layout of the
/// frame.
- void EmitFrameMoves(const std::vector<MachineMove> &Moves,
+ void EmitFrameMoves(const std::vector<MachineMove> &Moves,
MCSymbol *BaseLabel, bool isEH) const;
-
-
+
+
//===------------------------------------------------------------------===//
// Inline Asm Support
//===------------------------------------------------------------------===//
public:
// These are hooks that targets can override to implement inline asm
// support. These should probably be moved out of AsmPrinter someday.
-
+
/// PrintSpecial - Print information related to the specified machine instr
/// that is independent of the operand, and may be independent of the instr
/// itself. This can be useful for portably encoding the comment character
@@ -394,7 +394,7 @@ namespace llvm {
/// for their own strange codes.
virtual void PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
const char *Code) const;
-
+
/// PrintAsmOperand - Print the specified operand of MI, an INLINEASM
/// instruction, using the specified assembler variant. Targets should
/// override this to format as appropriate. This method can return true if
@@ -402,16 +402,16 @@ namespace llvm {
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &OS);
-
+
/// PrintAsmMemoryOperand - Print the specified operand of MI, an INLINEASM
/// instruction, using the specified assembler variant as an address.
/// Targets should override this to format as appropriate. This method can
/// return true if the operand is erroneous.
virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
+ unsigned AsmVariant,
const char *ExtraCode,
raw_ostream &OS);
-
+
private:
/// Private state for PrintSpecial()
// Assign a unique ID to this machine instruction.
@@ -422,7 +422,7 @@ namespace llvm {
/// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
void EmitInlineAsm(StringRef Str, unsigned LocCookie) const;
-
+
/// EmitInlineAsm - This method formats and emits the specified machine
/// instruction that is an inline asm.
void EmitInlineAsm(const MachineInstr *MI) const;
@@ -430,13 +430,13 @@ namespace llvm {
//===------------------------------------------------------------------===//
// Internal Implementation Details
//===------------------------------------------------------------------===//
-
+
  /// EmitVisibility - This emits visibility information about the symbol, if
  /// this is supported by the target.
void EmitVisibility(MCSymbol *Sym, unsigned Visibility) const;
-
+
void EmitLinkage(unsigned Linkage, MCSymbol *GVSym) const;
-
+
void EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
unsigned uid) const;
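
Of the hooks declared above, EmitInstruction is the only one a target must implement. A minimal sketch of a typical override, assuming a hypothetical MyTargetAsmPrinter and a per-target MCInst lowering helper (neither is part of this diff):

    void MyTargetAsmPrinter::EmitInstruction(const MachineInstr *MI) {
      MCInst TmpInst;
      // Hypothetical helper: translate the MachineInstr into an MCInst.
      LowerMyTargetMachineInstrToMCInst(MI, TmpInst);
      // AsmPrinter's MCStreamer member emits the final instruction.
      OutStreamer.EmitInstruction(TmpInst);
    }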
diff --git a/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h b/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
index 45a2757..7911907 100644
--- a/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -17,14 +17,13 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/Target/TargetCallingConv.h"
#include "llvm/CallingConv.h"
namespace llvm {
class TargetRegisterInfo;
class TargetMachine;
class CCState;
- class SDNode;
/// CCValAssign - Represent assignment of one arg/retval to a location.
class CCValAssign {
@@ -35,6 +34,9 @@ public:
ZExt, // The value is zero extended in the location.
AExt, // The value is extended with undefined upper bits.
BCvt, // The value is bit-converted in the location.
+ VExt, // The value is vector-widened in the location.
+ // FIXME: Not implemented yet. Code that uses AExt to mean
+ // vector-widen should be fixed to use VExt instead.
Indirect // The location contains pointer to the value.
// TODO: a subset of the value is in the location.
};
@@ -186,8 +188,7 @@ public:
/// CheckReturn - Analyze the return values of a function, returning
/// true if the return can be performed without sret-demotion, and
/// false otherwise.
- bool CheckReturn(const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
+ bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
CCAssignFn Fn);
/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
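
The CheckReturn change above replaces the parallel EVT and flags vectors with a single ISD::OutputArg list. A hedged sketch of a call site under the new signature (the Outs population step, the surrounding locals, and RetCC_MyTarget are assumptions):

    SmallVector<ISD::OutputArg, 4> Outs;
    // ... fill Outs from the function's return type and attributes ...
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CallConv, isVarArg, TM, RVLocs, Ctx);
    bool CanLowerReturn = CCInfo.CheckReturn(Outs, RetCC_MyTarget);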
diff --git a/contrib/llvm/include/llvm/CodeGen/FastISel.h b/contrib/llvm/include/llvm/CodeGen/FastISel.h
index 005c7bc..79b1554 100644
--- a/contrib/llvm/include/llvm/CodeGen/FastISel.h
+++ b/contrib/llvm/include/llvm/CodeGen/FastISel.h
@@ -19,11 +19,13 @@
#include "llvm/ADT/SmallSet.h"
#endif
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
namespace llvm {
class AllocaInst;
class ConstantFP;
+class FunctionLoweringInfo;
class Instruction;
class MachineBasicBlock;
class MachineConstantPool;
@@ -36,22 +38,15 @@ class TargetInstrInfo;
class TargetLowering;
class TargetMachine;
class TargetRegisterClass;
+class TargetRegisterInfo;
/// FastISel - This is a fast-path instruction selection class that
/// generates poor code and doesn't support illegal types or non-trivial
/// lowering, but runs quickly.
class FastISel {
protected:
- MachineBasicBlock *MBB;
DenseMap<const Value *, unsigned> LocalValueMap;
- DenseMap<const Value *, unsigned> &ValueMap;
- DenseMap<const BasicBlock *, MachineBasicBlock *> &MBBMap;
- DenseMap<const AllocaInst *, int> &StaticAllocaMap;
- std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate;
-#ifndef NDEBUG
- SmallSet<const Instruction *, 8> &CatchInfoLost;
-#endif
- MachineFunction &MF;
+ FunctionLoweringInfo &FuncInfo;
MachineRegisterInfo &MRI;
MachineFrameInfo &MFI;
MachineConstantPool &MCP;
@@ -60,23 +55,22 @@ protected:
const TargetData &TD;
const TargetInstrInfo &TII;
const TargetLowering &TLI;
- bool IsBottomUp;
+ const TargetRegisterInfo &TRI;
+ MachineInstr *LastLocalValue;
public:
+ /// getLastLocalValue - Return the position of the last instruction
+ /// emitted for materializing constants for use in the current block.
+ MachineInstr *getLastLocalValue() { return LastLocalValue; }
+
+ /// setLastLocalValue - Update the position of the last instruction
+ /// emitted for materializing constants for use in the current block.
+ void setLastLocalValue(MachineInstr *I) { LastLocalValue = I; }
+
/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
- void startNewBlock(MachineBasicBlock *mbb) {
- setCurrentBlock(mbb);
- LocalValueMap.clear();
- }
-
- /// setCurrentBlock - Set the current block to which generated machine
- /// instructions will be appended.
- ///
- void setCurrentBlock(MachineBasicBlock *mbb) {
- MBB = mbb;
- }
+ void startNewBlock();
/// getCurDebugLoc() - Return current debug location information.
DebugLoc getCurDebugLoc() const { return DL; }
@@ -108,18 +102,26 @@ public:
/// index value.
std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
+ /// recomputeInsertPt - Reset InsertPt to prepare for inserting instructions
+ /// into the current block.
+ void recomputeInsertPt();
+
+ struct SavePoint {
+ MachineBasicBlock::iterator InsertPt;
+ DebugLoc DL;
+ };
+
+ /// enterLocalValueArea - Prepare InsertPt to begin inserting instructions
+ /// into the local value area and return the old insert position.
+ SavePoint enterLocalValueArea();
+
+ /// leaveLocalValueArea - Reset InsertPt to the given old insert position.
+ void leaveLocalValueArea(SavePoint Old);
+
virtual ~FastISel();
protected:
- FastISel(MachineFunction &mf,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
- DenseMap<const AllocaInst *, int> &am,
- std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate
-#ifndef NDEBUG
- , SmallSet<const Instruction *, 8> &cil
-#endif
- );
+ explicit FastISel(FunctionLoweringInfo &funcInfo);
/// TargetSelectInstruction - This method is called by target-independent
/// code when the normal FastISel process fails to select an instruction.
@@ -286,7 +288,7 @@ protected:
/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
- void FastEmitBranch(MachineBasicBlock *MBB);
+ void FastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL);
unsigned UpdateValueMap(const Value* I, unsigned Reg);
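
The SavePoint API added above lets callers temporarily redirect emission into the local-value area at the top of the block. A sketch of the intended bracketed pattern (the setup in the middle is illustrative):

    FastISel::SavePoint SP = FastIS->enterLocalValueArea();
    // ... emit block-local setup here, e.g. materialize a constant whose
    // register must dominate every use in the block ...
    FastIS->leaveLocalValueArea(SP);  // restore the saved insert point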
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h b/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
index 4067a5b..c49d1ed 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef FUNCTIONLOWERINGINFO_H
-#define FUNCTIONLOWERINGINFO_H
+#ifndef LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
+#define LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
@@ -25,6 +25,7 @@
#endif
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Support/CallSite.h"
#include <vector>
@@ -80,6 +81,15 @@ public:
/// function arguments that are inserted after scheduling is completed.
SmallVector<MachineInstr*, 8> ArgDbgValues;
+ /// RegFixups - Registers which need to be replaced after isel is done.
+ DenseMap<unsigned, unsigned> RegFixups;
+
+ /// MBB - The current block.
+ MachineBasicBlock *MBB;
+
+ /// InsertPt - The current insert position inside the current block.
+ MachineBasicBlock::iterator InsertPt;
+
#ifndef NDEBUG
SmallSet<const Instruction *, 8> CatchInfoLost;
SmallSet<const Instruction *, 8> CatchInfoFound;
@@ -106,27 +116,27 @@ public:
/// set - Initialize this FunctionLoweringInfo with the given Function
/// and its associated MachineFunction.
///
- void set(const Function &Fn, MachineFunction &MF, bool EnableFastISel);
+ void set(const Function &Fn, MachineFunction &MF);
/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void clear();
- unsigned MakeReg(EVT VT);
-
/// isExportedInst - Return true if the specified value is an instruction
/// exported from its block.
bool isExportedInst(const Value *V) {
return ValueMap.count(V);
}
- unsigned CreateRegForValue(const Value *V);
+ unsigned CreateReg(EVT VT);
+
+ unsigned CreateRegs(const Type *Ty);
unsigned InitializeRegForValue(const Value *V) {
unsigned &R = ValueMap[V];
assert(R == 0 && "Already initialized this value register!");
- return R = CreateRegForValue(V);
+ return R = CreateRegs(V->getType());
}
};
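
After the renaming above, register creation is keyed either to a legalized value type or to an IR type. A short sketch of the distinction (V stands for any IR value):

    unsigned R1 = FuncInfo.CreateReg(MVT::i32);       // one vreg for one EVT
    unsigned R2 = FuncInfo.CreateRegs(V->getType());  // a run of vregs sized
                                                      // from the IR type
    unsigned R3 = FuncInfo.InitializeRegForValue(V);  // CreateRegs + ValueMap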
diff --git a/contrib/llvm/include/llvm/CodeGen/GCMetadata.h b/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
index 6de69cd..b401068 100644
--- a/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
+++ b/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
@@ -1,4 +1,4 @@
-//===-- GCMetadata.h - Garbage collector metadata -------------------------===//
+//===-- GCMetadata.h - Garbage collector metadata ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,7 +14,7 @@
//
// The GCFunctionInfo class logs the data necessary to build a type accurate
// stack map. The code generator outputs:
-//
+//
// - Safe points as specified by the GCStrategy's NeededSafePoints.
// - Stack offsets for GC roots, as specified by calls to llvm.gcroot
//
@@ -42,10 +42,10 @@ namespace llvm {
class GCStrategy;
class Constant;
class MCSymbol;
-
+
namespace GC {
/// PointKind - The type of a collector-safe point.
- ///
+ ///
enum PointKind {
Loop, //< Instr is a loop (backwards branch).
Return, //< Instr is a return instruction.
@@ -53,138 +53,138 @@ namespace llvm {
PostCall //< Instr is the return address of a call.
};
}
-
+
/// GCPoint - Metadata for a collector-safe point in machine code.
- ///
+ ///
struct GCPoint {
GC::PointKind Kind; //< The kind of the safe point.
MCSymbol *Label; //< A label.
-
+
GCPoint(GC::PointKind K, MCSymbol *L) : Kind(K), Label(L) {}
};
-
+
/// GCRoot - Metadata for a pointer to an object managed by the garbage
/// collector.
struct GCRoot {
int Num; //< Usually a frame index.
int StackOffset; //< Offset from the stack pointer.
const Constant *Metadata;//< Metadata straight from the call to llvm.gcroot.
-
+
GCRoot(int N, const Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
};
-
-
+
+
/// GCFunctionInfo - Garbage collection metadata for a single function.
- ///
+ ///
class GCFunctionInfo {
public:
typedef std::vector<GCPoint>::iterator iterator;
typedef std::vector<GCRoot>::iterator roots_iterator;
typedef std::vector<GCRoot>::const_iterator live_iterator;
-
+
private:
const Function &F;
GCStrategy &S;
uint64_t FrameSize;
std::vector<GCRoot> Roots;
std::vector<GCPoint> SafePoints;
-
+
// FIXME: Liveness. A 2D BitVector, perhaps?
- //
+ //
// BitVector Liveness;
- //
+ //
// bool islive(int point, int root) =
// Liveness[point * SafePoints.size() + root]
- //
+ //
// The bit vector is the more compact representation where >3.2% of roots
// are live per safe point (1.5% on 64-bit hosts).
-
+
public:
GCFunctionInfo(const Function &F, GCStrategy &S);
~GCFunctionInfo();
-
+
/// getFunction - Return the function to which this metadata applies.
- ///
+ ///
const Function &getFunction() const { return F; }
-
+
/// getStrategy - Return the GC strategy for the function.
- ///
+ ///
GCStrategy &getStrategy() { return S; }
-
+
/// addStackRoot - Registers a root that lives on the stack. Num is the
/// stack object ID for the alloca (if the code generator is
    /// using MachineFrameInfo).
void addStackRoot(int Num, const Constant *Metadata) {
Roots.push_back(GCRoot(Num, Metadata));
}
-
+
/// addSafePoint - Notes the existence of a safe point. Num is the ID of the
- /// label just prior to the safe point (if the code generator is using
+ /// label just prior to the safe point (if the code generator is using
/// MachineModuleInfo).
void addSafePoint(GC::PointKind Kind, MCSymbol *Label) {
SafePoints.push_back(GCPoint(Kind, Label));
}
-
+
/// getFrameSize/setFrameSize - Records the function's frame size.
- ///
+ ///
uint64_t getFrameSize() const { return FrameSize; }
void setFrameSize(uint64_t S) { FrameSize = S; }
-
+
/// begin/end - Iterators for safe points.
- ///
+ ///
iterator begin() { return SafePoints.begin(); }
iterator end() { return SafePoints.end(); }
size_t size() const { return SafePoints.size(); }
-
+
/// roots_begin/roots_end - Iterators for all roots in the function.
- ///
+ ///
roots_iterator roots_begin() { return Roots.begin(); }
roots_iterator roots_end () { return Roots.end(); }
size_t roots_size() const { return Roots.size(); }
-
+
/// live_begin/live_end - Iterators for live roots at a given safe point.
- ///
+ ///
live_iterator live_begin(const iterator &p) { return roots_begin(); }
live_iterator live_end (const iterator &p) { return roots_end(); }
size_t live_size(const iterator &p) const { return roots_size(); }
};
-
-
+
+
/// GCModuleInfo - Garbage collection metadata for a whole module.
- ///
+ ///
class GCModuleInfo : public ImmutablePass {
typedef StringMap<GCStrategy*> strategy_map_type;
typedef std::vector<GCStrategy*> list_type;
typedef DenseMap<const Function*,GCFunctionInfo*> finfo_map_type;
-
+
strategy_map_type StrategyMap;
list_type StrategyList;
finfo_map_type FInfoMap;
-
+
GCStrategy *getOrCreateStrategy(const Module *M, const std::string &Name);
-
+
public:
typedef list_type::const_iterator iterator;
-
+
static char ID;
-
+
GCModuleInfo();
~GCModuleInfo();
-
+
/// clear - Resets the pass. The metadata deleter pass calls this.
- ///
+ ///
void clear();
-
+
/// begin/end - Iterators for used strategies.
- ///
+ ///
iterator begin() const { return StrategyList.begin(); }
iterator end() const { return StrategyList.end(); }
-
+
/// get - Look up function metadata.
- ///
+ ///
GCFunctionInfo &getFunctionInfo(const Function &F);
};
-
+
}
#endif
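
A hedged sketch of how a consumer walks the metadata these classes collect, pairing each safe point with the roots live there (emitRootEntry is hypothetical):

    GCFunctionInfo &FI = getAnalysis<GCModuleInfo>().getFunctionInfo(F);
    for (GCFunctionInfo::iterator PI = FI.begin(), PE = FI.end(); PI != PE; ++PI)
      for (GCFunctionInfo::live_iterator LI = FI.live_begin(PI),
                                         LE = FI.live_end(PI); LI != LE; ++LI)
        emitRootEntry(PI->Label, LI->StackOffset);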
diff --git a/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h b/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
index 3703545..17a2653 100644
--- a/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
+++ b/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
@@ -25,49 +25,49 @@
#include "llvm/Support/Registry.h"
namespace llvm {
-
+
class GCMetadataPrinter;
-
+
/// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the
/// defaults from Registry.
typedef Registry<GCMetadataPrinter> GCMetadataPrinterRegistry;
-
+
/// GCMetadataPrinter - Emits GC metadata as assembly code.
- ///
+ ///
class GCMetadataPrinter {
public:
typedef GCStrategy::list_type list_type;
typedef GCStrategy::iterator iterator;
-
+
private:
GCStrategy *S;
-
+
friend class AsmPrinter;
-
+
protected:
// May only be subclassed.
GCMetadataPrinter();
-
+
// Do not implement.
GCMetadataPrinter(const GCMetadataPrinter &);
GCMetadataPrinter &operator=(const GCMetadataPrinter &);
-
+
public:
GCStrategy &getStrategy() { return *S; }
const Module &getModule() const { return S->getModule(); }
-
+
/// begin/end - Iterate over the collected function metadata.
iterator begin() { return S->begin(); }
iterator end() { return S->end(); }
-
+
/// beginAssembly/finishAssembly - Emit module metadata as assembly code.
virtual void beginAssembly(AsmPrinter &AP);
-
+
virtual void finishAssembly(AsmPrinter &AP);
-
+
virtual ~GCMetadataPrinter();
};
-
+
}
#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
index a5e9dd5..69de598 100644
--- a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -130,7 +130,7 @@ namespace ISD {
/// This node represents a target intrinsic function with no side effects.
/// The first operand is the ID number of the intrinsic from the
/// llvm::Intrinsic namespace. The operands to the intrinsic follow. The
- /// node has returns the result of the intrinsic.
+ /// node returns the result of the intrinsic.
INTRINSIC_WO_CHAIN,
/// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
@@ -508,8 +508,9 @@ namespace ISD {
CALLSEQ_START, // Beginning of a call sequence
CALLSEQ_END, // End of a call sequence
- // VAARG - VAARG has three operands: an input chain, a pointer, and a
- // SRCVALUE. It returns a pair of values: the vaarg value and a new chain.
+ // VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
+ // and the alignment. It returns a pair of values: the vaarg value and a
+ // new chain.
VAARG,
// VACOPY - VACOPY has five operands: an input chain, a destination pointer,
diff --git a/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h b/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
index b4c2f2f..cd8293d 100644
--- a/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
+++ b/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -33,7 +33,6 @@ namespace {
(void) llvm::createDeadMachineInstructionElimPass();
- (void) llvm::createLocalRegisterAllocator();
(void) llvm::createFastRegisterAllocator();
(void) llvm::createLinearScanRegisterAllocator();
(void) llvm::createPBQPRegisterAllocator();
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveInterval.h b/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
index 637f52b..8d80efb 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
@@ -53,7 +53,7 @@ namespace llvm {
class VNInfo {
private:
enum {
- HAS_PHI_KILL = 1,
+ HAS_PHI_KILL = 1,
REDEF_BY_EC = 1 << 1,
IS_PHI_DEF = 1 << 2,
IS_UNUSED = 1 << 3,
@@ -67,22 +67,14 @@ namespace llvm {
} cr;
public:
- typedef SpecificBumpPtrAllocator<VNInfo> Allocator;
- typedef SmallVector<SlotIndex, 4> KillSet;
+ typedef BumpPtrAllocator Allocator;
/// The ID number of this value.
unsigned id;
-
+
/// The index of the defining instruction (if isDefAccurate() returns true).
SlotIndex def;
- KillSet kills;
-
- /*
- VNInfo(LiveIntervals &li_)
- : defflags(IS_UNUSED), id(~1U) { cr.copy = 0; }
- */
-
/// VNInfo constructor.
/// d is presumed to point to the actual defining instr. If it doesn't
/// setIsDefAccurate(false) should be called after construction.
@@ -91,7 +83,7 @@ namespace llvm {
    /// VNInfo constructor. Copies values from orig, except for the value number.
VNInfo(unsigned i, const VNInfo &orig)
- : flags(orig.flags), cr(orig.cr), id(i), def(orig.def), kills(orig.kills)
+ : flags(orig.flags), cr(orig.cr), id(i), def(orig.def)
{ }
/// Copy from the parameter into this VNInfo.
@@ -99,7 +91,6 @@ namespace llvm {
flags = src.flags;
cr = src.cr;
def = src.def;
- kills = src.kills;
}
/// Used for copying value number info.
@@ -114,7 +105,7 @@ namespace llvm {
/// This method should not be called on stack intervals as it may lead to
/// undefined behavior.
void setCopy(MachineInstr *c) { cr.copy = c; }
-
+
/// For a stack interval, returns the reg which this stack interval was
/// defined from.
/// For a register interval the behaviour of this method is undefined.
@@ -144,7 +135,7 @@ namespace llvm {
else
flags &= ~REDEF_BY_EC;
}
-
+
/// Returns true if this value is defined by a PHI instruction (or was,
    /// PHI instructions may have been eliminated).
bool isPHIDef() const { return flags & IS_PHI_DEF; }
@@ -172,49 +163,9 @@ namespace llvm {
void setIsDefAccurate(bool defAccurate) {
if (defAccurate)
flags |= IS_DEF_ACCURATE;
- else
+ else
flags &= ~IS_DEF_ACCURATE;
}
-
- /// Returns true if the given index is a kill of this value.
- bool isKill(SlotIndex k) const {
- KillSet::const_iterator
- i = std::lower_bound(kills.begin(), kills.end(), k);
- return (i != kills.end() && *i == k);
- }
-
- /// addKill - Add a kill instruction index to the specified value
- /// number.
- void addKill(SlotIndex k) {
- if (kills.empty()) {
- kills.push_back(k);
- } else {
- KillSet::iterator
- i = std::lower_bound(kills.begin(), kills.end(), k);
- kills.insert(i, k);
- }
- }
-
- /// Remove the specified kill index from this value's kills list.
- /// Returns true if the value was present, otherwise returns false.
- bool removeKill(SlotIndex k) {
- KillSet::iterator i = std::lower_bound(kills.begin(), kills.end(), k);
- if (i != kills.end() && *i == k) {
- kills.erase(i);
- return true;
- }
- return false;
- }
-
- /// Remove all kills in the range [s, e).
- void removeKills(SlotIndex s, SlotIndex e) {
- KillSet::iterator
- si = std::lower_bound(kills.begin(), kills.end(), s),
- se = std::upper_bound(kills.begin(), kills.end(), e);
-
- kills.erase(si, se);
- }
-
};
/// LiveRange structure - This represents a simple register range in the
@@ -258,6 +209,8 @@ namespace llvm {
LiveRange(); // DO NOT IMPLEMENT
};
+ template <> struct isPodLike<LiveRange> { static const bool value = true; };
+
raw_ostream& operator<<(raw_ostream& os, const LiveRange &LR);
@@ -366,8 +319,8 @@ namespace llvm {
/// the instruction that defines the value number.
VNInfo *getNextValue(SlotIndex def, MachineInstr *CopyMI,
bool isDefAccurate, VNInfo::Allocator &VNInfoAllocator) {
- VNInfo *VNI = VNInfoAllocator.Allocate();
- new (VNI) VNInfo((unsigned)valnos.size(), def, CopyMI);
+ VNInfo *VNI =
+ new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def, CopyMI);
VNI->setIsDefAccurate(isDefAccurate);
valnos.push_back(VNI);
return VNI;
@@ -377,23 +330,12 @@ namespace llvm {
/// for the Value number.
VNInfo *createValueCopy(const VNInfo *orig,
VNInfo::Allocator &VNInfoAllocator) {
- VNInfo *VNI = VNInfoAllocator.Allocate();
- new (VNI) VNInfo((unsigned)valnos.size(), *orig);
+ VNInfo *VNI =
+ new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), *orig);
valnos.push_back(VNI);
return VNI;
}
- /// addKills - Add a number of kills into the VNInfo kill vector. If this
- /// interval is live at a kill point, then the kill is not added.
- void addKills(VNInfo *VNI, const VNInfo::KillSet &kills) {
- for (unsigned i = 0, e = static_cast<unsigned>(kills.size());
- i != e; ++i) {
- if (!liveBeforeAndAt(kills[i])) {
- VNI->addKill(kills[i]);
- }
- }
- }
-
/// isOnlyLROfValNo - Return true if the specified live range is the only
    /// one defined by its val#.
bool isOnlyLROfValNo(const LiveRange *LR) {
@@ -472,6 +414,17 @@ namespace llvm {
    // range. If it does, then check if the previous live range ends at index-1.
bool liveBeforeAndAt(SlotIndex index) const;
+ /// killedAt - Return true if a live range ends at index. Note that the kill
+ /// point is not contained in the half-open live range. It is usually the
+ /// getDefIndex() slot following its last use.
+ bool killedAt(SlotIndex index) const;
+
+ /// killedInRange - Return true if the interval has kills in [Start,End).
+ /// Note that the kill point is considered the end of a live range, so it is
+ /// not contained in the live range. If a live range ends at End, it won't
+ /// be counted as a kill by this method.
+ bool killedInRange(SlotIndex Start, SlotIndex End) const;
+
/// getLiveRangeContaining - Return the live range that contains the
/// specified index, or null if there is none.
const LiveRange *getLiveRangeContaining(SlotIndex Idx) const {
@@ -486,6 +439,12 @@ namespace llvm {
return I == end() ? 0 : &*I;
}
+ /// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
+ VNInfo *getVNInfoAt(SlotIndex Idx) const {
+ const_iterator I = FindLiveRangeContaining(Idx);
+ return I == end() ? 0 : I->valno;
+ }
+
/// FindLiveRangeContaining - Return an iterator to the live range that
/// contains the specified index, or end() if there is none.
const_iterator FindLiveRangeContaining(SlotIndex Idx) const;
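
The kill lists deleted above give way to the interval-level queries added here. A sketch of the replacement idiom (Idx, UseIdx and NumKills are placeholders):

    // Before: VNI->isKill(Idx) consulted a stored kill vector.
    // After: ask the interval directly; a live range ending exactly at Idx
    // is a kill, and the kill slot itself is outside the half-open range.
    if (LI.killedAt(Idx))
      ++NumKills;
    VNInfo *VNI = LI.getVNInfoAt(UseIdx);  // NULL when UseIdx is not live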
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h b/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
index 32fa709..c136048 100644
--- a/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -133,10 +133,9 @@ namespace llvm {
bool conflictsWithPhysReg(const LiveInterval &li, VirtRegMap &vrm,
unsigned reg);
- /// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
- /// it checks for sub-register reference and it can check use as well.
- bool conflictsWithSubPhysRegRef(LiveInterval &li, unsigned Reg,
- bool CheckUse,
+ /// conflictsWithAliasRef - Similar to conflictsWithPhysRegRef except
+ /// it checks for alias uses and defs.
+ bool conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
SmallPtrSet<MachineInstr*,32> &JoinedCopies);
// Interval creation
@@ -229,10 +228,6 @@ namespace llvm {
VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
- /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
- /// copy field and returns the source register that defines it.
- unsigned getVNInfoSourceReg(const VNInfo *VNI) const;
-
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
virtual void releaseMemory();
@@ -249,12 +244,6 @@ namespace llvm {
addIntervalsForSpills(const LiveInterval& i,
SmallVectorImpl<LiveInterval*> &SpillIs,
const MachineLoopInfo *loopInfo, VirtRegMap& vrm);
-
- /// addIntervalsForSpillsFast - Quickly create new intervals for spilled
- /// defs / uses without remat or splitting.
- std::vector<LiveInterval*>
- addIntervalsForSpillsFast(const LiveInterval &li,
- const MachineLoopInfo *loopInfo, VirtRegMap &vrm);
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
@@ -283,10 +272,6 @@ namespace llvm {
unsigned getNumConflictsWithPhysReg(const LiveInterval &li,
unsigned PhysReg) const;
- /// processImplicitDefs - Process IMPLICIT_DEF instructions. Add isUndef
- /// marker to implicit_def defs and their uses.
- void processImplicitDefs();
-
/// intervalIsInOneMBB - Returns true if the specified interval is entirely
/// within a single basic block.
bool intervalIsInOneMBB(const LiveInterval &li) const;
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index cc651ca..3cfc47a 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -19,6 +19,7 @@
namespace llvm {
+class Pass;
class BasicBlock;
class MachineFunction;
class MCSymbol;
@@ -258,6 +259,11 @@ public:
/// machine basic block (i.e., copies all the successors fromMBB and
/// remove all the successors from fromMBB).
void transferSuccessors(MachineBasicBlock *fromMBB);
+
+ /// transferSuccessorsAndUpdatePHIs - Transfers all the successors, as
+ /// in transferSuccessors, and updates PHI operands in the successor blocks
+ /// which refer to fromMBB to refer to this.
+ void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB);
/// isSuccessor - Return true if the specified MBB is a successor of this
/// block.
@@ -276,11 +282,26 @@ public:
/// branch to do so (e.g., a table jump). True is a conservative answer.
bool canFallThrough();
+ /// Returns a pointer to the first instruction in this block that is not a
+ /// PHINode instruction. When adding instructions to the beginning of the
+ /// basic block, they should be added before the returned value, not before
+ /// the first instruction, which might be a PHI.
+ /// Returns end() if there's no non-PHI instruction.
+ iterator getFirstNonPHI();
+
/// getFirstTerminator - returns an iterator to the first terminator
/// instruction of this basic block. If a terminator does not exist,
/// it returns end()
iterator getFirstTerminator();
+ /// SplitCriticalEdge - Split the critical edge from this block to the
+ /// given successor block, and return the newly created block, or null
+ /// if splitting is not possible.
+ ///
+ /// This function updates LiveVariables, MachineDominatorTree, and
+ /// MachineLoopInfo, as applicable.
+ MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P);
+
void pop_front() { Insts.pop_front(); }
void pop_back() { Insts.pop_back(); }
void push_back(MachineInstr *MI) { Insts.push_back(MI); }
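
A sketch of the new SplitCriticalEdge hook as invoked from inside a machine-function pass, passing the pass itself so the listed analyses can be updated:

    if (MachineBasicBlock *NewMBB = MBB->SplitCriticalEdge(Succ, this)) {
      // The edge MBB -> Succ now runs through NewMBB.
    } else {
      // Splitting was impossible, e.g. the edge is part of a table jump.
    }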
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index fe2c298..9471316 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -33,16 +33,14 @@ class BitVector;
/// callee saved register in the current frame.
class CalleeSavedInfo {
unsigned Reg;
- const TargetRegisterClass *RegClass;
int FrameIdx;
public:
- CalleeSavedInfo(unsigned R, const TargetRegisterClass *RC, int FI = 0)
- : Reg(R), RegClass(RC), FrameIdx(FI) {}
+ explicit CalleeSavedInfo(unsigned R, int FI = 0)
+ : Reg(R), FrameIdx(FI) {}
// Accessors.
unsigned getReg() const { return Reg; }
- const TargetRegisterClass *getRegClass() const { return RegClass; }
int getFrameIdx() const { return FrameIdx; }
void setFrameIdx(int FI) { FrameIdx = FI; }
};
@@ -100,8 +98,7 @@ class MachineFrameInfo {
// cannot alias any other memory objects.
bool isSpillSlot;
- StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM,
- bool isSS)
+ StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM, bool isSS)
: SPOffset(SP), Size(Sz), Alignment(Al), isImmutable(IM),
isSpillSlot(isSS) {}
};
@@ -352,8 +349,7 @@ public:
/// efficiency. By default, fixed objects are immutable. This returns an
/// index with a negative value.
///
- int CreateFixedObject(uint64_t Size, int64_t SPOffset,
- bool Immutable, bool isSS);
+ int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable);
/// isFixedObjectIndex - Returns true if the specified index corresponds to a
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunctionPass.h b/contrib/llvm/include/llvm/CodeGen/MachineFunctionPass.h
index 1a2b129..685e868 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineFunctionPass.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFunctionPass.h
@@ -34,9 +34,6 @@ protected:
explicit MachineFunctionPass(intptr_t ID) : FunctionPass(ID) {}
explicit MachineFunctionPass(void *ID) : FunctionPass(ID) {}
- /// createPrinterPass - Get a machine function printer pass.
- Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const;
-
/// runOnMachineFunction - This method must be overloaded to perform the
/// desired machine code transformation or analysis.
///
@@ -51,7 +48,11 @@ protected:
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
private:
- bool runOnFunction(Function &F);
+ /// createPrinterPass - Get a machine function printer pass.
+ virtual Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const;
+
+ virtual bool runOnFunction(Function &F);
};
} // End llvm namespace
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
index cf691bb..e67b2dd 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -215,9 +215,6 @@ public:
bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
- bool isExtractSubreg() const {
- return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
- }
bool isInsertSubreg() const {
return getOpcode() == TargetOpcode::INSERT_SUBREG;
}
@@ -227,7 +224,22 @@ public:
bool isRegSequence() const {
return getOpcode() == TargetOpcode::REG_SEQUENCE;
}
-
+ bool isCopy() const {
+ return getOpcode() == TargetOpcode::COPY;
+ }
+
+ /// isCopyLike - Return true if the instruction behaves like a copy.
+ /// This does not include native copy instructions.
+ bool isCopyLike() const {
+ return isCopy() || isSubregToReg();
+ }
+
+ /// isIdentityCopy - Return true if the instruction is an identity copy.
+ bool isIdentityCopy() const {
+ return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
+ getOperand(0).getSubReg() == getOperand(1).getSubReg();
+ }
+
/// readsRegister - Return true if the MachineInstr reads the specified
/// register. If TargetRegisterInfo is passed, then it also checks if there
/// is a read of a super-register.
@@ -339,6 +351,11 @@ public:
/// copyPredicates - Copies predicate operand(s) from MI.
void copyPredicates(const MachineInstr *MI);
+ /// substituteRegister - Replace all occurrences of FromReg with ToReg:SubIdx,
+ /// properly composing subreg indices where necessary.
+ void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx,
+ const TargetRegisterInfo &RegInfo);
+
/// addRegisterKilled - We have determined MI kills a register. Look for the
/// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
  /// add an implicit operand if it's not found. Returns true if the operand
@@ -359,6 +376,11 @@ public:
void addRegisterDefined(unsigned IncomingReg,
const TargetRegisterInfo *RegInfo = 0);
+ /// setPhysRegsDeadExcept - Mark every physreg used by this instruction as dead
+ /// except those in the UsedRegs list.
+ void setPhysRegsDeadExcept(const SmallVectorImpl<unsigned> &UsedRegs,
+ const TargetRegisterInfo &TRI);
+
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
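
A sketch of how the copy predicates above are typically consumed by cleanup or coalescing code (Worklist is illustrative):

    if (MI->isIdentityCopy())
      MI->eraseFromParent();   // dst and src (including subregs) already agree
    else if (MI->isCopyLike())
      Worklist.push_back(MI);  // COPY or SUBREG_TO_REG: coalescing candidate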
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
index 1b6ab2c..6264349 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
@@ -74,7 +74,7 @@ private:
JTEntryKind EntryKind;
std::vector<MachineJumpTableEntry> JumpTables;
public:
- MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
+ explicit MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
JTEntryKind getEntryKind() const { return EntryKind; }
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
index 8459a8d..3b3e31e 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
@@ -64,13 +64,13 @@ class MachineLoopInfo : public MachineFunctionPass {
void operator=(const MachineLoopInfo &); // do not implement
MachineLoopInfo(const MachineLoopInfo &); // do not implement
- LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
-
public:
static char ID; // Pass identification, replacement for typeid
MachineLoopInfo() : MachineFunctionPass(&ID) {}
+ LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
+
/// iterator/begin/end - The interface to the top-level loops in the current
/// function.
///
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
index 84aef10..50e38b4 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
@@ -344,7 +344,7 @@ public:
VariableDbgInfo.push_back(std::make_pair(N, std::make_pair(Slot, Loc)));
}
- VariableDbgInfoMapTy &getVariableDbgInfo() { return VariableDbgInfo; }
+ VariableDbgInfoMapTy &getVariableDbgInfo();
}; // End class MachineModuleInfo
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineOperand.h b/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
index 31858ce..afa2c29 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
@@ -27,6 +27,7 @@ class MachineInstr;
class MachineRegisterInfo;
class MDNode;
class TargetMachine;
+class TargetRegisterInfo;
class raw_ostream;
class MCSymbol;
@@ -246,7 +247,20 @@ public:
assert(isReg() && "Wrong MachineOperand accessor");
SubReg = (unsigned char)subReg;
}
-
+
+ /// substVirtReg - Substitute the current register with the virtual
+ /// subregister Reg:SubReg. Take any existing SubReg index into account,
+ /// using TargetRegisterInfo to compose the subreg indices if necessary.
+ /// Reg must be a virtual register, SubIdx can be 0.
+ ///
+ void substVirtReg(unsigned Reg, unsigned SubIdx, const TargetRegisterInfo&);
+
+ /// substPhysReg - Substitute the current register with the physical register
+ /// Reg, taking any existing SubReg into account. For instance,
+ /// substPhysReg(%EAX) will change %reg1024:sub_8bit to %AL.
+ ///
+ void substPhysReg(unsigned Reg, const TargetRegisterInfo&);
+
void setIsUse(bool Val = true) {
assert(isReg() && "Wrong MachineOperand accessor");
assert((Val || !isDebug()) && "Marking a debug operation as def");
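
Following the %EAX example in the comment above, a brief sketch of the two substitution helpers (register names and indices are placeholders):

    MO.substVirtReg(NewVReg, SubIdx, *TRI);  // composes SubIdx with any subreg
                                             // index already on the operand
    MO.substPhysReg(X86::EAX, *TRI);         // %reg1024:sub_8bit becomes %AL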
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
index fa14fdc..066c91b 100644
--- a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -35,7 +35,7 @@ class MachineRegisterInfo {
/// RegClassVRegMap - This vector acts as a map from TargetRegisterClass to
/// virtual registers. For each target register class, it keeps a list of
/// virtual registers belonging to the class.
- std::vector<std::vector<unsigned> > RegClass2VRegMap;
+ std::vector<unsigned> *RegClass2VRegMap;
/// RegAllocHints - This vector records register allocation hints for virtual
/// registers. For each virtual register, it keeps a register and hint type
@@ -363,7 +363,18 @@ public:
defusechain_iterator operator++(int) { // Postincrement
defusechain_iterator tmp = *this; ++*this; return tmp;
}
-
+
+ /// skipInstruction - move forward until reaching a different instruction.
+ /// Return the skipped instruction that is no longer pointed to, or NULL if
+ /// already pointing to end().
+ MachineInstr *skipInstruction() {
+ if (!Op) return 0;
+ MachineInstr *MI = Op->getParent();
+ do ++*this;
+ while (Op && Op->getParent() == MI);
+ return MI;
+ }
+
MachineOperand &getOperand() const {
assert(Op && "Cannot dereference end iterator!");
return *Op;
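
The skipInstruction helper above supports walking a register's def/use chain one instruction at a time, even when an instruction touches the register in several operands. A sketch:

    for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
         MachineInstr *MI = RI.skipInstruction();) {
      // Each instruction using or defining Reg is visited exactly once.
    }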
diff --git a/contrib/llvm/include/llvm/CodeGen/Passes.h b/contrib/llvm/include/llvm/CodeGen/Passes.h
index 2f5d576..7445ec7 100644
--- a/contrib/llvm/include/llvm/CodeGen/Passes.h
+++ b/contrib/llvm/include/llvm/CodeGen/Passes.h
@@ -85,15 +85,10 @@ namespace llvm {
///
FunctionPass *createDeadMachineInstructionElimPass();
- /// Creates a register allocator as the user specified on the command line.
+ /// Creates a register allocator as the user specified on the command line, or
+ /// picks one that matches OptLevel.
///
- FunctionPass *createRegisterAllocator();
-
- /// LocalRegisterAllocation Pass - This pass register allocates the input code
- /// a basic block at a time, yielding code better than the simple register
- /// allocator, but not as good as a global allocator.
- ///
- FunctionPass *createLocalRegisterAllocator();
+ FunctionPass *createRegisterAllocator(CodeGenOpt::Level OptLevel);
/// FastRegisterAllocation Pass - This pass register allocates as fast as
/// possible. It is best suited for debug code where live ranges are short.
@@ -147,10 +142,6 @@ namespace llvm {
/// headers to target specific alignment boundary.
FunctionPass *createCodePlacementOptPass();
- /// getRegisterAllocator - This creates an instance of the register allocator
- /// for the Sparc.
- FunctionPass *getRegisterAllocator(TargetMachine &T);
-
/// IntrinsicLowering Pass - Performs target-independent LLVM IR
/// transformations for highly portable strategies.
FunctionPass *createGCLoweringPass();
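
A sketch of how a codegen driver adapts to the factory change above (the pass-manager context is assumed):

    // Previously: PM.add(createRegisterAllocator());
    PM.add(createRegisterAllocator(CodeGenOpt::Default));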
diff --git a/contrib/llvm/include/llvm/CodeGen/PostRAHazardRecognizer.h b/contrib/llvm/include/llvm/CodeGen/PostRAHazardRecognizer.h
new file mode 100644
index 0000000..24d73cb
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/PostRAHazardRecognizer.h
@@ -0,0 +1,94 @@
+//=- llvm/CodeGen/PostRAHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PostRAHazardRecognizer class, which
+// implements hazard-avoidance heuristics for scheduling, based on the
+// scheduling itineraries specified for the target.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_EXACTHAZARDRECOGNIZER_H
+#define LLVM_CODEGEN_EXACTHAZARDRECOGNIZER_H
+
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/System/DataTypes.h"
+
+#include <cassert>
+#include <cstring>
+#include <string>
+
+namespace llvm {
+
+class InstrItineraryData;
+class SUnit;
+
+class PostRAHazardRecognizer : public ScheduleHazardRecognizer {
+ // ScoreBoard to track function unit usage. ScoreBoard[0] is a
+ // mask of the FUs in use in the cycle currently being
+ // scheduled. ScoreBoard[1] is a mask for the next cycle. The
+ // ScoreBoard is used as a circular buffer with the current cycle
+ // indicated by Head.
+ class ScoreBoard {
+ unsigned *Data;
+
+ // The maximum number of cycles monitored by the Scoreboard. This
+ // value is determined based on the target itineraries to ensure
+ // that all hazards can be tracked.
+ size_t Depth;
+ // Index into the Scoreboard that represents the current cycle.
+ size_t Head;
+ public:
+ ScoreBoard():Data(NULL), Depth(0), Head(0) { }
+ ~ScoreBoard() {
+ delete[] Data;
+ }
+
+ size_t getDepth() const { return Depth; }
+ unsigned& operator[](size_t idx) const {
+ assert(Depth && "ScoreBoard was not initialized properly!");
+
+ return Data[(Head + idx) % Depth];
+ }
+
+ void reset(size_t d = 1) {
+ if (Data == NULL) {
+ Depth = d;
+ Data = new unsigned[Depth];
+ }
+
+ memset(Data, 0, Depth * sizeof(Data[0]));
+ Head = 0;
+ }
+
+ void advance() {
+ Head = (Head + 1) % Depth;
+ }
+
+ // Print the scoreboard.
+ void dump() const;
+ };
+
+ // Itinerary data for the target.
+ const InstrItineraryData &ItinData;
+
+ ScoreBoard ReservedScoreboard;
+ ScoreBoard RequiredScoreboard;
+
+public:
+ PostRAHazardRecognizer(const InstrItineraryData &ItinData);
+
+ virtual HazardType getHazardType(SUnit *SU);
+ virtual void Reset();
+ virtual void EmitInstruction(SUnit *SU);
+ virtual void AdvanceCycle();
+};
+
+}
+
+#endif
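
A worked illustration of the ScoreBoard's circular indexing; the class is private to the recognizer, so this is explanatory only (a Depth of 4 and the FUnits mask are assumptions):

    ScoreBoard SB;
    SB.reset(4);      // Depth = 4, all masks cleared, Head = 0
    SB[2] |= FUnits;  // reserve function units two cycles ahead
    SB.advance();     // Head = 1: the old SB[1] is now SB[0], and the slot
                      // that held the finished cycle is recycled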
diff --git a/contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h b/contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h
index cec867f..30477b9 100644
--- a/contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h
+++ b/contrib/llvm/include/llvm/CodeGen/ProcessImplicitDefs.h
@@ -12,6 +12,7 @@
#define LLVM_CODEGEN_PROCESSIMPLICITDEFS_H
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/ADT/SmallSet.h"
namespace llvm {
@@ -24,7 +25,8 @@ namespace llvm {
private:
bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
- unsigned OpIdx, const TargetInstrInfo *tii_);
+ unsigned OpIdx, const TargetInstrInfo *tii_,
+ SmallSet<unsigned, 8> &ImpDefRegs);
public:
static char ID;
diff --git a/contrib/llvm/include/llvm/CodeGen/RegisterCoalescer.h b/contrib/llvm/include/llvm/CodeGen/RegisterCoalescer.h
index 1490aa0..7644433 100644
--- a/contrib/llvm/include/llvm/CodeGen/RegisterCoalescer.h
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterCoalescer.h
@@ -25,6 +25,9 @@ namespace llvm {
class RegallocQuery;
class AnalysisUsage;
class MachineInstr;
+ class TargetRegisterInfo;
+ class TargetRegisterClass;
+ class TargetInstrInfo;
/// An abstract interface for register coalescers. Coalescers must
/// implement this interface to be part of the coalescer analysis
@@ -141,6 +144,93 @@ namespace llvm {
return true;
}
};
+
+
+ /// CoalescerPair - A helper class for register coalescers. When deciding if
+ /// two registers can be coalesced, CoalescerPair can determine if a copy
+ /// instruction would become an identity copy after coalescing.
+ class CoalescerPair {
+ const TargetInstrInfo &tii_;
+ const TargetRegisterInfo &tri_;
+
+ /// dstReg_ - The register that will be left after coalescing. It can be a
+ /// virtual or physical register.
+ unsigned dstReg_;
+
+ /// srcReg_ - the virtual register that will be coalesced into dstReg.
+ unsigned srcReg_;
+
+ /// subIdx_ - The subregister index of srcReg_ in dstReg_. It is possible to
+ /// coalesce srcReg_ into a subreg of the larger dstReg_ when dstReg_ is a
+ /// virtual register.
+ unsigned subIdx_;
+
+ /// partial_ - True when the original copy was a partial subregister copy.
+ bool partial_;
+
+ /// crossClass_ - True when both regs are virtual, and newRC is constrained.
+ bool crossClass_;
+
+ /// flipped_ - True when DstReg and SrcReg are reversed from the original copy
+ /// instruction.
+ bool flipped_;
+
+ /// newRC_ - The register class of the coalesced register, or NULL if dstReg_
+ /// is a physreg.
+ const TargetRegisterClass *newRC_;
+
+ /// compose - Compose subreg indices a and b, either may be 0.
+ unsigned compose(unsigned, unsigned) const;
+
+ /// isMoveInstr - Return true if MI is a move or subreg instruction.
+ bool isMoveInstr(const MachineInstr *MI, unsigned &Src, unsigned &Dst,
+ unsigned &SrcSub, unsigned &DstSub) const;
+
+ public:
+ CoalescerPair(const TargetInstrInfo &tii, const TargetRegisterInfo &tri)
+ : tii_(tii), tri_(tri), dstReg_(0), srcReg_(0), subIdx_(0),
+ partial_(false), crossClass_(false), flipped_(false), newRC_(0) {}
+
+ /// setRegisters - set registers to match the copy instruction MI. Return
+ /// false if MI is not a coalescable copy instruction.
+ bool setRegisters(const MachineInstr*);
+
+ /// flip - Swap srcReg_ and dstReg_. Return false if swapping is impossible
+ /// because dstReg_ is a physical register, or subIdx_ is set.
+ bool flip();
+
+ /// isCoalescable - Return true if MI is a copy instruction that will become
+ /// an identity copy after coalescing.
+ bool isCoalescable(const MachineInstr*) const;
+
+ /// isPhys - Return true if DstReg is a physical register.
+ bool isPhys() const { return !newRC_; }
+
+ /// isPartial - Return true if the original copy instruction did not copy the
+ /// full register, but was a subreg operation.
+ bool isPartial() const { return partial_; }
+
+ /// isCrossClass - Return true if DstReg is virtual and NewRC is a smaller
+ /// register class than DstReg's.
+ bool isCrossClass() const { return crossClass_; }
+
+ /// isFlipped - Return true when getSrcReg is the register being defined by
+ /// the original copy instruction.
+ bool isFlipped() const { return flipped_; }
+
+ /// getDstReg - Return the register (virtual or physical) that will remain
+ /// after coalescing.
+ unsigned getDstReg() const { return dstReg_; }
+
+ /// getSrcReg - Return the virtual register that will be coalesced away.
+ unsigned getSrcReg() const { return srcReg_; }
+
+ /// getSubIdx - Return the subregister index in DstReg that SrcReg will be
+ /// coalesced into, or 0.
+ unsigned getSubIdx() const { return subIdx_; }
+
+ /// getNewRC - Return the register class of the coalesced register.
+ const TargetRegisterClass *getNewRC() const { return newRC_; }
+ };
}
// Because of the way .a files work, we must force the SimpleRC
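
A hedged sketch of the call pattern CoalescerPair supports (the cleanup and join steps are hypothetical):

    CoalescerPair CP(*TII, *TRI);
    if (!CP.setRegisters(CopyMI))
      return false;                    // not a coalescable copy
    if (CP.isCoalescable(OtherCopyMI))
      eraseIdentityCopy(OtherCopyMI);  // would be a no-op after joining
    JoinIntervals(CP);                 // join src into dst at CP.getSubIdx()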
diff --git a/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h b/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h
index 84b726d..246831c 100644
--- a/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h
@@ -98,6 +98,10 @@ public:
/// getRegsUsed - return all registers currently in use in used.
void getRegsUsed(BitVector &used, bool includeReserved);
+ /// getRegsAvailable - Return all available registers in the register class
+ /// in Mask.
+ void getRegsAvailable(const TargetRegisterClass *RC, BitVector &Mask);
+
/// FindUnusedReg - Find a unused register of the specified register class.
/// Return 0 if none is found.
unsigned FindUnusedReg(const TargetRegisterClass *RegClass) const;
@@ -147,7 +151,12 @@ private:
/// Add Reg and its aliases to BV.
void addRegWithAliases(BitVector &BV, unsigned Reg);
- unsigned findSurvivorReg(MachineBasicBlock::iterator MI,
+ /// findSurvivorReg - Return the candidate register that is unused for the
+ /// longest after StartMI. UseMI is set to the instruction where the search
+ /// stopped.
+ ///
+ /// No more than InstrLimit instructions are inspected.
+ unsigned findSurvivorReg(MachineBasicBlock::iterator StartMI,
BitVector &Candidates,
unsigned InstrLimit,
MachineBasicBlock::iterator &UseMI);
diff --git a/contrib/llvm/include/llvm/CodeGen/RuntimeLibcalls.h b/contrib/llvm/include/llvm/CodeGen/RuntimeLibcalls.h
index 42ae563..a51e82a 100644
--- a/contrib/llvm/include/llvm/CodeGen/RuntimeLibcalls.h
+++ b/contrib/llvm/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -247,6 +247,40 @@ namespace RTLIB {
// EXCEPTION HANDLING
UNWIND_RESUME,
+ // Family ATOMICs
+ SYNC_VAL_COMPARE_AND_SWAP_1,
+ SYNC_VAL_COMPARE_AND_SWAP_2,
+ SYNC_VAL_COMPARE_AND_SWAP_4,
+ SYNC_VAL_COMPARE_AND_SWAP_8,
+ SYNC_LOCK_TEST_AND_SET_1,
+ SYNC_LOCK_TEST_AND_SET_2,
+ SYNC_LOCK_TEST_AND_SET_4,
+ SYNC_LOCK_TEST_AND_SET_8,
+ SYNC_FETCH_AND_ADD_1,
+ SYNC_FETCH_AND_ADD_2,
+ SYNC_FETCH_AND_ADD_4,
+ SYNC_FETCH_AND_ADD_8,
+ SYNC_FETCH_AND_SUB_1,
+ SYNC_FETCH_AND_SUB_2,
+ SYNC_FETCH_AND_SUB_4,
+ SYNC_FETCH_AND_SUB_8,
+ SYNC_FETCH_AND_AND_1,
+ SYNC_FETCH_AND_AND_2,
+ SYNC_FETCH_AND_AND_4,
+ SYNC_FETCH_AND_AND_8,
+ SYNC_FETCH_AND_OR_1,
+ SYNC_FETCH_AND_OR_2,
+ SYNC_FETCH_AND_OR_4,
+ SYNC_FETCH_AND_OR_8,
+ SYNC_FETCH_AND_XOR_1,
+ SYNC_FETCH_AND_XOR_2,
+ SYNC_FETCH_AND_XOR_4,
+ SYNC_FETCH_AND_XOR_8,
+ SYNC_FETCH_AND_NAND_1,
+ SYNC_FETCH_AND_NAND_2,
+ SYNC_FETCH_AND_NAND_4,
+ SYNC_FETCH_AND_NAND_8,
+
UNKNOWN_LIBCALL
};
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
index 97202bd..de49d18 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -29,7 +29,6 @@
namespace llvm {
class AliasAnalysis;
-class FunctionLoweringInfo;
class MachineConstantPoolValue;
class MachineFunction;
class MDNode;
@@ -134,7 +133,6 @@ class SelectionDAG {
const TargetLowering &TLI;
const TargetSelectionDAGInfo &TSI;
MachineFunction *MF;
- FunctionLoweringInfo &FLI;
LLVMContext *Context;
/// EntryNode - The starting token.
@@ -187,7 +185,7 @@ class SelectionDAG {
SelectionDAG(const SelectionDAG&); // Do not implement.
public:
- SelectionDAG(const TargetMachine &TM, FunctionLoweringInfo &fli);
+ explicit SelectionDAG(const TargetMachine &TM);
~SelectionDAG();
/// init - Prepare this SelectionDAG to process code in the given
@@ -204,7 +202,6 @@ public:
const TargetMachine &getTarget() const { return TM; }
const TargetLowering &getTargetLoweringInfo() const { return TLI; }
const TargetSelectionDAGInfo &getSelectionDAGInfo() const { return TSI; }
- FunctionLoweringInfo &getFunctionLoweringInfo() const { return FLI; }
LLVMContext *getContext() const {return Context; }
/// viewGraph - Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
@@ -351,13 +348,13 @@ public:
SDValue getTargetConstantFP(const ConstantFP &Val, EVT VT) {
return getConstantFP(Val, VT, true);
}
- SDValue getGlobalAddress(const GlobalValue *GV, EVT VT,
+ SDValue getGlobalAddress(const GlobalValue *GV, DebugLoc DL, EVT VT,
int64_t offset = 0, bool isTargetGA = false,
unsigned char TargetFlags = 0);
- SDValue getTargetGlobalAddress(const GlobalValue *GV, EVT VT,
+ SDValue getTargetGlobalAddress(const GlobalValue *GV, DebugLoc DL, EVT VT,
int64_t offset = 0,
unsigned char TargetFlags = 0) {
- return getGlobalAddress(GV, VT, offset, true, TargetFlags);
+ return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
}
SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
SDValue getTargetFrameIndex(int FI, EVT VT) {
@@ -585,7 +582,7 @@ public:
/// getVAArg - VAArg produces a result and token chain, and takes a pointer
/// and a source value as input.
SDValue getVAArg(EVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
- SDValue SV);
+ SDValue SV, unsigned Align);
/// getAtomic - Gets a node for an atomic op, produces result and chain and
/// takes 3 operands
@@ -635,18 +632,20 @@ public:
SDValue getLoad(EVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
const Value *SV, int SVOffset, bool isVolatile,
bool isNonTemporal, unsigned Alignment);
- SDValue getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
+ SDValue getExtLoad(ISD::LoadExtType ExtType, EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr, const Value *SV,
int SVOffset, EVT MemVT, bool isVolatile,
bool isNonTemporal, unsigned Alignment);
SDValue getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
- SDValue Offset, ISD::MemIndexedMode AM);
- SDValue getLoad(ISD::MemIndexedMode AM, DebugLoc dl, ISD::LoadExtType ExtType,
- EVT VT, SDValue Chain, SDValue Ptr, SDValue Offset,
+ SDValue Offset, ISD::MemIndexedMode AM);
+ SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ EVT VT, DebugLoc dl,
+ SDValue Chain, SDValue Ptr, SDValue Offset,
const Value *SV, int SVOffset, EVT MemVT,
bool isVolatile, bool isNonTemporal, unsigned Alignment);
- SDValue getLoad(ISD::MemIndexedMode AM, DebugLoc dl, ISD::LoadExtType ExtType,
- EVT VT, SDValue Chain, SDValue Ptr, SDValue Offset,
+ SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ EVT VT, DebugLoc dl,
+ SDValue Chain, SDValue Ptr, SDValue Offset,
EVT MemVT, MachineMemOperand *MMO);
/// getStore - Helper function to build ISD::STORE nodes.
@@ -681,15 +680,15 @@ public:
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
- SDValue UpdateNodeOperands(SDValue N, SDValue Op);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4, SDValue Op5);
- SDValue UpdateNodeOperands(SDValue N,
+ SDNode *UpdateNodeOperands(SDNode *N,
const SDValue *Ops, unsigned NumOps);
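
Caller code changes shape accordingly; a sketch with N, Op0, Op1, GV, and PtrVT assumed in scope:

  // Before: SDValue Res = DAG.UpdateNodeOperands(SDValue(N, 0), Op0, Op1);
  SDNode *Res = DAG.UpdateNodeOperands(N, Op0, Op1);
  // Global address nodes now also carry a debug location:
  SDValue TGA = DAG.getTargetGlobalAddress(GV, N->getDebugLoc(), PtrVT);
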
/// SelectNodeTo - These are used for target selectors to *mutate* the
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
index 3817580..01d05dd 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
@@ -280,19 +280,16 @@ private:
SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo);
- void PrepareEHLandingPad(MachineBasicBlock *BB);
+ void PrepareEHLandingPad();
void SelectAllBasicBlocks(const Function &Fn);
- void FinishBasicBlock(MachineBasicBlock *BB);
+ void FinishBasicBlock();
- MachineBasicBlock *SelectBasicBlock(MachineBasicBlock *BB,
- const BasicBlock *LLVMBB,
- BasicBlock::const_iterator Begin,
- BasicBlock::const_iterator End,
- bool &HadTailCall);
- MachineBasicBlock *CodeGenAndEmitDAG(MachineBasicBlock *BB);
+ void SelectBasicBlock(BasicBlock::const_iterator Begin,
+ BasicBlock::const_iterator End,
+ bool &HadTailCall);
+ void CodeGenAndEmitDAG();
void LowerArguments(const BasicBlock *BB);
- void ShrinkDemandedOps();
void ComputeLiveOutVRegInfo();
/// Create the scheduler. If a specific scheduler was specified
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index fd529b6..4cf6f36 100644
--- a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -549,6 +549,15 @@ public:
return FoundNode;
}
+ /// getFlaggedUser - If this node has a flag value with a user, return
+ /// the user (there is at most one). Otherwise return NULL.
+ SDNode *getFlaggedUser() const {
+ for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
+ if (UI.getUse().get().getValueType() == MVT::Flag)
+ return *UI;
+ return 0;
+ }
+
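
A usage sketch for the new accessor (ScheduleTogether is a hypothetical helper, not part of this patch):

  if (SDNode *FlagUser = N->getFlaggedUser()) {
    // N's MVT::Flag result is consumed by FlagUser, so the two nodes
    // must stay adjacent when emitted.
    ScheduleTogether(N, FlagUser);
  }
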
/// getNumValues - Return the number of values defined/returned by this
/// operator.
///
@@ -1082,6 +1091,7 @@ public:
uint64_t getZExtValue() const { return Value->getZExtValue(); }
int64_t getSExtValue() const { return Value->getSExtValue(); }
+ bool isOne() const { return Value->isOne(); }
bool isNullValue() const { return Value->isNullValue(); }
bool isAllOnesValue() const { return Value->isAllOnesValue(); }
@@ -1130,7 +1140,7 @@ public:
}
bool isExactlyValue(const APFloat& V) const;
- bool isValueValidForType(EVT VT, const APFloat& Val);
+ static bool isValueValidForType(EVT VT, const APFloat& Val);
static bool classof(const ConstantFPSDNode *) { return true; }
static bool classof(const SDNode *N) {
@@ -1144,7 +1154,7 @@ class GlobalAddressSDNode : public SDNode {
int64_t Offset;
unsigned char TargetFlags;
friend class SelectionDAG;
- GlobalAddressSDNode(unsigned Opc, const GlobalValue *GA, EVT VT,
+ GlobalAddressSDNode(unsigned Opc, DebugLoc DL, const GlobalValue *GA, EVT VT,
int64_t o, unsigned char TargetFlags);
public:
@@ -1454,125 +1464,6 @@ public:
}
};
-namespace ISD {
- struct ArgFlagsTy {
- private:
- static const uint64_t NoFlagSet = 0ULL;
- static const uint64_t ZExt = 1ULL<<0; ///< Zero extended
- static const uint64_t ZExtOffs = 0;
- static const uint64_t SExt = 1ULL<<1; ///< Sign extended
- static const uint64_t SExtOffs = 1;
- static const uint64_t InReg = 1ULL<<2; ///< Passed in register
- static const uint64_t InRegOffs = 2;
- static const uint64_t SRet = 1ULL<<3; ///< Hidden struct-ret ptr
- static const uint64_t SRetOffs = 3;
- static const uint64_t ByVal = 1ULL<<4; ///< Struct passed by value
- static const uint64_t ByValOffs = 4;
- static const uint64_t Nest = 1ULL<<5; ///< Nested fn static chain
- static const uint64_t NestOffs = 5;
- static const uint64_t ByValAlign = 0xFULL << 6; //< Struct alignment
- static const uint64_t ByValAlignOffs = 6;
- static const uint64_t Split = 1ULL << 10;
- static const uint64_t SplitOffs = 10;
- static const uint64_t OrigAlign = 0x1FULL<<27;
- static const uint64_t OrigAlignOffs = 27;
- static const uint64_t ByValSize = 0xffffffffULL << 32; //< Struct size
- static const uint64_t ByValSizeOffs = 32;
-
- static const uint64_t One = 1ULL; //< 1 of this type, for shifts
-
- uint64_t Flags;
- public:
- ArgFlagsTy() : Flags(0) { }
-
- bool isZExt() const { return Flags & ZExt; }
- void setZExt() { Flags |= One << ZExtOffs; }
-
- bool isSExt() const { return Flags & SExt; }
- void setSExt() { Flags |= One << SExtOffs; }
-
- bool isInReg() const { return Flags & InReg; }
- void setInReg() { Flags |= One << InRegOffs; }
-
- bool isSRet() const { return Flags & SRet; }
- void setSRet() { Flags |= One << SRetOffs; }
-
- bool isByVal() const { return Flags & ByVal; }
- void setByVal() { Flags |= One << ByValOffs; }
-
- bool isNest() const { return Flags & Nest; }
- void setNest() { Flags |= One << NestOffs; }
-
- unsigned getByValAlign() const {
- return (unsigned)
- ((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
- }
- void setByValAlign(unsigned A) {
- Flags = (Flags & ~ByValAlign) |
- (uint64_t(Log2_32(A) + 1) << ByValAlignOffs);
- }
-
- bool isSplit() const { return Flags & Split; }
- void setSplit() { Flags |= One << SplitOffs; }
-
- unsigned getOrigAlign() const {
- return (unsigned)
- ((One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2);
- }
- void setOrigAlign(unsigned A) {
- Flags = (Flags & ~OrigAlign) |
- (uint64_t(Log2_32(A) + 1) << OrigAlignOffs);
- }
-
- unsigned getByValSize() const {
- return (unsigned)((Flags & ByValSize) >> ByValSizeOffs);
- }
- void setByValSize(unsigned S) {
- Flags = (Flags & ~ByValSize) | (uint64_t(S) << ByValSizeOffs);
- }
-
- /// getArgFlagsString - Returns the flags as a string, eg: "zext align:4".
- std::string getArgFlagsString();
-
- /// getRawBits - Represent the flags as a bunch of bits.
- uint64_t getRawBits() const { return Flags; }
- };
-
- /// InputArg - This struct carries flags and type information about a
- /// single incoming (formal) argument or incoming (from the perspective
- /// of the caller) return value virtual register.
- ///
- struct InputArg {
- ArgFlagsTy Flags;
- EVT VT;
- bool Used;
-
- InputArg() : VT(MVT::Other), Used(false) {}
- InputArg(ISD::ArgFlagsTy flags, EVT vt, bool used)
- : Flags(flags), VT(vt), Used(used) {
- assert(VT.isSimple() &&
- "InputArg value type must be Simple!");
- }
- };
-
- /// OutputArg - This struct carries flags and a value for a
- /// single outgoing (actual) argument or outgoing (from the perspective
- /// of the caller) return value virtual register.
- ///
- struct OutputArg {
- ArgFlagsTy Flags;
- SDValue Val;
- bool IsFixed;
-
- OutputArg() : IsFixed(false) {}
- OutputArg(ISD::ArgFlagsTy flags, SDValue val, bool isfixed)
- : Flags(flags), Val(val), IsFixed(isfixed) {
- assert(Val.getValueType().isSimple() &&
- "OutputArg value type must be Simple!");
- }
- };
-}
-
/// VTSDNode - This class is used to represent EVT's, which are used
/// to parameterize some operations.
class VTSDNode : public SDNode {
diff --git a/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h b/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
index 3c56d0d..f1f047b 100644
--- a/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
+++ b/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
@@ -23,6 +23,7 @@
#define LLVM_CODEGEN_SLOTINDEXES_H
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
@@ -663,15 +664,20 @@ namespace llvm {
MachineBasicBlock::iterator miItr(mi);
bool needRenumber = false;
IndexListEntry *newEntry;
-
+ // Get previous index, considering that not all instructions are indexed.
IndexListEntry *prevEntry;
- if (miItr == mbb->begin()) {
+ for (;;) {
// If mi is at the mbb beginning, get the prev index from the mbb.
- prevEntry = &mbbRangeItr->second.first.entry();
- } else {
- // Otherwise get it from the previous instr.
- MachineBasicBlock::iterator pItr(prior(miItr));
- prevEntry = &getInstructionIndex(pItr).entry();
+ if (miItr == mbb->begin()) {
+ prevEntry = &mbbRangeItr->second.first.entry();
+ break;
+ }
+ // Otherwise rewind until we find a mapped instruction.
+ Mi2IndexMap::const_iterator itr = mi2iMap.find(--miItr);
+ if (itr != mi2iMap.end()) {
+ prevEntry = &itr->second.entry();
+ break;
+ }
}
// Get next entry from previous entry.
@@ -757,6 +763,47 @@ namespace llvm {
mi2iMap.insert(std::make_pair(newMI, replaceBaseIndex));
}
+ /// Add the given MachineBasicBlock into the maps.
+ void insertMBBInMaps(MachineBasicBlock *mbb) {
+ MachineFunction::iterator nextMBB =
+ llvm::next(MachineFunction::iterator(mbb));
+ IndexListEntry *startEntry = createEntry(0, 0);
+ IndexListEntry *terminatorEntry = createEntry(0, 0);
+ IndexListEntry *nextEntry = 0;
+
+ if (nextMBB == mbb->getParent()->end()) {
+ nextEntry = getTail();
+ } else {
+ nextEntry = &getMBBStartIdx(nextMBB).entry();
+ }
+
+ insert(nextEntry, startEntry);
+ insert(nextEntry, terminatorEntry);
+
+ SlotIndex startIdx(startEntry, SlotIndex::LOAD);
+ SlotIndex terminatorIdx(terminatorEntry, SlotIndex::PHI_BIT);
+ SlotIndex endIdx(nextEntry, SlotIndex::LOAD);
+
+ terminatorGaps.insert(
+ std::make_pair(mbb, terminatorIdx));
+
+ mbb2IdxMap.insert(
+ std::make_pair(mbb, std::make_pair(startIdx, endIdx)));
+
+ idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
+
+ if (MachineFunction::iterator(mbb) != mbb->getParent()->begin()) {
+ // Have to update the end index of the previous block.
+ MachineBasicBlock *priorMBB =
+ llvm::prior(MachineFunction::iterator(mbb));
+ mbb2IdxMap[priorMBB].second = startIdx;
+ }
+
+ renumberIndexes();
+ std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+
+ }
+
};
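
The intended call pattern, sketched for code that creates a block mid-function (MF, LLVMBB, InsertPt, and the SlotIndexes result Indexes are assumed in scope):

  MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(LLVMBB);
  MF.insert(InsertPt, NewMBB);
  Indexes->insertMBBInMaps(NewMBB);  // keeps the index maps consistent
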
diff --git a/contrib/llvm/include/llvm/Config/config.h.in b/contrib/llvm/include/llvm/Config/config.h.in
index 99d2ab5..d12f82a 100644
--- a/contrib/llvm/include/llvm/Config/config.h.in
+++ b/contrib/llvm/include/llvm/Config/config.h.in
@@ -63,6 +63,9 @@
/* Define to 1 if you have the `closedir' function. */
#undef HAVE_CLOSEDIR
+/* Define to 1 if you have the <CrashReporterClient.h> header file. */
+#undef HAVE_CRASHREPORTERCLIENT_H
+
/* Define to 1 if you have the <ctype.h> header file. */
#undef HAVE_CTYPE_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h b/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
index c3f1902..3287b39 100644
--- a/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
+++ b/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -174,8 +174,8 @@ public:
CodeGenOpt::Level OptLevel =
CodeGenOpt::Default,
bool GVsWithCode = true,
- CodeModel::Model CMM =
- CodeModel::Default);
+ CodeModel::Model CMM =
+ CodeModel::Default);
/// addModule - Add a Module to the list of modules that we can JIT from.
/// Note that this takes ownership of the Module: when the ExecutionEngine is
diff --git a/contrib/llvm/include/llvm/GlobalValue.h b/contrib/llvm/include/llvm/GlobalValue.h
index 658967d..d175080 100644
--- a/contrib/llvm/include/llvm/GlobalValue.h
+++ b/contrib/llvm/include/llvm/GlobalValue.h
@@ -40,6 +40,7 @@ public:
InternalLinkage, ///< Rename collisions when linking (static functions).
PrivateLinkage, ///< Like Internal, but omit from symbol table.
LinkerPrivateLinkage, ///< Like Private, but linker removes.
+ LinkerPrivateWeakLinkage, ///< Like LinkerPrivate, but weak.
DLLImportLinkage, ///< Function to be imported from DLL
DLLExportLinkage, ///< Function to be accessible from DLL.
ExternalWeakLinkage,///< ExternalWeak linkage description.
@@ -132,11 +133,14 @@ public:
return Linkage == PrivateLinkage;
}
static bool isLinkerPrivateLinkage(LinkageTypes Linkage) {
- return Linkage==LinkerPrivateLinkage;
+ return Linkage == LinkerPrivateLinkage;
+ }
+ static bool isLinkerPrivateWeakLinkage(LinkageTypes Linkage) {
+ return Linkage == LinkerPrivateWeakLinkage;
}
static bool isLocalLinkage(LinkageTypes Linkage) {
return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage) ||
- isLinkerPrivateLinkage(Linkage);
+ isLinkerPrivateLinkage(Linkage) || isLinkerPrivateWeakLinkage(Linkage);
}
static bool isDLLImportLinkage(LinkageTypes Linkage) {
return Linkage == DLLImportLinkage;
@@ -158,7 +162,8 @@ public:
return (Linkage == WeakAnyLinkage ||
Linkage == LinkOnceAnyLinkage ||
Linkage == CommonLinkage ||
- Linkage == ExternalWeakLinkage);
+ Linkage == ExternalWeakLinkage ||
+ Linkage == LinkerPrivateWeakLinkage);
}
/// isWeakForLinker - Whether the definition of this global may be replaced at
@@ -170,7 +175,8 @@ public:
Linkage == LinkOnceAnyLinkage ||
Linkage == LinkOnceODRLinkage ||
Linkage == CommonLinkage ||
- Linkage == ExternalWeakLinkage);
+ Linkage == ExternalWeakLinkage ||
+ Linkage == LinkerPrivateWeakLinkage);
}
bool hasExternalLinkage() const { return isExternalLinkage(Linkage); }
@@ -187,6 +193,9 @@ public:
bool hasInternalLinkage() const { return isInternalLinkage(Linkage); }
bool hasPrivateLinkage() const { return isPrivateLinkage(Linkage); }
bool hasLinkerPrivateLinkage() const { return isLinkerPrivateLinkage(Linkage); }
+ bool hasLinkerPrivateWeakLinkage() const {
+ return isLinkerPrivateWeakLinkage(Linkage);
+ }
bool hasLocalLinkage() const { return isLocalLinkage(Linkage); }
bool hasDLLImportLinkage() const { return isDLLImportLinkage(Linkage); }
bool hasDLLExportLinkage() const { return isDLLExportLinkage(Linkage); }
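
From the C++ API side the new linkage composes with the updated predicates above; a sketch with GV a GlobalValue*:

  GV->setLinkage(GlobalValue::LinkerPrivateWeakLinkage);
  // Per the predicate changes it is simultaneously local and weak:
  assert(GV->hasLocalLinkage() && GV->isWeakForLinker());
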
diff --git a/contrib/llvm/include/llvm/InlineAsm.h b/contrib/llvm/include/llvm/InlineAsm.h
index f4d125b..105b1bc 100644
--- a/contrib/llvm/include/llvm/InlineAsm.h
+++ b/contrib/llvm/include/llvm/InlineAsm.h
@@ -154,7 +154,8 @@ public:
Op_InputChain = 0,
Op_AsmString = 1,
Op_MDNode = 2,
- Op_FirstOperand = 3,
+ Op_IsAlignStack = 3,
+ Op_FirstOperand = 4,
Kind_RegUse = 1,
Kind_RegDef = 2,
diff --git a/contrib/llvm/include/llvm/InstrTypes.h b/contrib/llvm/include/llvm/InstrTypes.h
index 49cdd6a..6715416 100644
--- a/contrib/llvm/include/llvm/InstrTypes.h
+++ b/contrib/llvm/include/llvm/InstrTypes.h
@@ -612,7 +612,7 @@ public:
/// A lossless cast is one that does not alter the basic value. It implies
/// a no-op cast but is more stringent, preventing things like int->float,
- /// long->double, int->ptr, or vector->anything.
+ /// long->double, or int->ptr.
/// @returns true iff the cast is lossless.
/// @brief Determine if this is a lossless cast.
bool isLosslessCast() const;
@@ -625,6 +625,14 @@ public:
/// platform. Generally, the result of TargetData::getIntPtrType() should be
/// passed in. If that's not available, use Type::Int64Ty, which will make
/// the isNoopCast call conservative.
+ /// @brief Determine if the described cast is a no-op cast.
+ static bool isNoopCast(
+ Instruction::CastOps Opcode, ///< Opcode of cast
+ const Type *SrcTy, ///< SrcTy of cast
+ const Type *DstTy, ///< DstTy of cast
+ const Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null
+ );
+
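
The static form lets clients ask the question without materializing a cast instruction; a sketch assuming TD is a TargetData* and Ctx an LLVMContext&:

  bool IsNoop = CastInst::isNoopCast(Instruction::BitCast, SrcTy, DstTy,
                                     TD->getIntPtrType(Ctx));
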
/// @brief Determine if this cast is a no-op cast.
bool isNoopCast(
const Type *IntPtrTy ///< Integer type corresponding to pointer
diff --git a/contrib/llvm/include/llvm/Instructions.h b/contrib/llvm/include/llvm/Instructions.h
index 413a595..af93a29 100644
--- a/contrib/llvm/include/llvm/Instructions.h
+++ b/contrib/llvm/include/llvm/Instructions.h
@@ -235,6 +235,9 @@ public:
void setAlignment(unsigned Align);
+ Value *getValueOperand() { return getOperand(0); }
+ const Value *getValueOperand() const { return getOperand(0); }
+
Value *getPointerOperand() { return getOperand(1); }
const Value *getPointerOperand() const { return getOperand(1); }
static unsigned getPointerOperandIndex() { return 1U; }
@@ -883,14 +886,14 @@ public:
InputIterator ArgBegin, InputIterator ArgEnd,
const Twine &NameStr = "",
Instruction *InsertBefore = 0) {
- return new((unsigned)(ArgEnd - ArgBegin + 1))
+ return new(unsigned(ArgEnd - ArgBegin + 1))
CallInst(Func, ArgBegin, ArgEnd, NameStr, InsertBefore);
}
template<typename InputIterator>
static CallInst *Create(Value *Func,
InputIterator ArgBegin, InputIterator ArgEnd,
const Twine &NameStr, BasicBlock *InsertAtEnd) {
- return new((unsigned)(ArgEnd - ArgBegin + 1))
+ return new(unsigned(ArgEnd - ArgBegin + 1))
CallInst(Func, ArgBegin, ArgEnd, NameStr, InsertAtEnd);
}
static CallInst *Create(Value *F, Value *Actual,
@@ -919,6 +922,7 @@ public:
static Instruction *CreateMalloc(Instruction *InsertBefore,
const Type *IntPtrTy, const Type *AllocTy,
Value *AllocSize, Value *ArraySize = 0,
+ Function* MallocF = 0,
const Twine &Name = "");
static Instruction *CreateMalloc(BasicBlock *InsertAtEnd,
const Type *IntPtrTy, const Type *AllocTy,
@@ -926,7 +930,7 @@ public:
Function* MallocF = 0,
const Twine &Name = "");
/// CreateFree - Generate the IR for a call to the builtin free function.
- static void CreateFree(Value* Source, Instruction *InsertBefore);
+ static Instruction* CreateFree(Value* Source, Instruction *InsertBefore);
static Instruction* CreateFree(Value* Source, BasicBlock *InsertAtEnd);
~CallInst();
@@ -937,8 +941,33 @@ public:
unsigned(isTC));
}
+ /// @deprecated these "define hacks" will go away soon
+ /// @brief coerce out-of-tree code to abandon the low-level interfaces
+ /// @details see below comments and update your code to high-level interfaces
+ /// - getOperand(0) ---> getCalledValue(), or possibly getCalledFunction
+ /// - setOperand(0, V) ---> setCalledFunction(V)
+ ///
+ /// in LLVM v2.8-only code
+ /// - getOperand(N+1) ---> getArgOperand(N)
+ /// - setOperand(N+1, V) ---> setArgOperand(N, V)
+ /// - getNumOperands() ---> getNumArgOperands()+1 // note the "+1"!
+ ///
+ /// in backward compatible code, please consult llvm/Support/CallSite.h:
+ /// create a CallSite from the CallInst pointer and call its methods
+ ///
+# define public private
+# define protected private
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+# undef public
+# undef protected
+public:
+
+ enum { ArgOffset = 0 }; ///< temporary, do not use for new code!
+ unsigned getNumArgOperands() const { return getNumOperands() - 1; }
+ Value *getArgOperand(unsigned i) const { return getOperand(i + ArgOffset); }
+ void setArgOperand(unsigned i, Value *v) { setOperand(i + ArgOffset, v); }
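
The migration described in the comment block above, sketched for a CallInst *CI (Visit is a placeholder):

  Value *Callee = CI->getCalledValue();              // not getOperand(0)
  for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
    Visit(CI->getArgOperand(i));                     // not getOperand(i + 1)
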
/// getCallingConv/setCallingConv - Get or set the calling convention of this
/// function call.
@@ -974,7 +1003,7 @@ public:
/// @brief Return true if the call should not be inlined.
bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); }
- void setIsNoInline(bool Value) {
+ void setIsNoInline(bool Value = true) {
if (Value) addAttribute(~0, Attribute::NoInline);
else removeAttribute(~0, Attribute::NoInline);
}
@@ -998,18 +1027,14 @@ public:
}
/// @brief Determine if the call cannot return.
- bool doesNotReturn() const {
- return paramHasAttr(~0, Attribute::NoReturn);
- }
+ bool doesNotReturn() const { return paramHasAttr(~0, Attribute::NoReturn); }
void setDoesNotReturn(bool DoesNotReturn = true) {
if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
else removeAttribute(~0, Attribute::NoReturn);
}
/// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const {
- return paramHasAttr(~0, Attribute::NoUnwind);
- }
+ bool doesNotThrow() const { return paramHasAttr(~0, Attribute::NoUnwind); }
void setDoesNotThrow(bool DoesNotThrow = true) {
if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
else removeAttribute(~0, Attribute::NoUnwind);
@@ -1031,17 +1056,17 @@ public:
/// indirect function invocation.
///
Function *getCalledFunction() const {
- return dyn_cast<Function>(Op<0>());
+ return dyn_cast<Function>(Op<ArgOffset -1>());
}
/// getCalledValue - Get a pointer to the function that is invoked by this
/// instruction.
- const Value *getCalledValue() const { return Op<0>(); }
- Value *getCalledValue() { return Op<0>(); }
+ const Value *getCalledValue() const { return Op<ArgOffset -1>(); }
+ Value *getCalledValue() { return Op<ArgOffset -1>(); }
/// setCalledFunction - Set the function called.
void setCalledFunction(Value* Fn) {
- Op<0>() = Fn;
+ Op<ArgOffset -1>() = Fn;
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -1071,7 +1096,7 @@ CallInst::CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
->getElementType())->getReturnType(),
Instruction::Call,
OperandTraits<CallInst>::op_end(this) - (ArgEnd - ArgBegin + 1),
- (unsigned)(ArgEnd - ArgBegin + 1), InsertAtEnd) {
+ unsigned(ArgEnd - ArgBegin + 1), InsertAtEnd) {
init(Func, ArgBegin, ArgEnd, NameStr,
typename std::iterator_traits<InputIterator>::iterator_category());
}
@@ -1083,11 +1108,15 @@ CallInst::CallInst(Value *Func, InputIterator ArgBegin, InputIterator ArgEnd,
->getElementType())->getReturnType(),
Instruction::Call,
OperandTraits<CallInst>::op_end(this) - (ArgEnd - ArgBegin + 1),
- (unsigned)(ArgEnd - ArgBegin + 1), InsertBefore) {
+ unsigned(ArgEnd - ArgBegin + 1), InsertBefore) {
init(Func, ArgBegin, ArgEnd, NameStr,
typename std::iterator_traits<InputIterator>::iterator_category());
}
+
+// Note: if you get compile errors about private methods then
+// please update your code to use the high-level operand
+// interfaces. See line 943 above.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallInst, Value)
//===----------------------------------------------------------------------===//
@@ -2432,6 +2461,10 @@ public:
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+ unsigned getNumArgOperands() const { return getNumOperands() - 3; }
+ Value *getArgOperand(unsigned i) const { return getOperand(i); }
+ void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
+
/// getCallingConv/setCallingConv - Get or set the calling convention of this
/// function call.
CallingConv::ID getCallingConv() const {
@@ -2465,11 +2498,11 @@ public:
/// @brief Return true if the call should not be inlined.
bool isNoInline() const { return paramHasAttr(~0, Attribute::NoInline); }
- void setIsNoInline(bool Value) {
+ void setIsNoInline(bool Value = true) {
if (Value) addAttribute(~0, Attribute::NoInline);
else removeAttribute(~0, Attribute::NoInline);
}
-
+
/// @brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
return paramHasAttr(~0, Attribute::ReadNone);
@@ -2489,18 +2522,14 @@ public:
}
/// @brief Determine if the call cannot return.
- bool doesNotReturn() const {
- return paramHasAttr(~0, Attribute::NoReturn);
- }
+ bool doesNotReturn() const { return paramHasAttr(~0, Attribute::NoReturn); }
void setDoesNotReturn(bool DoesNotReturn = true) {
if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn);
else removeAttribute(~0, Attribute::NoReturn);
}
/// @brief Determine if the call cannot unwind.
- bool doesNotThrow() const {
- return paramHasAttr(~0, Attribute::NoUnwind);
- }
+ bool doesNotThrow() const { return paramHasAttr(~0, Attribute::NoUnwind); }
void setDoesNotThrow(bool DoesNotThrow = true) {
if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind);
else removeAttribute(~0, Attribute::NoUnwind);
diff --git a/contrib/llvm/include/llvm/IntrinsicInst.h b/contrib/llvm/include/llvm/IntrinsicInst.h
index 5b0e90f..48f2da9 100644
--- a/contrib/llvm/include/llvm/IntrinsicInst.h
+++ b/contrib/llvm/include/llvm/IntrinsicInst.h
@@ -43,7 +43,7 @@ namespace llvm {
Intrinsic::ID getIntrinsicID() const {
return (Intrinsic::ID)getCalledFunction()->getIntrinsicID();
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const IntrinsicInst *) { return true; }
static inline bool classof(const CallInst *I) {
@@ -74,7 +74,7 @@ namespace llvm {
static inline bool classof(const Value *V) {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
-
+
static Value *StripCast(Value *C);
};
@@ -83,7 +83,7 @@ namespace llvm {
class DbgDeclareInst : public DbgInfoIntrinsic {
public:
Value *getAddress() const;
- MDNode *getVariable() const { return cast<MDNode>(getOperand(2)); }
+ MDNode *getVariable() const { return cast<MDNode>(getArgOperand(1)); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const DbgDeclareInst *) { return true; }
@@ -103,9 +103,9 @@ namespace llvm {
Value *getValue();
uint64_t getOffset() const {
return cast<ConstantInt>(
- const_cast<Value*>(getOperand(2)))->getZExtValue();
+ const_cast<Value*>(getArgOperand(1)))->getZExtValue();
}
- MDNode *getVariable() const { return cast<MDNode>(getOperand(3)); }
+ MDNode *getVariable() const { return cast<MDNode>(getArgOperand(2)); }
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const DbgValueInst *) { return true; }
@@ -121,22 +121,22 @@ namespace llvm {
///
class MemIntrinsic : public IntrinsicInst {
public:
- Value *getRawDest() const { return const_cast<Value*>(getOperand(1)); }
+ Value *getRawDest() const { return const_cast<Value*>(getArgOperand(0)); }
- Value *getLength() const { return const_cast<Value*>(getOperand(3)); }
+ Value *getLength() const { return const_cast<Value*>(getArgOperand(2)); }
ConstantInt *getAlignmentCst() const {
- return cast<ConstantInt>(const_cast<Value*>(getOperand(4)));
+ return cast<ConstantInt>(const_cast<Value*>(getArgOperand(3)));
}
-
+
unsigned getAlignment() const {
return getAlignmentCst()->getZExtValue();
}
ConstantInt *getVolatileCst() const {
- return cast<ConstantInt>(const_cast<Value*>(getOperand(5)));
+ return cast<ConstantInt>(const_cast<Value*>(getArgOperand(4)));
}
bool isVolatile() const {
- return getVolatileCst()->getZExtValue() != 0;
+ return !getVolatileCst()->isZero();
}
/// getDest - This is just like getRawDest, but it strips off any cast
@@ -149,27 +149,27 @@ namespace llvm {
void setDest(Value *Ptr) {
assert(getRawDest()->getType() == Ptr->getType() &&
"setDest called with pointer of wrong type!");
- setOperand(1, Ptr);
+ setArgOperand(0, Ptr);
}
void setLength(Value *L) {
assert(getLength()->getType() == L->getType() &&
"setLength called with value of wrong type!");
- setOperand(3, L);
+ setArgOperand(2, L);
}
-
+
void setAlignment(Constant* A) {
- setOperand(4, A);
+ setArgOperand(3, A);
}
void setVolatile(Constant* V) {
- setOperand(5, V);
+ setArgOperand(4, V);
}
const Type *getAlignmentType() const {
- return getOperand(4)->getType();
+ return getArgOperand(3)->getType();
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const MemIntrinsic *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
@@ -192,14 +192,14 @@ namespace llvm {
public:
/// get* - Return the arguments to the instruction.
///
- Value *getValue() const { return const_cast<Value*>(getOperand(2)); }
-
+ Value *getValue() const { return const_cast<Value*>(getArgOperand(1)); }
+
void setValue(Value *Val) {
assert(getValue()->getType() == Val->getType() &&
- "setSource called with pointer of wrong type!");
- setOperand(2, Val);
+ "setValue called with value of wrong type!");
+ setArgOperand(1, Val);
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const MemSetInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
@@ -209,26 +209,26 @@ namespace llvm {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
};
-
+
/// MemTransferInst - This class wraps the llvm.memcpy/memmove intrinsics.
///
class MemTransferInst : public MemIntrinsic {
public:
/// get* - Return the arguments to the instruction.
///
- Value *getRawSource() const { return const_cast<Value*>(getOperand(2)); }
-
+ Value *getRawSource() const { return const_cast<Value*>(getArgOperand(1)); }
+
/// getSource - This is just like getRawSource, but it strips off any cast
/// instructions that feed it, giving the original input. The returned
/// value is guaranteed to be a pointer.
Value *getSource() const { return getRawSource()->stripPointerCasts(); }
-
+
void setSource(Value *Ptr) {
assert(getRawSource()->getType() == Ptr->getType() &&
"setSource called with pointer of wrong type!");
- setOperand(2, Ptr);
+ setArgOperand(1, Ptr);
}
-
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const MemTransferInst *) { return true; }
static inline bool classof(const IntrinsicInst *I) {
@@ -239,8 +239,8 @@ namespace llvm {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
};
-
-
+
+
/// MemCpyInst - This class wraps the llvm.memcpy intrinsic.
///
class MemCpyInst : public MemTransferInst {
@@ -282,7 +282,7 @@ namespace llvm {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
}
};
-
+
/// MemoryUseIntrinsic - This is the common base class for the memory use
/// marker intrinsics.
///
diff --git a/contrib/llvm/include/llvm/Intrinsics.td b/contrib/llvm/include/llvm/Intrinsics.td
index 2b4df54..444f514 100644
--- a/contrib/llvm/include/llvm/Intrinsics.td
+++ b/contrib/llvm/include/llvm/Intrinsics.td
@@ -309,10 +309,8 @@ let Properties = [IntrNoMem] in {
def int_eh_sjlj_lsda : Intrinsic<[llvm_ptr_ty]>;
def int_eh_sjlj_callsite: Intrinsic<[], [llvm_i32_ty]>;
}
-def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>,
- GCCBuiltin<"__builtin_setjmp">;
-def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>,
- GCCBuiltin<"__builtin_longjmp">;
+def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
+def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>;
//===---------------- Generic Variable Attribute Intrinsics----------------===//
//
diff --git a/contrib/llvm/include/llvm/LinkAllPasses.h b/contrib/llvm/include/llvm/LinkAllPasses.h
index c2375ea..876703b 100644
--- a/contrib/llvm/include/llvm/LinkAllPasses.h
+++ b/contrib/llvm/include/llvm/LinkAllPasses.h
@@ -113,6 +113,7 @@ namespace {
(void) llvm::createSingleLoopExtractorPass();
(void) llvm::createStripSymbolsPass();
(void) llvm::createStripNonDebugSymbolsPass();
+ (void) llvm::createStripDeadDebugInfoPass();
(void) llvm::createStripDeadPrototypesPass();
(void) llvm::createTailCallEliminationPass();
(void) llvm::createTailDuplicationPass();
diff --git a/contrib/llvm/include/llvm/MC/MCAssembler.h b/contrib/llvm/include/llvm/MC/MCAssembler.h
index d9963ec..07ca070 100644
--- a/contrib/llvm/include/llvm/MC/MCAssembler.h
+++ b/contrib/llvm/include/llvm/MC/MCAssembler.h
@@ -354,7 +354,7 @@ public:
typedef FragmentListType::reverse_iterator reverse_iterator;
private:
- iplist<MCFragment> Fragments;
+ FragmentListType Fragments;
const MCSection *Section;
/// Ordinal - The section index in the assemblers section list.
@@ -641,7 +641,7 @@ public:
/// in the symbol table, or whether it can be discarded by the assembler. This
/// also affects whether the assembler treats the label as potentially
/// defining a separate atom.
- bool isSymbolLinkerVisible(const MCSymbolData *SD) const;
+ bool isSymbolLinkerVisible(const MCSymbol &SD) const;
/// Emit the section contents using the given object writer.
//
diff --git a/contrib/llvm/include/llvm/MC/MCContext.h b/contrib/llvm/include/llvm/MC/MCContext.h
index 03b5fb0..a57b5bf 100644
--- a/contrib/llvm/include/llvm/MC/MCContext.h
+++ b/contrib/llvm/include/llvm/MC/MCContext.h
@@ -14,6 +14,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Allocator.h"
+#include "llvm/Support/raw_ostream.h"
namespace llvm {
class MCAsmInfo;
@@ -54,6 +55,17 @@ namespace llvm {
/// for the LocalLabelVal and adds it to the map if needed.
unsigned GetInstance(int64_t LocalLabelVal);
+ /// The file name of the log file from the environment variable
+ /// AS_SECURE_LOG_FILE, which must be set before the .secure_log_unique
+ /// directive is used; otherwise it is an error.
+ char *SecureLogFile;
+ /// The stream that gets written to for the .secure_log_unique directive.
+ raw_ostream *SecureLog;
+ /// Boolean toggled when .secure_log_unique / .secure_log_reset is seen to
+ /// catch errors if .secure_log_unique appears twice without
+ /// .secure_log_reset appearing between them.
+ bool SecureLogUsed;
+
/// Allocator - Allocator object used for creating machine code objects.
///
/// We use a bump pointer allocator to avoid the need to track all allocated
@@ -127,6 +139,16 @@ namespace llvm {
/// @}
+ char *getSecureLogFile() { return SecureLogFile; }
+ raw_ostream *getSecureLog() { return SecureLog; }
+ bool getSecureLogUsed() { return SecureLogUsed; }
+ void setSecureLog(raw_ostream *Value) {
+ SecureLog = Value;
+ }
+ void setSecureLogUsed(bool Value) {
+ SecureLogUsed = Value;
+ }
+
void *Allocate(unsigned Size, unsigned Align = 8) {
return Allocator.Allocate(Size, Align);
}
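
A sketch of how a driver could wire these up when honoring AS_SECURE_LOG_FILE (error handling elided; the raw_fd_ostream constructor and F_Append flag are assumed from the era's Support API):

  if (char *LogFile = Ctx.getSecureLogFile()) {
    std::string Err;
    Ctx.setSecureLog(new raw_fd_ostream(LogFile, Err,
                                        raw_fd_ostream::F_Append));
  }
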
diff --git a/contrib/llvm/include/llvm/MC/MCDirectives.h b/contrib/llvm/include/llvm/MC/MCDirectives.h
index 1f7364d..223b09e 100644
--- a/contrib/llvm/include/llvm/MC/MCDirectives.h
+++ b/contrib/llvm/include/llvm/MC/MCDirectives.h
@@ -38,7 +38,8 @@ enum MCSymbolAttr {
MCSA_Reference, ///< .reference (MachO)
MCSA_Weak, ///< .weak
MCSA_WeakDefinition, ///< .weak_definition (MachO)
- MCSA_WeakReference ///< .weak_reference (MachO)
+ MCSA_WeakReference, ///< .weak_reference (MachO)
+ MCSA_WeakDefAutoPrivate ///< .weak_def_can_be_hidden (MachO)
};
enum MCAssemblerFlag {
diff --git a/contrib/llvm/include/llvm/MC/MCObjectStreamer.h b/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
new file mode 100644
index 0000000..7b9ff00
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
@@ -0,0 +1,56 @@
+//===- MCObjectStreamer.h - MCStreamer Object File Interface ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCOBJECTSTREAMER_H
+#define LLVM_MC_MCOBJECTSTREAMER_H
+
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+class MCAssembler;
+class MCCodeEmitter;
+class MCSectionData;
+class TargetAsmBackend;
+class raw_ostream;
+
+/// \brief Streaming object file generation interface.
+///
+/// This class provides an implementation of the MCStreamer interface which is
+/// suitable for use with the assembler backend. Specific object file formats
+/// are expected to subclass this interface to implement directives specific
+/// to that file format or custom semantics expected by the object writer
+/// implementation.
+class MCObjectStreamer : public MCStreamer {
+ MCAssembler *Assembler;
+ MCSectionData *CurSectionData;
+
+protected:
+ MCObjectStreamer(MCContext &Context, TargetAsmBackend &TAB,
+ raw_ostream &_OS, MCCodeEmitter *_Emitter);
+ ~MCObjectStreamer();
+
+ MCSectionData *getCurrentSectionData() const {
+ return CurSectionData;
+ }
+
+public:
+ MCAssembler &getAssembler() { return *Assembler; }
+
+ /// @name MCStreamer Interface
+ /// @{
+
+ virtual void SwitchSection(const MCSection *Section);
+ virtual void Finish();
+
+ /// @}
+};
+
+} // end namespace llvm
+
+#endif
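
The intended extension point, sketched: a format-specific streamer derives from MCObjectStreamer and supplies the remaining MCStreamer directives (class name and members illustrative).

  class MyFormatStreamer : public MCObjectStreamer {
  public:
    MyFormatStreamer(MCContext &Ctx, TargetAsmBackend &TAB,
                     raw_ostream &OS, MCCodeEmitter *Emitter)
      : MCObjectStreamer(Ctx, TAB, OS, Emitter) {}
    // ...override EmitLabel, EmitInstruction, etc. as the format requires.
  };
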
diff --git a/contrib/llvm/include/llvm/MC/MCObjectWriter.h b/contrib/llvm/include/llvm/MC/MCObjectWriter.h
index e900584..22eea7e 100644
--- a/contrib/llvm/include/llvm/MC/MCObjectWriter.h
+++ b/contrib/llvm/include/llvm/MC/MCObjectWriter.h
@@ -162,6 +162,8 @@ public:
/// @}
};
+MCObjectWriter *createWinCOFFObjectWriter(raw_ostream &OS);
+
} // End llvm namespace
#endif
diff --git a/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h b/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
index cf6eefb..2187889 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
@@ -33,8 +33,6 @@ class AsmLexer : public MCAsmLexer {
const char *CurPtr;
const MemoryBuffer *CurBuf;
- const char *TokStart;
-
void operator=(const AsmLexer&); // DO NOT IMPLEMENT
AsmLexer(const AsmLexer&); // DO NOT IMPLEMENT
@@ -48,9 +46,7 @@ public:
void setBuffer(const MemoryBuffer *buf, const char *ptr = NULL);
- SMLoc getLoc() const;
-
- StringRef LexUntilEndOfStatement();
+ virtual StringRef LexUntilEndOfStatement();
bool isAtStartOfComment(char Char);
diff --git a/contrib/llvm/include/llvm/MC/MCParser/AsmParser.h b/contrib/llvm/include/llvm/MC/MCParser/AsmParser.h
index e929fd1..0e8570a 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/AsmParser.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/AsmParser.h
@@ -26,6 +26,7 @@
namespace llvm {
class AsmCond;
class AsmToken;
+class MCAsmParserExtension;
class MCContext;
class MCExpr;
class MCInst;
@@ -36,11 +37,15 @@ class TargetAsmParser;
class Twine;
class AsmParser : public MCAsmParser {
+ AsmParser(const AsmParser &); // DO NOT IMPLEMENT
+ void operator=(const AsmParser &); // DO NOT IMPLEMENT
private:
AsmLexer Lexer;
MCContext &Ctx;
MCStreamer &Out;
SourceMgr &SrcMgr;
+ MCAsmParserExtension *GenericParser;
+ MCAsmParserExtension *PlatformParser;
TargetAsmParser *TargetParser;
/// This is the current buffer index we're lexing from as managed by the
@@ -54,26 +59,28 @@ private:
/// invoked after the directive identifier is read and is responsible for
/// parsing and validating the rest of the directive. The handler is passed
/// in the directive name and the location of the directive keyword.
- StringMap<bool(AsmParser::*)(StringRef, SMLoc)> DirectiveMap;
+ StringMap<std::pair<MCAsmParserExtension*, DirectiveHandler> > DirectiveMap;
public:
- AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
+ AsmParser(const Target &T, SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
const MCAsmInfo &MAI);
~AsmParser();
bool Run(bool NoInitialTextSection, bool NoFinalize = false);
-
- void AddDirectiveHandler(StringRef Directive,
- bool (AsmParser::*Handler)(StringRef, SMLoc)) {
- DirectiveMap[Directive] = Handler;
+ void AddDirectiveHandler(MCAsmParserExtension *Object,
+ StringRef Directive,
+ DirectiveHandler Handler) {
+ DirectiveMap[Directive] = std::make_pair(Object, Handler);
}
+
public:
TargetAsmParser &getTargetParser() const { return *TargetParser; }
- void setTargetParser(TargetAsmParser &P) { TargetParser = &P; }
+ void setTargetParser(TargetAsmParser &P);
/// @name MCAsmParser Interface
/// {
+ virtual SourceMgr &getSourceManager() { return SrcMgr; }
virtual MCAsmLexer &getLexer() { return Lexer; }
virtual MCContext &getContext() { return Ctx; }
virtual MCStreamer &getStreamer() { return Out; }
@@ -91,12 +98,8 @@ public:
/// }
private:
- MCSymbol *CreateSymbol(StringRef Name);
-
bool ParseStatement();
- bool TokError(const char *Msg);
-
void PrintMessage(SMLoc Loc, const std::string &Msg, const char *Type) const;
/// EnterIncludeFile - Enter the specified file. This returns true on failure.
@@ -104,7 +107,7 @@ private:
void EatToEndOfStatement();
- bool ParseAssignment(const StringRef &Name);
+ bool ParseAssignment(StringRef Name);
bool ParsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc);
bool ParseBinOpRHS(unsigned Precedence, const MCExpr *&Res, SMLoc &EndLoc);
@@ -115,10 +118,6 @@ private:
bool ParseIdentifier(StringRef &Res);
// Directive Parsing.
- bool ParseDirectiveDarwinSection(); // Darwin specific ".section".
- bool ParseDirectiveSectionSwitch(const char *Segment, const char *Section,
- unsigned TAA = 0, unsigned ImplicitAlign = 0,
- unsigned StubSize = 0);
bool ParseDirectiveAscii(bool ZeroTerminated); // ".ascii", ".asciiz"
bool ParseDirectiveValue(unsigned Size); // ".byte", ".long", ...
bool ParseDirectiveFill(); // ".fill"
@@ -132,17 +131,8 @@ private:
/// accepts a single symbol (which should be a label or an external).
bool ParseDirectiveSymbolAttribute(MCSymbolAttr Attr);
bool ParseDirectiveELFType(); // ELF specific ".type"
- bool ParseDirectiveDarwinSymbolDesc(); // Darwin specific ".desc"
- bool ParseDirectiveDarwinLsym(); // Darwin specific ".lsym"
bool ParseDirectiveComm(bool IsLocal); // ".comm" and ".lcomm"
- bool ParseDirectiveDarwinZerofill(); // Darwin specific ".zerofill"
- bool ParseDirectiveDarwinTBSS(); // Darwin specific ".tbss"
-
- // Darwin specific ".subsections_via_symbols"
- bool ParseDirectiveDarwinSubsectionsViaSymbols();
- // Darwin specific .dump and .load
- bool ParseDirectiveDarwinDumpOrLoad(SMLoc IDLoc, bool IsDump);
bool ParseDirectiveAbort(); // ".abort"
bool ParseDirectiveInclude(); // ".include"
@@ -152,10 +142,6 @@ private:
bool ParseDirectiveElse(SMLoc DirectiveLoc); // ".else"
bool ParseDirectiveEndIf(SMLoc DirectiveLoc); // .endif
- bool ParseDirectiveFile(StringRef, SMLoc DirectiveLoc); // ".file"
- bool ParseDirectiveLine(StringRef, SMLoc DirectiveLoc); // ".line"
- bool ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc); // ".loc"
-
/// ParseEscapedString - Parse the current token as a string which may include
/// escaped characters and return the string contents.
bool ParseEscapedString(std::string &Data);
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
index bd1496f..d690e81 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
@@ -121,6 +121,8 @@ class MCAsmLexer {
MCAsmLexer(const MCAsmLexer &); // DO NOT IMPLEMENT
void operator=(const MCAsmLexer &); // DO NOT IMPLEMENT
protected: // Can only create subclasses.
+ const char *TokStart;
+
MCAsmLexer();
virtual AsmToken LexToken() = 0;
@@ -141,6 +143,11 @@ public:
return CurTok = LexToken();
}
+ virtual StringRef LexUntilEndOfStatement() = 0;
+
+ /// getLoc - Get the current source location.
+ SMLoc getLoc() const;
+
/// getTok - Get the current (last) lexed token.
const AsmToken &getTok() {
return CurTok;
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
index 7f7f1b6..d0ccd0f 100644
--- a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
@@ -15,35 +15,48 @@
namespace llvm {
class AsmToken;
class MCAsmLexer;
+class MCAsmParserExtension;
class MCContext;
class MCExpr;
class MCStreamer;
class SMLoc;
+class SourceMgr;
+class StringRef;
class Twine;
/// MCAsmParser - Generic assembler parser interface, for use by target specific
/// assembly parsers.
class MCAsmParser {
+public:
+ typedef bool (MCAsmParserExtension::*DirectiveHandler)(StringRef, SMLoc);
+
+private:
MCAsmParser(const MCAsmParser &); // DO NOT IMPLEMENT
void operator=(const MCAsmParser &); // DO NOT IMPLEMENT
protected: // Can only create subclasses.
MCAsmParser();
-
+
public:
virtual ~MCAsmParser();
+ virtual void AddDirectiveHandler(MCAsmParserExtension *Object,
+ StringRef Directive,
+ DirectiveHandler Handler) = 0;
+
+ virtual SourceMgr &getSourceManager() = 0;
+
virtual MCAsmLexer &getLexer() = 0;
virtual MCContext &getContext() = 0;
- /// getSteamer - Return the output streamer for the assembler.
+ /// getStreamer - Return the output streamer for the assembler.
virtual MCStreamer &getStreamer() = 0;
/// Warning - Emit a warning at the location \arg L, with the message \arg
/// Msg.
virtual void Warning(SMLoc L, const Twine &Msg) = 0;
- /// Warning - Emit an error at the location \arg L, with the message \arg
+ /// Error - Emit an error at the location \arg L, with the message \arg
/// Msg.
///
/// \return The return value is always true, as an idiomatic convenience to
@@ -53,10 +66,17 @@ public:
/// Lex - Get the next AsmToken in the stream, possibly handling file
/// inclusion first.
virtual const AsmToken &Lex() = 0;
-
+
/// getTok - Get the current AsmToken from the stream.
const AsmToken &getTok();
-
+
+ /// \brief Report an error at the current lexer location.
+ bool TokError(const char *Msg);
+
+ /// ParseIdentifier - Parse an identifier or string (as a quoted identifier)
+ /// and set \arg Res to the identifier contents.
+ virtual bool ParseIdentifier(StringRef &Res) = 0;
+
/// ParseExpression - Parse an arbitrary expression.
///
/// @param Res - The value of the expression. The result is undefined
@@ -64,7 +84,7 @@ public:
/// @result - False on success.
virtual bool ParseExpression(const MCExpr *&Res, SMLoc &EndLoc) = 0;
bool ParseExpression(const MCExpr *&Res);
-
+
/// ParseParenExpression - Parse an arbitrary expression, assuming that an
/// initial '(' has already been consumed.
///
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h
new file mode 100644
index 0000000..ad9ccf7
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h
@@ -0,0 +1,66 @@
+//===-- llvm/MC/MCAsmParserExtension.h - Asm Parser Hooks -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMPARSEREXTENSION_H
+#define LLVM_MC_MCASMPARSEREXTENSION_H
+
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/Support/SMLoc.h"
+
+namespace llvm {
+
+/// \brief Generic interface for extending the MCAsmParser,
+/// which is implemented by target and object file assembly parser
+/// implementations.
+class MCAsmParserExtension {
+ MCAsmParserExtension(const MCAsmParserExtension &); // DO NOT IMPLEMENT
+ void operator=(const MCAsmParserExtension &); // DO NOT IMPLEMENT
+
+ MCAsmParser *Parser;
+
+protected:
+ MCAsmParserExtension();
+
+public:
+ virtual ~MCAsmParserExtension();
+
+ /// \brief Initialize the extension for parsing using the given \arg
+ /// Parser. The extension should use the AsmParser interfaces to register its
+ /// parsing routines.
+ virtual void Initialize(MCAsmParser &Parser);
+
+ /// @name MCAsmParser Proxy Interfaces
+ /// @{
+
+ MCContext &getContext() { return getParser().getContext(); }
+ MCAsmLexer &getLexer() { return getParser().getLexer(); }
+ MCAsmParser &getParser() { return *Parser; }
+ SourceMgr &getSourceManager() { return getParser().getSourceManager(); }
+ MCStreamer &getStreamer() { return getParser().getStreamer(); }
+ void Warning(SMLoc L, const Twine &Msg) {
+ return getParser().Warning(L, Msg);
+ }
+ bool Error(SMLoc L, const Twine &Msg) {
+ return getParser().Error(L, Msg);
+ }
+
+ const AsmToken &Lex() { return getParser().Lex(); }
+
+ const AsmToken &getTok() { return getParser().getTok(); }
+
+ bool TokError(const char *Msg) {
+ return getParser().TokError(Msg);
+ }
+
+ /// @}
+};
+
+} // End llvm namespace
+
+#endif
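
A sketch of an extension registering a directive through the new hook; the static_cast to the base-class member-pointer type matches how DirectiveHandler is declared in MCAsmParser (names illustrative):

  class MyDirectiveParser : public MCAsmParserExtension {
  public:
    virtual void Initialize(MCAsmParser &P) {
      MCAsmParserExtension::Initialize(P);
      getParser().AddDirectiveHandler(this, ".mydirective",
        static_cast<MCAsmParser::DirectiveHandler>(
          &MyDirectiveParser::ParseMyDirective));
    }
    bool ParseMyDirective(StringRef Directive, SMLoc Loc) {
      return TokError("'.mydirective' is not supported here");
    }
  };
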
diff --git a/contrib/llvm/include/llvm/MC/MCSection.h b/contrib/llvm/include/llvm/MC/MCSection.h
index 808767c..5c99735 100644
--- a/contrib/llvm/include/llvm/MC/MCSection.h
+++ b/contrib/llvm/include/llvm/MC/MCSection.h
@@ -23,7 +23,7 @@ namespace llvm {
class MCContext;
class MCAsmInfo;
class raw_ostream;
-
+
/// MCSection - Instances of this class represent a uniqued identifier for a
/// section in the current translation unit. The MCContext class uniques and
/// creates these.
@@ -49,7 +49,7 @@ namespace llvm {
SectionKind getKind() const { return Kind; }
SectionVariant getVariant() const { return Variant; }
-
+
virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
raw_ostream &OS) const = 0;
@@ -63,7 +63,7 @@ namespace llvm {
static bool classof(const MCSection *) { return true; }
};
-
+
} // end namespace llvm
#endif
diff --git a/contrib/llvm/include/llvm/MC/MCSectionCOFF.h b/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
index 938a388..f828e10 100644
--- a/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
+++ b/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
@@ -16,6 +16,8 @@
#include "llvm/MC/MCSection.h"
+#include "llvm/Support/COFF.h"
+
namespace llvm {
/// MCSectionCOFF - This represents a section on Windows
@@ -47,56 +49,6 @@ namespace llvm {
/// should be printed before the section name
bool ShouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;
- //FIXME: all COFF enumerations/flags should be standardized into one place...
- // Target/X86COFF.h doesn't seem right as COFF can be used for other targets,
- // MC/WinCOFF.h maybe right as it isn't target or entity specific, and it is
- // pretty low on the dependancy graph (is there any need to support non
- // windows COFF?)
- // here is good for section stuff, but others should go elsewhere
-
- /// Valid section flags.
- enum {
- IMAGE_SCN_TYPE_NO_PAD = 0x00000008,
- IMAGE_SCN_CNT_CODE = 0x00000020,
- IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040,
- IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080,
- IMAGE_SCN_LNK_OTHER = 0x00000100,
- IMAGE_SCN_LNK_INFO = 0x00000200,
- IMAGE_SCN_LNK_REMOVE = 0x00000800,
- IMAGE_SCN_LNK_COMDAT = 0x00001000,
- IMAGE_SCN_MEM_FARDATA = 0x00008000,
- IMAGE_SCN_MEM_PURGEABLE = 0x00020000,
- IMAGE_SCN_MEM_16BIT = 0x00020000,
- IMAGE_SCN_MEM_LOCKED = 0x00040000,
- IMAGE_SCN_MEM_PRELOAD = 0x00080000,
- /* these are handled elsewhere
- IMAGE_SCN_ALIGN_1BYTES = 0x00100000,
- IMAGE_SCN_ALIGN_2BYTES = 0x00200000,
- IMAGE_SCN_ALIGN_4BYTES = 0x00300000,
- IMAGE_SCN_ALIGN_8BYTES = 0x00400000,
- IMAGE_SCN_ALIGN_16BYTES = 0x00500000,
- IMAGE_SCN_ALIGN_32BYTES = 0x00600000,
- IMAGE_SCN_ALIGN_64BYTES = 0x00700000,
- */
- IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000,
- IMAGE_SCN_MEM_DISCARDABLE = 0x02000000,
- IMAGE_SCN_MEM_NOT_CACHED = 0x04000000,
- IMAGE_SCN_MEM_NOT_PAGED = 0x08000000,
- IMAGE_SCN_MEM_SHARED = 0x10000000,
- IMAGE_SCN_MEM_EXECUTE = 0x20000000,
- IMAGE_SCN_MEM_READ = 0x40000000,
- IMAGE_SCN_MEM_WRITE = 0x80000000
- };
-
- enum {
- IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
- IMAGE_COMDAT_SELECT_ANY,
- IMAGE_COMDAT_SELECT_SAME_SIZE,
- IMAGE_COMDAT_SELECT_EXACT_MATCH,
- IMAGE_COMDAT_SELECT_ASSOCIATIVE,
- IMAGE_COMDAT_SELECT_LARGEST
- };
-
StringRef getSectionName() const { return SectionName; }
unsigned getCharacteristics() const { return Characteristics; }
int getSelection () const { return Selection; }
diff --git a/contrib/llvm/include/llvm/MC/MCStreamer.h b/contrib/llvm/include/llvm/MC/MCStreamer.h
index 0783159..aca7dd3 100644
--- a/contrib/llvm/include/llvm/MC/MCStreamer.h
+++ b/contrib/llvm/include/llvm/MC/MCStreamer.h
@@ -314,7 +314,7 @@ namespace llvm {
virtual void EmitRawText(StringRef String);
void EmitRawText(const Twine &String);
- /// Finish - Finish emission of machine code and flush any output.
+ /// Finish - Finish emission of machine code.
virtual void Finish() = 0;
};
@@ -341,12 +341,18 @@ namespace llvm {
MCCodeEmitter *CE = 0,
bool ShowInst = false);
- /// createMachOStreamer - Create a machine code streamer which will generative
+ /// createMachOStreamer - Create a machine code streamer which will generate
/// Mach-O format object files.
MCStreamer *createMachOStreamer(MCContext &Ctx, TargetAsmBackend &TAB,
raw_ostream &OS, MCCodeEmitter *CE,
bool RelaxAll = false);
+ /// createWinCOFFStreamer - Create a machine code streamer which will
+ /// generate Microsoft COFF format object files.
+ MCStreamer *createWinCOFFStreamer(MCContext &Ctx,
+ TargetAsmBackend &TAB,
+ MCCodeEmitter &CE, raw_ostream &OS);
+
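
Wiring up the new COFF path mirrors the MachO streamer, except the code emitter is taken by reference; a sketch with Ctx, TAB, CE, and OS assumed constructed elsewhere:

  MCStreamer *Streamer = createWinCOFFStreamer(Ctx, TAB, CE, OS);
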
/// createLoggingStreamer - Create a machine code streamer which just logs the
/// API calls and then dispatches to another streamer.
///
diff --git a/contrib/llvm/include/llvm/MC/SectionKind.h b/contrib/llvm/include/llvm/MC/SectionKind.h
index c9557f2..85a91c6 100644
--- a/contrib/llvm/include/llvm/MC/SectionKind.h
+++ b/contrib/llvm/include/llvm/MC/SectionKind.h
@@ -29,10 +29,10 @@ class SectionKind {
enum Kind {
/// Metadata - Debug info sections or other metadata.
Metadata,
-
+
/// Text - Text section, used for functions and other executable code.
Text,
-
+
/// ReadOnly - Data that is never written to at program runtime by the
/// program or the dynamic linker. Things in the top-level readonly
/// SectionKind are not mergeable.
@@ -45,7 +45,7 @@ class SectionKind {
/// Mergeable1ByteCString - 1 byte mergeable, null terminated string.
Mergeable1ByteCString,
-
+
/// Mergeable2ByteCString - 2 byte mergeable, null terminated string.
Mergeable2ByteCString,
@@ -56,11 +56,11 @@ class SectionKind {
/// constants together. For example, this can be used to unique
/// constant pool entries etc.
MergeableConst,
-
+
/// MergeableConst4 - This is a section used by 4-byte constants,
/// for example, floats.
MergeableConst4,
-
+
/// MergeableConst8 - This is a section used by 8-byte constants,
/// for example, doubles.
MergeableConst8,
@@ -68,33 +68,33 @@ class SectionKind {
/// MergeableConst16 - This is a section used by 16-byte constants,
/// for example, vectors.
MergeableConst16,
-
+
/// Writeable - This is the base of all segments that need to be written
/// to during program runtime.
-
+
/// ThreadLocal - This is the base of all TLS segments. All TLS
/// objects must be writeable, otherwise there is no reason for them to
/// be thread local!
-
+
/// ThreadBSS - Zero-initialized TLS data objects.
ThreadBSS,
-
+
/// ThreadData - Initialized TLS data objects.
ThreadData,
-
+
/// GlobalWriteableData - Writeable data that is global (not thread
/// local).
-
+
/// BSS - Zero initialized writeable data.
BSS,
-
+
/// BSSLocal - This is BSS (zero initialized and writable) data
/// which has local linkage.
BSSLocal,
-
+
/// BSSExtern - This is BSS data with normal external linkage.
BSSExtern,
-
+
/// Common - Data with common linkage. These represent tentative
/// definitions, which always have a zero initializer and are never
/// marked 'constant'.
@@ -123,20 +123,20 @@ class SectionKind {
/// mark the pages these globals end up on as read-only after it is
/// done with its relocation phase.
ReadOnlyWithRel,
-
+
/// ReadOnlyWithRelLocal - This is data that is readonly by the
/// program, but must be writeable so that the dynamic linker
/// can perform relocations in it. This is used when we know
/// that all the relocations are to globals in this final
/// linked image.
ReadOnlyWithRelLocal
-
+
} K : 8;
public:
-
+
bool isMetadata() const { return K == Metadata; }
bool isText() const { return K == Text; }
-
+
bool isReadOnly() const {
return K == ReadOnly || isMergeableCString() ||
isMergeableConst();
@@ -149,7 +149,7 @@ public:
bool isMergeable1ByteCString() const { return K == Mergeable1ByteCString; }
bool isMergeable2ByteCString() const { return K == Mergeable2ByteCString; }
bool isMergeable4ByteCString() const { return K == Mergeable4ByteCString; }
-
+
bool isMergeableConst() const {
return K == MergeableConst || K == MergeableConst4 ||
K == MergeableConst8 || K == MergeableConst16;
@@ -157,38 +157,38 @@ public:
bool isMergeableConst4() const { return K == MergeableConst4; }
bool isMergeableConst8() const { return K == MergeableConst8; }
bool isMergeableConst16() const { return K == MergeableConst16; }
-
+
bool isWriteable() const {
return isThreadLocal() || isGlobalWriteableData();
}
-
+
bool isThreadLocal() const {
return K == ThreadData || K == ThreadBSS;
}
-
- bool isThreadBSS() const { return K == ThreadBSS; }
- bool isThreadData() const { return K == ThreadData; }
+
+ bool isThreadBSS() const { return K == ThreadBSS; }
+ bool isThreadData() const { return K == ThreadData; }
bool isGlobalWriteableData() const {
return isBSS() || isCommon() || isDataRel() || isReadOnlyWithRel();
}
-
+
bool isBSS() const { return K == BSS || K == BSSLocal || K == BSSExtern; }
bool isBSSLocal() const { return K == BSSLocal; }
bool isBSSExtern() const { return K == BSSExtern; }
-
+
bool isCommon() const { return K == Common; }
-
+
bool isDataRel() const {
return K == DataRel || K == DataRelLocal || K == DataNoRel;
}
-
+
bool isDataRelLocal() const {
return K == DataRelLocal || K == DataNoRel;
}
bool isDataNoRel() const { return K == DataNoRel; }
-
+
bool isReadOnlyWithRel() const {
return K == ReadOnlyWithRel || K == ReadOnlyWithRelLocal;
}
@@ -196,14 +196,14 @@ public:
bool isReadOnlyWithRelLocal() const {
return K == ReadOnlyWithRelLocal;
}
-private:
+private:
static SectionKind get(Kind K) {
SectionKind Res;
Res.K = K;
return Res;
}
public:
-
+
static SectionKind getMetadata() { return get(Metadata); }
static SectionKind getText() { return get(Text); }
static SectionKind getReadOnly() { return get(ReadOnly); }
@@ -234,7 +234,7 @@ public:
return get(ReadOnlyWithRelLocal);
}
};
-
+
} // end namespace llvm
#endif
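
A SectionKind value is built through the static factories above and queried
through the predicates; a minimal usage sketch (assuming <cassert> is
available):

    SectionKind K = SectionKind::getMergeableConst8();
    assert(K.isMergeableConst() && K.isReadOnly() && !K.isWriteable());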
diff --git a/contrib/llvm/include/llvm/Module.h b/contrib/llvm/include/llvm/Module.h
index 901fada..5fc0418 100644
--- a/contrib/llvm/include/llvm/Module.h
+++ b/contrib/llvm/include/llvm/Module.h
@@ -197,11 +197,11 @@ public:
/// Get any module-scope inline assembly blocks.
/// @returns a string containing the module-scope inline assembly blocks.
const std::string &getModuleInlineAsm() const { return GlobalScopeAsm; }
-
+
/// @}
/// @name Module Level Mutators
/// @{
-
+
/// Set the module identifier.
void setModuleIdentifier(StringRef ID) { ModuleID = ID; }
@@ -235,12 +235,12 @@ public:
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
/// This ID is uniqued across modules in the current LLVMContext.
unsigned getMDKindID(StringRef Name) const;
-
+
/// getMDKindNames - Populate client supplied SmallVector with the name for
/// custom metadata IDs registered in this LLVMContext. ID #0 is not used,
/// so it is filled in as an empty string.
void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
-
+
/// @}
/// @name Function Accessors
/// @{
@@ -277,7 +277,7 @@ public:
Constant *getOrInsertTargetIntrinsic(StringRef Name,
const FunctionType *Ty,
AttrListPtr AttributeList);
-
+
/// getFunction - Look up the specified function in the module symbol table.
/// If it does not exist, return null.
Function *getFunction(StringRef Name) const;
@@ -321,14 +321,14 @@ public:
/// @}
/// @name Named Metadata Accessors
/// @{
-
+
/// getNamedMetadata - Return the first NamedMDNode in the module with the
- /// specified name. This method returns null if a NamedMDNode with the
+ /// specified name. This method returns null if a NamedMDNode with the
/// specified name is not found.
- NamedMDNode *getNamedMetadata(StringRef Name) const;
+ NamedMDNode *getNamedMetadata(const Twine &Name) const;
- /// getOrInsertNamedMetadata - Return the first named MDNode in the module
- /// with the specified name. This method returns a new NamedMDNode if a
+ /// getOrInsertNamedMetadata - Return the first named MDNode in the module
+ /// with the specified name. This method returns a new NamedMDNode if a
/// NamedMDNode with the specified name is not found.
NamedMDNode *getOrInsertNamedMetadata(StringRef Name);
@@ -515,15 +515,16 @@ public:
const_named_metadata_iterator named_metadata_begin() const {
return NamedMDList.begin();
}
-
+
/// Get an iterator to the last named metadata.
named_metadata_iterator named_metadata_end() { return NamedMDList.end(); }
/// Get a constant iterator to the last named metadata.
const_named_metadata_iterator named_metadata_end() const {
return NamedMDList.end();
}
-
- /// Determine how many NamedMDNodes are in the Module's list of named metadata.
+
+ /// Determine how many NamedMDNodes are in the Module's list of named
+ /// metadata.
size_t named_metadata_size() const { return NamedMDList.size(); }
/// Determine if the list of named metadata is empty.
bool named_metadata_empty() const { return NamedMDList.empty(); }
@@ -535,7 +536,7 @@ public:
/// Print the module to an output stream with AssemblyAnnotationWriter.
void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW) const;
-
+
/// Dump the module to stderr (for debugging).
void dump() const;
/// This function causes all the subinstructions to "let go" of all references
diff --git a/contrib/llvm/include/llvm/Pass.h b/contrib/llvm/include/llvm/Pass.h
index 8d0c47d..5a58931 100644
--- a/contrib/llvm/include/llvm/Pass.h
+++ b/contrib/llvm/include/llvm/Pass.h
@@ -31,7 +31,6 @@
#include "llvm/System/DataTypes.h"
-#include <cassert>
#include <string>
#include <utility>
#include <vector>
@@ -89,13 +88,8 @@ class Pass {
Pass(const Pass &); // DO NOT IMPLEMENT
public:
- explicit Pass(PassKind K, intptr_t pid) : Resolver(0), PassID(pid), Kind(K) {
- assert(pid && "pid cannot be 0");
- }
- explicit Pass(PassKind K, const void *pid)
- : Resolver(0), PassID((intptr_t)pid), Kind(K) {
- assert(pid && "pid cannot be 0");
- }
+ explicit Pass(PassKind K, intptr_t pid);
+ explicit Pass(PassKind K, const void *pid);
virtual ~Pass();
@@ -138,13 +132,8 @@ public:
virtual PassManagerType getPotentialPassManagerType() const;
// Access AnalysisResolver
- inline void setResolver(AnalysisResolver *AR) {
- assert(!Resolver && "Resolver is already set");
- Resolver = AR;
- }
- inline AnalysisResolver *getResolver() {
- return Resolver;
- }
+ void setResolver(AnalysisResolver *AR);
+ AnalysisResolver *getResolver() const { return Resolver; }
/// getAnalysisUsage - This function should be overriden by passes that need
/// analysis information to do their job. If a pass specifies that it uses a
@@ -170,11 +159,9 @@ public:
/// an analysis interface through multiple inheritance. If needed, it should
/// override this to adjust the this pointer as needed for the specified pass
/// info.
- virtual void *getAdjustedAnalysisPointer(const PassInfo *) {
- return this;
- }
- virtual ImmutablePass *getAsImmutablePass() { return 0; }
- virtual PMDataManager *getAsPMDataManager() { return 0; }
+ virtual void *getAdjustedAnalysisPointer(const PassInfo *);
+ virtual ImmutablePass *getAsImmutablePass();
+ virtual PMDataManager *getAsPMDataManager();
 /// verifyAnalysis() - This member can be implemented by an analysis pass to
/// check state of analysis information.
diff --git a/contrib/llvm/include/llvm/PassAnalysisSupport.h b/contrib/llvm/include/llvm/PassAnalysisSupport.h
index d59be3c..977d4f4 100644
--- a/contrib/llvm/include/llvm/PassAnalysisSupport.h
+++ b/contrib/llvm/include/llvm/PassAnalysisSupport.h
@@ -49,22 +49,13 @@ public:
// addRequired - Add the specified ID to the required set of the usage info
// for a pass.
//
- AnalysisUsage &addRequiredID(AnalysisID ID) {
- assert(ID && "Pass class not registered!");
- Required.push_back(ID);
- return *this;
- }
+ AnalysisUsage &addRequiredID(AnalysisID ID);
template<class PassClass>
AnalysisUsage &addRequired() {
return addRequiredID(Pass::getClassPassInfo<PassClass>());
}
- AnalysisUsage &addRequiredTransitiveID(AnalysisID ID) {
- assert(ID && "Pass class not registered!");
- Required.push_back(ID);
- RequiredTransitive.push_back(ID);
- return *this;
- }
+ AnalysisUsage &addRequiredTransitiveID(AnalysisID ID);
template<class PassClass>
AnalysisUsage &addRequiredTransitive() {
AnalysisID ID = Pass::getClassPassInfo<PassClass>();
diff --git a/contrib/llvm/include/llvm/PassManagers.h b/contrib/llvm/include/llvm/PassManagers.h
index ed1e80e..81b7e7a 100644
--- a/contrib/llvm/include/llvm/PassManagers.h
+++ b/contrib/llvm/include/llvm/PassManagers.h
@@ -302,10 +302,7 @@ public:
/// through getAnalysis interface.
virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);
- virtual Pass * getOnTheFlyPass(Pass *P, const PassInfo *PI, Function &F) {
- assert (0 && "Unable to find on the fly pass");
- return NULL;
- }
+ virtual Pass *getOnTheFlyPass(Pass *P, const PassInfo *PI, Function &F);
/// Initialize available analysis information.
void initializeAnalysisInfo() {
diff --git a/contrib/llvm/include/llvm/PassSupport.h b/contrib/llvm/include/llvm/PassSupport.h
index b229989..b018351 100644
--- a/contrib/llvm/include/llvm/PassSupport.h
+++ b/contrib/llvm/include/llvm/PassSupport.h
@@ -109,13 +109,7 @@ public:
}
/// createPass() - Use this method to create an instance of this pass.
- Pass *createPass() const {
- assert((!isAnalysisGroup() || NormalCtor) &&
- "No default implementation found for analysis group!");
- assert(NormalCtor &&
- "Cannot call createPass on PassInfo without default ctor!");
- return NormalCtor();
- }
+ Pass *createPass() const;
/// addInterfaceImplemented - This method is called when this pass is
/// registered as a member of an analysis group with the RegisterAnalysisGroup
diff --git a/contrib/llvm/include/llvm/Support/CFG.h b/contrib/llvm/include/llvm/Support/CFG.h
index f07c719..9ba71fc 100644
--- a/contrib/llvm/include/llvm/Support/CFG.h
+++ b/contrib/llvm/include/llvm/Support/CFG.h
@@ -53,7 +53,7 @@ public:
assert(!It.atEnd() && "pred_iterator out of range!");
return cast<TerminatorInst>(*It)->getParent();
}
- inline pointer *operator->() const { return &(operator*()); }
+ inline pointer *operator->() const { return &operator*(); }
inline Self& operator++() { // Preincrement
assert(!It.atEnd() && "pred_iterator out of range!");
diff --git a/contrib/llvm/include/llvm/Support/COFF.h b/contrib/llvm/include/llvm/Support/COFF.h
new file mode 100644
index 0000000..69137bf
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/COFF.h
@@ -0,0 +1,217 @@
+//===-- llvm/Support/COFF.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions used in Windows COFF files.
+//
+// Structures and enums defined within this file were created using
+// information from Microsoft's publicly available PE/COFF format document:
+//
+// Microsoft Portable Executable and Common Object File Format Specification
+// Revision 8.1 - February 15, 2008
+//
+// As of 5/2/2010, hosted by Microsoft at:
+// http://www.microsoft.com/whdc/system/platform/firmware/pecoff.mspx
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_WIN_COFF_H
+#define LLVM_SUPPORT_WIN_COFF_H
+
+#include "llvm/System/DataTypes.h"
+#include <cstring>
+
+namespace llvm {
+namespace COFF {
+
+ // Sizes in bytes of various things in the COFF format.
+ enum {
+ HeaderSize = 20,
+ NameSize = 8,
+ SymbolSize = 18,
+ SectionSize = 40,
+ RelocationSize = 10
+ };
+
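The enum above gives on-disk record sizes, which differ from sizeof on the
structs below because of alignment padding (a symbol record is 18 bytes on
disk, while sizeof(COFF::symbol) is typically 20). A minimal emission sketch;
write_le16/write_le32 are assumed little-endian helpers, not part of this
header:

    // Write each field separately, in PE/COFF spec order, so in-memory
    // struct padding never reaches the object file.
    void writeSymbol(raw_ostream &OS, const COFF::symbol &S) {
      OS.write(S.Name, COFF::NameSize);
      write_le32(OS, S.Value);
      write_le16(OS, S.SectionNumber);
      write_le16(OS, S.Type);
      OS.write(S.StorageClass);
      OS.write(S.NumberOfAuxSymbols);
      // Bytes written: COFF::SymbolSize (18), not sizeof(COFF::symbol).
    }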
+ struct header {
+ uint16_t Machine;
+ uint16_t NumberOfSections;
+ uint32_t TimeDateStamp;
+ uint32_t PointerToSymbolTable;
+ uint32_t NumberOfSymbols;
+ uint16_t SizeOfOptionalHeader;
+ uint16_t Characteristics;
+ };
+
+ struct symbol {
+ char Name[NameSize];
+ uint32_t Value;
+ uint16_t Type;
+ uint8_t StorageClass;
+ uint16_t SectionNumber;
+ uint8_t NumberOfAuxSymbols;
+ };
+
+ enum SymbolFlags {
+ SF_TypeMask = 0x0000FFFF,
+ SF_TypeShift = 0,
+
+ SF_ClassMask = 0x00FF0000,
+ SF_ClassShift = 16,
+
+ SF_WeakReference = 0x01000000
+ };
+
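These masks pack a symbol's 16-bit type and 8-bit storage class into a single
word; unpacking is the usual mask-then-shift pattern. A sketch (these helpers
are illustrative, not an API of this header):

    static inline uint16_t getFlagsType(uint32_t Flags) {
      return (Flags & COFF::SF_TypeMask) >> COFF::SF_TypeShift;
    }
    static inline uint8_t getFlagsClass(uint32_t Flags) {
      return (Flags & COFF::SF_ClassMask) >> COFF::SF_ClassShift;
    }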
+ /// Storage class tells where and what the symbol represents
+ enum SymbolStorageClass {
+ IMAGE_SYM_CLASS_END_OF_FUNCTION = -1, ///< Physical end of function
+ IMAGE_SYM_CLASS_NULL = 0, ///< No symbol
+ IMAGE_SYM_CLASS_AUTOMATIC = 1, ///< Stack variable
+ IMAGE_SYM_CLASS_EXTERNAL = 2, ///< External symbol
+ IMAGE_SYM_CLASS_STATIC = 3, ///< Static
+ IMAGE_SYM_CLASS_REGISTER = 4, ///< Register variable
+ IMAGE_SYM_CLASS_EXTERNAL_DEF = 5, ///< External definition
+ IMAGE_SYM_CLASS_LABEL = 6, ///< Label
+ IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7, ///< Undefined label
+ IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8, ///< Member of structure
+ IMAGE_SYM_CLASS_ARGUMENT = 9, ///< Function argument
+ IMAGE_SYM_CLASS_STRUCT_TAG = 10, ///< Structure tag
+ IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11, ///< Member of union
+ IMAGE_SYM_CLASS_UNION_TAG = 12, ///< Union tag
+ IMAGE_SYM_CLASS_TYPE_DEFINITION = 13, ///< Type definition
+ IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14, ///< Undefined static
+ IMAGE_SYM_CLASS_ENUM_TAG = 15, ///< Enumeration tag
+ IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16, ///< Member of enumeration
+ IMAGE_SYM_CLASS_REGISTER_PARAM = 17, ///< Register parameter
+ IMAGE_SYM_CLASS_BIT_FIELD = 18, ///< Bit field
+ /// ".bb" or ".eb" - beginning or end of block
+ IMAGE_SYM_CLASS_BLOCK = 100,
+ /// ".bf" or ".ef" - beginning or end of function
+ IMAGE_SYM_CLASS_FUNCTION = 101,
+ IMAGE_SYM_CLASS_END_OF_STRUCT = 102, ///< End of structure
+ IMAGE_SYM_CLASS_FILE = 103, ///< File name
+ /// Line number, reformatted as symbol
+ IMAGE_SYM_CLASS_SECTION = 104,
+ IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105, ///< Duplicate tag
+ /// External symbol in dmert public lib
+ IMAGE_SYM_CLASS_CLR_TOKEN = 107
+ };
+
+ enum SymbolBaseType {
+ IMAGE_SYM_TYPE_NULL = 0, ///< No type information or unknown base type.
+ IMAGE_SYM_TYPE_VOID = 1, ///< Used with void pointers and functions.
+ IMAGE_SYM_TYPE_CHAR = 2, ///< A character (signed byte).
+ IMAGE_SYM_TYPE_SHORT = 3, ///< A 2-byte signed integer.
+ IMAGE_SYM_TYPE_INT = 4, ///< A natural integer type on the target.
+ IMAGE_SYM_TYPE_LONG = 5, ///< A 4-byte signed integer.
+ IMAGE_SYM_TYPE_FLOAT = 6, ///< A 4-byte floating-point number.
+ IMAGE_SYM_TYPE_DOUBLE = 7, ///< An 8-byte floating-point number.
+ IMAGE_SYM_TYPE_STRUCT = 8, ///< A structure.
+    IMAGE_SYM_TYPE_UNION = 9,   ///< A union.
+ IMAGE_SYM_TYPE_ENUM = 10, ///< An enumerated type.
+ IMAGE_SYM_TYPE_MOE = 11, ///< A member of enumeration (a specific value).
+ IMAGE_SYM_TYPE_BYTE = 12, ///< A byte; unsigned 1-byte integer.
+ IMAGE_SYM_TYPE_WORD = 13, ///< A word; unsigned 2-byte integer.
+ IMAGE_SYM_TYPE_UINT = 14, ///< An unsigned integer of natural size.
+ IMAGE_SYM_TYPE_DWORD = 15 ///< An unsigned 4-byte integer.
+ };
+
+ enum SymbolComplexType {
+ IMAGE_SYM_DTYPE_NULL = 0, ///< No complex type; simple scalar variable.
+ IMAGE_SYM_DTYPE_POINTER = 1, ///< A pointer to base type.
+ IMAGE_SYM_DTYPE_FUNCTION = 2, ///< A function that returns a base type.
+ IMAGE_SYM_DTYPE_ARRAY = 3, ///< An array of base type.
+
+ /// Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
+ SCT_COMPLEX_TYPE_SHIFT = 4
+ };
+
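Following the formula in the comment above, a "function returning int" would
be encoded as:

    uint16_t FnTy = COFF::IMAGE_SYM_TYPE_INT
                  + (COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT);
    // FnTy == 4 + (2 << 4) == 0x24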
+ struct section {
+ char Name[NameSize];
+ uint32_t VirtualSize;
+ uint32_t VirtualAddress;
+ uint32_t SizeOfRawData;
+ uint32_t PointerToRawData;
+ uint32_t PointerToRelocations;
+ uint32_t PointerToLineNumbers;
+ uint16_t NumberOfRelocations;
+ uint16_t NumberOfLineNumbers;
+ uint32_t Characteristics;
+ };
+
+ enum SectionCharacteristics {
+ IMAGE_SCN_TYPE_NO_PAD = 0x00000008,
+ IMAGE_SCN_CNT_CODE = 0x00000020,
+ IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040,
+ IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080,
+ IMAGE_SCN_LNK_OTHER = 0x00000100,
+ IMAGE_SCN_LNK_INFO = 0x00000200,
+ IMAGE_SCN_LNK_REMOVE = 0x00000800,
+ IMAGE_SCN_LNK_COMDAT = 0x00001000,
+ IMAGE_SCN_GPREL = 0x00008000,
+ IMAGE_SCN_MEM_PURGEABLE = 0x00020000,
+ IMAGE_SCN_MEM_16BIT = 0x00020000,
+ IMAGE_SCN_MEM_LOCKED = 0x00040000,
+ IMAGE_SCN_MEM_PRELOAD = 0x00080000,
+ IMAGE_SCN_ALIGN_1BYTES = 0x00100000,
+ IMAGE_SCN_ALIGN_2BYTES = 0x00200000,
+ IMAGE_SCN_ALIGN_4BYTES = 0x00300000,
+ IMAGE_SCN_ALIGN_8BYTES = 0x00400000,
+ IMAGE_SCN_ALIGN_16BYTES = 0x00500000,
+ IMAGE_SCN_ALIGN_32BYTES = 0x00600000,
+ IMAGE_SCN_ALIGN_64BYTES = 0x00700000,
+ IMAGE_SCN_ALIGN_128BYTES = 0x00800000,
+ IMAGE_SCN_ALIGN_256BYTES = 0x00900000,
+ IMAGE_SCN_ALIGN_512BYTES = 0x00A00000,
+ IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000,
+ IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000,
+ IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000,
+ IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000,
+ IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000,
+ IMAGE_SCN_MEM_DISCARDABLE = 0x02000000,
+ IMAGE_SCN_MEM_NOT_CACHED = 0x04000000,
+ IMAGE_SCN_MEM_NOT_PAGED = 0x08000000,
+ IMAGE_SCN_MEM_SHARED = 0x10000000,
+ IMAGE_SCN_MEM_EXECUTE = 0x20000000,
+ IMAGE_SCN_MEM_READ = 0x40000000,
+ IMAGE_SCN_MEM_WRITE = 0x80000000
+ };
+
+ struct relocation {
+ uint32_t VirtualAddress;
+ uint32_t SymbolTableIndex;
+ uint16_t Type;
+ };
+
+ enum RelocationTypeX86 {
+ IMAGE_REL_I386_ABSOLUTE = 0x0000,
+ IMAGE_REL_I386_DIR16 = 0x0001,
+ IMAGE_REL_I386_REL16 = 0x0002,
+ IMAGE_REL_I386_DIR32 = 0x0006,
+ IMAGE_REL_I386_DIR32NB = 0x0007,
+ IMAGE_REL_I386_SEG12 = 0x0009,
+ IMAGE_REL_I386_SECTION = 0x000A,
+ IMAGE_REL_I386_SECREL = 0x000B,
+ IMAGE_REL_I386_TOKEN = 0x000C,
+ IMAGE_REL_I386_SECREL7 = 0x000D,
+ IMAGE_REL_I386_REL32 = 0x0014
+ };
+
+ enum {
+ IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
+ IMAGE_COMDAT_SELECT_ANY,
+ IMAGE_COMDAT_SELECT_SAME_SIZE,
+ IMAGE_COMDAT_SELECT_EXACT_MATCH,
+ IMAGE_COMDAT_SELECT_ASSOCIATIVE,
+ IMAGE_COMDAT_SELECT_LARGEST
+ };
+
+} // End namespace COFF.
+} // End namespace llvm.
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/CallSite.h b/contrib/llvm/include/llvm/Support/CallSite.h
index 0650b61..38ee08b 100644
--- a/contrib/llvm/include/llvm/Support/CallSite.h
+++ b/contrib/llvm/include/llvm/Support/CallSite.h
@@ -204,9 +204,9 @@ public:
CALLSITE_DELEGATE_GETTER(isNoInline());
}
void setIsNoInline(bool Value = true) {
- CALLSITE_DELEGATE_GETTER(setIsNoInline(Value));
+ CALLSITE_DELEGATE_SETTER(setIsNoInline(Value));
}
-
+
/// @brief Determine if the call does not access memory.
bool doesNotAccessMemory() const {
CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
@@ -256,14 +256,14 @@ private:
/// Returns the operand number of the first argument
unsigned getArgumentOffset() const {
if (isCall())
- return 1; // Skip Function (ATM)
+ return CallInst::ArgOffset; // Skip Function (ATM)
else
return 0; // Args are at the front
}
unsigned getArgumentEndOffset() const {
if (isCall())
- return 0; // Unchanged (ATM)
+ return CallInst::ArgOffset ? 0 : 1; // Unchanged (ATM)
else
return 3; // Skip BB, BB, Function
}
@@ -273,7 +273,9 @@ private:
// of the op_*() functions here. See CallSite::getCallee.
//
if (isCall())
- return getInstruction()->op_begin(); // Unchanged (ATM)
+ return CallInst::ArgOffset
+ ? getInstruction()->op_begin() // Unchanged
+ : getInstruction()->op_end() - 1; // Skip Function
else
return getInstruction()->op_end() - 3; // Skip BB, BB, Function
}
diff --git a/contrib/llvm/include/llvm/Support/Dwarf.h b/contrib/llvm/include/llvm/Support/Dwarf.h
index 3d25e03..3ca8d96 100644
--- a/contrib/llvm/include/llvm/Support/Dwarf.h
+++ b/contrib/llvm/include/llvm/Support/Dwarf.h
@@ -300,12 +300,99 @@ enum dwarf_constants {
DW_OP_ne = 0x2e,
DW_OP_lit0 = 0x30,
DW_OP_lit1 = 0x31,
+ DW_OP_lit2 = 0x32,
+ DW_OP_lit3 = 0x33,
+ DW_OP_lit4 = 0x34,
+ DW_OP_lit5 = 0x35,
+ DW_OP_lit6 = 0x36,
+ DW_OP_lit7 = 0x37,
+ DW_OP_lit8 = 0x38,
+ DW_OP_lit9 = 0x39,
+ DW_OP_lit10 = 0x3a,
+ DW_OP_lit11 = 0x3b,
+ DW_OP_lit12 = 0x3c,
+ DW_OP_lit13 = 0x3d,
+ DW_OP_lit14 = 0x3e,
+ DW_OP_lit15 = 0x3f,
+ DW_OP_lit16 = 0x40,
+ DW_OP_lit17 = 0x41,
+ DW_OP_lit18 = 0x42,
+ DW_OP_lit19 = 0x43,
+ DW_OP_lit20 = 0x44,
+ DW_OP_lit21 = 0x45,
+ DW_OP_lit22 = 0x46,
+ DW_OP_lit23 = 0x47,
+ DW_OP_lit24 = 0x48,
+ DW_OP_lit25 = 0x49,
+ DW_OP_lit26 = 0x4a,
+ DW_OP_lit27 = 0x4b,
+ DW_OP_lit28 = 0x4c,
+ DW_OP_lit29 = 0x4d,
+ DW_OP_lit30 = 0x4e,
DW_OP_lit31 = 0x4f,
DW_OP_reg0 = 0x50,
DW_OP_reg1 = 0x51,
+ DW_OP_reg2 = 0x52,
+ DW_OP_reg3 = 0x53,
+ DW_OP_reg4 = 0x54,
+ DW_OP_reg5 = 0x55,
+ DW_OP_reg6 = 0x56,
+ DW_OP_reg7 = 0x57,
+ DW_OP_reg8 = 0x58,
+ DW_OP_reg9 = 0x59,
+ DW_OP_reg10 = 0x5a,
+ DW_OP_reg11 = 0x5b,
+ DW_OP_reg12 = 0x5c,
+ DW_OP_reg13 = 0x5d,
+ DW_OP_reg14 = 0x5e,
+ DW_OP_reg15 = 0x5f,
+ DW_OP_reg16 = 0x60,
+ DW_OP_reg17 = 0x61,
+ DW_OP_reg18 = 0x62,
+ DW_OP_reg19 = 0x63,
+ DW_OP_reg20 = 0x64,
+ DW_OP_reg21 = 0x65,
+ DW_OP_reg22 = 0x66,
+ DW_OP_reg23 = 0x67,
+ DW_OP_reg24 = 0x68,
+ DW_OP_reg25 = 0x69,
+ DW_OP_reg26 = 0x6a,
+ DW_OP_reg27 = 0x6b,
+ DW_OP_reg28 = 0x6c,
+ DW_OP_reg29 = 0x6d,
+ DW_OP_reg30 = 0x6e,
DW_OP_reg31 = 0x6f,
DW_OP_breg0 = 0x70,
DW_OP_breg1 = 0x71,
+ DW_OP_breg2 = 0x72,
+ DW_OP_breg3 = 0x73,
+ DW_OP_breg4 = 0x74,
+ DW_OP_breg5 = 0x75,
+ DW_OP_breg6 = 0x76,
+ DW_OP_breg7 = 0x77,
+ DW_OP_breg8 = 0x78,
+ DW_OP_breg9 = 0x79,
+ DW_OP_breg10 = 0x7a,
+ DW_OP_breg11 = 0x7b,
+ DW_OP_breg12 = 0x7c,
+ DW_OP_breg13 = 0x7d,
+ DW_OP_breg14 = 0x7e,
+ DW_OP_breg15 = 0x7f,
+ DW_OP_breg16 = 0x80,
+ DW_OP_breg17 = 0x81,
+ DW_OP_breg18 = 0x82,
+ DW_OP_breg19 = 0x83,
+ DW_OP_breg20 = 0x84,
+ DW_OP_breg21 = 0x85,
+ DW_OP_breg22 = 0x86,
+ DW_OP_breg23 = 0x87,
+ DW_OP_breg24 = 0x88,
+ DW_OP_breg25 = 0x89,
+ DW_OP_breg26 = 0x8a,
+ DW_OP_breg27 = 0x8b,
+ DW_OP_breg28 = 0x8c,
+ DW_OP_breg29 = 0x8d,
+ DW_OP_breg30 = 0x8e,
DW_OP_breg31 = 0x8f,
DW_OP_regx = 0x90,
DW_OP_fbreg = 0x91,
diff --git a/contrib/llvm/include/llvm/Support/ELF.h b/contrib/llvm/include/llvm/Support/ELF.h
index d09db39..6f939e7 100644
--- a/contrib/llvm/include/llvm/Support/ELF.h
+++ b/contrib/llvm/include/llvm/Support/ELF.h
@@ -10,11 +10,10 @@
// This header contains common, non-processor-specific data structures and
// constants for the ELF file format.
//
-// The details of the ELF32 bits in this file are largely based on
-// the Tool Interface Standard (TIS) Executable and Linking Format
-// (ELF) Specification Version 1.2, May 1995. The ELF64 stuff is not
-// standardized, as far as I can tell. It was largely based on information
-// I found in OpenBSD header files.
+// The details of the ELF32 bits in this file are largely based on the Tool
+// Interface Standard (TIS) Executable and Linking Format (ELF) Specification
+// Version 1.2, May 1995. The ELF64 stuff is based on ELF-64 Object File Format
+// Version 1.5, Draft 2, May 1998 as well as OpenBSD header files.
//
//===----------------------------------------------------------------------===//
@@ -47,8 +46,23 @@ typedef uint16_t Elf64_Quarter;
// Object file magic string.
static const char ElfMagic[] = { 0x7f, 'E', 'L', 'F', '\0' };
+// e_ident size and indices.
+enum {
+ EI_MAG0 = 0, // File identification index.
+ EI_MAG1 = 1, // File identification index.
+ EI_MAG2 = 2, // File identification index.
+ EI_MAG3 = 3, // File identification index.
+ EI_CLASS = 4, // File class.
+ EI_DATA = 5, // Data encoding.
+ EI_VERSION = 6, // File version.
+ EI_OSABI = 7, // OS/ABI identification.
+ EI_ABIVERSION = 8, // ABI version.
+ EI_PAD = 9, // Start of padding bytes.
+ EI_NIDENT = 16 // Number of bytes in e_ident.
+};
+
struct Elf32_Ehdr {
- unsigned char e_ident[16]; // ELF Identification bytes
+ unsigned char e_ident[EI_NIDENT]; // ELF Identification bytes
Elf32_Half e_type; // Type of file (see ET_* below)
Elf32_Half e_machine; // Required architecture for this file (see EM_*)
Elf32_Word e_version; // Must be equal to 1
@@ -62,17 +76,17 @@ struct Elf32_Ehdr {
Elf32_Half e_shentsize; // Size of an entry in the section header table
Elf32_Half e_shnum; // Number of entries in the section header table
Elf32_Half e_shstrndx; // Sect hdr table index of sect name string table
- bool checkMagic () const {
- return (memcmp (e_ident, ElfMagic, strlen (ElfMagic))) == 0;
+ bool checkMagic() const {
+ return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
}
- unsigned char getFileClass () const { return e_ident[4]; }
- unsigned char getDataEncoding () { return e_ident[5]; }
+ unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
};
// 64-bit ELF header. Fields are the same as for ELF32, but with different
// types (see above).
struct Elf64_Ehdr {
- unsigned char e_ident[16];
+ unsigned char e_ident[EI_NIDENT];
Elf64_Quarter e_type;
Elf64_Quarter e_machine;
Elf64_Half e_version;
@@ -86,6 +100,11 @@ struct Elf64_Ehdr {
Elf64_Quarter e_shentsize;
Elf64_Quarter e_shnum;
Elf64_Quarter e_shstrndx;
+ bool checkMagic() const {
+ return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
+ }
+ unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
};
// File types
@@ -117,6 +136,7 @@ enum {
EM_860 = 7, // Intel 80860
EM_MIPS = 8, // MIPS R3000
EM_PPC = 20, // PowerPC
+ EM_PPC64 = 21, // PowerPC64
EM_ARM = 40, // ARM
EM_ALPHA = 41, // DEC Alpha
EM_SPARCV9 = 43, // SPARC V9
@@ -131,13 +151,69 @@ enum {
// Object file byte orderings.
enum {
+ ELFDATANONE = 0, // Invalid data encoding.
ELFDATA2LSB = 1, // Little-endian object file
ELFDATA2MSB = 2 // Big-endian object file
};
-// OS ABI identification -- unused.
+// OS ABI identification.
enum {
- ELFOSABI_NONE = 0
+ ELFOSABI_NONE = 0, // UNIX System V ABI
+ ELFOSABI_HPUX = 1, // HP-UX operating system
+ ELFOSABI_NETBSD = 2, // NetBSD
+ ELFOSABI_LINUX = 3, // GNU/Linux
+ ELFOSABI_HURD = 4, // GNU/Hurd
+ ELFOSABI_SOLARIS = 6, // Solaris
+ ELFOSABI_AIX = 7, // AIX
+ ELFOSABI_IRIX = 8, // IRIX
+ ELFOSABI_FREEBSD = 9, // FreeBSD
+ ELFOSABI_TRU64 = 10, // TRU64 UNIX
+ ELFOSABI_MODESTO = 11, // Novell Modesto
+ ELFOSABI_OPENBSD = 12, // OpenBSD
+ ELFOSABI_OPENVMS = 13, // OpenVMS
+ ELFOSABI_NSK = 14, // Hewlett-Packard Non-Stop Kernel
+ ELFOSABI_AROS = 15, // AROS
+ ELFOSABI_FENIXOS = 16, // FenixOS
+ ELFOSABI_C6000_ELFABI = 64, // Bare-metal TMS320C6000
+ ELFOSABI_C6000_LINUX = 65, // Linux TMS320C6000
+ ELFOSABI_ARM = 97, // ARM
+ ELFOSABI_STANDALONE = 255 // Standalone (embedded) application
+};
+
+// X86_64 relocations.
+enum {
+ R_X86_64_NONE = 0,
+ R_X86_64_64 = 1,
+ R_X86_64_PC32 = 2,
+ R_X86_64_GOT32 = 3,
+ R_X86_64_PLT32 = 4,
+ R_X86_64_COPY = 5,
+ R_X86_64_GLOB_DAT = 6,
+ R_X86_64_JUMP_SLOT = 7,
+ R_X86_64_RELATIVE = 8,
+ R_X86_64_GOTPCREL = 9,
+ R_X86_64_32 = 10,
+ R_X86_64_32S = 11,
+ R_X86_64_16 = 12,
+ R_X86_64_PC16 = 13,
+ R_X86_64_8 = 14,
+ R_X86_64_PC8 = 15,
+ R_X86_64_DTPMOD64 = 16,
+ R_X86_64_DTPOFF64 = 17,
+ R_X86_64_TPOFF64 = 18,
+ R_X86_64_TLSGD = 19,
+ R_X86_64_TLSLD = 20,
+ R_X86_64_DTPOFF32 = 21,
+ R_X86_64_GOTTPOFF = 22,
+ R_X86_64_TPOFF32 = 23,
+ R_X86_64_PC64 = 24,
+ R_X86_64_GOTOFF64 = 25,
+ R_X86_64_GOTPC32 = 26,
+ R_X86_64_SIZE32 = 32,
+ R_X86_64_SIZE64 = 33,
+ R_X86_64_GOTPC32_TLSDESC = 34,
+ R_X86_64_TLSDESC_CALL = 35,
+ R_X86_64_TLSDESC = 36
};
// Section header.
@@ -207,7 +283,7 @@ enum {
SHF_MASKPROC = 0xf0000000 // Bits indicating processor-specific flags.
};
-// Symbol table entries.
+// Symbol table entries for ELF32.
struct Elf32_Sym {
Elf32_Word st_name; // Symbol name (index into string table)
Elf32_Addr st_value; // Value or address associated with the symbol
@@ -218,11 +294,31 @@ struct Elf32_Sym {
// These accessors and mutators correspond to the ELF32_ST_BIND,
// ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
- unsigned char getBinding () const { return st_info >> 4; }
- unsigned char getType () const { return st_info & 0x0f; }
- void setBinding (unsigned char b) { setBindingAndType (b, getType ()); }
- void setType (unsigned char t) { setBindingAndType (getBinding (), t); }
- void setBindingAndType (unsigned char b, unsigned char t) {
+ unsigned char getBinding() const { return st_info >> 4; }
+ unsigned char getType() const { return st_info & 0x0f; }
+ void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+ void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+ void setBindingAndType(unsigned char b, unsigned char t) {
+ st_info = (b << 4) + (t & 0x0f);
+ }
+};
+
+// Symbol table entries for ELF64.
+struct Elf64_Sym {
+ Elf64_Word st_name; // Symbol name (index into string table)
+ unsigned char st_info; // Symbol's type and binding attributes
+ unsigned char st_other; // Must be zero; reserved
+ Elf64_Half st_shndx; // Which section (header table index) it's defined in
+ Elf64_Addr st_value; // Value or address associated with the symbol
+ Elf64_Xword st_size; // Size of the symbol
+
+ // These accessors and mutators are identical to those defined for ELF32
+ // symbol table entries.
+ unsigned char getBinding() const { return st_info >> 4; }
+ unsigned char getType() const { return st_info & 0x0f; }
+ void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+ void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+ void setBindingAndType(unsigned char b, unsigned char t) {
st_info = (b << 4) + (t & 0x0f);
}
};
@@ -254,11 +350,11 @@ struct Elf32_Rel {
// These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
// and ELF32_R_INFO macros defined in the ELF specification:
- Elf32_Word getSymbol () const { return (r_info >> 8); }
- unsigned char getType () const { return (unsigned char) (r_info & 0x0ff); }
- void setSymbol (Elf32_Word s) { setSymbolAndType (s, getType ()); }
- void setType (unsigned char t) { setSymbolAndType (getSymbol(), t); }
- void setSymbolAndType (Elf32_Word s, unsigned char t) {
+ Elf32_Word getSymbol() const { return (r_info >> 8); }
+ unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); }
+ void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf32_Word s, unsigned char t) {
r_info = (s << 8) + t;
};
};
@@ -271,16 +367,53 @@ struct Elf32_Rela {
// These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
// and ELF32_R_INFO macros defined in the ELF specification:
- Elf32_Word getSymbol () const { return (r_info >> 8); }
- unsigned char getType () const { return (unsigned char) (r_info & 0x0ff); }
- void setSymbol (Elf32_Word s) { setSymbolAndType (s, getType ()); }
- void setType (unsigned char t) { setSymbolAndType (getSymbol(), t); }
- void setSymbolAndType (Elf32_Word s, unsigned char t) {
+ Elf32_Word getSymbol() const { return (r_info >> 8); }
+ unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); }
+ void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf32_Word s, unsigned char t) {
r_info = (s << 8) + t;
};
};
-// Program header.
+// Relocation entry, without explicit addend.
+struct Elf64_Rel {
+ Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
+ Elf64_Xword r_info; // Symbol table index and type of relocation to apply.
+
+ // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+ // and ELF64_R_INFO macros defined in the ELF specification:
+ Elf64_Xword getSymbol() const { return (r_info >> 32); }
+ unsigned char getType() const {
+ return (unsigned char) (r_info & 0xffffffffL);
+ }
+  void setSymbol(Elf64_Xword s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf64_Xword s, unsigned char t) {
+ r_info = (s << 32) + (t&0xffffffffL);
+ };
+};
+
+// Relocation entry with explicit addend.
+struct Elf64_Rela {
+ Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
+ Elf64_Xword r_info; // Symbol table index and type of relocation to apply.
+ Elf64_Sxword r_addend; // Compute value for relocatable field by adding this.
+
+ // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+ // and ELF64_R_INFO macros defined in the ELF specification:
+ Elf64_Xword getSymbol() const { return (r_info >> 32); }
+ unsigned char getType() const {
+ return (unsigned char) (r_info & 0xffffffffL);
+ }
+ void setSymbol(Elf64_Xword s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf64_Xword s, unsigned char t) {
+ r_info = (s << 32) + (t&0xffffffffL);
+ };
+};
+
+// Program header for ELF32.
struct Elf32_Phdr {
Elf32_Word p_type; // Type of segment
Elf32_Off p_offset; // File offset where segment is located, in bytes
@@ -292,6 +425,18 @@ struct Elf32_Phdr {
Elf32_Word p_align; // Segment alignment constraint
};
+// Program header for ELF64.
+struct Elf64_Phdr {
+ Elf64_Word p_type; // Type of segment
+ Elf64_Word p_flags; // Segment flags
+ Elf64_Off p_offset; // File offset where segment is located, in bytes
+ Elf64_Addr p_vaddr; // Virtual address of beginning of segment
+ Elf64_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
+ Elf64_Xword p_filesz; // Num. of bytes in file image of segment (may be zero)
+ Elf64_Xword p_memsz; // Num. of bytes in mem image of segment (may be zero)
+ Elf64_Xword p_align; // Segment alignment constraint
+};
+
// Segment types.
enum {
PT_NULL = 0, // Unused segment.
@@ -313,6 +458,65 @@ enum {
PF_MASKPROC = 0xf0000000 // Unspecified
};
+// Dynamic table entry for ELF32.
+struct Elf32_Dyn
+{
+ Elf32_Sword d_tag; // Type of dynamic table entry.
+ union
+ {
+ Elf32_Word d_val; // Integer value of entry.
+ Elf32_Addr d_ptr; // Pointer value of entry.
+ } d_un;
+};
+
+// Dynamic table entry for ELF64.
+struct Elf64_Dyn
+{
+ Elf64_Sxword d_tag; // Type of dynamic table entry.
+ union
+ {
+ Elf64_Xword d_val; // Integer value of entry.
+ Elf64_Addr d_ptr; // Pointer value of entry.
+ } d_un;
+};
+
+// Dynamic table entry tags.
+enum {
+ DT_NULL = 0, // Marks end of dynamic array.
+ DT_NEEDED = 1, // String table offset of needed library.
+ DT_PLTRELSZ = 2, // Size of relocation entries in PLT.
+ DT_PLTGOT = 3, // Address associated with linkage table.
+ DT_HASH = 4, // Address of symbolic hash table.
+ DT_STRTAB = 5, // Address of dynamic string table.
+ DT_SYMTAB = 6, // Address of dynamic symbol table.
+ DT_RELA = 7, // Address of relocation table (Rela entries).
+ DT_RELASZ = 8, // Size of Rela relocation table.
+ DT_RELAENT = 9, // Size of a Rela relocation entry.
+ DT_STRSZ = 10, // Total size of the string table.
+ DT_SYMENT = 11, // Size of a symbol table entry.
+ DT_INIT = 12, // Address of initialization function.
+ DT_FINI = 13, // Address of termination function.
+  DT_SONAME       = 14,       // String table offset of a shared object's name.
+ DT_RPATH = 15, // String table offset of library search path.
+ DT_SYMBOLIC = 16, // Changes symbol resolution algorithm.
+ DT_REL = 17, // Address of relocation table (Rel entries).
+ DT_RELSZ = 18, // Size of Rel relocation table.
+ DT_RELENT = 19, // Size of a Rel relocation entry.
+ DT_PLTREL = 20, // Type of relocation entry used for linking.
+ DT_DEBUG = 21, // Reserved for debugger.
+  DT_TEXTREL      = 22,       // Relocations exist for non-writable segments.
+ DT_JMPREL = 23, // Address of relocations associated with PLT.
+ DT_BIND_NOW = 24, // Process all relocations before execution.
+ DT_INIT_ARRAY = 25, // Pointer to array of initialization functions.
+ DT_FINI_ARRAY = 26, // Pointer to array of termination functions.
+ DT_INIT_ARRAYSZ = 27, // Size of DT_INIT_ARRAY.
+ DT_FINI_ARRAYSZ = 28, // Size of DT_FINI_ARRAY.
+ DT_LOOS = 0x60000000, // Start of environment specific tags.
+ DT_HIOS = 0x6FFFFFFF, // End of environment specific tags.
+ DT_LOPROC = 0x70000000, // Start of processor specific tags.
+ DT_HIPROC = 0x7FFFFFFF // End of processor specific tags.
+};
+
} // end namespace ELF
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/Support/IRBuilder.h b/contrib/llvm/include/llvm/Support/IRBuilder.h
index 1fd965d..4b1b1c0 100644
--- a/contrib/llvm/include/llvm/Support/IRBuilder.h
+++ b/contrib/llvm/include/llvm/Support/IRBuilder.h
@@ -97,6 +97,48 @@ public:
I->setDebugLoc(CurDbgLocation);
}
+ /// InsertPoint - A saved insertion point.
+ class InsertPoint {
+ BasicBlock *Block;
+ BasicBlock::iterator Point;
+
+ public:
+ /// Creates a new insertion point which doesn't point to anything.
+ InsertPoint() : Block(0) {}
+
+ /// Creates a new insertion point at the given location.
+ InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
+ : Block(InsertBlock), Point(InsertPoint) {}
+
+ /// isSet - Returns true if this insert point is set.
+ bool isSet() const { return (Block != 0); }
+
+ llvm::BasicBlock *getBlock() const { return Block; }
+ llvm::BasicBlock::iterator getPoint() const { return Point; }
+ };
+
+ /// saveIP - Returns the current insert point.
+ InsertPoint saveIP() const {
+ return InsertPoint(GetInsertBlock(), GetInsertPoint());
+ }
+
+ /// saveAndClearIP - Returns the current insert point, clearing it
+ /// in the process.
+ InsertPoint saveAndClearIP() {
+ InsertPoint IP(GetInsertBlock(), GetInsertPoint());
+ ClearInsertionPoint();
+ return IP;
+ }
+
+ /// restoreIP - Sets the current insert point to a previously-saved
+ /// location.
+ void restoreIP(InsertPoint IP) {
+ if (IP.isSet())
+ SetInsertPoint(IP.getBlock(), IP.getPoint());
+ else
+ ClearInsertionPoint();
+ }
+
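A typical use of the saved-insertion-point API is to emit into another block
and then return; a minimal sketch, assuming an IRBuilder<> B, basic blocks
Tmp and Cont, and that the enclosing class here is IRBuilderBase:

    IRBuilderBase::InsertPoint IP = B.saveIP(); // capture block + iterator
    B.SetInsertPoint(Tmp);                      // build elsewhere for a while
    B.CreateBr(Cont);
    B.restoreIP(IP);                            // resume where we left off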
//===--------------------------------------------------------------------===//
// Miscellaneous creation methods.
//===--------------------------------------------------------------------===//
@@ -106,33 +148,88 @@ public:
/// specified. If Name is specified, it is the name of the global variable
/// created.
Value *CreateGlobalString(const char *Str = "", const Twine &Name = "");
+
+ /// getInt1 - Get a constant value representing either true or false.
+ ConstantInt *getInt1(bool V) {
+ return ConstantInt::get(getInt1Ty(), V);
+ }
+
+ /// getTrue - Get the constant value for i1 true.
+ ConstantInt *getTrue() {
+ return ConstantInt::getTrue(Context);
+ }
+
+ /// getFalse - Get the constant value for i1 false.
+ ConstantInt *getFalse() {
+ return ConstantInt::getFalse(Context);
+ }
+
+ /// getInt8 - Get a constant 8-bit value.
+ ConstantInt *getInt8(int8_t C) {
+ return ConstantInt::getSigned(getInt8Ty(), C);
+ }
+
+ /// getInt8 - Get a constant 8-bit value.
+ ConstantInt *getInt8(uint8_t C) {
+ return ConstantInt::get(getInt8Ty(), C);
+ }
+
+ /// getInt16 - Get a constant 16-bit value.
+ ConstantInt *getInt16(int16_t C) {
+ return ConstantInt::getSigned(getInt16Ty(), C);
+ }
+
+ /// getInt16 - Get a constant 16-bit value.
+ ConstantInt *getInt16(uint16_t C) {
+ return ConstantInt::get(getInt16Ty(), C);
+ }
+
+ /// getInt32 - Get a constant 32-bit value.
+ ConstantInt *getInt32(int32_t C) {
+ return ConstantInt::getSigned(getInt32Ty(), C);
+ }
+
+ /// getInt32 - Get a constant 32-bit value.
+ ConstantInt *getInt32(uint32_t C) {
+ return ConstantInt::get(getInt32Ty(), C);
+ }
+
+ /// getInt64 - Get a constant 64-bit value.
+ ConstantInt *getInt64(int64_t C) {
+ return ConstantInt::getSigned(getInt64Ty(), C);
+ }
+
+ /// getInt64 - Get a constant 64-bit value.
+ ConstantInt *getInt64(uint64_t C) {
+ return ConstantInt::get(getInt64Ty(), C);
+ }
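The signed/unsigned overload pairs route through ConstantInt::getSigned
versus ConstantInt::get so that negative literals extend correctly; both
8-bit calls below yield the same 0xFF pattern (sketch, assuming a builder B):

    ConstantInt *A = B.getInt8(int8_t(-1));   // sign-extended via getSigned
    ConstantInt *C = B.getInt8(uint8_t(255)); // same 0xFF pattern via get
    ConstantInt *N = B.getInt32(42);          // plain 32-bit constant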
//===--------------------------------------------------------------------===//
// Type creation methods
//===--------------------------------------------------------------------===//
/// getInt1Ty - Fetch the type representing a single bit
- const Type *getInt1Ty() {
+ const IntegerType *getInt1Ty() {
return Type::getInt1Ty(Context);
}
/// getInt8Ty - Fetch the type representing an 8-bit integer.
- const Type *getInt8Ty() {
+ const IntegerType *getInt8Ty() {
return Type::getInt8Ty(Context);
}
/// getInt16Ty - Fetch the type representing a 16-bit integer.
- const Type *getInt16Ty() {
+ const IntegerType *getInt16Ty() {
return Type::getInt16Ty(Context);
}
   /// getInt32Ty - Fetch the type representing a 32-bit integer.
- const Type *getInt32Ty() {
+ const IntegerType *getInt32Ty() {
return Type::getInt32Ty(Context);
}
/// getInt64Ty - Fetch the type representing a 64-bit integer.
- const Type *getInt64Ty() {
+ const IntegerType *getInt64Ty() {
return Type::getInt64Ty(Context);
}
@@ -151,7 +248,7 @@ public:
return Type::getVoidTy(Context);
}
- const Type *getInt8PtrTy() {
+ const PointerType *getInt8PtrTy() {
return Type::getInt8PtrTy(Context);
}
@@ -624,8 +721,8 @@ public:
return Insert(GetElementPtrInst::Create(Ptr, IdxBegin, IdxEnd), Name);
}
template<typename InputIterator>
- Value *CreateInBoundsGEP(Value *Ptr, InputIterator IdxBegin, InputIterator IdxEnd,
- const Twine &Name = "") {
+ Value *CreateInBoundsGEP(Value *Ptr, InputIterator IdxBegin,
+ InputIterator IdxEnd, const Twine &Name = "") {
if (Constant *PC = dyn_cast<Constant>(Ptr)) {
// Every index must be constant.
InputIterator i;
diff --git a/contrib/llvm/include/llvm/Support/IRReader.h b/contrib/llvm/include/llvm/Support/IRReader.h
index 0dfc302..fe47c05 100644
--- a/contrib/llvm/include/llvm/Support/IRReader.h
+++ b/contrib/llvm/include/llvm/Support/IRReader.h
@@ -60,7 +60,8 @@ namespace llvm {
MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrMsg);
if (F == 0) {
Err = SMDiagnostic(Filename,
- "Could not open input file '" + Filename + "'");
+ "Could not open input file "
+ "'" + Filename + "': " + ErrMsg);
return 0;
}
@@ -98,7 +99,8 @@ namespace llvm {
MemoryBuffer *F = MemoryBuffer::getFileOrSTDIN(Filename.c_str(), &ErrMsg);
if (F == 0) {
Err = SMDiagnostic(Filename,
- "Could not open input file '" + Filename + "'");
+ "Could not open input file "
+ "'" + Filename + "': " + ErrMsg);
return 0;
}
diff --git a/contrib/llvm/include/llvm/Support/MemoryBuffer.h b/contrib/llvm/include/llvm/Support/MemoryBuffer.h
index ef7af69..8a41aa5 100644
--- a/contrib/llvm/include/llvm/Support/MemoryBuffer.h
+++ b/contrib/llvm/include/llvm/Support/MemoryBuffer.h
@@ -26,17 +26,20 @@ namespace llvm {
/// into a memory buffer. In addition to basic access to the characters in the
/// file, this interface guarantees you can read one character past the end of
/// the file, and that this character will read as '\0'.
+///
+/// The '\0' guarantee is needed to support an optimization -- it's intended to
+/// be more efficient for clients which are reading all the data to stop
+/// reading when they encounter a '\0' than to continually check the file
+/// position to see if it has reached the end of the file.
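Concretely, the guarantee lets a scanner use the terminator itself as its
sentinel; a sketch (getBufferStart is the usual MemoryBuffer accessor, not
shown in this hunk):

    unsigned countLines(const MemoryBuffer &MB) {
      unsigned N = 0;
      for (const char *P = MB.getBufferStart(); *P; ++P) // '\0' ends the scan
        if (*P == '\n') ++N;  // note: an embedded NUL stops the loop early
      return N;
    }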
class MemoryBuffer {
const char *BufferStart; // Start of the buffer.
const char *BufferEnd; // End of the buffer.
- /// MustDeleteBuffer - True if we allocated this buffer. If so, the
- /// destructor must know the delete[] it.
- bool MustDeleteBuffer;
+ MemoryBuffer(const MemoryBuffer &); // DO NOT IMPLEMENT
+ MemoryBuffer &operator=(const MemoryBuffer &); // DO NOT IMPLEMENT
protected:
- MemoryBuffer() : MustDeleteBuffer(false) {}
+ MemoryBuffer() {}
void init(const char *BufStart, const char *BufEnd);
- void initCopyOf(const char *BufStart, const char *BufEnd);
public:
virtual ~MemoryBuffer();
@@ -62,24 +65,27 @@ public:
std::string *ErrStr = 0,
int64_t FileSize = -1,
struct stat *FileInfo = 0);
+ static MemoryBuffer *getFile(const char *Filename,
+ std::string *ErrStr = 0,
+ int64_t FileSize = -1,
+ struct stat *FileInfo = 0);
/// getMemBuffer - Open the specified memory range as a MemoryBuffer. Note
/// that EndPtr[0] must be a null byte and be accessible!
static MemoryBuffer *getMemBuffer(StringRef InputData,
- const char *BufferName = "");
+ StringRef BufferName = "");
/// getMemBufferCopy - Open the specified memory range as a MemoryBuffer,
/// copying the contents and taking ownership of it. This has no requirements
/// on EndPtr[0].
static MemoryBuffer *getMemBufferCopy(StringRef InputData,
- const char *BufferName = "");
+ StringRef BufferName = "");
/// getNewMemBuffer - Allocate a new MemoryBuffer of the specified size that
   /// is completely initialized to zeros. Note that the caller need not
   /// initialize the memory allocated by this method. The memory is owned by
/// the MemoryBuffer object.
- static MemoryBuffer *getNewMemBuffer(size_t Size,
- const char *BufferName = "");
+ static MemoryBuffer *getNewMemBuffer(size_t Size, StringRef BufferName = "");
/// getNewUninitMemBuffer - Allocate a new MemoryBuffer of the specified size
/// that is not initialized. Note that the caller should initialize the
@@ -89,7 +95,8 @@ public:
StringRef BufferName = "");
/// getSTDIN - Read all of stdin into a file buffer, and return it.
- static MemoryBuffer *getSTDIN();
+ /// If an error occurs, this returns null and fills in *ErrStr with a reason.
+ static MemoryBuffer *getSTDIN(std::string *ErrStr = 0);
/// getFileOrSTDIN - Open the specified file as a MemoryBuffer, or open stdin
@@ -99,6 +106,10 @@ public:
std::string *ErrStr = 0,
int64_t FileSize = -1,
struct stat *FileInfo = 0);
+ static MemoryBuffer *getFileOrSTDIN(const char *Filename,
+ std::string *ErrStr = 0,
+ int64_t FileSize = -1,
+ struct stat *FileInfo = 0);
};
} // end namespace llvm
diff --git a/contrib/llvm/include/llvm/Support/Regex.h b/contrib/llvm/include/llvm/Support/Regex.h
index 591af00..ea65ccf 100644
--- a/contrib/llvm/include/llvm/Support/Regex.h
+++ b/contrib/llvm/include/llvm/Support/Regex.h
@@ -36,7 +36,7 @@ namespace llvm {
/// Compiles the given POSIX Extended Regular Expression \arg Regex.
/// This implementation supports regexes and matching strings with embedded
/// NUL characters.
- Regex(const StringRef &Regex, unsigned Flags = NoFlags);
+ Regex(StringRef Regex, unsigned Flags = NoFlags);
~Regex();
/// isValid - returns the error encountered during regex compilation, or
@@ -55,7 +55,7 @@ namespace llvm {
/// the first group is always the entire pattern.
///
/// This returns true on a successful match.
- bool match(const StringRef &String, SmallVectorImpl<StringRef> *Matches=0);
+ bool match(StringRef String, SmallVectorImpl<StringRef> *Matches = 0);
/// sub - Return the result of replacing the first match of the regex in
/// \arg String with the \arg Repl string. Backreferences like "\0" in the
diff --git a/contrib/llvm/include/llvm/Support/StringPool.h b/contrib/llvm/include/llvm/Support/StringPool.h
index 82e46d4..de05e0b 100644
--- a/contrib/llvm/include/llvm/Support/StringPool.h
+++ b/contrib/llvm/include/llvm/Support/StringPool.h
@@ -64,7 +64,7 @@ namespace llvm {
/// intern - Adds a string to the pool and returns a reference-counted
/// pointer to it. No additional memory is allocated if the string already
/// exists in the pool.
- PooledStringPtr intern(const StringRef &Str);
+ PooledStringPtr intern(StringRef Str);
/// empty - Checks whether the pool is empty. Returns true if so.
///
diff --git a/contrib/llvm/include/llvm/Support/Timer.h b/contrib/llvm/include/llvm/Support/Timer.h
index 00dfeaa..f959136 100644
--- a/contrib/llvm/include/llvm/Support/Timer.h
+++ b/contrib/llvm/include/llvm/Support/Timer.h
@@ -150,8 +150,10 @@ public:
/// is primarily used for debugging and for hunting performance problems.
///
struct NamedRegionTimer : public TimeRegion {
- explicit NamedRegionTimer(StringRef Name);
- explicit NamedRegionTimer(StringRef Name, StringRef GroupName);
+ explicit NamedRegionTimer(StringRef Name,
+ bool Enabled = true);
+ explicit NamedRegionTimer(StringRef Name, StringRef GroupName,
+ bool Enabled = true);
};
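
The added Enabled flag lets callers construct the timer unconditionally and
make it a no-op when timing is off; for example, gated on whatever boolean
the caller already has (TimePassesIsEnabled here is illustrative):

    {
      NamedRegionTimer T("DAG Selection", "Instruction Selection",
                         TimePassesIsEnabled); // no-op when false
      // ... region being timed ...
    }   // timer stops when T goes out of scope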
diff --git a/contrib/llvm/include/llvm/Support/raw_ostream.h b/contrib/llvm/include/llvm/Support/raw_ostream.h
index 90eaeea..bb9a523 100644
--- a/contrib/llvm/include/llvm/Support/raw_ostream.h
+++ b/contrib/llvm/include/llvm/Support/raw_ostream.h
@@ -234,8 +234,8 @@ public:
/// @param bold bold/brighter text, default false
/// @param bg if true change the background, default: change foreground
/// @returns itself so it can be used within << invocations
- virtual raw_ostream &changeColor(enum Colors, bool = false,
- bool = false) { return *this; }
+ virtual raw_ostream &changeColor(enum Colors, bool = false, bool = false) {
+ return *this; }
/// Resets the colors to terminal defaults. Call this when you are done
/// outputting colored text, or before program exit.
diff --git a/contrib/llvm/include/llvm/SymbolTableListTraits.h b/contrib/llvm/include/llvm/SymbolTableListTraits.h
index 39953e1..91a4eb9 100644
--- a/contrib/llvm/include/llvm/SymbolTableListTraits.h
+++ b/contrib/llvm/include/llvm/SymbolTableListTraits.h
@@ -47,9 +47,8 @@ public:
/// of instructions, it returns the BasicBlock that owns them.
ItemParentClass *getListOwner() {
typedef iplist<ValueSubClass> ItemParentClass::*Sublist;
- Sublist Sub(ItemParentClass::
- getSublistAccess(static_cast<ValueSubClass*>(0)));
- size_t Offset(size_t(&((ItemParentClass*)0->*Sub)));
+ size_t Offset(size_t(&((ItemParentClass*)0->*ItemParentClass::
+ getSublistAccess(static_cast<ValueSubClass*>(0)))));
iplist<ValueSubClass>* Anchor(static_cast<iplist<ValueSubClass>*>(this));
return reinterpret_cast<ItemParentClass*>(reinterpret_cast<char*>(Anchor)-
Offset);
diff --git a/contrib/llvm/include/llvm/System/DataTypes.h.cmake b/contrib/llvm/include/llvm/System/DataTypes.h.cmake
index d9ca273..9efe75a 100644
--- a/contrib/llvm/include/llvm/System/DataTypes.h.cmake
+++ b/contrib/llvm/include/llvm/System/DataTypes.h.cmake
@@ -109,41 +109,59 @@ typedef unsigned short uint16_t;
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef signed int ssize_t;
-#define INT8_MAX 127
-#define INT8_MIN -128
-#define UINT8_MAX 255
-#define INT16_MAX 32767
-#define INT16_MIN -32768
-#define UINT16_MAX 65535
-#define INT32_MAX 2147483647
-#define INT32_MIN -2147483648
-#define UINT32_MAX 4294967295U
+#ifndef INT8_MAX
+# define INT8_MAX 127
+#endif
+#ifndef INT8_MIN
+# define INT8_MIN -128
+#endif
+#ifndef UINT8_MAX
+# define UINT8_MAX 255
+#endif
+#ifndef INT16_MAX
+# define INT16_MAX 32767
+#endif
+#ifndef INT16_MIN
+# define INT16_MIN -32768
+#endif
+#ifndef UINT16_MAX
+# define UINT16_MAX 65535
+#endif
+#ifndef INT32_MAX
+# define INT32_MAX 2147483647
+#endif
+#ifndef INT32_MIN
+# define INT32_MIN -2147483648
+#endif
+#ifndef UINT32_MAX
+# define UINT32_MAX 4294967295U
+#endif
/* Certain compatibility updates to VC++ introduce the `cstdint'
* header, which defines the INT*_C macros. On default installs they
* are absent. */
#ifndef INT8_C
-# define INT8_C(C) C
+# define INT8_C(C) C##i8
#endif
#ifndef UINT8_C
-# define UINT8_C(C) C
+# define UINT8_C(C) C##ui8
#endif
#ifndef INT16_C
-# define INT16_C(C) C
+# define INT16_C(C) C##i16
#endif
#ifndef UINT16_C
-# define UINT16_C(C) C
+# define UINT16_C(C) C##ui16
#endif
#ifndef INT32_C
-# define INT32_C(C) C
+# define INT32_C(C) C##i32
#endif
#ifndef UINT32_C
-# define UINT32_C(C) C ## U
+# define UINT32_C(C) C##ui32
#endif
#ifndef INT64_C
-# define INT64_C(C) ((int64_t) C ## LL)
+# define INT64_C(C) C##i64
#endif
#ifndef UINT64_C
-# define UINT64_C(C) ((uint64_t) C ## ULL)
+# define UINT64_C(C) C##ui64
#endif
#endif /* _MSC_VER */
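Under pre-stdint MSVC these macros now expand to VC++ integer-suffix literals
rather than the generic C forms, for example:

    int64_t  Big = INT64_C(1) << 40;                 /* 1i64 << 40 */
    uint64_t All = UINT64_C(0xFFFFFFFFFFFFFFFF);     /* 0xFFFFFFFFFFFFFFFFui64 */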
diff --git a/contrib/llvm/include/llvm/System/Path.h b/contrib/llvm/include/llvm/System/Path.h
index d4af478..0461769 100644
--- a/contrib/llvm/include/llvm/System/Path.h
+++ b/contrib/llvm/include/llvm/System/Path.h
@@ -292,14 +292,6 @@ namespace sys {
/// @name Disk Accessors
/// @{
public:
- /// This function determines if the path name in this object references
- /// the root (top level directory) of the file system. The details of what
- /// is considered the "root" may vary from system to system so this method
- /// will do the necessary checking.
- /// @returns true iff the path name references the root directory.
- /// @brief Determines if the path references the root directory.
- bool isRootDirectory() const;
-
/// This function determines if the path name is absolute, as opposed to
/// relative.
/// @brief Determine if the path is absolute.
diff --git a/contrib/llvm/include/llvm/Target/Target.td b/contrib/llvm/include/llvm/Target/Target.td
index ca551e5..9a89dc9 100644
--- a/contrib/llvm/include/llvm/Target/Target.td
+++ b/contrib/llvm/include/llvm/Target/Target.td
@@ -203,7 +203,6 @@ class Instruction {
bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
bit mayLoad = 0; // Is it possible for this inst to read memory?
bit mayStore = 0; // Is it possible for this inst to write memory?
- bit isTwoAddress = 0; // Is this a two address instruction?
bit isConvertibleToThreeAddress = 0; // Can this 2-addr instruction promote?
bit isCommutable = 0; // Is this 3 operand instruction commutable?
bit isTerminator = 0; // Is this part of the terminator for a basic block?
@@ -244,7 +243,7 @@ class Instruction {
string DisableEncoding = "";
/// Target-specific flags. This becomes the TSFlags field in TargetInstrDesc.
- bits<32> TSFlags = 0;
+ bits<64> TSFlags = 0;
}
/// Predicates - These are extra conditionals which are turned into instruction
@@ -397,24 +396,23 @@ class InstrInfo {
}
// Standard Pseudo Instructions.
-let isCodeGenOnly = 1 in {
+// This list must match TargetOpcodes.h and CodeGenTarget.cpp.
+// Only these instructions are allowed in the TargetOpcode namespace.
+let isCodeGenOnly = 1, Namespace = "TargetOpcode" in {
def PHI : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let AsmString = "PHINODE";
- let Namespace = "TargetOpcode";
}
def INLINEASM : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let AsmString = "";
- let Namespace = "TargetOpcode";
}
def DBG_LABEL : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins i32imm:$id);
let AsmString = "";
- let Namespace = "TargetOpcode";
let hasCtrlDep = 1;
let isNotDuplicable = 1;
}
@@ -422,7 +420,6 @@ def EH_LABEL : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins i32imm:$id);
let AsmString = "";
- let Namespace = "TargetOpcode";
let hasCtrlDep = 1;
let isNotDuplicable = 1;
}
@@ -430,7 +427,6 @@ def GC_LABEL : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins i32imm:$id);
let AsmString = "";
- let Namespace = "TargetOpcode";
let hasCtrlDep = 1;
let isNotDuplicable = 1;
}
@@ -438,21 +434,18 @@ def KILL : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
}
def EXTRACT_SUBREG : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins unknown:$supersrc, i32imm:$subidx);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
}
def INSERT_SUBREG : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins unknown:$supersrc, unknown:$subsrc, i32imm:$subidx);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
let Constraints = "$supersrc = $dst";
}
@@ -460,7 +453,6 @@ def IMPLICIT_DEF : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
let isReMaterializable = 1;
let isAsCheapAsAMove = 1;
@@ -469,14 +461,12 @@ def SUBREG_TO_REG : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins unknown:$implsrc, unknown:$subsrc, i32imm:$subidx);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
}
def COPY_TO_REGCLASS : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins unknown:$src, i32imm:$regclass);
let AsmString = "";
- let Namespace = "TargetOpcode";
let neverHasSideEffects = 1;
let isAsCheapAsAMove = 1;
}
@@ -484,15 +474,19 @@ def DBG_VALUE : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins variable_ops);
let AsmString = "DBG_VALUE";
- let Namespace = "TargetOpcode";
let isAsCheapAsAMove = 1;
}
-
def REG_SEQUENCE : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins variable_ops);
let AsmString = "";
- let Namespace = "TargetOpcode";
+ let neverHasSideEffects = 1;
+ let isAsCheapAsAMove = 1;
+}
+def COPY : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$src);
+ let AsmString = "";
let neverHasSideEffects = 1;
let isAsCheapAsAMove = 1;
}
diff --git a/contrib/llvm/include/llvm/Target/TargetAsmParser.h b/contrib/llvm/include/llvm/Target/TargetAsmParser.h
index 85315c1..f431c38 100644
--- a/contrib/llvm/include/llvm/Target/TargetAsmParser.h
+++ b/contrib/llvm/include/llvm/Target/TargetAsmParser.h
@@ -10,6 +10,8 @@
#ifndef LLVM_TARGET_TARGETPARSER_H
#define LLVM_TARGET_TARGETPARSER_H
+#include "llvm/MC/MCParser/MCAsmParserExtension.h"
+
namespace llvm {
class MCInst;
class StringRef;
@@ -20,7 +22,7 @@ class MCParsedAsmOperand;
template <typename T> class SmallVectorImpl;
/// TargetAsmParser - Generic interface to target specific assembly parsers.
-class TargetAsmParser {
+class TargetAsmParser : public MCAsmParserExtension {
TargetAsmParser(const TargetAsmParser &); // DO NOT IMPLEMENT
void operator=(const TargetAsmParser &); // DO NOT IMPLEMENT
protected: // Can only create subclasses.
@@ -47,7 +49,7 @@ public:
/// \param Operands [out] - The list of parsed operands, this returns
/// ownership of them to the caller.
/// \return True on failure.
- virtual bool ParseInstruction(const StringRef &Name, SMLoc NameLoc,
+ virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) = 0;
/// ParseDirective - Parse a target specific assembler directive
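With TargetAsmParser now inheriting from MCAsmParserExtension and ParseInstruction taking its StringRef by value, a target's parser override looks roughly like the sketch below. FooAsmParser is a hypothetical target; the base constructor and the Error() helper are assumed from the headers of this era:

  // Hypothetical subclass; only the two virtual signatures come from this
  // header, everything else is an assumption.
  class FooAsmParser : public TargetAsmParser {
  public:
    FooAsmParser(const Target &T) : TargetAsmParser(T) {} // base ctor assumed
    virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                                  SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
      // Tokenize the mnemonic and operands here; returning true signals failure.
      return Error(NameLoc, "unrecognized instruction: " + Name.str());
    }
    virtual bool ParseDirective(AsmToken DirectiveID) {
      return true; // directive not handled by this target
    }
  };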
diff --git a/contrib/llvm/include/llvm/Target/TargetCallingConv.h b/contrib/llvm/include/llvm/Target/TargetCallingConv.h
new file mode 100644
index 0000000..f368a2e
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetCallingConv.h
@@ -0,0 +1,142 @@
+//===-- llvm/Target/TargetCallingConv.h - Calling Convention ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines types for working with calling-convention information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETCALLINGCONV_H
+#define LLVM_TARGET_TARGETCALLINGCONV_H
+
+namespace llvm {
+
+namespace ISD {
+ struct ArgFlagsTy {
+ private:
+ static const uint64_t NoFlagSet = 0ULL;
+ static const uint64_t ZExt = 1ULL<<0; ///< Zero extended
+ static const uint64_t ZExtOffs = 0;
+ static const uint64_t SExt = 1ULL<<1; ///< Sign extended
+ static const uint64_t SExtOffs = 1;
+ static const uint64_t InReg = 1ULL<<2; ///< Passed in register
+ static const uint64_t InRegOffs = 2;
+ static const uint64_t SRet = 1ULL<<3; ///< Hidden struct-ret ptr
+ static const uint64_t SRetOffs = 3;
+ static const uint64_t ByVal = 1ULL<<4; ///< Struct passed by value
+ static const uint64_t ByValOffs = 4;
+ static const uint64_t Nest = 1ULL<<5; ///< Nested fn static chain
+ static const uint64_t NestOffs = 5;
+ static const uint64_t ByValAlign = 0xFULL << 6; //< Struct alignment
+ static const uint64_t ByValAlignOffs = 6;
+ static const uint64_t Split = 1ULL << 10;
+ static const uint64_t SplitOffs = 10;
+ static const uint64_t OrigAlign = 0x1FULL<<27;
+ static const uint64_t OrigAlignOffs = 27;
+ static const uint64_t ByValSize = 0xffffffffULL << 32; //< Struct size
+ static const uint64_t ByValSizeOffs = 32;
+
+ static const uint64_t One = 1ULL; //< 1 of this type, for shifts
+
+ uint64_t Flags;
+ public:
+ ArgFlagsTy() : Flags(0) { }
+
+ bool isZExt() const { return Flags & ZExt; }
+ void setZExt() { Flags |= One << ZExtOffs; }
+
+ bool isSExt() const { return Flags & SExt; }
+ void setSExt() { Flags |= One << SExtOffs; }
+
+ bool isInReg() const { return Flags & InReg; }
+ void setInReg() { Flags |= One << InRegOffs; }
+
+ bool isSRet() const { return Flags & SRet; }
+ void setSRet() { Flags |= One << SRetOffs; }
+
+ bool isByVal() const { return Flags & ByVal; }
+ void setByVal() { Flags |= One << ByValOffs; }
+
+ bool isNest() const { return Flags & Nest; }
+ void setNest() { Flags |= One << NestOffs; }
+
+ unsigned getByValAlign() const {
+ return (unsigned)
+ ((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
+ }
+ void setByValAlign(unsigned A) {
+ Flags = (Flags & ~ByValAlign) |
+ (uint64_t(Log2_32(A) + 1) << ByValAlignOffs);
+ }
+
+ bool isSplit() const { return Flags & Split; }
+ void setSplit() { Flags |= One << SplitOffs; }
+
+ unsigned getOrigAlign() const {
+ return (unsigned)
+ ((One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2);
+ }
+ void setOrigAlign(unsigned A) {
+ Flags = (Flags & ~OrigAlign) |
+ (uint64_t(Log2_32(A) + 1) << OrigAlignOffs);
+ }
+
+ unsigned getByValSize() const {
+ return (unsigned)((Flags & ByValSize) >> ByValSizeOffs);
+ }
+ void setByValSize(unsigned S) {
+ Flags = (Flags & ~ByValSize) | (uint64_t(S) << ByValSizeOffs);
+ }
+
+ /// getArgFlagsString - Returns the flags as a string, eg: "zext align:4".
+ std::string getArgFlagsString();
+
+ /// getRawBits - Represent the flags as a bunch of bits.
+ uint64_t getRawBits() const { return Flags; }
+ };
+
+ /// InputArg - This struct carries flags and type information about a
+ /// single incoming (formal) argument or incoming (from the perspective
+ /// of the caller) return value virtual register.
+ ///
+ struct InputArg {
+ ArgFlagsTy Flags;
+ EVT VT;
+ bool Used;
+
+ InputArg() : VT(MVT::Other), Used(false) {}
+ InputArg(ArgFlagsTy flags, EVT vt, bool used)
+ : Flags(flags), VT(vt), Used(used) {
+ assert(VT.isSimple() &&
+ "InputArg value type must be Simple!");
+ }
+ };
+
+ /// OutputArg - This struct carries flags and a value for a
+ /// single outgoing (actual) argument or outgoing (from the perspective
+ /// of the caller) return value virtual register.
+ ///
+ struct OutputArg {
+ ArgFlagsTy Flags;
+ EVT VT;
+
+ /// IsFixed - Is this a "fixed" value, i.e. not passed through a vararg "...".
+ bool IsFixed;
+
+ OutputArg() : IsFixed(false) {}
+ OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed)
+ : Flags(flags), VT(vt), IsFixed(isfixed) {
+ assert(VT.isSimple() &&
+ "OutputArg value type must be Simple!");
+ }
+ };
+}
+
+} // end llvm namespace
+
+#endif
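The ByValAlign field stores Log2_32(A) + 1 in four bits and decodes it as (1 << stored) / 2, so an alignment of zero round-trips as zero and any power of two up to 2^14 fits. A short worked example of that encoding (a sketch, not a real test):

  ISD::ArgFlagsTy Flags;
  Flags.setByVal();
  Flags.setByValAlign(8);   // stores Log2_32(8) + 1 == 4 into bits 6..9
  Flags.setByValSize(24);   // the byte size lives in the high 32 bits

  assert(Flags.isByVal());
  assert(Flags.getByValAlign() == 8);   // (1 << 4) / 2 == 8
  assert(Flags.getByValSize() == 24);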
diff --git a/contrib/llvm/include/llvm/Target/TargetInstrDesc.h b/contrib/llvm/include/llvm/Target/TargetInstrDesc.h
index adc37e1..8f0a6cb 100644
--- a/contrib/llvm/include/llvm/Target/TargetInstrDesc.h
+++ b/contrib/llvm/include/llvm/Target/TargetInstrDesc.h
@@ -15,6 +15,8 @@
#ifndef LLVM_TARGET_TARGETINSTRDESC_H
#define LLVM_TARGET_TARGETINSTRDESC_H
+#include "llvm/System/DataTypes.h"
+
namespace llvm {
class TargetRegisterClass;
@@ -53,7 +55,7 @@ public:
///
/// NOTE: This member should be considered to be private, all access should go
/// through "getRegClass(TRI)" below.
- unsigned short RegClass;
+ short RegClass;
/// Flags - These are flags from the TOI::OperandFlags enum.
unsigned short Flags;
@@ -131,7 +133,7 @@ public:
unsigned short SchedClass; // enum identifying instr sched class
const char * Name; // Name of the instruction record in td file
unsigned Flags; // Flags identifying machine instr class
- unsigned TSFlags; // Target Specific Flag values
+ uint64_t TSFlags; // Target Specific Flag values
const unsigned *ImplicitUses; // Registers implicitly read by this instr
const unsigned *ImplicitDefs; // Registers implicitly defined by this instr
const TargetRegisterClass **RCBarriers; // Reg classes completely "clobbered"
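Widening TSFlags from unsigned to uint64_t doubles the room targets have for packed per-instruction properties. A sketch of the usual extraction pattern; the FooII names and bit layout are invented for illustration:

  // Hypothetical target flag layout packed into TargetInstrDesc::TSFlags.
  namespace FooII {
    const uint64_t FormMask     = 0x3f;        // low 6 bits: encoding format
    const uint64_t IsPredicated = 1ULL << 32;  // only representable now that
  }                                            // TSFlags is 64 bits wide

  static bool isPredicated(const TargetInstrDesc &TID) {
    return (TID.TSFlags & FooII::IsPredicated) != 0;
  }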
diff --git a/contrib/llvm/include/llvm/Target/TargetInstrInfo.h b/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
index 2e5697e..e42be26 100644
--- a/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -20,12 +20,14 @@
namespace llvm {
class CalleeSavedInfo;
+class InstrItineraryData;
class LiveVariables;
class MCAsmInfo;
class MachineMemOperand;
class MDNode;
class MCInst;
class SDNode;
+class ScheduleHazardRecognizer;
class SelectionDAG;
class TargetRegisterClass;
class TargetRegisterInfo;
@@ -120,10 +122,6 @@ public:
SrcReg == DstReg)
return true;
- if (MI.getOpcode() == TargetOpcode::EXTRACT_SUBREG &&
- MI.getOperand(0).getReg() == MI.getOperand(1).getReg())
- return true;
-
if ((MI.getOpcode() == TargetOpcode::INSERT_SUBREG ||
MI.getOpcode() == TargetOpcode::SUBREG_TO_REG) &&
MI.getOperand(0).getReg() == MI.getOperand(2).getReg())
@@ -194,11 +192,22 @@ public:
/// reMaterialize - Re-issue the specified 'original' instruction at the
/// specific location targeting a new destination register.
+ /// The register in Orig->getOperand(0).getReg() will be substituted by
+ /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
+ /// SubIdx.
virtual void reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const = 0;
+ const TargetRegisterInfo &TRI) const = 0;
+
+ /// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
+ /// two-address instruction inserted by the two-address pass.
+ virtual void scheduleTwoAddrSource(MachineInstr *SrcMI,
+ MachineInstr *UseMI,
+ const TargetRegisterInfo &TRI) const {
+ // Do nothing.
+ }
/// duplicate - Create a duplicate of the Orig instruction in MF. This is like
/// MachineFunction::CloneMachineInstr(), but the target may update operands
@@ -224,23 +233,19 @@ public:
return 0;
}
- /// commuteInstruction - If a target has any instructions that are commutable,
- /// but require converting to a different instruction or making non-trivial
- /// changes to commute them, this method can overloaded to do this. The
- /// default implementation of this method simply swaps the first two operands
- /// of MI and returns it.
- ///
- /// If a target wants to make more aggressive changes, they can construct and
- /// return a new machine instruction. If an instruction cannot commute, it
- /// can also return null.
- ///
- /// If NewMI is true, then a new machine instruction must be created.
- ///
+ /// commuteInstruction - If a target has any instructions that are
+ /// commutable but require converting to different instructions or making
+ /// non-trivial changes to commute them, this method can be overloaded to do
+ /// that. The default implementation simply swaps the commutable operands.
+ /// If NewMI is false, MI is modified in place and returned; otherwise, a
+ /// new machine instruction is created and returned. Do not call this
+ /// method for a non-commutable instruction; even for commutable instructions
+ /// there may be some cases where this method fails and returns null.
virtual MachineInstr *commuteInstruction(MachineInstr *MI,
bool NewMI = false) const = 0;
/// findCommutedOpIndices - If specified MI is commutable, return the two
- /// operand indices that would swap value. Return true if the instruction
+ /// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const = 0;
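Read together, the two hooks give the usual calling pattern: probe findCommutedOpIndices first, then ask commuteInstruction for the in-place or cloned result and allow for a null return. A sketch, assuming TII is a const TargetInstrInfo* and MI a commutable MachineInstr*:

  unsigned Idx1, Idx2;
  if (TII->findCommutedOpIndices(MI, Idx1, Idx2)) {
    // With NewMI == false, MI is modified in place and returned on success.
    if (MachineInstr *CommutedMI = TII->commuteInstruction(MI)) {
      // ... use CommutedMI (same as MI here) ...
    }
    // A null result means this particular instruction couldn't be commuted.
  }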
@@ -302,25 +307,60 @@ public:
/// branch to analyze. At least this much must be implemented, else tail
/// merging needs to be disabled.
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
return 0;
}
+
+ /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
+ /// after it, replacing it with an unconditional branch to NewDest. This is
+ /// used by the tail merging pass.
+ virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+ MachineBasicBlock *NewDest) const = 0;
+
+ /// isLegalToSplitMBBAt - Return true if it's legal to split the given basic
+ /// block at the specified instruction (i.e. instruction would be the start
+ /// of a new basic block).
+ virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) const {
+ return true;
+ }
+
+ /// isProfitableToIfCvt - Return true if it's profitable to if-convert the
+ /// first "NumInstrs" instructions of the specified basic block.
+ virtual
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
+ return false;
+ }
- /// copyRegToReg - Emit instructions to copy between a pair of registers. It
- /// returns false if the target does not how to copy between the specified
- /// registers.
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
- assert(0 && "Target didn't implement TargetInstrInfo::copyRegToReg!");
+ /// isProfitableToIfCvt - Second variant of isProfitableToIfCvt, this one
+ /// checks for the case where the two basic blocks from the true and false
+ /// paths of an if-then-else (diamond) are predicated on mutually exclusive
+ /// predicates.
+ virtual bool
+ isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTInstrs,
+ MachineBasicBlock &FMBB, unsigned NumFInstrs) const {
+ return false;
+ }
+
+ /// isProfitableToDupForIfCvt - Return true if it's profitable for
+ /// the if-converter to duplicate a specific number of instructions in the
+ /// specified MBB to enable if-conversion.
+ virtual bool
+ isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
return false;
}
+ /// copyPhysReg - Emit instructions to copy a pair of physical registers.
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ assert(0 && "Target didn't implement TargetInstrInfo::copyPhysReg!");
+ }
+
/// storeRegToStackSlot - Store the specified register of the given register
/// class to the specified stack frame index. The store instruction is to be
/// added to the given machine basic block before the specified machine
@@ -331,7 +371,7 @@ public:
unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert(0 && "Target didn't implement TargetInstrInfo::storeRegToStackSlot!");
+ assert(0 && "Target didn't implement TargetInstrInfo::storeRegToStackSlot!");
}
/// loadRegFromStackSlot - Load the specified register of the given register
@@ -343,7 +383,7 @@ public:
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!");
+ assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!");
}
/// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
@@ -352,7 +392,7 @@ public:
/// storeRegToStackSlot(). Returns false otherwise.
virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
+ const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const {
return false;
}
@@ -387,19 +427,17 @@ public:
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
- /// operand folded, otherwise NULL is returned. The client is responsible for
- /// removing the old instruction and adding the new one in the instruction
- /// stream.
- MachineInstr* foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
+ /// operand folded, otherwise NULL is returned.
+ /// The new instruction is inserted before MI, and the client is responsible
+ /// for removing the old instruction.
+ MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
- MachineInstr* foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
+ MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const;
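Under the new contract, the folded instruction is inserted before MI and the caller erases the original. A sketch, assuming TII, MI, OpNum, and FrameIndex are in scope:

  SmallVector<unsigned, 1> Ops;
  Ops.push_back(OpNum);  // operand to replace with the stack-slot access
  if (MachineInstr *FoldedMI = TII->foldMemoryOperand(MI, Ops, FrameIndex)) {
    MI->eraseFromParent();  // the client removes the old instruction
    MI = FoldedMI;          // the fold already sits in the right place
  }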
@@ -419,7 +457,7 @@ protected:
/// take care of adding a MachineMemOperand to the newly created instruction.
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
+ const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
}
@@ -429,9 +467,7 @@ public:
/// folding is possible.
virtual
bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const {
- return false;
- }
+ const SmallVectorImpl<unsigned> &Ops) const = 0;
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instruction. If this is
@@ -465,7 +501,7 @@ public:
/// only differences between the two addresses are the offset. It also returns
/// the offsets by reference.
virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
- int64_t &Offset1, int64_t &Offset2) const {
+ int64_t &Offset1, int64_t &Offset2) const {
return false;
}
@@ -548,6 +584,13 @@ public:
return true;
}
+ /// isSchedulingBoundary - Test if the given instruction should be
+ /// considered a scheduling boundary. This primarily includes labels and
+ /// terminators.
+ virtual bool isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const = 0;
+
/// GetInstSize - Returns the size of the specified Instruction.
///
virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const {
@@ -564,6 +607,12 @@ public:
/// length.
virtual unsigned getInlineAsmLength(const char *Str,
const MCAsmInfo &MAI) const;
+
+ /// CreateTargetPostRAHazardRecognizer - Allocate and return a hazard recognizer
+ /// to use for this target when scheduling the machine instructions after
+ /// register allocation.
+ virtual ScheduleHazardRecognizer*
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const = 0;
};
/// TargetInstrInfoImpl - This is the default implementation of
@@ -575,22 +624,32 @@ protected:
TargetInstrInfoImpl(const TargetInstrDesc *desc, unsigned NumOpcodes)
: TargetInstrInfo(desc, NumOpcodes) {}
public:
+ virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
+ MachineBasicBlock *NewDest) const;
virtual MachineInstr *commuteInstruction(MachineInstr *MI,
bool NewMI = false) const;
virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const;
+ virtual bool canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const;
virtual bool PredicateInstruction(MachineInstr *MI,
const SmallVectorImpl<MachineOperand> &Pred) const;
virtual void reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubReg,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo &TRI) const;
virtual MachineInstr *duplicate(MachineInstr *Orig,
MachineFunction &MF) const;
virtual bool produceSameValue(const MachineInstr *MI0,
const MachineInstr *MI1) const;
+ virtual bool isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const;
virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const;
+
+ virtual ScheduleHazardRecognizer *
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const;
};
} // End llvm namespace
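The removed copyRegToReg returned false for impossible copies; its replacement copyPhysReg must always succeed, so unsupported cases should assert. A hypothetical override (the Foo target, MOVrr opcode, and GPR class are made up; note it can use the new two-register TargetRegisterClass::contains() overload from this same change set):

  void FooInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI, DebugLoc DL,
                                 unsigned DestReg, unsigned SrcReg,
                                 bool KillSrc) const {
    assert(Foo::GPRRegClass.contains(DestReg, SrcReg) &&
           "Impossible reg-to-reg copy");
    BuildMI(MBB, MI, DL, get(Foo::MOVrr), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
  }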
diff --git a/contrib/llvm/include/llvm/Target/TargetInstrItineraries.h b/contrib/llvm/include/llvm/Target/TargetInstrItineraries.h
index 3dfa8bc..39648c2 100644
--- a/contrib/llvm/include/llvm/Target/TargetInstrItineraries.h
+++ b/contrib/llvm/include/llvm/Target/TargetInstrItineraries.h
@@ -106,7 +106,8 @@ struct InstrItinerary {
/// Instruction itinerary Data - Itinerary data supplied by a subtarget to be
/// used by a target.
///
-struct InstrItineraryData {
+class InstrItineraryData {
+public:
const InstrStage *Stages; ///< Array of stages selected
const unsigned *OperandCycles; ///< Array of operand cycles selected
const InstrItinerary *Itineratries; ///< Array of itineraries selected
diff --git a/contrib/llvm/include/llvm/Target/TargetLowering.h b/contrib/llvm/include/llvm/Target/TargetLowering.h
index 5efebe6..2b6e4fa 100644
--- a/contrib/llvm/include/llvm/Target/TargetLowering.h
+++ b/contrib/llvm/include/llvm/Target/TargetLowering.h
@@ -24,6 +24,7 @@
#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
+#include "llvm/Attributes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/ADT/APFloat.h"
@@ -32,6 +33,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DebugLoc.h"
+#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
#include <map>
@@ -42,6 +44,7 @@ namespace llvm {
class CallInst;
class Function;
class FastISel;
+ class FunctionLoweringInfo;
class MachineBasicBlock;
class MachineFunction;
class MachineFrameInfo;
@@ -114,7 +117,7 @@ public:
/// isSelectExpensive - Return true if the select operation is expensive for
/// this target.
bool isSelectExpensive() const { return SelectIsExpensive; }
-
+
/// isIntDivCheap() - Return true if integer divide is usually cheaper than
/// a sequence of several shifts, adds, and multiplies for this target.
bool isIntDivCheap() const { return IntDivIsCheap; }
@@ -131,10 +134,10 @@ public:
virtual
MVT::SimpleValueType getSetCCResultType(EVT VT) const;
- /// getCmpLibcallReturnType - Return the ValueType for comparison
+ /// getCmpLibcallReturnType - Return the ValueType for comparison
/// libcalls. Comparison libcalls include floating point comparison calls,
/// and Ordered/Unordered check calls on floating point numbers.
- virtual
+ virtual
MVT::SimpleValueType getCmpLibcallReturnType() const;
/// getBooleanContents - For targets without i1 registers, this gives the
@@ -208,7 +211,7 @@ public:
ValueTypeActions[I] = Action;
}
};
-
+
const ValueTypeActionImpl &getValueTypeActions() const {
return ValueTypeActions;
}
@@ -229,7 +232,7 @@ public:
/// returns the integer type to transform to.
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
if (VT.isSimple()) {
- assert((unsigned)VT.getSimpleVT().SimpleTy <
+ assert((unsigned)VT.getSimpleVT().SimpleTy <
array_lengthof(TransformToType));
EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
assert(getTypeAction(Context, NVT) != Promote &&
@@ -256,7 +259,7 @@ public:
return EVT::getIntegerVT(Context, VT.getSizeInBits() / 2);
else
// Promote to a power of two size, avoiding multi-step promotion.
- return getTypeAction(Context, NVT) == Promote ?
+ return getTypeAction(Context, NVT) == Promote ?
getTypeToTransformTo(Context, NVT) : NVT;
}
assert(0 && "Unsupported extended type!");
@@ -302,11 +305,11 @@ public:
/// intrinsic will need to map to a MemIntrinsicNode (touches memory). If
/// this is the case, it returns true and store the intrinsic
/// information into the IntrinsicInfo that was passed to the function.
- struct IntrinsicInfo {
+ struct IntrinsicInfo {
unsigned opc; // target opcode
EVT memVT; // memory VT
const Value* ptrVal; // value representing memory location
- int offset; // offset off of ptrVal
+ int offset; // offset off of ptrVal
unsigned align; // alignment
bool vol; // is volatile?
bool readMem; // reads memory?
@@ -324,7 +327,7 @@ public:
virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const {
return false;
}
-
+
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
@@ -446,7 +449,7 @@ public:
"Table isn't big enough!");
unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
- }
+ }
/// isIndexedStoreLegal - Return true if the specified indexed load is legal
/// on this target.
@@ -492,7 +495,7 @@ public:
assert((VT.isInteger() || VT.isFloatingPoint()) &&
"Cannot autopromote this type, add it with AddPromotedToType.");
-
+
EVT NVT = VT;
do {
NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1);
@@ -516,14 +519,14 @@ public:
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
virtual unsigned getByValTypeAlignment(const Type *Ty) const;
-
+
/// getRegisterType - Return the type of registers that this ValueType will
/// eventually require.
EVT getRegisterType(MVT VT) const {
assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
return RegisterTypeForVT[VT.SimpleTy];
}
-
+
/// getRegisterType - Return the type of registers that this ValueType will
/// eventually require.
EVT getRegisterType(LLVMContext &Context, EVT VT) const {
@@ -606,7 +609,7 @@ public:
/// of the specified type. This is used, for example, in situations where an
/// array copy/move/set is converted to a sequence of store operations. Its
/// use helps to ensure that such replacements don't generate code that causes
- /// an alignment error (trap) on the target machine.
+ /// an alignment error (trap) on the target machine.
/// @brief Determine if the target supports unaligned memory accesses.
virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
return false;
@@ -637,7 +640,7 @@ public:
MachineFunction &MF) const {
return MVT::Other;
}
-
+
/// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
/// to implement llvm.setjmp.
bool usesUnderscoreSetJmp() const {
@@ -683,17 +686,10 @@ public:
return JumpBufAlignment;
}
- /// getIfCvtBlockLimit - returns the target specific if-conversion block size
- /// limit. Any block whose size is greater should not be predicated.
- unsigned getIfCvtBlockSizeLimit() const {
- return IfCvtBlockSizeLimit;
- }
-
- /// getIfCvtDupBlockLimit - returns the target specific size limit for a
- /// block to be considered for duplication. Any block whose size is greater
- /// should not be duplicated to facilitate its predication.
- unsigned getIfCvtDupBlockSizeLimit() const {
- return IfCvtDupBlockSizeLimit;
+ /// getMinStackArgumentAlignment - return the minimum stack alignment of an
+ /// argument.
+ unsigned getMinStackArgumentAlignment() const {
+ return MinStackArgumentAlignment;
}
/// getPrefLoopAlignment - return the preferred loop alignment.
@@ -701,7 +697,14 @@ public:
unsigned getPrefLoopAlignment() const {
return PrefLoopAlignment;
}
-
+
+ /// getShouldFoldAtomicFences - return whether the combiner should fold
+ /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
+ ///
+ bool getShouldFoldAtomicFences() const {
+ return ShouldFoldAtomicFences;
+ }
+
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
@@ -711,7 +714,7 @@ public:
SelectionDAG &DAG) const {
return false;
}
-
+
/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
@@ -721,12 +724,12 @@ public:
SelectionDAG &DAG) const {
return false;
}
-
+
/// getJumpTableEncoding - Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
virtual unsigned getJumpTableEncoding() const;
-
+
virtual const MCExpr *
LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB, unsigned uid,
@@ -734,7 +737,7 @@ public:
assert(0 && "Need to implement this hook if target has custom JTIs");
return 0;
}
-
+
/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
/// jumptable.
virtual SDValue getPICJumpTableRelocBase(SDValue Table,
@@ -746,7 +749,7 @@ public:
virtual const MCExpr *
getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
unsigned JTI, MCContext &Ctx) const;
-
+
/// isOffsetFoldingLegal - Return true if folding a constant offset
/// with the given GlobalAddress is legal. It is frequently not legal in
/// PIC relocation models.
@@ -755,36 +758,42 @@ public:
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *) const = 0;
+ /// getStackCookieLocation - Return true if the target stores stack
+ /// protector cookies at a fixed offset in some non-standard address
+ /// space, and populates the address space and offset as
+ /// appropriate.
+ virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const {
+ return false;
+ }
+
//===--------------------------------------------------------------------===//
// TargetLowering Optimization Methods
//
-
+
/// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
/// SDValues for returning information from TargetLowering to its clients
- /// that want to combine
+ /// that want to combine
struct TargetLoweringOpt {
SelectionDAG &DAG;
bool LegalTys;
bool LegalOps;
- bool ShrinkOps;
SDValue Old;
SDValue New;
explicit TargetLoweringOpt(SelectionDAG &InDAG,
- bool LT, bool LO,
- bool Shrink = false) :
- DAG(InDAG), LegalTys(LT), LegalOps(LO), ShrinkOps(Shrink) {}
+ bool LT, bool LO) :
+ DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
bool LegalTypes() const { return LegalTys; }
bool LegalOperations() const { return LegalOps; }
-
- bool CombineTo(SDValue O, SDValue N) {
- Old = O;
- New = N;
+
+ bool CombineTo(SDValue O, SDValue N) {
+ Old = O;
+ New = N;
return true;
}
-
- /// ShrinkDemandedConstant - Check to see if the specified operand of the
+
+ /// ShrinkDemandedConstant - Check to see if the specified operand of the
/// specified instruction is a constant integer. If so, check to see if
/// there are any bits set in the constant that are not demanded. If so,
/// shrink the constant and return true.
@@ -797,25 +806,25 @@ public:
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
DebugLoc dl);
};
-
+
/// SimplifyDemandedBits - Look at Op. At this point, we know that only the
/// DemandedMask bits of the result of Op are ever used downstream. If we can
/// use this information to simplify Op, create a new simplified DAG node and
- /// return true, returning the original and new nodes in Old and New.
- /// Otherwise, analyze the expression and return a mask of KnownOne and
- /// KnownZero bits for the expression (used to simplify the caller).
- /// The KnownZero/One bits may only be accurate for those bits in the
+ /// return true, returning the original and new nodes in Old and New.
+ /// Otherwise, analyze the expression and return a mask of KnownOne and
+ /// KnownZero bits for the expression (used to simplify the caller).
+ /// The KnownZero/One bits may only be accurate for those bits in the
/// DemandedMask.
- bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
+ bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
APInt &KnownZero, APInt &KnownOne,
TargetLoweringOpt &TLO, unsigned Depth = 0) const;
-
+
/// computeMaskedBitsForTargetNode - Determine which of the bits specified in
- /// Mask are known to be either zero or one and return them in the
+ /// Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
- APInt &KnownZero,
+ APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
@@ -825,7 +834,7 @@ public:
/// DAG Combiner.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
unsigned Depth = 0) const;
-
+
struct DAGCombinerInfo {
void *DC; // The DAG Combiner object.
bool BeforeLegalize;
@@ -833,15 +842,15 @@ public:
bool CalledByLegalizer;
public:
SelectionDAG &DAG;
-
+
DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc)
: DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo),
CalledByLegalizer(cl), DAG(dag) {}
-
+
bool isBeforeLegalize() const { return BeforeLegalize; }
bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; }
bool isCalledByLegalizer() const { return CalledByLegalizer; }
-
+
void AddToWorklist(SDNode *N);
SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
bool AddTo = true);
@@ -851,7 +860,7 @@ public:
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
};
- /// SimplifySetCC - Try to simplify a setcc built with the specified operands
+ /// SimplifySetCC - Try to simplify a setcc built with the specified operands
/// and cc. If it is unable to simplify it, return a null SDValue.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
ISD::CondCode Cond, bool foldBooleans,
@@ -892,7 +901,7 @@ public:
virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
return false;
}
-
+
//===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
@@ -932,7 +941,7 @@ protected:
void setStackPointerRegisterToSaveRestore(unsigned R) {
StackPointerRegisterToSaveRestore = R;
}
-
+
/// setExceptionPointerRegister - If set to a physical register, this sets
/// the register that receives the exception address on entry to a landing
/// pad.
@@ -955,12 +964,12 @@ protected:
/// expensive, and if possible, should be replaced by an alternate sequence
/// of instructions not containing an integer divide.
void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
-
+
/// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
-
+
/// addRegisterClass - Add the specified register class as an available
/// regclass for the specified value type. This indicates the selector can
/// handle values of that class natively.
@@ -983,7 +992,7 @@ protected:
assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
}
-
+
/// setLoadExtAction - Indicate that the specified load with extension does
/// not work with the specified type and indicate what to do about it.
void setLoadExtAction(unsigned ExtType, MVT VT,
@@ -993,7 +1002,7 @@ protected:
"Table isn't big enough!");
LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
}
-
+
/// setTruncStoreAction - Indicate that the specified truncating store does
/// not work with the specified type and indicate what to do about it.
void setTruncStoreAction(MVT ValVT, MVT MemVT,
@@ -1018,7 +1027,7 @@ protected:
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
}
-
+
/// setIndexedStoreAction - Indicate that the specified indexed store does or
/// does not work with the specified type and indicate what to do about
/// it. NOTE: All indexed mode stores are initialized to Expand in
@@ -1033,7 +1042,7 @@ protected:
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
}
-
+
/// setCondCodeAction - Indicate that the specified condition code is or isn't
/// supported on the target and indicate what to do about it.
void setCondCodeAction(ISD::CondCode CC, MVT VT,
@@ -1060,7 +1069,7 @@ protected:
assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
}
-
+
/// setJumpBufSize - Set the target's required jmp_buf buffer size (in
/// bytes); default is 200
void setJumpBufSize(unsigned Size) {
@@ -1073,25 +1082,24 @@ protected:
JumpBufAlignment = Align;
}
- /// setIfCvtBlockSizeLimit - Set the target's if-conversion block size
- /// limit (in number of instructions); default is 2.
- void setIfCvtBlockSizeLimit(unsigned Limit) {
- IfCvtBlockSizeLimit = Limit;
- }
-
- /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
- /// of instructions) to be considered for code duplication during
- /// if-conversion; default is 2.
- void setIfCvtDupBlockSizeLimit(unsigned Limit) {
- IfCvtDupBlockSizeLimit = Limit;
- }
-
/// setPrefLoopAlignment - Set the target's preferred loop alignment. Default
/// alignment is zero, it means the target does not care about loop alignment.
void setPrefLoopAlignment(unsigned Align) {
PrefLoopAlignment = Align;
}
-
+
+ /// setMinStackArgumentAlignment - Set the minimum stack alignment of an
+ /// argument.
+ void setMinStackArgumentAlignment(unsigned Align) {
+ MinStackArgumentAlignment = Align;
+ }
+
+ /// setShouldFoldAtomicFences - Set if the target's implementation of the
+ /// atomic operation intrinsics includes locking. Default is false.
+ void setShouldFoldAtomicFences(bool fold) {
+ ShouldFoldAtomicFences = fold;
+ }
+
public:
//===--------------------------------------------------------------------===//
// Lowering methods - These methods must be implemented by targets so that
@@ -1151,6 +1159,7 @@ public:
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -1163,9 +1172,8 @@ public:
/// registers. If false is returned, an sret-demotion is performed.
///
virtual bool CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
- SelectionDAG &DAG) const
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const
{
// Return true by default to get preexisting behavior.
return true;
@@ -1179,6 +1187,7 @@ public:
virtual SDValue
LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
assert(0 && "Not Implemented");
return SDValue(); // this is here to silence compiler errors
@@ -1200,7 +1209,7 @@ public:
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const;
- /// LowerOperation - This callback is invoked for operations that are
+ /// LowerOperation - This callback is invoked for operations that are
/// unsupported by the target, which are registered to use 'custom' lowering,
/// and whose defined values are all legal.
/// If the target has no operations that require custom lowering, it need not
@@ -1227,23 +1236,14 @@ public:
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
- virtual FastISel *
- createFastISel(MachineFunction &,
- DenseMap<const Value *, unsigned> &,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &,
- DenseMap<const AllocaInst *, int> &,
- std::vector<std::pair<MachineInstr*, unsigned> > &
-#ifndef NDEBUG
- , SmallSet<const Instruction *, 8> &CatchInfoLost
-#endif
- ) const {
+ virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const {
return 0;
}
//===--------------------------------------------------------------------===//
// Inline Asm Support hooks
//
-
+
/// ExpandInlineAsm - This hook allows the target to expand an inline asm
/// call to be explicit llvm code if it wants to. This is useful for
/// turning simple inline asms into LLVM intrinsics, which gives the
@@ -1251,7 +1251,7 @@ public:
virtual bool ExpandInlineAsm(CallInst *CI) const {
return false;
}
-
+
enum ConstraintType {
C_Register, // Constraint represents specific register(s).
C_RegisterClass, // Constraint represents any of register(s) in class.
@@ -1259,7 +1259,7 @@ public:
C_Other, // Something else.
C_Unknown // Unsupported constraint.
};
-
+
/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
@@ -1271,25 +1271,25 @@ public:
/// ConstraintType - Information about the constraint code, e.g. Register,
/// RegisterClass, Memory, Other, Unknown.
TargetLowering::ConstraintType ConstraintType;
-
+
/// CallOperandVal - If this is the result output operand or a
/// clobber, this is null, otherwise it is the incoming operand to the
/// CallInst. This gets modified as the asm is processed.
Value *CallOperandVal;
-
+
/// ConstraintVT - The ValueType for the operand value.
EVT ConstraintVT;
-
+
/// isMatchingInputConstraint - Return true of this is an input operand that
/// is a matching constraint like "4".
bool isMatchingInputConstraint() const;
-
+
/// getMatchedOperand - If this is an input matching constraint, this method
/// returns the output operand it matches.
unsigned getMatchedOperand() const;
-
+
AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
- : InlineAsm::ConstraintInfo(info),
+ : InlineAsm::ConstraintInfo(info),
ConstraintType(TargetLowering::C_Unknown),
CallOperandVal(0), ConstraintVT(MVT::Other) {
}
@@ -1299,21 +1299,19 @@ public:
/// type to use for the specific AsmOperandInfo, setting
/// OpInfo.ConstraintCode and OpInfo.ConstraintType. If the actual operand
/// being passed in is available, it can be passed in as Op, otherwise an
- /// empty SDValue can be passed. If hasMemory is true it means one of the asm
- /// constraint of the inline asm instruction being processed is 'm'.
+ /// empty SDValue can be passed.
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
SDValue Op,
- bool hasMemory,
SelectionDAG *DAG = 0) const;
-
+
/// getConstraintType - Given a constraint, return the type of constraint it
/// is for this target.
virtual ConstraintType getConstraintType(const std::string &Constraint) const;
-
+
/// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
/// return a list of registers that can be used to satisfy the constraint.
/// This should only be used for C_RegisterClass constraints.
- virtual std::vector<unsigned>
+ virtual std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
@@ -1327,29 +1325,26 @@ public:
///
/// This should only be used for C_Register constraints. On error,
/// this returns a register number of 0 and a null register class pointer.
- virtual std::pair<unsigned, const TargetRegisterClass*>
+ virtual std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
-
+
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand. This returns null if there is no replacement to
/// make.
virtual const char *LowerXConstraint(EVT ConstraintVT) const;
-
+
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
- /// vector. If it is invalid, don't add anything to Ops. If hasMemory is true
- /// it means one of the asm constraint of the inline asm instruction being
- /// processed is 'm'.
+ /// vector. If it is invalid, don't add anything to Ops.
virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter,
- bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
-
+
//===--------------------------------------------------------------------===//
// Instruction Emitting Hooks
//
-
+
// EmitInstrWithCustomInserter - This method should be implemented by targets
// that mark instructions with the 'usesCustomInserter' flag. These
// instructions are special in various ways, which require special support to
@@ -1378,7 +1373,7 @@ public:
int64_t Scale;
AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
};
-
+
/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
/// The type may be VoidTy, in which case only return true if the addressing
@@ -1431,9 +1426,9 @@ public:
//===--------------------------------------------------------------------===//
// Div utility functions
//
- SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
+ SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
std::vector<SDNode*>* Created) const;
- SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
+ SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
std::vector<SDNode*>* Created) const;
@@ -1470,7 +1465,7 @@ public:
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
LibcallCallingConvs[Call] = CC;
}
-
+
/// getLibcallCallingConv - Get the CallingConv that should be used for the
/// specified libcall.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
@@ -1499,12 +1494,12 @@ private:
/// a real cost model is in place. If we ever optimize for size, this will be
/// set to true unconditionally.
bool IntDivIsCheap;
-
+
/// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
/// srl/add/sra for a signed divide by power of two, and let the target handle
/// it.
bool Pow2DivIsCheap;
-
+
/// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
/// llvm.setjmp. Defaults to false.
bool UseUnderscoreSetJmp;
@@ -1524,26 +1519,28 @@ private:
/// SchedPreferenceInfo - The target scheduling preference: shortest possible
/// total cycles or lowest register usage.
Sched::Preference SchedPreferenceInfo;
-
+
/// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
unsigned JumpBufSize;
-
+
/// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
/// buffers
unsigned JumpBufAlignment;
- /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
- /// if-converted.
- unsigned IfCvtBlockSizeLimit;
-
- /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
- /// duplicated during if-conversion.
- unsigned IfCvtDupBlockSizeLimit;
+ /// MinStackArgumentAlignment - The minimum alignment that any argument
+ /// on the stack needs to have.
+ ///
+ unsigned MinStackArgumentAlignment;
/// PrefLoopAlignment - The preferred loop alignment.
///
unsigned PrefLoopAlignment;
+ /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
+ /// be folded into the enclosed atomic intrinsic instruction by the
+ /// combiner.
+ bool ShouldFoldAtomicFences;
+
/// StackPointerRegisterToSaveRestore - If set to a physical register, this
/// specifies the register that llvm.savestack/llvm.restorestack should save
/// and restore.
@@ -1583,12 +1580,12 @@ private:
/// operations that are not should be described. Note that operations on
/// non-legal value types are not described here.
uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
-
+
/// LoadExtActions - For each load extension type and each value type,
/// keep a LegalizeAction that indicates how instruction selection should deal
/// with a load of a specific value type and extension type.
uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
-
+
/// TruncStoreActions - For each value type pair keep a LegalizeAction that
/// indicates whether a truncating store of a specific value type and
/// truncating type is legal.
@@ -1600,7 +1597,7 @@ private:
/// value_type for the reference. The second dimension represents the various
/// modes for load store.
uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
-
+
/// CondCodeActions - For each condition code (ISD::CondCode) keep a
/// LegalizeAction that indicates how instruction selection should
/// deal with the condition code.
@@ -1615,7 +1612,7 @@ private:
/// which sets a bit in this array.
unsigned char
TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
-
+
/// PromoteToType - For operations that must be promoted to a specific type,
/// this holds the destination type. This map should be sparse, so don't hold
/// it as an array.
@@ -1676,6 +1673,15 @@ protected:
/// optimization.
bool benefitFromCodePlacementOpt;
};
+
+/// GetReturnInfo - Given an LLVM IR type and return type attributes,
+/// compute the return value EVTs and flags, and optionally also
+/// the offsets, if the return value is being lowered to memory.
+void GetReturnInfo(const Type* ReturnType, Attributes attr,
+ SmallVectorImpl<ISD::OutputArg> &Outs,
+ const TargetLowering &TLI,
+ SmallVectorImpl<uint64_t> *Offsets = 0);
+
} // end llvm namespace
#endif
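The two new knobs are meant to be set from a target's TargetLowering constructor next to the existing ones; they replace the per-target if-conversion limits, which moved to TargetInstrInfo::isProfitableToIfCvt and friends. A hypothetical constructor fragment (FooTargetLowering and createTLOF() are invented names):

  FooTargetLowering::FooTargetLowering(TargetMachine &TM)
      : TargetLowering(TM, createTLOF()) {   // assumed TLOF factory helper
    setMinStackArgumentAlignment(4);  // outgoing stack args need 4-byte align
    setShouldFoldAtomicFences(true);  // fold fence MEMBARRIERs into atomics
  }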
diff --git a/contrib/llvm/include/llvm/Target/TargetOpcodes.h b/contrib/llvm/include/llvm/Target/TargetOpcodes.h
index c4deaa8..cb772ec 100644
--- a/contrib/llvm/include/llvm/Target/TargetOpcodes.h
+++ b/contrib/llvm/include/llvm/Target/TargetOpcodes.h
@@ -15,52 +15,54 @@
#define LLVM_TARGET_TARGETOPCODES_H
namespace llvm {
-
+
/// Invariant opcodes: All instruction sets have these as their low opcodes.
+///
+/// Every instruction defined here must also appear in Target.td and the order
+/// must be the same as in CodeGenTarget.cpp.
+///
namespace TargetOpcode {
- enum {
+ enum {
PHI = 0,
INLINEASM = 1,
DBG_LABEL = 2,
EH_LABEL = 3,
GC_LABEL = 4,
-
+
/// KILL - This instruction is a noop that is used only to adjust the
/// liveness of registers. This can be useful when dealing with
/// sub-registers.
KILL = 5,
-
+
/// EXTRACT_SUBREG - This instruction takes two operands: a register
/// that has subregisters, and a subregister index. It returns the
/// extracted subregister value. This is commonly used to implement
/// truncation operations on target architectures which support it.
EXTRACT_SUBREG = 6,
-
- /// INSERT_SUBREG - This instruction takes three operands: a register
- /// that has subregisters, a register providing an insert value, and a
- /// subregister index. It returns the value of the first register with
- /// the value of the second register inserted. The first register is
- /// often defined by an IMPLICIT_DEF, as is commonly used to implement
+
+ /// INSERT_SUBREG - This instruction takes three operands: a register that
+ /// has subregisters, a register providing an insert value, and a
+ /// subregister index. It returns the value of the first register with the
+ /// value of the second register inserted. The first register is often
+ /// defined by an IMPLICIT_DEF, because it is commonly used to implement
/// anyext operations on target architectures which support it.
INSERT_SUBREG = 7,
-
+
/// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
IMPLICIT_DEF = 8,
-
- /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except
- /// that the first operand is an immediate integer constant. This constant
- /// is often zero, as is commonly used to implement zext operations on
- /// target architectures which support it, such as with x86-64 (with
- /// zext from i32 to i64 via implicit zero-extension).
+
+ /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except that
+ /// the first operand is an immediate integer constant. This constant is
+ /// often zero, because it is commonly used to assert that the instruction
+ /// defining the register implicitly clears the high bits.
SUBREG_TO_REG = 9,
-
+
/// COPY_TO_REGCLASS - This instruction is a placeholder for a plain
/// register-to-register copy into a specific register class. This is only
/// used between instruction selection and MachineInstr creation, before
/// virtual registers have been created for all the instructions, and it's
/// only needed in cases where the register classes implied by the
- /// instructions are insufficient. The actual MachineInstrs to perform
- /// the copy are emitted with the TargetInstrInfo::copyRegToReg hook.
+ /// instructions are insufficient. It is emitted as a COPY MachineInstr.
COPY_TO_REGCLASS = 10,
/// DBG_VALUE - a mapping of the llvm.dbg.value intrinsic
@@ -72,7 +74,11 @@ namespace TargetOpcode {
/// e.g. v1027 = REG_SEQUENCE v1024, 3, v1025, 4, v1026, 5
/// After register coalescing, references of v1024 should be replaced with
/// v1027:3, v1025 with v1027:4, etc.
- REG_SEQUENCE = 12
+ REG_SEQUENCE = 12,
+
+ /// COPY - Target-independent register copy. This instruction can also be
+ /// used to copy between subregisters of virtual registers.
+ COPY = 13
};
} // end namespace TargetOpcode
} // end namespace llvm
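COPY subsumes the register-to-register special cases, including the EXTRACT_SUBREG identity check deleted from TargetInstrInfo.h earlier in this patch. A sketch of the now-uniform no-op test, assuming MI is a MachineInstr*:

  if (MI->getOpcode() == TargetOpcode::COPY &&
      MI->getOperand(0).getReg() == MI->getOperand(1).getReg() &&
      MI->getOperand(0).getSubReg() == MI->getOperand(1).getSubReg()) {
    MI->eraseFromParent();  // the copy is an identity and can go away
  }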
diff --git a/contrib/llvm/include/llvm/Target/TargetOptions.h b/contrib/llvm/include/llvm/Target/TargetOptions.h
index a316c70..b369880 100644
--- a/contrib/llvm/include/llvm/Target/TargetOptions.h
+++ b/contrib/llvm/include/llvm/Target/TargetOptions.h
@@ -68,7 +68,7 @@ namespace llvm {
/// this flag is off (the default), the code generator is not allowed to
/// produce results that are "less precise" than IEEE allows. This includes
/// use of X86 instructions like FSIN and FCOS instead of libcalls.
- /// UnsafeFPMath implies FiniteOnlyFPMath and LessPreciseFPMAD.
+ /// UnsafeFPMath implies LessPreciseFPMAD.
extern bool UnsafeFPMath;
/// FiniteOnlyFPMath - This returns true when the -enable-finite-only-fp-math
diff --git a/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h b/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
index 7c37b73..f6ac2b7 100644
--- a/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
+++ b/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
@@ -115,6 +115,11 @@ public:
return RegSet.count(Reg);
}
+ /// contains - Return true if both registers are in this class.
+ bool contains(unsigned Reg1, unsigned Reg2) const {
+ return contains(Reg1) && contains(Reg2);
+ }
+
/// hasType - return true if this TargetRegisterClass has the ValueType vt.
///
bool hasType(EVT vt) const {
@@ -313,11 +318,11 @@ public:
return Reg >= FirstVirtualRegister;
}
- /// getPhysicalRegisterRegClass - Returns the Register Class of a physical
- /// register of the given type. If type is EVT::Other, then just return any
- /// register class the register belongs to.
- virtual const TargetRegisterClass *
- getPhysicalRegisterRegClass(unsigned Reg, EVT VT = MVT::Other) const;
+ /// getMinimalPhysRegClass - Returns the Register Class of a physical
+ /// register of the given type, picking the smallest register class of
+ /// the right type that contains this physreg.
+ const TargetRegisterClass *
+ getMinimalPhysRegClass(unsigned Reg, EVT VT = MVT::Other) const;
/// getAllocatableSet - Returns a bitset indexed by register number
/// indicating if a register is allocatable or not. If a register class is
@@ -438,11 +443,6 @@ public:
virtual const unsigned* getCalleeSavedRegs(const MachineFunction *MF = 0)
const = 0;
- /// getCalleeSavedRegClasses - Return a null-terminated list of the preferred
- /// register classes to spill each callee saved register with. The order and
- /// length of this list match the getCalleeSaveRegs() list.
- virtual const TargetRegisterClass* const *getCalleeSavedRegClasses(
- const MachineFunction *MF) const =0;
/// getReservedRegs - Returns a bitset indexed by physical register number
/// indicating if a register is a special register that has particular uses
@@ -456,7 +456,7 @@ public:
virtual unsigned getSubReg(unsigned RegNo, unsigned Index) const = 0;
/// getSubRegIndex - For a given register pair, return the sub-register index
- /// if the are second register is a sub-register of the first. Return zero
+ /// if the second register is a sub-register of the first. Return zero
/// otherwise.
virtual unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const = 0;
@@ -470,14 +470,15 @@ public:
return 0;
}
- /// canCombinedSubRegIndex - Given a register class and a list of sub-register
- /// indices, return true if it's possible to combine the sub-register indices
- /// into one that corresponds to a larger sub-register. Return the new sub-
- /// register index by reference. Note the new index by be zero if the given
- /// sub-registers combined to form the whole register.
- virtual bool canCombinedSubRegIndex(const TargetRegisterClass *RC,
- SmallVectorImpl<unsigned> &SubIndices,
- unsigned &NewSubIdx) const {
+ /// canCombineSubRegIndices - Given a register class and a list of
+ /// subregister indices, return true if it's possible to combine the
+ /// subregister indices into one that corresponds to a larger
+ /// subregister. Return the new subregister index by reference. Note the
+ /// new index may be zero if the given subregisters can be combined to
+ /// form the whole register.
+ virtual bool canCombineSubRegIndices(const TargetRegisterClass *RC,
+ SmallVectorImpl<unsigned> &SubIndices,
+ unsigned &NewSubIdx) const {
return 0;
}
@@ -490,6 +491,23 @@ public:
return 0;
}
+ /// composeSubRegIndices - Return the subregister index you get from composing
+ /// two subregister indices.
+ ///
+ /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
+ /// returns c. Note that composeSubRegIndices does not tell you about illegal
+ /// compositions. If R does not have a subreg a, or R:a does not have a subreg
+ /// b, the result of the composition is not defined.
+ ///
+ /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
+ /// ssub_0:S0 - ssub_3:S3 subregs.
+ /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
+ ///
+ virtual unsigned composeSubRegIndices(unsigned a, unsigned b) const {
+ // This default implementation is correct for most targets.
+ return b;
+ }
+
//===--------------------------------------------------------------------===//
// Register Class Information
//
@@ -506,8 +524,8 @@ public:
/// getRegClass - Returns the register class associated with the enumeration
/// value. See class TargetOperandInfo.
const TargetRegisterClass *getRegClass(unsigned i) const {
- assert(i <= getNumRegClasses() && "Register Class ID out of range");
- return i ? RegClassBegin[i - 1] : NULL;
+ assert(i < getNumRegClasses() && "Register Class ID out of range");
+ return RegClassBegin[i];
}
/// getPointerRegClass - Returns a TargetRegisterClass used for pointer
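The composition rule above is easiest to see worked out. A minimal standalone sketch of the documented ARM behavior, using hypothetical index values rather than the real TableGen-generated enums:

    #include <cassert>

    // Hypothetical subregister indices for the ARM example above; the
    // real values come from TableGen and differ.
    enum SubIdx { NoSubReg = 0, dsub_0, dsub_1, ssub_0, ssub_1, ssub_2, ssub_3 };

    // Q0:dsub_1 is D1, and D1's first S subregister is S2, i.e. Q0:ssub_2.
    unsigned composeSubRegIndices(unsigned a, unsigned b) {
      if (a == dsub_1 && b == ssub_0) return ssub_2;
      if (a == dsub_1 && b == ssub_1) return ssub_3;
      return b; // the patch's default: correct when a covers the whole register
    }

    int main() {
      assert(composeSubRegIndices(dsub_1, ssub_0) == ssub_2);
      assert(composeSubRegIndices(dsub_0, ssub_1) == ssub_1); // default case
      return 0;
    }

Note that the default also covers dsub_0: Q0:dsub_0 is D0, whose S subregisters line up with Q0's own, so returning b unchanged is correct there.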
diff --git a/contrib/llvm/include/llvm/Transforms/IPO.h b/contrib/llvm/include/llvm/Transforms/IPO.h
index 5e17904..8fb4b63 100644
--- a/contrib/llvm/include/llvm/Transforms/IPO.h
+++ b/contrib/llvm/include/llvm/Transforms/IPO.h
@@ -45,6 +45,11 @@ ModulePass *createStripNonDebugSymbolsPass();
ModulePass *createStripDebugDeclarePass();
//===----------------------------------------------------------------------===//
+//
+// This pass removes unused symbols' debug info.
+ModulePass *createStripDeadDebugInfoPass();
+
+//===----------------------------------------------------------------------===//
/// createLowerSetJmpPass - This function lowers the setjmp/longjmp intrinsics
/// to invoke/unwind instructions. This should really be part of the C/C++
/// front-end, but it's so much easier to write transformations in LLVM proper.
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
index 5279e96..0f54450 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -66,24 +66,6 @@ void ReplaceInstWithInst(BasicBlock::InstListType &BIL,
//
void ReplaceInstWithInst(Instruction *From, Instruction *To);
-/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at the
-/// instruction before ScanFrom) checking to see if we have the value at the
-/// memory address *Ptr locally available within a small number of instructions.
-/// If the value is available, return it.
-///
-/// If not, return the iterator for the last validated instruction that the
-/// value would be live through. If we scanned the entire block and didn't find
-/// something that invalidates *Ptr or provides it, ScanFrom would be left at
-/// begin() and this returns null. ScanFrom could also be left
-///
-/// MaxInstsToScan specifies the maximum instructions to scan in the block. If
-/// it is set to 0, it will scan the whole block. You can also optionally
-/// specify an alias analysis implementation, which makes this more precise.
-Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
- BasicBlock::iterator &ScanFrom,
- unsigned MaxInstsToScan = 6,
- AliasAnalysis *AA = 0);
-
/// FindFunctionBackedges - Analyze the specified function to find all of the
/// loop backedges in the function and return them. This is a relatively cheap
/// (compared to computing dominators and loop info) analysis.
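The declaration removed here is not gone: judging by the Loads.cpp addition further down, it moves to llvm/Analysis/Loads.h with the same signature. A hedged usage sketch, assuming that new header:

    #include "llvm/Analysis/Loads.h"      // assumed new home of the declaration
    #include "llvm/Instructions.h"
    using namespace llvm;

    // Try to prove a load redundant by scanning backwards from it.
    Value *findRedundantLoadValue(LoadInst *LI, AliasAnalysis *AA) {
      BasicBlock::iterator ScanFrom = LI; // scan starts just before LI
      // Look back at most 6 instructions for something that provides *Ptr;
      // null comes back if something invalidates it first.
      return FindAvailableLoadedValue(LI->getPointerOperand(), LI->getParent(),
                                      ScanFrom, 6, AA);
    }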
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
index 6df3469..c75c142 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -34,6 +34,10 @@ namespace llvm {
/// and the return value has 'i8*' type.
Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const TargetData *TD);
+ /// EmitStrNCmp - Emit a call to the strncmp function to the builder.
+ Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+ const TargetData *TD);
+
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h b/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
index 22bdc99..1ca4981 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
@@ -18,7 +18,7 @@
#ifndef LLVM_TRANSFORMS_UTILS_CLONING_H
#define LLVM_TRANSFORMS_UTILS_CLONING_H
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ValueMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ValueHandle.h"
@@ -46,7 +46,7 @@ class AllocaInst;
/// CloneModule - Return an exact copy of the specified module
///
Module *CloneModule(const Module *M);
-Module *CloneModule(const Module *M, DenseMap<const Value*, Value*> &ValueMap);
+Module *CloneModule(const Module *M, ValueMap<const Value*, Value*> &VMap);
/// ClonedCodeInfo - This struct can be used to capture information about code
/// being cloned, while it is being cloned.
@@ -89,7 +89,7 @@ struct ClonedCodeInfo {
/// incoming edges.
///
/// The correlation between instructions in the source and result basic blocks
-/// is recorded in the ValueMap map.
+/// is recorded in the VMap.
///
/// If you have a particular suffix you'd like to use to add to any cloned
/// names, specify it as the optional third parameter.
@@ -102,34 +102,34 @@ struct ClonedCodeInfo {
/// parameter.
///
BasicBlock *CloneBasicBlock(const BasicBlock *BB,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
const Twine &NameSuffix = "", Function *F = 0,
ClonedCodeInfo *CodeInfo = 0);
/// CloneLoop - Clone Loop. Clone dominator info for loop insiders. Populate
-/// ValueMap using old blocks to new blocks mapping.
+/// VMap using old blocks to new blocks mapping.
Loop *CloneLoop(Loop *L, LPPassManager *LPM, LoopInfo *LI,
- DenseMap<const Value *, Value *> &ValueMap, Pass *P);
+ ValueMap<const Value *, Value *> &VMap, Pass *P);
/// CloneFunction - Return a copy of the specified function, but without
/// embedding the function into another module. Also, any references specified
-/// in the ValueMap are changed to refer to their mapped value instead of the
-/// original one. If any of the arguments to the function are in the ValueMap,
-/// the arguments are deleted from the resultant function. The ValueMap is
+/// in the VMap are changed to refer to their mapped value instead of the
+/// original one. If any of the arguments to the function are in the VMap,
+/// the arguments are deleted from the resultant function. The VMap is
/// updated to include mappings from all of the instructions and basicblocks in
/// the function from their old to new values. The final argument captures
/// information about the cloned code if non-null.
///
Function *CloneFunction(const Function *F,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
ClonedCodeInfo *CodeInfo = 0);
-/// CloneFunction - Version of the function that doesn't need the ValueMap.
+/// CloneFunction - Version of the function that doesn't need the VMap.
///
inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){
- DenseMap<const Value*, Value*> ValueMap;
- return CloneFunction(F, ValueMap, CodeInfo);
+ ValueMap<const Value*, Value*> VMap;
+ return CloneFunction(F, VMap, CodeInfo);
}
/// Clone OldFunc into NewFunc, transforming the old arguments into references
@@ -139,7 +139,7 @@ inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){
/// specified suffix to all values cloned.
///
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = 0);
@@ -152,7 +152,7 @@ void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
/// dead. Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = 0,
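The switch from DenseMap to ValueMap is more than a rename: ValueMap's keys are value handles, so the map stays coherent if a source value is RAUW'd or deleted while the map is alive, which a plain DenseMap cannot guarantee. A minimal usage sketch against the declarations above:

    #include "llvm/Transforms/Utils/Cloning.h"
    using namespace llvm;

    Function *cloneWithMap(const Function *F) {
      ValueMap<const Value*, Value*> VMap;
      Function *NewF = CloneFunction(F, VMap, /*CodeInfo=*/0);
      // VMap now maps every argument, basic block and instruction of F
      // to its counterpart in NewF, and tracks them through later updates.
      return NewF;
    }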
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/Local.h b/contrib/llvm/include/llvm/Transforms/Utils/Local.h
index bb6fd56..b277970 100644
--- a/contrib/llvm/include/llvm/Transforms/Utils/Local.h
+++ b/contrib/llvm/include/llvm/Transforms/Utils/Local.h
@@ -31,17 +31,6 @@ class TargetData;
template<typename T> class SmallVectorImpl;
//===----------------------------------------------------------------------===//
-// Local analysis.
-//
-
-/// isSafeToLoadUnconditionally - Return true if we know that executing a load
-/// from this value cannot trap. If it is not obviously safe to load from the
-/// specified pointer, we do a quick local scan of the basic block containing
-/// ScanFrom, to determine if the address is already accessed.
-bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align, const TargetData *TD = 0);
-
-//===----------------------------------------------------------------------===//
// Local constant propagation.
//
diff --git a/contrib/llvm/include/llvm/Type.h b/contrib/llvm/include/llvm/Type.h
index 52229ac..617ef69 100644
--- a/contrib/llvm/include/llvm/Type.h
+++ b/contrib/llvm/include/llvm/Type.h
@@ -504,19 +504,19 @@ inline void PATypeHandle::removeUser() {
/// reference to the type.
///
inline Type* PATypeHolder::get() const {
+ if (Ty == 0) return 0;
const Type *NewTy = Ty->getForwardedType();
if (!NewTy) return const_cast<Type*>(Ty);
return *const_cast<PATypeHolder*>(this) = NewTy;
}
inline void PATypeHolder::addRef() {
- assert(Ty && "Type Holder has a null type!");
- if (Ty->isAbstract())
+ if (Ty && Ty->isAbstract())
Ty->addRef();
}
inline void PATypeHolder::dropRef() {
- if (Ty->isAbstract())
+ if (Ty && Ty->isAbstract())
Ty->dropRef();
}
diff --git a/contrib/llvm/include/llvm/Use.h b/contrib/llvm/include/llvm/Use.h
index 970f69b..2759338 100644
--- a/contrib/llvm/include/llvm/Use.h
+++ b/contrib/llvm/include/llvm/Use.h
@@ -27,6 +27,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/ADT/PointerIntPair.h"
+#include <cstddef>
#include <iterator>
namespace llvm {
diff --git a/contrib/llvm/include/llvm/Value.h b/contrib/llvm/include/llvm/Value.h
index bc25a0f..cfb4422 100644
--- a/contrib/llvm/include/llvm/Value.h
+++ b/contrib/llvm/include/llvm/Value.h
@@ -93,8 +93,8 @@ protected:
/// printing behavior.
virtual void printCustom(raw_ostream &O) const;
-public:
Value(const Type *Ty, unsigned scid);
+public:
virtual ~Value();
/// dump - Support for debugging, callable in GDB: V->dump()
@@ -210,7 +210,7 @@ public:
UndefValueVal, // This is an instance of UndefValue
BlockAddressVal, // This is an instance of BlockAddress
ConstantExprVal, // This is an instance of ConstantExpr
- ConstantAggregateZeroVal, // This is an instance of ConstantAggregateNull
+ ConstantAggregateZeroVal, // This is an instance of ConstantAggregateZero
ConstantIntVal, // This is an instance of ConstantInt
ConstantFPVal, // This is an instance of ConstantFP
ConstantArrayVal, // This is an instance of ConstantArray
@@ -266,6 +266,10 @@ public:
SubclassOptionalData &= V->SubclassOptionalData;
}
+ /// hasValueHandle - Return true if there is a value handle associated with
+ /// this value.
+ bool hasValueHandle() const { return HasValueHandle; }
+
// Methods for support type inquiry through isa, cast, and dyn_cast:
static inline bool classof(const Value *) {
return true; // Values are always values.
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
index 371dcaf..503fbbd 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -233,10 +233,12 @@ bool llvm::isNoAliasCall(const Value *V) {
/// NoAlias returns
///
bool llvm::isIdentifiedObject(const Value *V) {
- if (isa<AllocaInst>(V) || isNoAliasCall(V))
+ if (isa<AllocaInst>(V))
return true;
if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
return true;
+ if (isNoAliasCall(V))
+ return true;
if (const Argument *A = dyn_cast<Argument>(V))
return A->hasNoAliasAttr() || A->hasByValAttr();
return false;
diff --git a/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
index bfa3ff1..37ee9fc 100644
--- a/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/contrib/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -25,7 +25,6 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/CommandLine.h"
diff --git a/contrib/llvm/lib/Analysis/AliasDebugger.cpp b/contrib/llvm/lib/Analysis/AliasDebugger.cpp
index 88c2875..bc2d9c55 100644
--- a/contrib/llvm/lib/Analysis/AliasDebugger.cpp
+++ b/contrib/llvm/lib/Analysis/AliasDebugger.cpp
@@ -45,8 +45,12 @@ namespace {
InitializeAliasAnalysis(this); // set up super class
for(Module::global_iterator I = M.global_begin(),
- E = M.global_end(); I != E; ++I)
+ E = M.global_end(); I != E; ++I) {
Vals.insert(&*I);
+ for (User::const_op_iterator OI = I->op_begin(),
+ OE = I->op_end(); OI != OE; ++OI)
+ Vals.insert(*OI);
+ }
for(Module::iterator I = M.begin(),
E = M.end(); I != E; ++I){
@@ -58,8 +62,12 @@ namespace {
for (Function::const_iterator FI = I->begin(), FE = I->end();
FI != FE; ++FI)
for (BasicBlock::const_iterator BI = FI->begin(), BE = FI->end();
- BI != BE; ++BI)
+ BI != BE; ++BI) {
Vals.insert(&*BI);
+ for (User::const_op_iterator OI = BI->op_begin(),
+ OE = BI->op_end(); OI != OE; ++OI)
+ Vals.insert(*OI);
+ }
}
}
diff --git a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index cfe7a1c..4f53a6d 100644
--- a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -78,6 +78,20 @@ static bool isNonEscapingLocalObject(const Value *V) {
return false;
}
+/// isEscapeSource - Return true if the pointer is one which would have
+/// been considered an escape by isNonEscapingLocalObject.
+static bool isEscapeSource(const Value *V) {
+ if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
+ return true;
+
+ // The load case works because isNonEscapingLocalObject considers all
+ // stores to be escapes (it passes true for the StoreCaptures argument
+ // to PointerMayBeCaptured).
+ if (isa<LoadInst>(V))
+ return true;
+
+ return false;
+}
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
@@ -94,7 +108,7 @@ static bool isObjectSmallerThan(const Value *V, unsigned Size,
} else if (const CallInst* CI = extractMallocCall(V)) {
if (!isArrayMalloc(V, &TD))
// The size is the argument to the malloc call.
- if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getOperand(1)))
+ if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
return (C->getZExtValue() < Size);
return false;
} else if (const Argument *A = dyn_cast<Argument>(V)) {
@@ -177,9 +191,29 @@ static RegisterAnalysisGroup<AliasAnalysis> V(U);
ImmutablePass *llvm::createNoAAPass() { return new NoAA(); }
//===----------------------------------------------------------------------===//
-// BasicAA Pass
+// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//
+#ifndef NDEBUG
+static const Function *getParent(const Value *V) {
+ if (const Instruction *inst = dyn_cast<Instruction>(V))
+ return inst->getParent()->getParent();
+
+ if (const Argument *arg = dyn_cast<Argument>(V))
+ return arg->getParent();
+
+ return NULL;
+}
+
+static bool notDifferentParent(const Value *O1, const Value *O2) {
+
+ const Function *F1 = getParent(O1);
+ const Function *F2 = getParent(O2);
+
+ return !F1 || !F2 || F1 == F2;
+}
+#endif
+
namespace {
/// BasicAliasAnalysis - This is the default alias analysis implementation.
/// Because it doesn't chain to a previous alias analysis (like -no-aa), it
@@ -187,11 +221,14 @@ namespace {
struct BasicAliasAnalysis : public NoAA {
static char ID; // Class identification, replacement for typeinfo
BasicAliasAnalysis() : NoAA(&ID) {}
+
AliasResult alias(const Value *V1, unsigned V1Size,
const Value *V2, unsigned V2Size) {
- assert(VisitedPHIs.empty() && "VisitedPHIs must be cleared after use!");
+ assert(Visited.empty() && "Visited must be cleared after use!");
+ assert(notDifferentParent(V1, V2) &&
+ "BasicAliasAnalysis doesn't support interprocedural queries.");
AliasResult Alias = aliasCheck(V1, V1Size, V2, V2Size);
- VisitedPHIs.clear();
+ Visited.clear();
return Alias;
}
@@ -213,8 +250,8 @@ namespace {
}
private:
- // VisitedPHIs - Track PHI nodes visited by a aliasCheck() call.
- SmallPtrSet<const Value*, 16> VisitedPHIs;
+ // Visited - Track instructions visited by aliasPHI(), aliasSelect(),
+ // and aliasGEP().
+ SmallPtrSet<const Value*, 16> Visited;
// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
// instruction against another.
@@ -268,6 +305,9 @@ bool BasicAliasAnalysis::pointsToConstantMemory(const Value *P) {
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
+ assert(notDifferentParent(CS.getInstruction(), P) &&
+ "AliasAnalysis query involving multiple functions!");
+
const Value *Object = P->getUnderlyingObject();
// If this is a tail call and P points to a stack location, we know that
@@ -318,10 +358,10 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
case Intrinsic::memcpy:
case Intrinsic::memmove: {
unsigned Len = ~0U;
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getOperand(3)))
+ if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
Len = LenCI->getZExtValue();
- Value *Dest = II->getOperand(1);
- Value *Src = II->getOperand(2);
+ Value *Dest = II->getArgOperand(0);
+ Value *Src = II->getArgOperand(1);
if (isNoAlias(Dest, Len, P, Size)) {
if (isNoAlias(Src, Len, P, Size))
return NoModRef;
@@ -332,9 +372,9 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
case Intrinsic::memset:
// Since memset is 'accesses arguments' only, the AliasAnalysis base class
// will handle it for the variable length case.
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getOperand(3))) {
+ if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
unsigned Len = LenCI->getZExtValue();
- Value *Dest = II->getOperand(1);
+ Value *Dest = II->getArgOperand(0);
if (isNoAlias(Dest, Len, P, Size))
return NoModRef;
}
@@ -352,7 +392,7 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
case Intrinsic::atomic_load_umax:
case Intrinsic::atomic_load_umin:
if (TD) {
- Value *Op1 = II->getOperand(1);
+ Value *Op1 = II->getArgOperand(0);
unsigned Op1Size = TD->getTypeStoreSize(Op1->getType());
if (isNoAlias(Op1, Op1Size, P, Size))
return NoModRef;
@@ -361,14 +401,14 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start: {
- unsigned PtrSize = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
- if (isNoAlias(II->getOperand(2), PtrSize, P, Size))
+ unsigned PtrSize = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
+ if (isNoAlias(II->getArgOperand(1), PtrSize, P, Size))
return NoModRef;
break;
}
case Intrinsic::invariant_end: {
- unsigned PtrSize = cast<ConstantInt>(II->getOperand(2))->getZExtValue();
- if (isNoAlias(II->getOperand(3), PtrSize, P, Size))
+ unsigned PtrSize = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
+ if (isNoAlias(II->getArgOperand(2), PtrSize, P, Size))
return NoModRef;
break;
}
@@ -440,6 +480,13 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
const Value *V2, unsigned V2Size,
const Value *UnderlyingV1,
const Value *UnderlyingV2) {
+ // If this GEP has been visited before, we're on a use-def cycle.
+ // Such cycles are only valid when PHI nodes are involved or in unreachable
+ // code. The visitPHI function catches cycles containing PHIs, but there
+ // could still be a cycle without PHIs in unreachable code.
+ if (!Visited.insert(GEP1))
+ return MayAlias;
+
int64_t GEP1BaseOffset;
SmallVector<std::pair<const Value*, int64_t>, 4> GEP1VariableIndices;
@@ -550,6 +597,13 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, unsigned V1Size,
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
const Value *V2, unsigned V2Size) {
+ // If this select has been visited before, we're on a use-def cycle.
+ // Such cycles are only valid when PHI nodes are involved or in unreachable
+ // code. The visitPHI function catches cycles containing PHIs, but there
+ // could still be a cycle without PHIs in unreachable code.
+ if (!Visited.insert(SI))
+ return MayAlias;
+
// If the values are Selects with the same condition, we can do a more precise
// check: just check for aliases between the values on corresponding arms.
if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
@@ -570,11 +624,17 @@ BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
// If both arms of the Select node NoAlias or MustAlias V2, then returns
// NoAlias / MustAlias. Otherwise, returns MayAlias.
AliasResult Alias =
- aliasCheck(SI->getTrueValue(), SISize, V2, V2Size);
+ aliasCheck(V2, V2Size, SI->getTrueValue(), SISize);
if (Alias == MayAlias)
return MayAlias;
+
+ // If V2 is visited, the recursive case will have been caught in the
+ // above aliasCheck call, so these subsequent calls to aliasCheck
+ // don't need to assume that V2 is being visited recursively.
+ Visited.erase(V2);
+
AliasResult ThisAlias =
- aliasCheck(SI->getFalseValue(), SISize, V2, V2Size);
+ aliasCheck(V2, V2Size, SI->getFalseValue(), SISize);
if (ThisAlias != Alias)
return MayAlias;
return Alias;
@@ -586,7 +646,7 @@ AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
const Value *V2, unsigned V2Size) {
// The PHI node has already been visited, avoid recursion any further.
- if (!VisitedPHIs.insert(PN))
+ if (!Visited.insert(PN))
return MayAlias;
// If the values are PHIs in the same block, we can do a more precise
@@ -636,10 +696,10 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
Value *V = V1Srcs[i];
- // If V2 is a PHI, the recursive case will have been caught in the
+ // If V2 is visited, the recursive case will have been caught in the
// above aliasCheck call, so these subsequent calls to aliasCheck
// don't need to assume that V2 is being visited recursively.
- VisitedPHIs.erase(V2);
+ Visited.erase(V2);
AliasResult ThisAlias = aliasCheck(V2, V2Size, V, PNSize);
if (ThisAlias != Alias || ThisAlias == MayAlias)
@@ -693,17 +753,32 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
(isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
return NoAlias;
- // Arguments can't alias with local allocations or noalias calls.
- if ((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
- (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1))))
+ // Arguments can't alias with local allocations or noalias calls
+ // in the same function.
+ if (((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
+ (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1)))))
return NoAlias;
// Most objects can't alias null.
- if ((isa<ConstantPointerNull>(V2) && isKnownNonNull(O1)) ||
- (isa<ConstantPointerNull>(V1) && isKnownNonNull(O2)))
+ if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
+ (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
return NoAlias;
- }
+ // If one pointer is the result of a call/invoke or load and the other is a
+ // non-escaping local object within the same function, then we know the
+ // object couldn't escape to a point where the call could return it.
+ //
+ // Note that if the pointers are in different functions, there are a
+ // variety of complications. A call with a nocapture argument may still
+ // temporarily store the nocapture argument's value in a temporary memory
+ // location if that memory location doesn't escape. Or it may pass a
+ // nocapture value to other functions as long as they don't capture it.
+ if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
+ return NoAlias;
+ if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
+ return NoAlias;
+ }
+
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
if (TD)
@@ -711,22 +786,6 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
(V2Size != ~0U && isObjectSmallerThan(O1, V2Size, *TD)))
return NoAlias;
- // If one pointer is the result of a call/invoke or load and the other is a
- // non-escaping local object, then we know the object couldn't escape to a
- // point where the call could return it. The load case works because
- // isNonEscapingLocalObject considers all stores to be escapes (it
- // passes true for the StoreCaptures argument to PointerMayBeCaptured).
- if (O1 != O2) {
- if ((isa<CallInst>(O1) || isa<InvokeInst>(O1) || isa<LoadInst>(O1) ||
- isa<Argument>(O1)) &&
- isNonEscapingLocalObject(O2))
- return NoAlias;
- if ((isa<CallInst>(O2) || isa<InvokeInst>(O2) || isa<LoadInst>(O2) ||
- isa<Argument>(O2)) &&
- isNonEscapingLocalObject(O1))
- return NoAlias;
- }
-
// FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
// GEP can't simplify, we don't even look at the PHI cases.
if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
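Two of the BasicAliasAnalysis changes are worth restating. First, renaming VisitedPHIs to Visited reflects that aliasGEP and aliasSelect now bail out on revisit too, guarding against use-def cycles that can occur without PHIs in unreachable code. A toy model of the guard (plain C++, not the LLVM API):

    #include <set>

    enum AliasResult { NoAlias, MayAlias, MustAlias };
    struct Node { Node *Op; };            // one operand suffices to form a cycle

    AliasResult aliasCheck(Node *N, std::set<Node*> &Visited) {
      if (!Visited.insert(N).second)      // second visit: we are on a cycle
        return MayAlias;                  // give up conservatively
      if (N->Op)
        return aliasCheck(N->Op, Visited);
      return NoAlias;                     // leaf reached; toy answer
    }

Second, the relocated escape check now pairs isEscapeSource with isNonEscapingLocalObject and sits behind the new notDifferentParent assertion, because the reasoning is only sound when both pointers belong to the same function.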
diff --git a/contrib/llvm/lib/Analysis/CMakeLists.txt b/contrib/llvm/lib/Analysis/CMakeLists.txt
index 5a37ce0..d9b670d 100644
--- a/contrib/llvm/lib/Analysis/CMakeLists.txt
+++ b/contrib/llvm/lib/Analysis/CMakeLists.txt
@@ -23,6 +23,7 @@ add_llvm_library(LLVMAnalysis
LibCallSemantics.cpp
Lint.cpp
LiveValues.cpp
+ Loads.cpp
LoopDependenceAnalysis.cpp
LoopInfo.cpp
LoopPass.cpp
diff --git a/contrib/llvm/lib/Analysis/ConstantFolding.cpp b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
index 37cda02..13d8f4d 100644
--- a/contrib/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/contrib/llvm/lib/Analysis/ConstantFolding.cpp
@@ -208,7 +208,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
i != e; ++i, ++GTI) {
ConstantInt *CI = dyn_cast<ConstantInt>(*i);
if (!CI) return false; // Index isn't a simple constant?
- if (CI->getZExtValue() == 0) continue; // Not adding anything.
+ if (CI->isZero()) continue; // Not adding anything.
if (const StructType *ST = dyn_cast<StructType>(*GTI)) {
// N = N + Offset
@@ -436,8 +436,10 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
unsigned StrLen = Str.length();
const Type *Ty = cast<PointerType>(CE->getType())->getElementType();
unsigned NumBits = Ty->getPrimitiveSizeInBits();
- // Replace LI with immediate integer store.
- if ((NumBits >> 3) == StrLen + 1) {
+ // Replace load with immediate integer if the result is an integer or fp
+ // value.
+ if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
+ (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
APInt StrVal(NumBits, 0);
APInt SingleChar(NumBits, 0);
if (TD->isLittleEndian()) {
@@ -454,7 +456,11 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
SingleChar = 0;
StrVal = (StrVal << 8) | SingleChar;
}
- return ConstantInt::get(CE->getContext(), StrVal);
+
+ Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
+ if (Ty->isFloatingPointTy())
+ Res = ConstantExpr::getBitCast(Res, Ty);
+ return Res;
}
}
@@ -772,9 +778,9 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
case Instruction::ICmp:
case Instruction::FCmp: assert(0 && "Invalid for compares");
case Instruction::Call:
- if (Function *F = dyn_cast<Function>(Ops[0]))
+ if (Function *F = dyn_cast<Function>(Ops[CallInst::ArgOffset ? 0:NumOps-1]))
if (canConstantFoldCallTo(F))
- return ConstantFoldCall(F, Ops+1, NumOps-1);
+ return ConstantFoldCall(F, Ops+CallInst::ArgOffset, NumOps-1);
return 0;
case Instruction::PtrToInt:
// If the input is an inttoptr, eliminate the pair. This requires knowing
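The constant-string fold above is concrete enough for a worked example: loading an i32 through a pointer to the constant "abc" (four bytes including the NUL) on a little-endian target yields the integer whose least-significant byte is 'a'. A standalone check of the byte order, assuming a little-endian host:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const char Str[] = "abc";          // StrLen == 3, so NumBits == 32
      uint32_t StrVal = 0;
      for (int i = 3; i >= 0; --i)       // mirror the little-endian loop above
        StrVal = (StrVal << 8) | uint8_t(Str[i]);
      uint32_t Direct;
      std::memcpy(&Direct, Str, 4);      // what the folded load must equal
      assert(StrVal == Direct);          // holds on a little-endian host
      return 0;
    }

The new isFloatingPointTy guard matters because the same bytes may feed a float-typed load, in which case the added ConstantExpr::getBitCast converts the integer immediate to the right type.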
diff --git a/contrib/llvm/lib/Analysis/DebugInfo.cpp b/contrib/llvm/lib/Analysis/DebugInfo.cpp
index a7b6d2b..c8d0d22 100644
--- a/contrib/llvm/lib/Analysis/DebugInfo.cpp
+++ b/contrib/llvm/lib/Analysis/DebugInfo.cpp
@@ -73,6 +73,15 @@ GlobalVariable *DIDescriptor::getGlobalVariableField(unsigned Elt) const {
return 0;
}
+Function *DIDescriptor::getFunctionField(unsigned Elt) const {
+ if (DbgNode == 0)
+ return 0;
+
+ if (Elt < DbgNode->getNumOperands())
+ return dyn_cast_or_null<Function>(DbgNode->getOperand(Elt));
+ return 0;
+}
+
unsigned DIVariable::getNumAddrElements() const {
return DbgNode->getNumOperands()-6;
}
@@ -397,6 +406,8 @@ bool DIVariable::isInlinedFnArgument(const Function *CurFn) {
/// information for the function F.
bool DISubprogram::describes(const Function *F) {
assert(F && "Invalid function");
+ if (F == getFunction())
+ return true;
StringRef Name = getLinkageName();
if (Name.empty())
Name = getName();
@@ -938,7 +949,8 @@ DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context,
unsigned VK, unsigned VIndex,
DIType ContainingType,
bool isArtificial,
- bool isOptimized) {
+ bool isOptimized,
+ Function *Fn) {
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_subprogram),
@@ -956,9 +968,15 @@ DISubprogram DIFactory::CreateSubprogram(DIDescriptor Context,
ConstantInt::get(Type::getInt32Ty(VMContext), VIndex),
ContainingType,
ConstantInt::get(Type::getInt1Ty(VMContext), isArtificial),
- ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized)
+ ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
+ Fn
};
- return DISubprogram(MDNode::get(VMContext, &Elts[0], 16));
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], 17);
+
+ // Create a named metadata so that we do not lose this mdnode.
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.sp");
+ NMD->addOperand(Node);
+ return DISubprogram(Node);
}
/// CreateSubprogramDefinition - Create new subprogram descriptor for the
@@ -984,9 +1002,15 @@ DISubprogram DIFactory::CreateSubprogramDefinition(DISubprogram &SPDeclaration)
DeclNode->getOperand(12), // VIndex
DeclNode->getOperand(13), // Containing Type
DeclNode->getOperand(14), // isArtificial
- DeclNode->getOperand(15) // isOptimized
+ DeclNode->getOperand(15), // isOptimized
+ SPDeclaration.getFunction()
};
- return DISubprogram(MDNode::get(VMContext, &Elts[0], 16));
+ MDNode *Node = MDNode::get(VMContext, &Elts[0], 16);
+
+ // Create a named metadata so that we do not lose this mdnode.
+ NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.sp");
+ NMD->addOperand(Node);
+ return DISubprogram(Node);
}
/// CreateGlobalVariable - Create a new descriptor for the specified global.
@@ -1042,8 +1066,18 @@ DIVariable DIFactory::CreateVariable(unsigned Tag, DIDescriptor Context,
// The optimizer may remove local variables. If there is an interest
// in preserving variable info in such a situation, stash it in a
// named mdnode.
- NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.lv");
- NMD->addOperand(Node);
+ DISubprogram Fn(getDISubprogram(Context));
+ StringRef FName = "fn";
+ if (Fn.getFunction())
+ FName = Fn.getFunction()->getName();
+ char One = '\1';
+ if (FName.startswith(StringRef(&One, 1)))
+ FName = FName.substr(1);
+ NamedMDNode *FnLocals = M.getNamedMetadata(Twine("llvm.dbg.lv.", FName));
+ if (!FnLocals)
+ FnLocals = NamedMDNode::Create(VMContext, Twine("llvm.dbg.lv.", FName),
+ NULL, 0, &M);
+ FnLocals->addOperand(Node);
}
return DIVariable(Node);
}
@@ -1110,18 +1144,6 @@ DILocation DIFactory::CreateLocation(unsigned LineNo, unsigned ColumnNo,
return DILocation(MDNode::get(VMContext, &Elts[0], 4));
}
-/// CreateLocation - Creates a debug info location.
-DILocation DIFactory::CreateLocation(unsigned LineNo, unsigned ColumnNo,
- DIScope S, MDNode *OrigLoc) {
- Value *Elts[] = {
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- ConstantInt::get(Type::getInt32Ty(VMContext), ColumnNo),
- S,
- OrigLoc
- };
- return DILocation(MDNode::get(VMContext, &Elts[0], 4));
-}
-
//===----------------------------------------------------------------------===//
// DIFactory: Routines for inserting code into a function
//===----------------------------------------------------------------------===//
@@ -1218,17 +1240,19 @@ void DebugInfoFinder::processModule(Module &M) {
processLocation(DILocation(IA));
}
- NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv");
- if (!NMD)
- return;
-
- for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
- DIGlobalVariable DIG(cast<MDNode>(NMD->getOperand(i)));
- if (addGlobalVariable(DIG)) {
- addCompileUnit(DIG.getCompileUnit());
- processType(DIG.getType());
+ if (NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv")) {
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
+ DIGlobalVariable DIG(cast<MDNode>(NMD->getOperand(i)));
+ if (addGlobalVariable(DIG)) {
+ addCompileUnit(DIG.getCompileUnit());
+ processType(DIG.getType());
+ }
}
}
+
+ if (NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.sp"))
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+ processSubprogram(DISubprogram(NMD->getOperand(i)));
}
/// processLocation - Process DILocation.
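The per-function stashing above derives a metadata name from the subprogram's function. A small sketch of that naming rule (the '\1' prefix marks names that must be emitted literally, so it is stripped first):

    #include <string>

    std::string localsMDName(std::string FName) {
      if (FName.empty())
        FName = "fn";                    // fallback used above
      if (FName[0] == '\1')
        FName.erase(0, 1);               // drop the "literal name" marker
      return "llvm.dbg.lv." + FName;
    }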
diff --git a/contrib/llvm/lib/Analysis/DomPrinter.cpp b/contrib/llvm/lib/Analysis/DomPrinter.cpp
index a1676e5..d95c376 100644
--- a/contrib/llvm/lib/Analysis/DomPrinter.cpp
+++ b/contrib/llvm/lib/Analysis/DomPrinter.cpp
@@ -43,10 +43,10 @@ struct DOTGraphTraits<DomTreeNode*> : public DefaultDOTGraphTraits {
if (isSimple())
return DOTGraphTraits<const Function*>
- ::getSimpleNodeLabel(BB, BB->getParent());
+ ::getSimpleNodeLabel(BB, BB->getParent());
else
return DOTGraphTraits<const Function*>
- ::getCompleteNodeLabel(BB, BB->getParent());
+ ::getCompleteNodeLabel(BB, BB->getParent());
}
};
diff --git a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
index 2bde56d7..65c7c6e 100644
--- a/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/CallGraph.cpp
@@ -126,13 +126,15 @@ private:
}
// Loop over all of the users of the function, looking for non-call uses.
- for (Value::use_iterator I = F->use_begin(), E = F->use_end(); I != E; ++I)
- if ((!isa<CallInst>(I) && !isa<InvokeInst>(I))
- || !CallSite(cast<Instruction>(I)).isCallee(I)) {
+ for (Value::use_iterator I = F->use_begin(), E = F->use_end(); I != E; ++I){
+ User *U = *I;
+ if ((!isa<CallInst>(U) && !isa<InvokeInst>(U))
+ || !CallSite(cast<Instruction>(U)).isCallee(I)) {
// Not a call, or being used as a parameter rather than as the callee.
ExternalCallingNode->addCalledFunction(CallSite(), Node);
break;
}
+ }
// If this function is not defined in this translation unit, it could call
// anything.
diff --git a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
index b14afa3..f13deea 100644
--- a/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/contrib/llvm/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -233,33 +233,34 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V,
GlobalValue *OkayStoreDest) {
if (!V->getType()->isPointerTy()) return true;
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
- if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ for (Value::use_iterator UI = V->use_begin(), E=V->use_end(); UI != E; ++UI) {
+ User *U = *UI;
+ if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
Readers.push_back(LI->getParent()->getParent());
- } else if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (V == SI->getOperand(1)) {
Writers.push_back(SI->getParent()->getParent());
} else if (SI->getOperand(1) != OkayStoreDest) {
return true; // Storing the pointer
}
- } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
+ } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
if (AnalyzeUsesOfPointer(GEP, Readers, Writers)) return true;
- } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
+ } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
if (AnalyzeUsesOfPointer(BCI, Readers, Writers, OkayStoreDest))
return true;
- } else if (isFreeCall(*UI)) {
- Writers.push_back(cast<Instruction>(*UI)->getParent()->getParent());
- } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
+ } else if (isFreeCall(U)) {
+ Writers.push_back(cast<Instruction>(U)->getParent()->getParent());
+ } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
// Make sure that this is just the function being called, not that it is
// passing into the function.
- for (unsigned i = 1, e = CI->getNumOperands(); i != e; ++i)
- if (CI->getOperand(i) == V) return true;
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
+ for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
+ if (CI->getArgOperand(i) == V) return true;
+ } else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
// Make sure that this is just the function being called, not that it is
// passing into the function.
- for (unsigned i = 0, e = II->getNumOperands() - 3; i != e; ++i)
- if (II->getOperand(i) == V) return true;
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(*UI)) {
+ for (unsigned i = 0, e = II->getNumArgOperands(); i != e; ++i)
+ if (II->getArgOperand(i) == V) return true;
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
if (CE->getOpcode() == Instruction::GetElementPtr ||
CE->getOpcode() == Instruction::BitCast) {
if (AnalyzeUsesOfPointer(CE, Readers, Writers))
@@ -267,12 +268,14 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V,
} else {
return true;
}
- } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(*UI)) {
+ } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(U)) {
if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
return true; // Allow comparison against null.
} else {
return true;
}
+ }
+
return false;
}
@@ -291,7 +294,8 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
// Walk the user list of the global. If we find anything other than a direct
// load or store, bail out.
for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
- if (LoadInst *LI = dyn_cast<LoadInst>(*I)) {
+ User *U = *I;
+ if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
// The pointer loaded from the global can only be used in simple ways:
// we allow addressing of it and loading storing to it. We do *not* allow
// storing the loaded pointer somewhere else or passing to a function.
@@ -299,7 +303,7 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
if (AnalyzeUsesOfPointer(LI, ReadersWriters, ReadersWriters))
return false; // Loaded pointer escapes.
// TODO: Could try some IP mod/ref of the loaded pointer.
- } else if (StoreInst *SI = dyn_cast<StoreInst>(*I)) {
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
// Storing the global itself.
if (SI->getOperand(0) == GV) return false;
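The mechanical getOperand-to-getArgOperand rewrites in this file (and in BasicAliasAnalysis above) insulate the code from where the callee lives in the operand list; under the old layout a CallInst kept the callee at operand 0 with arguments from 1, and an InvokeInst carried extra non-argument operands. A minimal sketch of the argument-only scan, assuming the LLVM headers of this era:

    #include "llvm/Instructions.h"
    using namespace llvm;

    // True if V is passed to the call as an argument; the callee itself
    // does not count, whatever operand slot it occupies.
    bool isPassedAsArgument(const CallInst *CI, const Value *V) {
      for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
        if (CI->getArgOperand(i) == V)
          return true;
      return false;
    }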
diff --git a/contrib/llvm/lib/Analysis/InlineCost.cpp b/contrib/llvm/lib/Analysis/InlineCost.cpp
index 98dbb69..b1df517 100644
--- a/contrib/llvm/lib/Analysis/InlineCost.cpp
+++ b/contrib/llvm/lib/Analysis/InlineCost.cpp
@@ -162,14 +162,14 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
if (Function *F = CS.getCalledFunction()) {
if (F->isDeclaration() &&
(F->getName() == "setjmp" || F->getName() == "_setjmp"))
- NeverInline = true;
+ callsSetJmp = true;
// If this call is to the function itself, then the function is recursive.
// Inlining it into other functions is a bad idea, because this is
// basically just a form of loop peeling, and our metrics aren't useful
// for that case.
if (F == BB->getParent())
- NeverInline = true;
+ isRecursive = true;
}
if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction())) {
@@ -220,7 +220,7 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
// jump would jump from the inlined copy of the function into the original
// function which is extremely undefined behavior.
if (isa<IndirectBrInst>(BB->getTerminator()))
- NeverInline = true;
+ containsIndirectBr = true;
// Remember NumInsts for this BB.
NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
@@ -247,7 +247,7 @@ void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F) {
// Don't bother calculating argument weights if we are never going to inline
// the function anyway.
- if (Metrics.NeverInline)
+ if (NeverInline())
return;
// Check out all of the arguments to the function, figuring out how much
@@ -258,6 +258,14 @@ void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F) {
CountCodeReductionForAlloca(I)));
}
+/// NeverInline - returns true if the function should never be inlined into
+/// any caller.
+bool InlineCostAnalyzer::FunctionInfo::NeverInline() {
+ return (Metrics.callsSetJmp || Metrics.isRecursive ||
+ Metrics.containsIndirectBr);
+}
// getInlineCost - The heuristic used to determine if we should inline the
// function call or not.
//
@@ -315,7 +323,7 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
CalleeFI->analyzeFunction(Callee);
// If we should never inline this, return a huge cost.
- if (CalleeFI->Metrics.NeverInline)
+ if (CalleeFI->NeverInline())
return InlineCost::getNever();
// FIXME: It would be nice to kill off CalleeFI->NeverInline. Then we
@@ -443,10 +451,15 @@ InlineCostAnalyzer::growCachedCostInfo(Function *Caller, Function *Callee) {
}
// Since CalleeMetrics were already calculated, we know that the CallerMetrics
- // reference isn't invalidated: both were in the DenseMap.
- CallerMetrics.NeverInline |= CalleeMetrics.NeverInline;
+ // reference isn't invalidated: both were in the DenseMap.
CallerMetrics.usesDynamicAlloca |= CalleeMetrics.usesDynamicAlloca;
+ // FIXME: If any of these three are true for the callee, the callee was
+ // not inlined into the caller, so I think they're redundant here.
+ CallerMetrics.callsSetJmp |= CalleeMetrics.callsSetJmp;
+ CallerMetrics.isRecursive |= CalleeMetrics.isRecursive;
+ CallerMetrics.containsIndirectBr |= CalleeMetrics.containsIndirectBr;
+
CallerMetrics.NumInsts += CalleeMetrics.NumInsts;
CallerMetrics.NumBlocks += CalleeMetrics.NumBlocks;
CallerMetrics.NumCalls += CalleeMetrics.NumCalls;
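The decomposition of the old NeverInline bit is summarized by the new predicate; a standalone restatement, with a stub Metrics standing in for CodeMetrics:

    struct Metrics {                     // stand-in for CodeMetrics
      bool callsSetJmp;                  // setjmp makes inlined frames unsound
      bool isRecursive;                  // inlining recursion is loop peeling
      bool containsIndirectBr;           // indirectbr targets must stay put
    };

    bool neverInline(const Metrics &M) {
      return M.callsSetJmp || M.isRecursive || M.containsIndirectBr;
    }

Keeping three flags instead of one lets growCachedCostInfo propagate them individually, as the FIXME above observes.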
diff --git a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
index dbefc2d..24cd343 100644
--- a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -440,27 +440,47 @@ void llvm::ReplaceAndSimplifyAllUses(Instruction *From, Value *To,
const TargetData *TD) {
assert(From != To && "ReplaceAndSimplifyAllUses(X,X) is not valid!");
- // FromHandle - This keeps a weakvh on the from value so that we can know if
- // it gets deleted out from under us in a recursive simplification.
+ // FromHandle/ToHandle - This keeps a WeakVH on the from/to values so that
+ // we can know if it gets deleted out from under us or replaced in a
+ // recursive simplification.
WeakVH FromHandle(From);
+ WeakVH ToHandle(To);
while (!From->use_empty()) {
// Update the instruction to use the new value.
- Use &U = From->use_begin().getUse();
- Instruction *User = cast<Instruction>(U.getUser());
- U = To;
+ Use &TheUse = From->use_begin().getUse();
+ Instruction *User = cast<Instruction>(TheUse.getUser());
+ TheUse = To;
+
+ // Check to see if the instruction can be folded due to the operand
+ // replacement. For example changing (or X, Y) into (or X, -1) can replace
+ // the 'or' with -1.
+ Value *SimplifiedVal;
+ {
+ // Sanity check to make sure 'User' doesn't dangle across
+ // SimplifyInstruction.
+ AssertingVH<> UserHandle(User);
- // See if we can simplify it.
- if (Value *V = SimplifyInstruction(User, TD)) {
- // Recursively simplify this.
- ReplaceAndSimplifyAllUses(User, V, TD);
-
- // If the recursive simplification ended up revisiting and deleting 'From'
- // then we're done.
- if (FromHandle == 0)
- return;
+ SimplifiedVal = SimplifyInstruction(User, TD);
+ if (SimplifiedVal == 0) continue;
}
+
+ // Recursively simplify this user to the new value.
+ ReplaceAndSimplifyAllUses(User, SimplifiedVal, TD);
+ From = dyn_cast_or_null<Instruction>((Value*)FromHandle);
+ To = ToHandle;
+
+ assert(ToHandle && "To value deleted by recursive simplification?");
+
+ // If the recursive simplification ended up revisiting and deleting
+ // 'From' then we're done.
+ if (From == 0)
+ return;
}
+
+ // If 'From' has value handles referring to it, do a real RAUW to update them.
+ From->replaceAllUsesWith(To);
+
From->eraseFromParent();
}
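The value-handle choreography above is subtle: recursive simplification may delete From or replace To, so both are watched through WeakVH and reloaded after each recursion, and an AssertingVH guards User across SimplifyInstruction. A toy model in standard C++ (weak_ptr standing in for WeakVH) of why a plain pointer would dangle:

    #include <cassert>
    #include <memory>

    int main() {
      auto From = std::make_shared<int>(1);
      std::weak_ptr<int> FromHandle = From; // plays the role of WeakVH
      From.reset();                         // "recursive simplification" deletes it
      assert(FromHandle.expired());         // the handle observes the deletion
      return 0;
    }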
diff --git a/contrib/llvm/lib/Analysis/Lint.cpp b/contrib/llvm/lib/Analysis/Lint.cpp
index a031cbc..9f1b30d 100644
--- a/contrib/llvm/lib/Analysis/Lint.cpp
+++ b/contrib/llvm/lib/Analysis/Lint.cpp
@@ -19,7 +19,8 @@
//
// Another limitation is that it assumes all code will be executed. A store
// through a null pointer in a basic block which is never reached is harmless,
-// but this pass will warn about it anyway.
+// but this pass will warn about it anyway. This is the main reason why most
+// of these checks live here instead of in the Verifier pass.
//
// Optimization passes may make conditions that this pass checks for more or
// less obvious. If an optimization pass appears to be introducing a warning,
@@ -35,7 +36,11 @@
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Lint.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
@@ -64,7 +69,8 @@ namespace {
void visitFunction(Function &F);
void visitCallSite(CallSite CS);
- void visitMemoryReference(Instruction &I, Value *Ptr, unsigned Align,
+ void visitMemoryReference(Instruction &I, Value *Ptr,
+ unsigned Size, unsigned Align,
const Type *Ty, unsigned Flags);
void visitCallInst(CallInst &I);
@@ -88,9 +94,14 @@ namespace {
void visitInsertElementInst(InsertElementInst &I);
void visitUnreachableInst(UnreachableInst &I);
+ Value *findValue(Value *V, bool OffsetOk) const;
+ Value *findValueImpl(Value *V, bool OffsetOk,
+ SmallPtrSet<Value *, 4> &Visited) const;
+
public:
Module *Mod;
AliasAnalysis *AA;
+ DominatorTree *DT;
TargetData *TD;
std::string Messages;
@@ -104,6 +115,7 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<AliasAnalysis>();
+ AU.addRequired<DominatorTree>();
}
virtual void print(raw_ostream &O, const Module *M) const {}
@@ -176,6 +188,7 @@ X("lint", "Statically lint-checks LLVM IR", false, true);
bool Lint::runOnFunction(Function &F) {
Mod = F.getParent();
AA = &getAnalysis<AliasAnalysis>();
+ DT = &getAnalysis<DominatorTree>();
TD = getAnalysisIfAvailable<TargetData>();
visit(F);
dbgs() << MessagesStr.str();
@@ -188,15 +201,17 @@ void Lint::visitFunction(Function &F) {
// fairly common mistake to neglect to name a function.
Assert1(F.hasName() || F.hasLocalLinkage(),
"Unusual: Unnamed function with non-local linkage", &F);
+
+ // TODO: Check for irreducible control flow.
}
void Lint::visitCallSite(CallSite CS) {
Instruction &I = *CS.getInstruction();
Value *Callee = CS.getCalledValue();
- visitMemoryReference(I, Callee, 0, 0, MemRef::Callee);
+ visitMemoryReference(I, Callee, ~0u, 0, 0, MemRef::Callee);
- if (Function *F = dyn_cast<Function>(Callee->stripPointerCasts())) {
+ if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
Assert1(CS.getCallingConv() == F->getCallingConv(),
"Undefined behavior: Caller and callee calling convention differ",
&I);
@@ -209,23 +224,53 @@ void Lint::visitCallSite(CallSite CS) {
FT->getNumParams() == NumActualArgs,
"Undefined behavior: Call argument count mismatches callee "
"argument count", &I);
-
- // TODO: Check argument types (in case the callee was casted)
-
- // TODO: Check ABI-significant attributes.
- // TODO: Check noalias attribute.
-
- // TODO: Check sret attribute.
+ Assert1(FT->getReturnType() == I.getType(),
+ "Undefined behavior: Call return type mismatches "
+ "callee return type", &I);
+
+ // Check argument types (in case the callee was casted) and attributes.
+ // TODO: Verify that caller and callee attributes are compatible.
+ Function::arg_iterator PI = F->arg_begin(), PE = F->arg_end();
+ CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
+ for (; AI != AE; ++AI) {
+ Value *Actual = *AI;
+ if (PI != PE) {
+ Argument *Formal = PI++;
+ Assert1(Formal->getType() == Actual->getType(),
+ "Undefined behavior: Call argument type mismatches "
+ "callee parameter type", &I);
+
+ // Check that noalias arguments don't alias other arguments. The
+ // AliasAnalysis API isn't expressive enough for what we really want
+ // to do. Known partial overlap is not distinguished from the case
+ // where nothing is known.
+ if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy())
+ for (CallSite::arg_iterator BI = CS.arg_begin(); BI != AE; ++BI) {
+ Assert1(AI == BI ||
+ AA->alias(*AI, ~0u, *BI, ~0u) != AliasAnalysis::MustAlias,
+ "Unusual: noalias argument aliases another argument", &I);
+ }
+
+ // Check that an sret argument points to valid memory.
+ if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
+ const Type *Ty =
+ cast<PointerType>(Formal->getType())->getElementType();
+ visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
+ TD ? TD->getABITypeAlignment(Ty) : 0,
+ Ty, MemRef::Read | MemRef::Write);
+ }
+ }
+ }
}
if (CS.isCall() && cast<CallInst>(CS.getInstruction())->isTailCall())
for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
AI != AE; ++AI) {
- Value *Obj = (*AI)->getUnderlyingObject();
- Assert1(!isa<AllocaInst>(Obj) && !isa<VAArgInst>(Obj),
+ Value *Obj = findValue(*AI, /*OffsetOk=*/true);
+ Assert1(!isa<AllocaInst>(Obj),
"Undefined behavior: Call with \"tail\" keyword references "
- "alloca or va_arg", &I);
+ "alloca", &I);
}
@@ -237,9 +282,10 @@ void Lint::visitCallSite(CallSite CS) {
case Intrinsic::memcpy: {
MemCpyInst *MCI = cast<MemCpyInst>(&I);
- visitMemoryReference(I, MCI->getSource(), MCI->getAlignment(), 0,
+ // TODO: If the size is known, use it.
+ visitMemoryReference(I, MCI->getDest(), ~0u, MCI->getAlignment(), 0,
MemRef::Write);
- visitMemoryReference(I, MCI->getDest(), MCI->getAlignment(), 0,
+ visitMemoryReference(I, MCI->getSource(), ~0u, MCI->getAlignment(), 0,
MemRef::Read);
// Check that the memcpy arguments don't overlap. The AliasAnalysis API
@@ -247,7 +293,8 @@ void Lint::visitCallSite(CallSite CS) {
// overlap is not distinguished from the case where nothing is known.
unsigned Size = 0;
if (const ConstantInt *Len =
- dyn_cast<ConstantInt>(MCI->getLength()->stripPointerCasts()))
+ dyn_cast<ConstantInt>(findValue(MCI->getLength(),
+ /*OffsetOk=*/false)))
if (Len->getValue().isIntN(32))
Size = Len->getValue().getZExtValue();
Assert1(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
@@ -257,15 +304,17 @@ void Lint::visitCallSite(CallSite CS) {
}
case Intrinsic::memmove: {
MemMoveInst *MMI = cast<MemMoveInst>(&I);
- visitMemoryReference(I, MMI->getSource(), MMI->getAlignment(), 0,
+ // TODO: If the size is known, use it.
+ visitMemoryReference(I, MMI->getDest(), ~0u, MMI->getAlignment(), 0,
MemRef::Write);
- visitMemoryReference(I, MMI->getDest(), MMI->getAlignment(), 0,
+ visitMemoryReference(I, MMI->getSource(), ~0u, MMI->getAlignment(), 0,
MemRef::Read);
break;
}
case Intrinsic::memset: {
MemSetInst *MSI = cast<MemSetInst>(&I);
- visitMemoryReference(I, MSI->getDest(), MSI->getAlignment(), 0,
+ // TODO: If the size is known, use it.
+ visitMemoryReference(I, MSI->getDest(), ~0u, MSI->getAlignment(), 0,
MemRef::Write);
break;
}
@@ -275,15 +324,15 @@ void Lint::visitCallSite(CallSite CS) {
"Undefined behavior: va_start called in a non-varargs function",
&I);
- visitMemoryReference(I, CS.getArgument(0), 0, 0,
+ visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0,
MemRef::Read | MemRef::Write);
break;
case Intrinsic::vacopy:
- visitMemoryReference(I, CS.getArgument(0), 0, 0, MemRef::Write);
- visitMemoryReference(I, CS.getArgument(1), 0, 0, MemRef::Read);
+ visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0, MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(1), ~0u, 0, 0, MemRef::Read);
break;
case Intrinsic::vaend:
- visitMemoryReference(I, CS.getArgument(0), 0, 0,
+ visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0,
MemRef::Read | MemRef::Write);
break;
@@ -291,7 +340,7 @@ void Lint::visitCallSite(CallSite CS) {
// Stackrestore doesn't read or write memory, but it sets the
// stack pointer, which the compiler may read from or write to
// at any time, so check it for both readability and writeability.
- visitMemoryReference(I, CS.getArgument(0), 0, 0,
+ visitMemoryReference(I, CS.getArgument(0), ~0u, 0, 0,
MemRef::Read | MemRef::Write);
break;
}
@@ -310,17 +359,35 @@ void Lint::visitReturnInst(ReturnInst &I) {
Assert1(!F->doesNotReturn(),
"Unusual: Return statement in function with noreturn attribute",
&I);
+
+ if (Value *V = I.getReturnValue()) {
+ Value *Obj = findValue(V, /*OffsetOk=*/true);
+ Assert1(!isa<AllocaInst>(Obj),
+ "Unusual: Returning alloca value", &I);
+ }
}
-// TODO: Add a length argument and check that the reference is in bounds
+// TODO: Check that the reference is in bounds.
+// TODO: Check readnone/readonly function attributes.
void Lint::visitMemoryReference(Instruction &I,
- Value *Ptr, unsigned Align, const Type *Ty,
- unsigned Flags) {
- Value *UnderlyingObject = Ptr->getUnderlyingObject();
+ Value *Ptr, unsigned Size, unsigned Align,
+ const Type *Ty, unsigned Flags) {
+ // If no memory is being referenced, it doesn't matter if the pointer
+ // is valid.
+ if (Size == 0)
+ return;
+
+ Value *UnderlyingObject = findValue(Ptr, /*OffsetOk=*/true);
Assert1(!isa<ConstantPointerNull>(UnderlyingObject),
"Undefined behavior: Null pointer dereference", &I);
Assert1(!isa<UndefValue>(UnderlyingObject),
"Undefined behavior: Undef pointer dereference", &I);
+ Assert1(!isa<ConstantInt>(UnderlyingObject) ||
+ !cast<ConstantInt>(UnderlyingObject)->isAllOnesValue(),
+ "Unusual: All-ones pointer dereference", &I);
+ Assert1(!isa<ConstantInt>(UnderlyingObject) ||
+ !cast<ConstantInt>(UnderlyingObject)->isOne(),
+ "Unusual: Address one pointer dereference", &I);
if (Flags & MemRef::Write) {
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(UnderlyingObject))
@@ -361,13 +428,16 @@ void Lint::visitMemoryReference(Instruction &I,
}
void Lint::visitLoadInst(LoadInst &I) {
- visitMemoryReference(I, I.getPointerOperand(), I.getAlignment(), I.getType(),
- MemRef::Read);
+ visitMemoryReference(I, I.getPointerOperand(),
+ AA->getTypeStoreSize(I.getType()), I.getAlignment(),
+ I.getType(), MemRef::Read);
}
void Lint::visitStoreInst(StoreInst &I) {
- visitMemoryReference(I, I.getPointerOperand(), I.getAlignment(),
- I.getOperand(0)->getType(), MemRef::Write);
+ visitMemoryReference(I, I.getPointerOperand(),
+ AA->getTypeStoreSize(I.getOperand(0)->getType()),
+ I.getAlignment(),
+ I.getOperand(0)->getType(), MemRef::Write);
}
void Lint::visitXor(BinaryOperator &I) {
@@ -384,21 +454,21 @@ void Lint::visitSub(BinaryOperator &I) {
void Lint::visitLShr(BinaryOperator &I) {
if (ConstantInt *CI =
- dyn_cast<ConstantInt>(I.getOperand(1)->stripPointerCasts()))
+ dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
Assert1(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
"Undefined result: Shift count out of range", &I);
}
void Lint::visitAShr(BinaryOperator &I) {
if (ConstantInt *CI =
- dyn_cast<ConstantInt>(I.getOperand(1)->stripPointerCasts()))
+ dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
Assert1(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
"Undefined result: Shift count out of range", &I);
}
void Lint::visitShl(BinaryOperator &I) {
if (ConstantInt *CI =
- dyn_cast<ConstantInt>(I.getOperand(1)->stripPointerCasts()))
+ dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
Assert1(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
"Undefined result: Shift count out of range", &I);
}
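
The three shift visitors above enforce the same rule: a shift count that equals or exceeds the operand's bit width produces an undefined result, so Lint flags any constant count that fails CI->getValue().ult(BitWidth). A minimal standalone sketch of that check (plain C++, not the LLVM API):

    #include <cstdint>
    #include <iostream>

    // Mirrors the ult(BitWidth) test: shifting an N-bit value is only
    // well-defined for counts strictly less than N.
    static bool isShiftCountValid(uint64_t Count, unsigned BitWidth) {
      return Count < BitWidth;
    }

    int main() {
      std::cout << isShiftCountValid(31, 32) << '\n';  // 1: 1u << 31 is fine
      std::cout << isShiftCountValid(32, 32) << '\n';  // 0: 1u << 32 is undefined
    }
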
@@ -439,27 +509,31 @@ void Lint::visitAllocaInst(AllocaInst &I) {
// This isn't undefined behavior, it's just an obvious pessimization.
Assert1(&I.getParent()->getParent()->getEntryBlock() == I.getParent(),
"Pessimization: Static alloca outside of entry block", &I);
+
+ // TODO: Check for an unusual size (MSB set?)
}
void Lint::visitVAArgInst(VAArgInst &I) {
- visitMemoryReference(I, I.getOperand(0), 0, 0,
+ visitMemoryReference(I, I.getOperand(0), ~0u, 0, 0,
MemRef::Read | MemRef::Write);
}
void Lint::visitIndirectBrInst(IndirectBrInst &I) {
- visitMemoryReference(I, I.getAddress(), 0, 0, MemRef::Branchee);
+ visitMemoryReference(I, I.getAddress(), ~0u, 0, 0, MemRef::Branchee);
}
void Lint::visitExtractElementInst(ExtractElementInst &I) {
if (ConstantInt *CI =
- dyn_cast<ConstantInt>(I.getIndexOperand()->stripPointerCasts()))
+ dyn_cast<ConstantInt>(findValue(I.getIndexOperand(),
+ /*OffsetOk=*/false)))
Assert1(CI->getValue().ult(I.getVectorOperandType()->getNumElements()),
"Undefined result: extractelement index out of range", &I);
}
void Lint::visitInsertElementInst(InsertElementInst &I) {
if (ConstantInt *CI =
- dyn_cast<ConstantInt>(I.getOperand(2)->stripPointerCasts()))
+ dyn_cast<ConstantInt>(findValue(I.getOperand(2),
+ /*OffsetOk=*/false)))
Assert1(CI->getValue().ult(I.getType()->getNumElements()),
"Undefined result: insertelement index out of range", &I);
}
@@ -472,6 +546,91 @@ void Lint::visitUnreachableInst(UnreachableInst &I) {
"side effects", &I);
}
+/// findValue - Look through bitcasts and simple memory reference patterns
+/// to identify an equivalent, but more informative, value. If OffsetOk
+/// is true, look through getelementptrs with non-zero offsets too.
+///
+/// Most analysis passes don't require this logic, because instcombine
+/// will simplify most of these kinds of things away. But it's a goal of
+/// this Lint pass to be useful even on non-optimized IR.
+Value *Lint::findValue(Value *V, bool OffsetOk) const {
+ SmallPtrSet<Value *, 4> Visited;
+ return findValueImpl(V, OffsetOk, Visited);
+}
+
+/// findValueImpl - Implementation helper for findValue.
+Value *Lint::findValueImpl(Value *V, bool OffsetOk,
+ SmallPtrSet<Value *, 4> &Visited) const {
+ // Detect self-referential values.
+ if (!Visited.insert(V))
+ return UndefValue::get(V->getType());
+
+ // TODO: Look through sext or zext cast, when the result is known to
+ // be interpreted as signed or unsigned, respectively.
+ // TODO: Look through eliminable cast pairs.
+ // TODO: Look through calls with unique return values.
+ // TODO: Look through vector insert/extract/shuffle.
+ V = OffsetOk ? V->getUnderlyingObject() : V->stripPointerCasts();
+ if (LoadInst *L = dyn_cast<LoadInst>(V)) {
+ BasicBlock::iterator BBI = L;
+ BasicBlock *BB = L->getParent();
+ SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
+ for (;;) {
+ if (!VisitedBlocks.insert(BB)) break;
+ if (Value *U = FindAvailableLoadedValue(L->getPointerOperand(),
+ BB, BBI, 6, AA))
+ return findValueImpl(U, OffsetOk, Visited);
+ if (BBI != BB->begin()) break;
+ BB = BB->getUniquePredecessor();
+ if (!BB) break;
+ BBI = BB->end();
+ }
+ } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
+ if (Value *W = PN->hasConstantValue(DT))
+ return findValueImpl(W, OffsetOk, Visited);
+ } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
+ if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
+ Type::getInt64Ty(V->getContext())))
+ return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
+ } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
+ if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
+ Ex->idx_begin(),
+ Ex->idx_end()))
+ if (W != V)
+ return findValueImpl(W, OffsetOk, Visited);
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+ // Same as above, but for ConstantExpr instead of Instruction.
+ if (Instruction::isCast(CE->getOpcode())) {
+ if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
+ CE->getOperand(0)->getType(),
+ CE->getType(),
+ TD ? TD->getIntPtrType(V->getContext()) :
+ Type::getInt64Ty(V->getContext())))
+ return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
+ } else if (CE->getOpcode() == Instruction::ExtractValue) {
+ const SmallVector<unsigned, 4> &Indices = CE->getIndices();
+ if (Value *W = FindInsertedValue(CE->getOperand(0),
+ Indices.begin(),
+ Indices.end()))
+ if (W != V)
+ return findValueImpl(W, OffsetOk, Visited);
+ }
+ }
+
+ // As a last resort, try SimplifyInstruction or constant folding.
+ if (Instruction *Inst = dyn_cast<Instruction>(V)) {
+ if (Value *W = SimplifyInstruction(Inst, TD))
+ if (W != Inst)
+ return findValueImpl(W, OffsetOk, Visited);
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+ if (Value *W = ConstantFoldConstantExpression(CE, TD))
+ if (W != V)
+ return findValueImpl(W, OffsetOk, Visited);
+ }
+
+ return V;
+}
+
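findValueImpl guards against self-referential values (such as degenerate PHI cycles in unreachable code) by tracking everything it has visited and substituting undef the moment a value repeats. A toy rendering of that guard, with a plain node graph standing in for LLVM values (illustrative only):

    #include <iostream>
    #include <set>

    // Stand-in for a Value that may be equivalent to another value.
    struct Node {
      Node *Forward = nullptr;
    };

    // Follow equivalences; if a node repeats, give up and return null,
    // the way findValueImpl returns UndefValue for cycles.
    Node *resolve(Node *N, std::set<Node *> &Visited) {
      if (!Visited.insert(N).second)
        return nullptr;                      // self-referential value
      return N->Forward ? resolve(N->Forward, Visited) : N;
    }

    int main() {
      Node A, B;
      A.Forward = &B;
      B.Forward = &A;                        // a two-node cycle
      std::set<Node *> Visited;
      std::cout << (resolve(&A, Visited) == nullptr) << '\n';  // 1
    }
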
//===----------------------------------------------------------------------===//
// Implement the public interfaces to this file...
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Analysis/Loads.cpp b/contrib/llvm/lib/Analysis/Loads.cpp
new file mode 100644
index 0000000..2ba1d86
--- /dev/null
+++ b/contrib/llvm/lib/Analysis/Loads.cpp
@@ -0,0 +1,235 @@
+//===- Loads.cpp - Local load analysis ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines simple local analyses for load instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/GlobalAlias.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/IntrinsicInst.h"
+using namespace llvm;
+
+/// AreEquivalentAddressValues - Test if A and B will obviously have the same
+/// value. This includes recognizing that %t0 and %t1 will have the same
+/// value in code like this:
+/// %t0 = getelementptr \@a, 0, 3
+/// store i32 0, i32* %t0
+/// %t1 = getelementptr \@a, 0, 3
+/// %t2 = load i32* %t1
+///
+static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
+ // Test if the values are trivially equivalent.
+ if (A == B) return true;
+
+ // Test if the values come from identical arithmetic instructions.
+ // Use isIdenticalToWhenDefined instead of isIdenticalTo because
+ // this function is only used when one address use dominates the
+ // other, which means that they'll always either have the same
+ // value or one of them will have an undefined value.
+ if (isa<BinaryOperator>(A) || isa<CastInst>(A) ||
+ isa<PHINode>(A) || isa<GetElementPtrInst>(A))
+ if (const Instruction *BI = dyn_cast<Instruction>(B))
+ if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
+ return true;
+
+ // Otherwise they may not be equivalent.
+ return false;
+}
+
+/// getUnderlyingObjectWithOffset - Strip off up to MaxLookup GEPs and
+/// bitcasts to get back to the underlying object being addressed, keeping
+/// track of the offset in bytes from the GEPs relative to the result.
+/// This is closely related to Value::getUnderlyingObject but is located
+/// here to avoid making VMCore depend on TargetData.
+static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
+ uint64_t &ByteOffset,
+ unsigned MaxLookup = 6) {
+ if (!V->getType()->isPointerTy())
+ return V;
+ for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+ if (!GEP->hasAllConstantIndices())
+ return V;
+ SmallVector<Value*, 8> Indices(GEP->op_begin() + 1, GEP->op_end());
+ ByteOffset += TD->getIndexedOffset(GEP->getPointerOperandType(),
+ &Indices[0], Indices.size());
+ V = GEP->getPointerOperand();
+ } else if (Operator::getOpcode(V) == Instruction::BitCast) {
+ V = cast<Operator>(V)->getOperand(0);
+ } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (GA->mayBeOverridden())
+ return V;
+ V = GA->getAliasee();
+ } else {
+ return V;
+ }
+ assert(V->getType()->isPointerTy() && "Unexpected operand type!");
+ }
+ return V;
+}
+
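getUnderlyingObjectWithOffset walks up through at most MaxLookup address computations, summing the constant byte offsets it strips. The same bounded walk in miniature, with a toy offset node instead of GEP/bitcast operators (names are illustrative, not LLVM API):

    #include <cstdint>
    #include <iostream>

    // Either an underlying object (Base == null) or a constant-offset step.
    struct Addr {
      Addr *Base = nullptr;
      uint64_t Offset = 0;
    };

    // Strip up to MaxLookup steps, accumulating the byte offset, just as
    // the function above accumulates TD->getIndexedOffset results.
    static Addr *underlyingObject(Addr *A, uint64_t &ByteOffset,
                                  unsigned MaxLookup = 6) {
      for (unsigned Count = 0; A->Base && Count < MaxLookup; ++Count) {
        ByteOffset += A->Offset;
        A = A->Base;
      }
      return A;
    }

    int main() {
      Addr Obj;                       // the underlying object
      Addr G1{&Obj, 8}, G2{&G1, 4};   // two constant-offset steps
      uint64_t Off = 0;
      underlyingObject(&G2, Off);
      std::cout << Off << '\n';       // 12
    }
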
+/// isSafeToLoadUnconditionally - Return true if we know that executing a load
+/// from this value cannot trap. If it is not obviously safe to load from the
+/// specified pointer, we do a quick local scan of the basic block containing
+/// ScanFrom, to determine if the address is already accessed.
+bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
+ unsigned Align, const TargetData *TD) {
+ uint64_t ByteOffset = 0;
+ Value *Base = V;
+ if (TD)
+ Base = getUnderlyingObjectWithOffset(V, TD, ByteOffset);
+
+ const Type *BaseType = 0;
+ unsigned BaseAlign = 0;
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+ // An alloca is safe to load from as long as it is suitably aligned.
+ BaseType = AI->getAllocatedType();
+ BaseAlign = AI->getAlignment();
+ } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(Base)) {
+ // A global variable is safe to load from as long as it cannot be
+ // overridden; if it may be overridden, its size cannot be guaranteed.
+ if (!isa<GlobalAlias>(GV) && !GV->mayBeOverridden()) {
+ BaseType = GV->getType()->getElementType();
+ BaseAlign = GV->getAlignment();
+ }
+ }
+
+ if (BaseType && BaseType->isSized()) {
+ if (TD && BaseAlign == 0)
+ BaseAlign = TD->getPrefTypeAlignment(BaseType);
+
+ if (Align <= BaseAlign) {
+ if (!TD)
+ return true; // Loading directly from an alloca or global is OK.
+
+ // Check if the load is within the bounds of the underlying object.
+ const PointerType *AddrTy = cast<PointerType>(V->getType());
+ uint64_t LoadSize = TD->getTypeStoreSize(AddrTy->getElementType());
+ if (ByteOffset + LoadSize <= TD->getTypeAllocSize(BaseType) &&
+ (Align == 0 || (ByteOffset % Align) == 0))
+ return true;
+ }
+ }
+
+ // Otherwise, be a little aggressive: scan the basic block containing
+ // ScanFrom to see whether the pointer is already being loaded from or
+ // stored to. If so, the previous load or store would already have
+ // trapped, so there is no harm in doing an extra load (and CSE will
+ // later eliminate the load entirely).
+ BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();
+
+ while (BBI != E) {
+ --BBI;
+
+ // If we see a free or a call which may write to memory (i.e. which might
+ // do a free), the pointer could be marked invalid.
+ if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
+ !isa<DbgInfoIntrinsic>(BBI))
+ return false;
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
+ if (AreEquivalentAddressValues(LI->getOperand(0), V)) return true;
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
+ if (AreEquivalentAddressValues(SI->getOperand(1), V)) return true;
+ }
+ }
+ return false;
+}
+
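The heart of isSafeToLoadUnconditionally is plain arithmetic: the access must fit within the base object and the offset must respect the requested alignment. Just that test, as standalone C++ with assumed sizes:

    #include <cstdint>
    #include <iostream>

    // A LoadSize-byte access at ByteOffset into an AllocSize-byte object is
    // safe when it stays in bounds and the offset is Align-aligned.
    static bool loadInBounds(uint64_t ByteOffset, uint64_t LoadSize,
                             uint64_t AllocSize, uint64_t Align) {
      return ByteOffset + LoadSize <= AllocSize &&
             (Align == 0 || ByteOffset % Align == 0);
    }

    int main() {
      std::cout << loadInBounds(12, 4, 16, 4) << '\n';  // 1: fits exactly
      std::cout << loadInBounds(13, 4, 16, 4) << '\n';  // 0: straddles the end
    }
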
+/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at the
+/// instruction before ScanFrom) checking to see if we have the value at the
+/// memory address *Ptr locally available within a small number of instructions.
+/// If the value is available, return it.
+///
+/// If not, return null. In that case ScanFrom is left at the last validated
+/// instruction that the value would be live through: at begin() if the
+/// entire block was scanned without finding something that invalidates *Ptr
+/// or provides it, or at an intermediate instruction if the scan stopped
+/// early.
+///
+/// MaxInstsToScan specifies the maximum instructions to scan in the block. If
+/// it is set to 0, it will scan the whole block. You can also optionally
+/// specify an alias analysis implementation, which makes this more precise.
+Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
+ BasicBlock::iterator &ScanFrom,
+ unsigned MaxInstsToScan,
+ AliasAnalysis *AA) {
+ if (MaxInstsToScan == 0) MaxInstsToScan = ~0U;
+
+ // If we're using alias analysis to disambiguate, get the size of *Ptr.
+ unsigned AccessSize = 0;
+ if (AA) {
+ const Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
+ AccessSize = AA->getTypeStoreSize(AccessTy);
+ }
+
+ while (ScanFrom != ScanBB->begin()) {
+ // We must ignore debug info directives when counting (otherwise they
+ // would affect codegen).
+ Instruction *Inst = --ScanFrom;
+ if (isa<DbgInfoIntrinsic>(Inst))
+ continue;
+
+ // Restore ScanFrom to expected value in case next test succeeds
+ ScanFrom++;
+
+ // Don't scan huge blocks.
+ if (MaxInstsToScan-- == 0) return 0;
+
+ --ScanFrom;
+ // If this is a load of Ptr, the loaded value is available.
+ if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+ if (AreEquivalentAddressValues(LI->getOperand(0), Ptr))
+ return LI;
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ // If this is a store through Ptr, the value is available!
+ if (AreEquivalentAddressValues(SI->getOperand(1), Ptr))
+ return SI->getOperand(0);
+
+ // If Ptr is an alloca and this is a store to a different alloca, ignore
+ // the store. This is a trivial form of alias analysis that is important
+ // for reg2mem'd code.
+ if ((isa<AllocaInst>(Ptr) || isa<GlobalVariable>(Ptr)) &&
+ (isa<AllocaInst>(SI->getOperand(1)) ||
+ isa<GlobalVariable>(SI->getOperand(1))))
+ continue;
+
+ // If we have alias analysis and it says the store won't modify the loaded
+ // value, ignore the store.
+ if (AA &&
+ (AA->getModRefInfo(SI, Ptr, AccessSize) & AliasAnalysis::Mod) == 0)
+ continue;
+
+ // Otherwise, the store may or may not alias the pointer; bail out.
+ ++ScanFrom;
+ return 0;
+ }
+
+ // If this is some other instruction that may clobber Ptr, bail out.
+ if (Inst->mayWriteToMemory()) {
+ // If alias analysis claims that it really won't modify the load,
+ // ignore it.
+ if (AA &&
+ (AA->getModRefInfo(Inst, Ptr, AccessSize) & AliasAnalysis::Mod) == 0)
+ continue;
+
+ // May modify the pointer, bail out.
+ ++ScanFrom;
+ return 0;
+ }
+ }
+
+ // We got to the start of the block without finding the value; we're done
+ // with this block.
+ return 0;
+}
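
FindAvailableLoadedValue is a bounded backward scan: starting just above ScanFrom, look for a prior load or store of the same address, and stop at anything that may clobber it. The control flow in miniature, over a toy instruction list (illustrative only):

    #include <iostream>
    #include <vector>

    struct Inst {
      enum Kind { Load, Store, Clobber } K;
      int Addr;   // address touched (ignored for Clobber)
      int Value;  // value written, for Store
    };

    // Scan backwards from 'From'; a store through Addr makes the value
    // available, a potential clobber (or the block start) ends the search.
    static int findAvailable(const std::vector<Inst> &BB, size_t From,
                             int Addr) {
      for (size_t i = From; i-- > 0;) {
        if (BB[i].K == Inst::Clobber)
          return -1;                           // may modify the pointer
        if (BB[i].K == Inst::Store && BB[i].Addr == Addr)
          return BB[i].Value;                  // value is available
      }
      return -1;                               // reached the block start
    }

    int main() {
      std::vector<Inst> BB = {{Inst::Store, 1, 42}, {Inst::Load, 2, 0}};
      std::cout << findAvailable(BB, BB.size(), 1) << '\n';  // 42
    }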
diff --git a/contrib/llvm/lib/Analysis/LoopInfo.cpp b/contrib/llvm/lib/Analysis/LoopInfo.cpp
index 735e31f..818d0a9 100644
--- a/contrib/llvm/lib/Analysis/LoopInfo.cpp
+++ b/contrib/llvm/lib/Analysis/LoopInfo.cpp
@@ -266,15 +266,16 @@ unsigned Loop::getSmallConstantTripMultiple() const {
bool Loop::isLCSSAForm(DominatorTree &DT) const {
// Sort the blocks vector so that we can use binary search to do quick
// lookups.
- SmallPtrSet<BasicBlock *, 16> LoopBBs(block_begin(), block_end());
+ SmallPtrSet<BasicBlock*, 16> LoopBBs(block_begin(), block_end());
for (block_iterator BI = block_begin(), E = block_end(); BI != E; ++BI) {
BasicBlock *BB = *BI;
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;++I)
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
++UI) {
- BasicBlock *UserBB = cast<Instruction>(*UI)->getParent();
- if (PHINode *P = dyn_cast<PHINode>(*UI))
+ User *U = *UI;
+ BasicBlock *UserBB = cast<Instruction>(U)->getParent();
+ if (PHINode *P = dyn_cast<PHINode>(U))
UserBB = P->getIncomingBlock(UI);
// Check the current block, as a fast-path, before checking whether
diff --git a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
index 89f9743..1ab18ca 100644
--- a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -101,9 +101,9 @@ static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
if (const StructType *ST = dyn_cast<StructType>(T))
ElementSize = TD->getStructLayout(ST)->getSizeInBytes();
- // If malloc calls' arg can be determined to be a multiple of ElementSize,
+ // If malloc call's arg can be determined to be a multiple of ElementSize,
// return the multiple. Otherwise, return NULL.
- Value *MallocArg = CI->getOperand(1);
+ Value *MallocArg = CI->getArgOperand(0);
Value *Multiple = NULL;
if (ComputeMultiple(MallocArg, ElementSize, Multiple,
LookThroughSExt))
@@ -120,7 +120,7 @@ const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
Value *ArraySize = computeArraySize(CI, TD);
if (ArraySize &&
- ArraySize != ConstantInt::get(CI->getOperand(1)->getType(), 1))
+ ArraySize != ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
return CI;
// CI is a non-array malloc or we can't figure out that it is an array malloc.
@@ -183,25 +183,25 @@ Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD,
// free Call Utility Functions.
//
-/// isFreeCall - Returns true if the value is a call to the builtin free()
-bool llvm::isFreeCall(const Value *I) {
+/// isFreeCall - Returns non-null if the value is a call to the builtin free()
+const CallInst *llvm::isFreeCall(const Value *I) {
const CallInst *CI = dyn_cast<CallInst>(I);
if (!CI)
- return false;
+ return 0;
Function *Callee = CI->getCalledFunction();
if (Callee == 0 || !Callee->isDeclaration() || Callee->getName() != "free")
- return false;
+ return 0;
// Check free prototype.
// FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
// attribute will exist.
const FunctionType *FTy = Callee->getFunctionType();
if (!FTy->getReturnType()->isVoidTy())
- return false;
+ return 0;
if (FTy->getNumParams() != 1)
- return false;
+ return 0;
if (FTy->param_begin()->get() != Type::getInt8PtrTy(Callee->getContext()))
- return false;
+ return 0;
- return true;
+ return CI;
}
diff --git a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 2aa2f17..1f54d74 100644
--- a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -116,8 +116,8 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
} else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
Pointer = V->getOperand(0);
PointerSize = AA->getTypeStoreSize(V->getType());
- } else if (isFreeCall(Inst)) {
- Pointer = Inst->getOperand(1);
+ } else if (const CallInst *CI = isFreeCall(Inst)) {
+ Pointer = CI->getArgOperand(0);
// calls to free() erase the entire structure
PointerSize = ~0ULL;
} else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
@@ -197,9 +197,9 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
// pointer, not on query pointers that are indexed off of them. It'd
// be nice to handle that at some point.
AliasAnalysis::AliasResult R =
- AA->alias(II->getOperand(3), ~0U, MemPtr, ~0U);
+ AA->alias(II->getArgOperand(2), ~0U, MemPtr, ~0U);
if (R == AliasAnalysis::MustAlias) {
- InvariantTag = II->getOperand(1);
+ InvariantTag = II->getArgOperand(0);
continue;
}
@@ -210,7 +210,7 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
// pointer, not on query pointers that are indexed off of them. It'd
// be nice to handle that at some point.
AliasAnalysis::AliasResult R =
- AA->alias(II->getOperand(2), ~0U, MemPtr, ~0U);
+ AA->alias(II->getArgOperand(1), ~0U, MemPtr, ~0U);
if (R == AliasAnalysis::MustAlias)
return MemDepResult::getDef(II);
}
@@ -365,25 +365,26 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
MemPtr = LI->getPointerOperand();
MemSize = AA->getTypeStoreSize(LI->getType());
}
- } else if (isFreeCall(QueryInst)) {
- MemPtr = QueryInst->getOperand(1);
+ } else if (const CallInst *CI = isFreeCall(QueryInst)) {
+ MemPtr = CI->getArgOperand(0);
// calls to free() erase the entire structure, not just a field.
MemSize = ~0UL;
} else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
int IntrinsicID = 0; // Intrinsic IDs start at 1.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst);
+ if (II)
IntrinsicID = II->getIntrinsicID();
switch (IntrinsicID) {
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
- MemPtr = QueryInst->getOperand(2);
- MemSize = cast<ConstantInt>(QueryInst->getOperand(1))->getZExtValue();
+ MemPtr = II->getArgOperand(1);
+ MemSize = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
break;
case Intrinsic::invariant_end:
- MemPtr = QueryInst->getOperand(3);
- MemSize = cast<ConstantInt>(QueryInst->getOperand(2))->getZExtValue();
+ MemPtr = II->getArgOperand(2);
+ MemSize = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
break;
default:
CallSite QueryCS = CallSite::get(QueryInst);
@@ -456,7 +457,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
// Okay, we have a cache entry. If we know it is not dirty, just return it
// with no computation.
if (!CacheP.second) {
- NumCacheNonLocal++;
+ ++NumCacheNonLocal;
return Cache;
}
@@ -478,7 +479,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
DirtyBlocks.push_back(*PI);
- NumUncacheNonLocal++;
+ ++NumUncacheNonLocal;
}
// isReadonlyCall - If this is a read-only call, we can be more aggressive.
diff --git a/contrib/llvm/lib/Analysis/PostDominators.cpp b/contrib/llvm/lib/Analysis/PostDominators.cpp
index f0f3a05..7354afa 100644
--- a/contrib/llvm/lib/Analysis/PostDominators.cpp
+++ b/contrib/llvm/lib/Analysis/PostDominators.cpp
@@ -67,10 +67,11 @@ PostDominanceFrontier::calculate(const PostDominatorTree &DT,
if (BB)
for (pred_iterator SI = pred_begin(BB), SE = pred_end(BB);
SI != SE; ++SI) {
+ BasicBlock *P = *SI;
// Does Node immediately dominate this predecessor?
- DomTreeNode *SINode = DT[*SI];
+ DomTreeNode *SINode = DT[P];
if (SINode && SINode->getIDom() != Node)
- S.insert(*SI);
+ S.insert(P);
}
// At this point, S is DFlocal. Now we union in DFup's of our children...
diff --git a/contrib/llvm/lib/Analysis/ProfileInfo.cpp b/contrib/llvm/lib/Analysis/ProfileInfo.cpp
index 662576e..8d2712f 100644
--- a/contrib/llvm/lib/Analysis/ProfileInfo.cpp
+++ b/contrib/llvm/lib/Analysis/ProfileInfo.cpp
@@ -71,22 +71,24 @@ ProfileInfoT<Function,BasicBlock>::getExecutionCount(const BasicBlock *BB) {
// Are there zero predecessors of this block?
if (PI == PE) {
- Edge e = getEdge(0,BB);
+ Edge e = getEdge(0, BB);
Count = getEdgeWeight(e);
} else {
// Otherwise, if there are predecessors, the execution count of this block is
// the sum of the edge frequencies from the incoming edges.
std::set<const BasicBlock*> ProcessedPreds;
Count = 0;
- for (; PI != PE; ++PI)
- if (ProcessedPreds.insert(*PI).second) {
- double w = getEdgeWeight(getEdge(*PI, BB));
+ for (; PI != PE; ++PI) {
+ const BasicBlock *P = *PI;
+ if (ProcessedPreds.insert(P).second) {
+ double w = getEdgeWeight(getEdge(P, BB));
if (w == MissingValue) {
Count = MissingValue;
break;
}
Count += w;
}
+ }
}
// If the predecessors did not suffice to get block weight, try successors.
@@ -577,8 +579,6 @@ static void readEdge(ProfileInfo *PI, ProfileInfo::Edge e, double &calcw, std::s
template<>
bool ProfileInfoT<Function,BasicBlock>::EstimateMissingEdges(const BasicBlock *BB) {
- bool hasNoSuccessors = false;
-
double inWeight = 0;
std::set<Edge> inMissing;
std::set<const BasicBlock*> ProcessedPreds;
@@ -596,10 +596,8 @@ bool ProfileInfoT<Function,BasicBlock>::EstimateMissingEdges(const BasicBlock *B
std::set<Edge> outMissing;
std::set<const BasicBlock*> ProcessedSuccs;
succ_const_iterator sbbi = succ_begin(BB), sbbe = succ_end(BB);
- if (sbbi == sbbe) {
+ if (sbbi == sbbe)
readEdge(this,getEdge(BB,0),outWeight,outMissing);
- hasNoSuccessors = true;
- }
for ( ; sbbi != sbbe; ++sbbi ) {
if (ProcessedSuccs.insert(*sbbi).second) {
readEdge(this,getEdge(BB,*sbbi),outWeight,outMissing);
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
index 6870268..413b3b4 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -822,7 +822,8 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
// Fold if the operand is constant.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
return getConstant(
- cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
+ cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
+ getEffectiveSCEVType(Ty))));
// trunc(trunc(x)) --> trunc(x)
if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
@@ -844,9 +845,9 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
return getAddRecExpr(Operands, AddRec->getLoop());
}
- // The cast wasn't folded; create an explicit cast node.
- // Recompute the insert position, as it may have been invalidated.
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ // The cast wasn't folded; create an explicit cast node. We can reuse
+ // the existing insert position since, if we get here, we won't have
+ // made any changes that would invalidate it.
SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
@@ -862,12 +863,10 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
Ty = getEffectiveSCEVType(Ty);
// Fold if the operand is constant.
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
- const Type *IntTy = getEffectiveSCEVType(Ty);
- Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
- if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
- return getConstant(cast<ConstantInt>(C));
- }
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
+ return getConstant(
+ cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
+ getEffectiveSCEVType(Ty))));
// zext(zext(x)) --> zext(x)
if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
@@ -997,12 +996,10 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
Ty = getEffectiveSCEVType(Ty);
// Fold if the operand is constant.
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
- const Type *IntTy = getEffectiveSCEVType(Ty);
- Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
- if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
- return getConstant(cast<ConstantInt>(C));
- }
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
+ return getConstant(
+ cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
+ getEffectiveSCEVType(Ty))));
// sext(sext(x)) --> sext(x)
if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
@@ -1208,8 +1205,19 @@ CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
ScalarEvolution &SE) {
bool Interesting = false;
- // Iterate over the add operands.
- for (unsigned i = 0, e = NumOperands; i != e; ++i) {
+ // Iterate over the add operands. They are sorted, with constants first.
+ unsigned i = 0;
+ while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
+ ++i;
+ // Pull a buried constant out to the outside.
+ if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
+ Interesting = true;
+ AccumulatedConstant += Scale * C->getValue()->getValue();
+ }
+
+ // Next comes everything else. We're especially interested in multiplies
+ // here, but they're in the middle, so just visit the rest with one loop.
+ for (; i != NumOperands; ++i) {
const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
APInt NewScale =
@@ -1237,11 +1245,6 @@ CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
Interesting = true;
}
}
- } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
- // Pull a buried constant out to the outside.
- if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
- Interesting = true;
- AccumulatedConstant += Scale * C->getValue()->getValue();
} else {
// An ordinary operand. Update the map.
std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
@@ -1275,9 +1278,9 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
assert(!Ops.empty() && "Cannot get empty add!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
+ const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
- assert(getEffectiveSCEVType(Ops[i]->getType()) ==
- getEffectiveSCEVType(Ops[0]->getType()) &&
+ assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVAddExpr operand types don't match!");
#endif
@@ -1400,8 +1403,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
// If we have an add, expand the add operands onto the end of the operands
// list.
- Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
Ops.erase(Ops.begin()+Idx);
+ Ops.append(Add->op_begin(), Add->op_end());
DeletedAdd = true;
}
@@ -1549,9 +1552,11 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
AddRec->op_end());
AddRecOps[0] = getAddExpr(LIOps);
- // It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition
- // is not associative so this isn't necessarily safe.
- const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop);
+ // Build the new addrec. Propagate the NUW and NSW flags if both the
+ // outer add and the inner addrec are guaranteed to have no overflow.
+ const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop,
+ HasNUW && AddRec->hasNoUnsignedWrap(),
+ HasNSW && AddRec->hasNoSignedWrap());
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
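
The guard matters because overflow is not preserved under reassociation: folding loop-invariant operands into the addrec start moves additions around, so the combined expression only inherits nuw/nsw when both the outer add and the inner addrec carry the flag. A small checked-arithmetic demonstration of the hazard (plain C++; __builtin_add_overflow is a GCC/Clang builtin):

    #include <cstdint>
    #include <iostream>

    // Does a + b overflow in signed 8-bit arithmetic?
    static bool addOverflows(int8_t a, int8_t b) {
      int8_t r;
      return __builtin_add_overflow(a, b, &r);
    }

    int main() {
      // (100 + 50) + (-60): the inner add overflows int8_t...
      std::cout << addOverflows(100, 50) << '\n';    // 1
      // ...but reassociated as (100 + (-60)) + 50, neither add does.
      std::cout << addOverflows(100, -60) << '\n';   // 0
      std::cout << addOverflows(40, 50) << '\n';     // 0
    }
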
@@ -1578,7 +1583,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
AddRec->op_end());
for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
if (i >= NewOps.size()) {
- NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
+ NewOps.append(OtherAddRec->op_begin()+i,
OtherAddRec->op_end());
break;
}
@@ -1711,8 +1716,8 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
 // If we have a mul, expand the mul operands onto the end of the operands
// list.
- Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
Ops.erase(Ops.begin()+Idx);
+ Ops.append(Mul->op_begin(), Mul->op_end());
DeletedMul = true;
}
@@ -1747,23 +1752,15 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
SmallVector<const SCEV *, 4> NewOps;
NewOps.reserve(AddRec->getNumOperands());
- if (LIOps.size() == 1) {
- const SCEV *Scale = LIOps[0];
- for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
- NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
- } else {
- for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
- SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
- MulOps.push_back(AddRec->getOperand(i));
- NewOps.push_back(getMulExpr(MulOps));
- }
- }
+ const SCEV *Scale = getMulExpr(LIOps);
+ for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
+ NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
- // It's tempting to propagate the NSW flag here, but nsw multiplication
- // is not associative so this isn't necessarily safe.
+ // Build the new addrec. Propagate the NUW and NSW flags if both the
+ // outer mul and the inner addrec are guaranteed to have no overflow.
const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
HasNUW && AddRec->hasNoUnsignedWrap(),
- /*HasNSW=*/false);
+ HasNSW && AddRec->hasNoSignedWrap());
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
@@ -1942,8 +1939,7 @@ const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
Operands.push_back(Start);
if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
if (StepChrec->getLoop() == L) {
- Operands.insert(Operands.end(), StepChrec->op_begin(),
- StepChrec->op_end());
+ Operands.append(StepChrec->op_begin(), StepChrec->op_end());
return getAddRecExpr(Operands, L);
}
@@ -2106,8 +2102,8 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
if (Idx < Ops.size()) {
bool DeletedSMax = false;
while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
- Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
Ops.erase(Ops.begin()+Idx);
+ Ops.append(SMax->op_begin(), SMax->op_end());
DeletedSMax = true;
}
@@ -2211,8 +2207,8 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
if (Idx < Ops.size()) {
bool DeletedUMax = false;
while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
- Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
Ops.erase(Ops.begin()+Idx);
+ Ops.append(UMax->op_begin(), UMax->op_end());
DeletedUMax = true;
}
@@ -2278,7 +2274,8 @@ const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
Constant *C = ConstantExpr::getSizeOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = ConstantFoldConstantExpression(CE, TD);
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ C = Folded;
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
@@ -2286,7 +2283,8 @@ const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
Constant *C = ConstantExpr::getAlignOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = ConstantFoldConstantExpression(CE, TD);
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ C = Folded;
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
@@ -2302,7 +2300,8 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = ConstantFoldConstantExpression(CE, TD);
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ C = Folded;
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
@@ -2311,7 +2310,8 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
Constant *FieldNo) {
Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = ConstantFoldConstantExpression(CE, TD);
+ if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+ C = Folded;
const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
@@ -2398,13 +2398,6 @@ const SCEV *ScalarEvolution::getSCEV(Value *V) {
return S;
}
-/// getIntegerSCEV - Given a SCEVable type, create a constant for the
-/// specified signed integer value and return a SCEV for the constant.
-const SCEV *ScalarEvolution::getIntegerSCEV(int64_t Val, const Type *Ty) {
- const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
- return getConstant(ConstantInt::get(ITy, Val));
-}
-
/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
///
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
@@ -2772,7 +2765,11 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
///
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
- bool InBounds = GEP->isInBounds();
+ // Don't blindly transfer the inbounds flag from the GEP instruction to the
+ // Add expression, because the Instruction may be guarded by control flow
+ // and the no-overflow bits may not be valid for the expression in any
+ // context.
+
const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
Value *Base = GEP->getOperand(0);
// Don't attempt to analyze GEPs over unsized objects.
@@ -2788,23 +2785,30 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
- TotalOffset = getAddExpr(TotalOffset,
- getOffsetOfExpr(STy, FieldNo),
- /*HasNUW=*/false, /*HasNSW=*/InBounds);
+ const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
+
+ // Add the field offset to the running total offset.
+ TotalOffset = getAddExpr(TotalOffset, FieldOffset);
} else {
// For an array, add the element offset, explicitly scaled.
- const SCEV *LocalOffset = getSCEV(Index);
+ const SCEV *ElementSize = getSizeOfExpr(*GTI);
+ const SCEV *IndexS = getSCEV(Index);
// Getelementptr indices are signed.
- LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
- // Lower "inbounds" GEPs to NSW arithmetic.
- LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI),
- /*HasNUW=*/false, /*HasNSW=*/InBounds);
- TotalOffset = getAddExpr(TotalOffset, LocalOffset,
- /*HasNUW=*/false, /*HasNSW=*/InBounds);
+ IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
+
+ // Multiply the index by the element size to compute the element offset.
+ const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize);
+
+ // Add the element offset to the running total offset.
+ TotalOffset = getAddExpr(TotalOffset, LocalOffset);
}
}
- return getAddExpr(getSCEV(Base), TotalOffset,
- /*HasNUW=*/false, /*HasNSW=*/InBounds);
+
+ // Get the SCEV for the GEP base.
+ const SCEV *BaseS = getSCEV(Base);
+
+ // Add the total offset from all the GEP indices to the base.
+ return getAddExpr(BaseS, TotalOffset);
}
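
The rewritten createNodeForGEP builds the address as the base SCEV plus one term per index: a constant field offset for each struct index, and index times element size (with the index sign-extended) for each array index. For a GEP like getelementptr {i32, [10 x i32]}* %p, 0, 1, %i this is conceptually %p + 4 + 4*sext(%i). The arithmetic in standalone form, with sizes assumed for illustration:

    #include <cstdint>
    #include <iostream>

    // Field 1 of {i32, [10 x i32]} is assumed to start at byte 4.
    static int64_t fieldOffset(unsigned FieldNo) {
      return FieldNo == 0 ? 0 : 4;
    }

    // Array indices are signed and scaled by the element size.
    static int64_t arrayOffset(int64_t Index, int64_t ElemSize) {
      return Index * ElemSize;
    }

    int main() {
      int64_t i = 3;
      int64_t Total = fieldOffset(1) + arrayOffset(i, 4);
      std::cout << Total << '\n';  // 16: the offset added to the base SCEV
    }
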
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
@@ -2963,7 +2967,8 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
if (!C->getValue()->isZero())
ConservativeResult =
- ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0));
+ ConservativeResult.intersectWith(
+ ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
// TODO: non-affine addrec
if (AddRec->isAffine()) {
@@ -3196,15 +3201,9 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
Operator *U = cast<Operator>(V);
switch (Opcode) {
case Instruction::Add:
- // Don't transfer the NSW and NUW bits from the Add instruction to the
- // Add expression, because the Instruction may be guarded by control
- // flow and the no-overflow bits may not be valid for the expression in
- // any context.
return getAddExpr(getSCEV(U->getOperand(0)),
getSCEV(U->getOperand(1)));
case Instruction::Mul:
- // Don't transfer the NSW and NUW bits from the Mul instruction to the
- // Mul expression, as with Add.
return getMulExpr(getSCEV(U->getOperand(0)),
getSCEV(U->getOperand(1)));
case Instruction::UDiv:
@@ -3658,6 +3657,26 @@ void ScalarEvolution::forgetValue(Value *V) {
ConstantEvolutionLoopExitValue.erase(PN);
}
+ // If there's a SCEVUnknown tying this value into the SCEV
+ // space, remove it from the folding set map. The SCEVUnknown
+ // object and any other SCEV objects which reference it
+ // (transitively) remain allocated, effectively leaked until
+ // the underlying BumpPtrAllocator is freed.
+ //
+ // This permits SCEV pointers to be used as keys in maps
+ // such as the ValuesAtScopes map.
+ FoldingSetNodeID ID;
+ ID.AddInteger(scUnknown);
+ ID.AddPointer(I);
+ void *IP;
+ if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
+ UniqueSCEVs.RemoveNode(S);
+
+ // This isn't necessary, but we might as well remove the
+ // value from the ValuesAtScopes map too.
+ ValuesAtScopes.erase(S);
+ }
+
PushDefUseChildren(I, Worklist);
}
}
@@ -4139,8 +4158,7 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
// constant or derived from a PHI node themselves.
PHINode *PHI = 0;
for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
- if (!(isa<Constant>(I->getOperand(Op)) ||
- isa<GlobalValue>(I->getOperand(Op)))) {
+ if (!isa<Constant>(I->getOperand(Op))) {
PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
if (P == 0) return 0; // Not evolving from PHI
if (PHI == 0)
@@ -4161,11 +4179,9 @@ static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
const TargetData *TD) {
if (isa<PHINode>(V)) return PHIVal;
if (Constant *C = dyn_cast<Constant>(V)) return C;
- if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
Instruction *I = cast<Instruction>(V);
- std::vector<Constant*> Operands;
- Operands.resize(I->getNumOperands());
+ std::vector<Constant*> Operands(I->getNumOperands());
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
@@ -4207,8 +4223,8 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
return RetVal = 0; // Must be a constant.
Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
- PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
- if (PN2 != PN)
+ if (getConstantEvolvingPHI(BEValue, L) != PN &&
+ !isa<Constant>(BEValue))
return RetVal = 0; // Not derived from same PHI.
// Execute the loop symbolically to determine the exit value.
@@ -4243,8 +4259,11 @@ ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
PHINode *PN = getConstantEvolvingPHI(Cond, L);
if (PN == 0) return getCouldNotCompute();
- // Since the loop is canonicalized, the PHI node must have two entries. One
- // entry must be a constant (coming in from outside of the loop), and the
+ // If the loop is canonicalized, the PHI will have exactly two entries.
+ // That's the only form we support here.
+ if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
+
+ // One entry must be a constant (coming in from outside of the loop), and the
// second must be derived from the same PHI.
bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
Constant *StartCST =
@@ -4252,8 +4271,9 @@ ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
if (StartCST == 0) return getCouldNotCompute(); // Must be a constant.
Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
- PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
- if (PN2 != PN) return getCouldNotCompute(); // Not derived from same PHI.
+ if (getConstantEvolvingPHI(BEValue, L) != PN &&
+ !isa<Constant>(BEValue))
+ return getCouldNotCompute(); // Not derived from same PHI.
// Okay, we find a PHI node that defines the trip count of this loop. Execute
// the loop symbolically to determine when the condition gets a value of
@@ -4341,54 +4361,51 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// the arguments into constants, and if so, try to constant propagate the
// result. This is particularly useful for computing loop exit values.
if (CanConstantFold(I)) {
- std::vector<Constant*> Operands;
- Operands.reserve(I->getNumOperands());
+ SmallVector<Constant *, 4> Operands;
+ bool MadeImprovement = false;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
Value *Op = I->getOperand(i);
if (Constant *C = dyn_cast<Constant>(Op)) {
Operands.push_back(C);
- } else {
- // If any of the operands is non-constant and if they are
- // non-integer and non-pointer, don't even try to analyze them
- // with scev techniques.
- if (!isSCEVable(Op->getType()))
- return V;
-
- const SCEV *OpV = getSCEVAtScope(Op, L);
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
- Constant *C = SC->getValue();
- if (C->getType() != Op->getType())
- C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
- Op->getType(),
- false),
- C, Op->getType());
- Operands.push_back(C);
- } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
- if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
- if (C->getType() != Op->getType())
- C =
- ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
- Op->getType(),
- false),
- C, Op->getType());
- Operands.push_back(C);
- } else
- return V;
- } else {
- return V;
- }
+ continue;
}
+
+ // If any of the operands is non-constant and if they are
+ // non-integer and non-pointer, don't even try to analyze them
+ // with scev techniques.
+ if (!isSCEVable(Op->getType()))
+ return V;
+
+ const SCEV *OrigV = getSCEV(Op);
+ const SCEV *OpV = getSCEVAtScope(OrigV, L);
+ MadeImprovement |= OrigV != OpV;
+
+ Constant *C = 0;
+ if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
+ C = SC->getValue();
+ if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
+ C = dyn_cast<Constant>(SU->getValue());
+ if (!C) return V;
+ if (C->getType() != Op->getType())
+ C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
+ Op->getType(),
+ false),
+ C, Op->getType());
+ Operands.push_back(C);
}
- Constant *C = 0;
- if (const CmpInst *CI = dyn_cast<CmpInst>(I))
- C = ConstantFoldCompareInstOperands(CI->getPredicate(),
- Operands[0], Operands[1], TD);
- else
- C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
- &Operands[0], Operands.size(), TD);
- if (C)
+ // Check to see if getSCEVAtScope actually made an improvement.
+ if (MadeImprovement) {
+ Constant *C = 0;
+ if (const CmpInst *CI = dyn_cast<CmpInst>(I))
+ C = ConstantFoldCompareInstOperands(CI->getPredicate(),
+ Operands[0], Operands[1], TD);
+ else
+ C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
+ &Operands[0], Operands.size(), TD);
+ if (!C) return V;
return getSCEV(C);
+ }
}
}
@@ -4438,7 +4455,29 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// If this is a loop recurrence for a loop that does not contain L, then we
// are dealing with the final value computed by the loop.
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
- if (!L || !AddRec->getLoop()->contains(L)) {
+ // First, attempt to evaluate each operand.
+ // Avoid performing the look-up in the common case where the specified
+ // expression has no loop-variant portions.
+ for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
+ const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
+ if (OpAtScope == AddRec->getOperand(i))
+ continue;
+
+ // Okay, at least one of these operands is loop variant but might be
+ // foldable. Build a new instance of the folded commutative expression.
+ SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
+ AddRec->op_begin()+i);
+ NewOps.push_back(OpAtScope);
+ for (++i; i != e; ++i)
+ NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
+
+ AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop()));
+ break;
+ }
+
+ // If the scope is outside the addrec's loop, evaluate it by using the
+ // loop exit value of the addrec.
+ if (!AddRec->getLoop()->contains(L)) {
// To evaluate this recurrence, we need to know how many times the AddRec
// loop iterates. Compute this now.
const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
@@ -4447,6 +4486,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// Then, evaluate the AddRec.
return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
}
+
return AddRec;
}
@@ -4696,23 +4736,6 @@ ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
return getCouldNotCompute();
}
-/// getLoopPredecessor - If the given loop's header has exactly one unique
-/// predecessor outside the loop, return it. Otherwise return null.
-/// This is less strict than the loop "preheader" concept, which requires
-/// the predecessor to have only one single successor.
-///
-BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
- BasicBlock *Header = L->getHeader();
- BasicBlock *Pred = 0;
- for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
- PI != E; ++PI)
- if (!L->contains(*PI)) {
- if (Pred && Pred != *PI) return 0; // Multiple predecessors.
- Pred = *PI;
- }
- return Pred;
-}
-
/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
/// (which may not be an immediate predecessor) which has exactly one
/// successor from which BB is reachable, or null if no such block is
@@ -4730,7 +4753,7 @@ ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
// If the header has a unique predecessor outside the loop, it must be
// a block that has exactly one successor that can reach the loop.
if (Loop *L = LI->getLoopFor(BB))
- return std::make_pair(getLoopPredecessor(L), L->getHeader());
+ return std::make_pair(L->getLoopPredecessor(), L->getHeader());
return std::pair<BasicBlock *, BasicBlock *>();
}
@@ -5181,7 +5204,7 @@ ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
// as there are predecessors that can be found that have unique successors
// leading to the original header.
for (std::pair<BasicBlock *, BasicBlock *>
- Pair(getLoopPredecessor(L), L->getHeader());
+ Pair(L->getLoopPredecessor(), L->getHeader());
Pair.first;
Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index 17b254f..58711b8 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -12,7 +12,7 @@
//
// This differs from traditional loop dependence analysis in that it tests
// for dependencies within a single iteration of a loop, rather than
-// dependences between different iterations.
+// dependencies between different iterations.
//
// ScalarEvolution has a more complete understanding of pointer arithmetic
// than BasicAliasAnalysis' collection of ad-hoc analyses.
@@ -106,6 +106,12 @@ ScalarEvolutionAliasAnalysis::GetBaseValue(const SCEV *S) {
AliasAnalysis::AliasResult
ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,
const Value *B, unsigned BSize) {
+ // If either of the memory references is empty, it doesn't matter what the
+ // pointer values are. This allows the code below to ignore this special
+ // case.
+ if (ASize == 0 || BSize == 0)
+ return NoAlias;
+
// This is ScalarEvolutionAliasAnalysis. Get the SCEVs!
const SCEV *AS = SE->getSCEV(const_cast<Value *>(A));
const SCEV *BS = SE->getSCEV(const_cast<Value *>(B));
@@ -118,14 +124,32 @@ ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,
if (SE->getEffectiveSCEVType(AS->getType()) ==
SE->getEffectiveSCEVType(BS->getType())) {
unsigned BitWidth = SE->getTypeSizeInBits(AS->getType());
- APInt AI(BitWidth, ASize);
+ APInt ASizeInt(BitWidth, ASize);
+ APInt BSizeInt(BitWidth, BSize);
+
+ // Compute the difference between the two pointers.
const SCEV *BA = SE->getMinusSCEV(BS, AS);
- if (AI.ule(SE->getUnsignedRange(BA).getUnsignedMin())) {
- APInt BI(BitWidth, BSize);
- const SCEV *AB = SE->getMinusSCEV(AS, BS);
- if (BI.ule(SE->getUnsignedRange(AB).getUnsignedMin()))
- return NoAlias;
- }
+
+ // Test whether the difference is known to be great enough that the memory
+ // regions of the given sizes don't overlap. This assumes ASizeInt and BSizeInt
+ // are non-zero, which is special-cased above.
+ if (ASizeInt.ule(SE->getUnsignedRange(BA).getUnsignedMin()) &&
+ (-BSizeInt).uge(SE->getUnsignedRange(BA).getUnsignedMax()))
+ return NoAlias;
+
+ // Folding the subtraction while preserving range information can be tricky
+ // (because of INT_MIN, etc.); if the prior test failed, swap AS and BS
+ // and try again to see if things fold better that way.
+
+ // Compute the difference between the two pointers.
+ const SCEV *AB = SE->getMinusSCEV(AS, BS);
+
+ // Test whether the difference is known to be great enough that the memory
+ // regions of the given sizes don't overlap. This assumes ASizeInt and BSizeInt
+ // are non-zero, which is special-cased above.
+ if (BSizeInt.ule(SE->getUnsignedRange(AB).getUnsignedMin()) &&
+ (-ASizeInt).uge(SE->getUnsignedRange(AB).getUnsignedMax()))
+ return NoAlias;
}
// If ScalarEvolution can find an underlying object, form a new query.
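
Concretely, the test says: if B - A, as an unsigned range, is at least ASize and at most -BSize, then [A, A+ASize) and [B, B+BSize) cannot overlap, because B starts at or after the end of A's region and ends (modulo wraparound) at or before A begins. Worked with 8-bit values (assumed numbers; non-zero sizes, as special-cased above):

    #include <cstdint>
    #include <iostream>

    // With D = B - A (mod 256), the regions are disjoint when
    // ASize <= D and D <= 256 - BSize (i.e. D <= (uint8_t)-BSize).
    static bool noAlias(uint8_t D, uint8_t ASize, uint8_t BSize) {
      return ASize <= D && D <= static_cast<uint8_t>(-BSize);
    }

    int main() {
      // A = 16, B = 24, both regions 8 bytes: D = 8, disjoint.
      std::cout << noAlias(8, 8, 8) << '\n';  // 1
      // Same bases, 9-byte regions: they overlap at byte 24.
      std::cout << noAlias(8, 9, 9) << '\n';  // 0
    }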
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
index 0012b84..d4a4b26 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -21,6 +21,43 @@
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
+/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
+/// reusing an existing cast if a suitable one exists, moving an existing
+/// cast if a suitable one exists but isn't in the right place, or
+/// creating a new one.
+Value *SCEVExpander::ReuseOrCreateCast(Value *V, const Type *Ty,
+ Instruction::CastOps Op,
+ BasicBlock::iterator IP) {
+ // Check to see if there is already a cast!
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
+ UI != E; ++UI) {
+ User *U = *UI;
+ if (U->getType() == Ty)
+ if (CastInst *CI = dyn_cast<CastInst>(U))
+ if (CI->getOpcode() == Op) {
+ // If the cast isn't where we want it, fix it.
+ if (BasicBlock::iterator(CI) != IP) {
+ // Create a new cast, and leave the old cast in place in case
+ // it is being used as an insert point. Clear its operand
+ // so that it doesn't hold anything live.
+ Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
+ NewCI->takeName(CI);
+ CI->replaceAllUsesWith(NewCI);
+ CI->setOperand(0, UndefValue::get(V->getType()));
+ rememberInstruction(NewCI);
+ return NewCI;
+ }
+ rememberInstruction(CI);
+ return CI;
+ }
+ }
+
+ // Create a new cast.
+ Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
+ rememberInstruction(I);
+ return I;
+}
+
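ReuseOrCreateCast captures a reuse-or-create pattern: scan the value's users for an equivalent cast, relocate it if it sits in the wrong place, and only create a fresh one as a last resort. The cache-like shape of that logic (a toy sketch that omits the relocation step; not LLVM API):

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    // Reuse an existing "cast" for (value, type) or create and remember one,
    // as rememberInstruction does for real casts.
    static std::string castOf(std::map<std::pair<int, char>,
                                       std::string> &Cache,
                              int V, char Ty) {
      auto Key = std::make_pair(V, Ty);
      auto It = Cache.find(Key);
      if (It != Cache.end())
        return It->second;                       // reuse
      std::string NewCast = "cast#" + std::to_string(Cache.size());
      Cache[Key] = NewCast;                      // create and remember
      return NewCast;
    }

    int main() {
      std::map<std::pair<int, char>, std::string> Cache;
      std::cout << castOf(Cache, 7, 'i') << '\n';  // cast#0 (created)
      std::cout << castOf(Cache, 7, 'i') << '\n';  // cast#0 (reused)
    }
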
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
@@ -54,71 +91,29 @@ Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {
return CE->getOperand(0);
}
+ // Fold a cast of a constant.
if (Constant *C = dyn_cast<Constant>(V))
return ConstantExpr::getCast(Op, C, Ty);
+ // Cast the argument at the beginning of the entry block, after
+ // any bitcasts of other arguments.
if (Argument *A = dyn_cast<Argument>(V)) {
- // Check to see if there is already a cast!
- for (Value::use_iterator UI = A->use_begin(), E = A->use_end();
- UI != E; ++UI)
- if ((*UI)->getType() == Ty)
- if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
- if (CI->getOpcode() == Op) {
- // If the cast isn't the first instruction of the function, move it.
- if (BasicBlock::iterator(CI) !=
- A->getParent()->getEntryBlock().begin()) {
- // Recreate the cast at the beginning of the entry block.
- // The old cast is left in place in case it is being used
- // as an insert point.
- Instruction *NewCI =
- CastInst::Create(Op, V, Ty, "",
- A->getParent()->getEntryBlock().begin());
- NewCI->takeName(CI);
- CI->replaceAllUsesWith(NewCI);
- return NewCI;
- }
- return CI;
- }
-
- Instruction *I = CastInst::Create(Op, V, Ty, V->getName(),
- A->getParent()->getEntryBlock().begin());
- rememberInstruction(I);
- return I;
+ BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
+ while ((isa<BitCastInst>(IP) &&
+ isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
+ cast<BitCastInst>(IP)->getOperand(0) != A) ||
+ isa<DbgInfoIntrinsic>(IP))
+ ++IP;
+ return ReuseOrCreateCast(A, Ty, Op, IP);
}
+ // Cast the result of an instruction immediately after the instruction.
Instruction *I = cast<Instruction>(V);
-
- // Check to see if there is already a cast. If there is, use it.
- for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI) {
- if ((*UI)->getType() == Ty)
- if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
- if (CI->getOpcode() == Op) {
- BasicBlock::iterator It = I; ++It;
- if (isa<InvokeInst>(I))
- It = cast<InvokeInst>(I)->getNormalDest()->begin();
- while (isa<PHINode>(It)) ++It;
- if (It != BasicBlock::iterator(CI)) {
- // Recreate the cast after the user.
- // The old cast is left in place in case it is being used
- // as an insert point.
- Instruction *NewCI = CastInst::Create(Op, V, Ty, "", It);
- NewCI->takeName(CI);
- CI->replaceAllUsesWith(NewCI);
- rememberInstruction(NewCI);
- return NewCI;
- }
- rememberInstruction(CI);
- return CI;
- }
- }
BasicBlock::iterator IP = I; ++IP;
if (InvokeInst *II = dyn_cast<InvokeInst>(I))
IP = II->getNormalDest()->begin();
- while (isa<PHINode>(IP)) ++IP;
- Instruction *CI = CastInst::Create(Op, V, Ty, V->getName(), IP);
- rememberInstruction(CI);
- return CI;
+ while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP)) ++IP;
+ return ReuseOrCreateCast(I, Ty, Op, IP);
}
/// InsertBinop - Insert the specified binary operator, doing a small amount
@@ -295,11 +290,11 @@ static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
// the sum into a single value, so just use that.
Ops.clear();
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
- Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
+ Ops.append(Add->op_begin(), Add->op_end());
else if (!Sum->isZero())
Ops.push_back(Sum);
// Then append the addrecs.
- Ops.insert(Ops.end(), AddRecs.begin(), AddRecs.end());
+ Ops.append(AddRecs.begin(), AddRecs.end());
}
/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
@@ -322,7 +317,7 @@ static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
A->getLoop()));
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
Ops[i] = Zero;
- Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
+ Ops.append(Add->op_begin(), Add->op_end());
e += Add->getNumOperands();
} else {
Ops[i] = Start;
@@ -330,7 +325,7 @@ static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
}
if (!AddRecs.empty()) {
// Add the addrecs onto the end of the list.
- Ops.insert(Ops.end(), AddRecs.begin(), AddRecs.end());
+ Ops.append(AddRecs.begin(), AddRecs.end());
// Resort the operand list, moving any constants to the front.
SimplifyAddOperands(Ops, Ty, SE);
}
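The three append() changes above swap Ops.insert(Ops.end(), ...) for SmallVector's dedicated append(). A minimal standalone illustration of the equivalence:

    #include "llvm/ADT/SmallVector.h"
    using namespace llvm;

    void appendDemo(const SmallVectorImpl<int> &Src, SmallVectorImpl<int> &Dst) {
      // append() states the intent directly, instead of passing insert() an
      // end() position it would immediately recompute.
      Dst.append(Src.begin(), Src.end());
      // Old spelling: Dst.insert(Dst.end(), Src.begin(), Src.end());
    }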
@@ -1070,7 +1065,8 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
BasicBlock::iterator NewInsertPt =
llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
- while (isa<PHINode>(NewInsertPt)) ++NewInsertPt;
+ while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt))
+ ++NewInsertPt;
V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
NewInsertPt);
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
@@ -1107,8 +1103,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
}
// {0,+,1} --> Insert a canonical induction variable into the loop!
- if (S->isAffine() &&
- S->getOperand(1) == SE.getConstant(Ty, 1)) {
+ if (S->isAffine() && S->getOperand(1)->isOne()) {
// If there's a canonical IV, just use it.
if (CanonicalIV) {
assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
@@ -1125,17 +1120,19 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
Constant *One = ConstantInt::get(Ty, 1);
for (pred_iterator HPI = pred_begin(Header), HPE = pred_end(Header);
- HPI != HPE; ++HPI)
- if (L->contains(*HPI)) {
+ HPI != HPE; ++HPI) {
+ BasicBlock *HP = *HPI;
+ if (L->contains(HP)) {
// Insert a unit add instruction right before the terminator
// corresponding to the back-edge.
Instruction *Add = BinaryOperator::CreateAdd(PN, One, "indvar.next",
- (*HPI)->getTerminator());
+ HP->getTerminator());
rememberInstruction(Add);
- PN->addIncoming(Add, *HPI);
+ PN->addIncoming(Add, HP);
} else {
- PN->addIncoming(Constant::getNullValue(Ty), *HPI);
+ PN->addIncoming(Constant::getNullValue(Ty), HP);
}
+ }
}
// {0,+,F} --> {0,+,1} * F
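The hunk above handles expansion of the canonical addrec {0,+,1}; the new isOne() test also avoids constructing a fresh SCEV constant just to compare against. A toy, plain-C++ picture of what gets materialized, assuming a single-preheader, single-latch loop:

    unsigned sumFirstN(unsigned N) {
      unsigned indvar = 0;                 // phi: incoming 0 from the preheader
      unsigned acc = 0;
      while (indvar != N) {
        acc += indvar;                     // loop body using the canonical IV
        unsigned indvar_next = indvar + 1; // "indvar.next" add before the latch
        indvar = indvar_next;              // phi: incoming value from back-edge
      }
      return acc;
    }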
@@ -1312,7 +1309,9 @@ Value *SCEVExpander::expand(const SCEV *S) {
}
void SCEVExpander::rememberInstruction(Value *I) {
- if (PostIncLoops.empty())
+ if (!PostIncLoops.empty())
+ InsertedPostIncValues.insert(I);
+ else
InsertedValues.insert(I);
// If we just claimed an existing instruction and that instruction had
diff --git a/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp b/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
index 75c381d..563fd2f 100644
--- a/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
+++ b/contrib/llvm/lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -105,22 +105,25 @@ const SCEV *llvm::TransformForPostIncUse(TransformKind Kind,
case NormalizeAutodetect:
if (Instruction *OI = dyn_cast<Instruction>(OperandValToReplace))
if (IVUseShouldUsePostIncValue(User, OI, L, &DT)) {
- Result = SE.getMinusSCEV(Result, AR->getStepRecurrence(SE));
+ const SCEV *TransformedStep =
+ TransformForPostIncUse(Kind, AR->getStepRecurrence(SE),
+ User, OperandValToReplace, Loops, SE, DT);
+ Result = SE.getMinusSCEV(Result, TransformedStep);
Loops.insert(L);
}
break;
case Normalize:
- if (Loops.count(L))
- Result = SE.getMinusSCEV(Result, AR->getStepRecurrence(SE));
- break;
- case Denormalize:
if (Loops.count(L)) {
const SCEV *TransformedStep =
TransformForPostIncUse(Kind, AR->getStepRecurrence(SE),
User, OperandValToReplace, Loops, SE, DT);
- Result = SE.getAddExpr(Result, TransformedStep);
+ Result = SE.getMinusSCEV(Result, TransformedStep);
}
break;
+ case Denormalize:
+ if (Loops.count(L))
+ Result = SE.getAddExpr(Result, AR->getStepRecurrence(SE));
+ break;
}
return Result;
}
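The reordering above makes normalization and denormalization proper inverses: normalizing subtracts a recursively transformed step (the step may itself be an addrec needing the same treatment), while denormalizing adds the raw step back. A toy integer model of the invariant being restored:

    #include <cassert>

    // Toy model only: in the real code the step on the normalize side is
    // itself run through TransformForPostIncUse before being subtracted.
    int normalize(int Value, int Step)   { return Value - Step; }
    int denormalize(int Value, int Step) { return Value + Step; }

    int main() {
      int AddRec = 42, Step = 7;
      assert(denormalize(normalize(AddRec, Step), Step) == AddRec);
      return 0;
    }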
diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp
index 7e8ec2e..b4c9884 100644
--- a/contrib/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp
@@ -953,7 +953,7 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
// sqrt(-0.0) = -0.0, no other negative results are possible.
if (II->getIntrinsicID() == Intrinsic::sqrt)
- return CannotBeNegativeZero(II->getOperand(1), Depth+1);
+ return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);
if (const CallInst *CI = dyn_cast<CallInst>(I))
if (const Function *F = CI->getCalledFunction()) {
@@ -966,7 +966,7 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
if (F->getName() == "fabsl") return true;
if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
F->getName() == "sqrtl")
- return CannotBeNegativeZero(CI->getOperand(1), Depth+1);
+ return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
}
}
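Both fixes above switch from raw operand indexing to the argument-oriented accessor. At this point in the tree a call stores its callee as operand 0 with arguments after it, so getArgOperand(i) is operand i+1; the accessor keeps callers correct if that layout ever changes. A minimal sketch, with the header path assumed from this era's tree:

    #include "llvm/Instructions.h"
    using namespace llvm;

    Value *firstArgument(CallInst *CI) {
      return CI->getArgOperand(0); // was CI->getOperand(1) under this layout
    }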
diff --git a/contrib/llvm/lib/Archive/ArchiveWriter.cpp b/contrib/llvm/lib/Archive/ArchiveWriter.cpp
index 21d4f65..7eeeb59 100644
--- a/contrib/llvm/lib/Archive/ArchiveWriter.cpp
+++ b/contrib/llvm/lib/Archive/ArchiveWriter.cpp
@@ -366,8 +366,7 @@ Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames, bool Compress,
// Check for errors opening or creating archive file.
if (!ArchiveFile.is_open() || ArchiveFile.bad()) {
- if (TmpArchive.exists())
- TmpArchive.eraseFromDisk();
+ TmpArchive.eraseFromDisk();
if (ErrMsg)
*ErrMsg = "Error opening archive file: " + archPath.str();
return true;
@@ -387,8 +386,7 @@ Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames, bool Compress,
for (MembersList::iterator I = begin(), E = end(); I != E; ++I) {
if (writeMember(*I, ArchiveFile, CreateSymbolTable,
TruncateNames, Compress, ErrMsg)) {
- if (TmpArchive.exists())
- TmpArchive.eraseFromDisk();
+ TmpArchive.eraseFromDisk();
ArchiveFile.close();
return true;
}
@@ -420,8 +418,7 @@ Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames, bool Compress,
std::ofstream FinalFile(FinalFilePath.c_str(), io_mode);
if (!FinalFile.is_open() || FinalFile.bad()) {
- if (TmpArchive.exists())
- TmpArchive.eraseFromDisk();
+ TmpArchive.eraseFromDisk();
if (ErrMsg)
*ErrMsg = "Error opening archive file: " + FinalFilePath.str();
return true;
@@ -438,8 +435,7 @@ Archive::writeToDisk(bool CreateSymbolTable, bool TruncateNames, bool Compress,
if (foreignST) {
if (writeMember(*foreignST, FinalFile, false, false, false, ErrMsg)) {
FinalFile.close();
- if (TmpArchive.exists())
- TmpArchive.eraseFromDisk();
+ TmpArchive.eraseFromDisk();
return true;
}
}
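The four hunks above drop the exists() guard before each cleanup, which presumably relies on eraseFromDisk() being a harmless no-op for a missing file. The same best-effort idiom in portable C++:

    #include <cstdio>

    // std::remove() simply fails when the path is absent, so an error-path
    // cleanup that only wants "best effort" needs no existence check.
    void bestEffortErase(const char *Path) {
      (void)std::remove(Path);
    }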
diff --git a/contrib/llvm/lib/AsmParser/LLLexer.cpp b/contrib/llvm/lib/AsmParser/LLLexer.cpp
index 9b4370f..f4c0e50 100644
--- a/contrib/llvm/lib/AsmParser/LLLexer.cpp
+++ b/contrib/llvm/lib/AsmParser/LLLexer.cpp
@@ -492,6 +492,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(private);
KEYWORD(linker_private);
+ KEYWORD(linker_private_weak);
KEYWORD(internal);
KEYWORD(available_externally);
KEYWORD(linkonce);
diff --git a/contrib/llvm/lib/AsmParser/LLParser.cpp b/contrib/llvm/lib/AsmParser/LLParser.cpp
index 226d8d3..221b994 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.cpp
+++ b/contrib/llvm/lib/AsmParser/LLParser.cpp
@@ -196,19 +196,20 @@ bool LLParser::ParseTopLevelEntities() {
// optional leading prefixes, the production is:
// GlobalVar ::= OptionalLinkage OptionalVisibility OptionalThreadLocal
// OptionalAddrSpace ('constant'|'global') ...
- case lltok::kw_private : // OptionalLinkage
- case lltok::kw_linker_private: // OptionalLinkage
- case lltok::kw_internal: // OptionalLinkage
- case lltok::kw_weak: // OptionalLinkage
- case lltok::kw_weak_odr: // OptionalLinkage
- case lltok::kw_linkonce: // OptionalLinkage
- case lltok::kw_linkonce_odr: // OptionalLinkage
- case lltok::kw_appending: // OptionalLinkage
- case lltok::kw_dllexport: // OptionalLinkage
- case lltok::kw_common: // OptionalLinkage
- case lltok::kw_dllimport: // OptionalLinkage
- case lltok::kw_extern_weak: // OptionalLinkage
- case lltok::kw_external: { // OptionalLinkage
+ case lltok::kw_private: // OptionalLinkage
+ case lltok::kw_linker_private: // OptionalLinkage
+ case lltok::kw_linker_private_weak: // OptionalLinkage
+ case lltok::kw_internal: // OptionalLinkage
+ case lltok::kw_weak: // OptionalLinkage
+ case lltok::kw_weak_odr: // OptionalLinkage
+ case lltok::kw_linkonce: // OptionalLinkage
+ case lltok::kw_linkonce_odr: // OptionalLinkage
+ case lltok::kw_appending: // OptionalLinkage
+ case lltok::kw_dllexport: // OptionalLinkage
+ case lltok::kw_common: // OptionalLinkage
+ case lltok::kw_dllimport: // OptionalLinkage
+ case lltok::kw_extern_weak: // OptionalLinkage
+ case lltok::kw_external: { // OptionalLinkage
unsigned Linkage, Visibility;
if (ParseOptionalLinkage(Linkage) ||
ParseOptionalVisibility(Visibility) ||
@@ -543,20 +544,21 @@ bool LLParser::ParseNamedMetadata() {
return true;
SmallVector<MDNode *, 8> Elts;
- do {
- // Null is a special case since it is typeless.
- if (EatIfPresent(lltok::kw_null)) {
- Elts.push_back(0);
- continue;
- }
+ if (Lex.getKind() != lltok::rbrace)
+ do {
+ // Null is a special case since it is typeless.
+ if (EatIfPresent(lltok::kw_null)) {
+ Elts.push_back(0);
+ continue;
+ }
- if (ParseToken(lltok::exclaim, "Expected '!' here"))
- return true;
+ if (ParseToken(lltok::exclaim, "Expected '!' here"))
+ return true;
- MDNode *N = 0;
- if (ParseMDNodeID(N)) return true;
- Elts.push_back(N);
- } while (EatIfPresent(lltok::comma));
+ MDNode *N = 0;
+ if (ParseMDNodeID(N)) return true;
+ Elts.push_back(N);
+ } while (EatIfPresent(lltok::comma));
if (ParseToken(lltok::rbrace, "expected end of metadata node"))
return true;
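With the rbrace peek above, an empty named metadata list now parses instead of erroring after the '{'. A hedged usage sketch, assuming this era's assembly-parser entry point and header layout:

    #include "llvm/Assembly/Parser.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include "llvm/Support/SourceMgr.h"
    using namespace llvm;

    Module *parseEmptyNamedMD(LLVMContext &Ctx) {
      SMDiagnostic Err;
      // Previously this string was rejected; now it yields a module with an
      // empty !empty named metadata node.
      return ParseAssemblyString("!empty = !{}\n", 0, Err, Ctx);
    }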
@@ -629,7 +631,8 @@ bool LLParser::ParseAlias(const std::string &Name, LocTy NameLoc,
Linkage != GlobalValue::WeakODRLinkage &&
Linkage != GlobalValue::InternalLinkage &&
Linkage != GlobalValue::PrivateLinkage &&
- Linkage != GlobalValue::LinkerPrivateLinkage)
+ Linkage != GlobalValue::LinkerPrivateLinkage &&
+ Linkage != GlobalValue::LinkerPrivateWeakLinkage)
return Error(LinkageLoc, "invalid linkage type for alias");
Constant *Aliasee;
@@ -1013,11 +1016,13 @@ bool LLParser::ParseOptionalAttrs(unsigned &Attrs, unsigned AttrKind) {
/// ::= /*empty*/
/// ::= 'private'
/// ::= 'linker_private'
+/// ::= 'linker_private_weak'
/// ::= 'internal'
/// ::= 'weak'
/// ::= 'weak_odr'
/// ::= 'linkonce'
/// ::= 'linkonce_odr'
+/// ::= 'available_externally'
/// ::= 'appending'
/// ::= 'dllexport'
/// ::= 'common'
@@ -1030,6 +1035,9 @@ bool LLParser::ParseOptionalLinkage(unsigned &Res, bool &HasLinkage) {
default: Res=GlobalValue::ExternalLinkage; return false;
case lltok::kw_private: Res = GlobalValue::PrivateLinkage; break;
case lltok::kw_linker_private: Res = GlobalValue::LinkerPrivateLinkage; break;
+ case lltok::kw_linker_private_weak:
+ Res = GlobalValue::LinkerPrivateWeakLinkage;
+ break;
case lltok::kw_internal: Res = GlobalValue::InternalLinkage; break;
case lltok::kw_weak: Res = GlobalValue::WeakAnyLinkage; break;
case lltok::kw_weak_odr: Res = GlobalValue::WeakODRLinkage; break;
@@ -2014,33 +2022,8 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
ID.StrVal = Lex.getStrVal();
ID.Kind = ValID::t_LocalName;
break;
- case lltok::exclaim: // !{...} MDNode, !"foo" MDString
- Lex.Lex();
-
- if (EatIfPresent(lltok::lbrace)) {
- SmallVector<Value*, 16> Elts;
- if (ParseMDNodeVector(Elts, PFS) ||
- ParseToken(lltok::rbrace, "expected end of metadata node"))
- return true;
-
- ID.MDNodeVal = MDNode::get(Context, Elts.data(), Elts.size());
- ID.Kind = ValID::t_MDNode;
- return false;
- }
-
- // Standalone metadata reference
- // !{ ..., !42, ... }
- if (Lex.getKind() == lltok::APSInt) {
- if (ParseMDNodeID(ID.MDNodeVal)) return true;
- ID.Kind = ValID::t_MDNode;
- return false;
- }
-
- // MDString:
- // ::= '!' STRINGCONSTANT
- if (ParseMDString(ID.MDStringVal)) return true;
- ID.Kind = ValID::t_MDString;
- return false;
+ case lltok::exclaim: // !42, !{...}, or !"foo"
+ return ParseMetadataValue(ID, PFS);
case lltok::APSInt:
ID.APSIntVal = Lex.getAPSIntVal();
ID.Kind = ValID::t_APSInt;
@@ -2521,6 +2504,42 @@ bool LLParser::ParseGlobalValueVector(SmallVectorImpl<Constant*> &Elts) {
return false;
}
+/// ParseMetadataValue
+/// ::= !42
+/// ::= !{...}
+/// ::= !"string"
+bool LLParser::ParseMetadataValue(ValID &ID, PerFunctionState *PFS) {
+ assert(Lex.getKind() == lltok::exclaim);
+ Lex.Lex();
+
+ // MDNode:
+ // !{ ... }
+ if (EatIfPresent(lltok::lbrace)) {
+ SmallVector<Value*, 16> Elts;
+ if (ParseMDNodeVector(Elts, PFS) ||
+ ParseToken(lltok::rbrace, "expected end of metadata node"))
+ return true;
+
+ ID.MDNodeVal = MDNode::get(Context, Elts.data(), Elts.size());
+ ID.Kind = ValID::t_MDNode;
+ return false;
+ }
+
+ // Standalone metadata reference
+ // !42
+ if (Lex.getKind() == lltok::APSInt) {
+ if (ParseMDNodeID(ID.MDNodeVal)) return true;
+ ID.Kind = ValID::t_MDNode;
+ return false;
+ }
+
+ // MDString:
+ // ::= '!' STRINGCONSTANT
+ if (ParseMDString(ID.MDStringVal)) return true;
+ ID.Kind = ValID::t_MDString;
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// Function Parsing.
@@ -2704,6 +2723,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
break;
case GlobalValue::PrivateLinkage:
case GlobalValue::LinkerPrivateLinkage:
+ case GlobalValue::LinkerPrivateWeakLinkage:
case GlobalValue::InternalLinkage:
case GlobalValue::AvailableExternallyLinkage:
case GlobalValue::LinkOnceAnyLinkage:
@@ -3791,8 +3811,8 @@ int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS,
}
}
- if (Size && !Size->getType()->isIntegerTy(32))
- return Error(SizeLoc, "element count must be i32");
+ if (Size && !Size->getType()->isIntegerTy())
+ return Error(SizeLoc, "element count must have integer type");
if (isAlloca) {
Inst = new AllocaInst(Ty, Size, Alignment);
@@ -3801,6 +3821,8 @@ int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS,
// Autoupgrade old malloc instruction to malloc call.
// FIXME: Remove in LLVM 3.0.
+ if (Size && !Size->getType()->isIntegerTy(32))
+ return Error(SizeLoc, "element count must be i32");
const Type *IntPtrTy = Type::getInt32Ty(Context);
Constant *AllocSize = ConstantExpr::getSizeOf(Ty);
AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, IntPtrTy);
@@ -3973,6 +3995,10 @@ int LLParser::ParseInsertValue(Instruction *&Inst, PerFunctionState &PFS) {
/// ::= 'null' | TypeAndValue
bool LLParser::ParseMDNodeVector(SmallVectorImpl<Value*> &Elts,
PerFunctionState *PFS) {
+ // Check for an empty list.
+ if (Lex.getKind() == lltok::rbrace)
+ return false;
+
do {
// Null is a special case since it is typeless.
if (EatIfPresent(lltok::kw_null)) {
diff --git a/contrib/llvm/lib/AsmParser/LLParser.h b/contrib/llvm/lib/AsmParser/LLParser.h
index c8f669f..f765a2a 100644
--- a/contrib/llvm/lib/AsmParser/LLParser.h
+++ b/contrib/llvm/lib/AsmParser/LLParser.h
@@ -308,6 +308,7 @@ namespace llvm {
bool ParseGlobalValue(const Type *Ty, Constant *&V);
bool ParseGlobalTypeAndValue(Constant *&V);
bool ParseGlobalValueVector(SmallVectorImpl<Constant*> &Elts);
+ bool ParseMetadataValue(ValID &ID, PerFunctionState *PFS);
bool ParseMDNodeVector(SmallVectorImpl<Value*> &, PerFunctionState *PFS);
// Function Parsing.
diff --git a/contrib/llvm/lib/AsmParser/LLToken.h b/contrib/llvm/lib/AsmParser/LLToken.h
index 5eed170..2703134 100644
--- a/contrib/llvm/lib/AsmParser/LLToken.h
+++ b/contrib/llvm/lib/AsmParser/LLToken.h
@@ -37,9 +37,9 @@ namespace lltok {
kw_declare, kw_define,
kw_global, kw_constant,
- kw_private, kw_linker_private, kw_internal, kw_linkonce, kw_linkonce_odr,
- kw_weak, kw_weak_odr, kw_appending, kw_dllimport, kw_dllexport, kw_common,
- kw_available_externally,
+ kw_private, kw_linker_private, kw_linker_private_weak, kw_internal,
+ kw_linkonce, kw_linkonce_odr, kw_weak, kw_weak_odr, kw_appending,
+ kw_dllimport, kw_dllexport, kw_common, kw_available_externally,
kw_default, kw_hidden, kw_protected,
kw_extern_weak,
kw_external, kw_thread_local,
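Taken together, the LLLexer, LLParser, and LLToken hunks above thread the new linker_private_weak linkage through the textual IR path; the bitcode reader/writer hunks below encode it as linkage value 14, and the AsmPrinter hunks treat it like the other weak-def linkages. From the C++ API side the new enumerator is used like any other linkage, e.g.:

    #include "llvm/GlobalValue.h"
    using namespace llvm;

    void markLinkerPrivateWeak(GlobalValue *GV) {
      GV->setLinkage(GlobalValue::LinkerPrivateWeakLinkage);
    }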
diff --git a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 69adead..b3f0776 100644
--- a/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/contrib/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -75,6 +75,7 @@ static GlobalValue::LinkageTypes GetDecodedLinkage(unsigned Val) {
case 11: return GlobalValue::LinkOnceODRLinkage;
case 12: return GlobalValue::AvailableExternallyLinkage;
case 13: return GlobalValue::LinkerPrivateLinkage;
+ case 14: return GlobalValue::LinkerPrivateWeakLinkage;
}
}
@@ -252,17 +253,18 @@ void BitcodeReaderValueList::ResolveConstantForwardRefs() {
// at once.
while (!Placeholder->use_empty()) {
Value::use_iterator UI = Placeholder->use_begin();
+ User *U = *UI;
// If the using object isn't uniqued, just update the operands. This
// handles instructions and initializers for global variables.
- if (!isa<Constant>(*UI) || isa<GlobalValue>(*UI)) {
+ if (!isa<Constant>(U) || isa<GlobalValue>(U)) {
UI.getUse().set(RealVal);
continue;
}
// Otherwise, we have a constant that uses the placeholder. Replace that
// constant with a new constant that has *all* placeholder uses updated.
- Constant *UserC = cast<Constant>(*UI);
+ Constant *UserC = cast<Constant>(U);
for (User::op_iterator I = UserC->op_begin(), E = UserC->op_end();
I != E; ++I) {
Value *NewOp;
@@ -818,7 +820,7 @@ bool BitcodeReader::ParseMetadata() {
IsFunctionLocal = true;
// fall-through
case bitc::METADATA_NODE: {
- if (Record.empty() || Record.size() % 2 == 1)
+ if (Record.size() % 2 == 1)
return Error("Invalid METADATA_NODE record");
unsigned Size = Record.size();
@@ -832,7 +834,8 @@ bool BitcodeReader::ParseMetadata() {
else
Elts.push_back(NULL);
}
- Value *V = MDNode::getWhenValsUnresolved(Context, &Elts[0], Elts.size(),
+ Value *V = MDNode::getWhenValsUnresolved(Context,
+ Elts.data(), Elts.size(),
IsFunctionLocal);
IsFunctionLocal = false;
MDValueList.AssignValue(V, NextMDValueNo++);
@@ -2178,13 +2181,18 @@ bool BitcodeReader::ParseFunctionBody(Function *F) {
InstructionList.push_back(I);
break;
}
- case bitc::FUNC_CODE_INST_ALLOCA: { // ALLOCA: [instty, op, align]
- if (Record.size() < 3)
+ case bitc::FUNC_CODE_INST_ALLOCA: { // ALLOCA: [instty, opty, op, align]
+ // For backward compatibility, tolerate a lack of an opty, and use i32.
+ // LLVM 3.0: Remove this.
+ if (Record.size() < 3 || Record.size() > 4)
return Error("Invalid ALLOCA record");
+ unsigned OpNum = 0;
const PointerType *Ty =
- dyn_cast_or_null<PointerType>(getTypeByID(Record[0]));
- Value *Size = getFnValueByID(Record[1], Type::getInt32Ty(Context));
- unsigned Align = Record[2];
+ dyn_cast_or_null<PointerType>(getTypeByID(Record[OpNum++]));
+ const Type *OpTy = Record.size() == 4 ? getTypeByID(Record[OpNum++]) :
+ Type::getInt32Ty(Context);
+ Value *Size = getFnValueByID(Record[OpNum++], OpTy);
+ unsigned Align = Record[OpNum++];
if (!Ty || !Size) return Error("Invalid ALLOCA record");
I = new AllocaInst(Ty->getElementType(), Size, (1 << Align) >> 1);
InstructionList.push_back(I);
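The reader above accepts both the old three-element ALLOCA record (size-operand type implied to be i32) and the new four-element form that carries the operand type explicitly; the writer hunk below always emits the new form. A toy decoder mirroring the compatibility logic:

    struct AllocaRecord { unsigned InstTy, OpTy, Op, Align; };

    // Returns true on error, matching the reader's convention.
    bool decodeAlloca(const unsigned *Record, unsigned Size, AllocaRecord &Out) {
      if (Size < 3 || Size > 4)
        return true;
      unsigned I = 0;
      Out.InstTy = Record[I++];
      Out.OpTy   = (Size == 4) ? Record[I++] : 0u; // 0u stands in for "i32"
      Out.Op     = Record[I++];
      Out.Align  = Record[I++];
      return false;
    }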
diff --git a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 9bda6dc..fa1b2c4 100644
--- a/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -313,6 +313,7 @@ static unsigned getEncodedLinkage(const GlobalValue *GV) {
case GlobalValue::LinkOnceODRLinkage: return 11;
case GlobalValue::AvailableExternallyLinkage: return 12;
case GlobalValue::LinkerPrivateLinkage: return 13;
+ case GlobalValue::LinkerPrivateWeakLinkage: return 14;
}
}
@@ -577,10 +578,9 @@ static void WriteFunctionLocalMetadata(const Function &F,
BitstreamWriter &Stream) {
bool StartedMetadataBlock = false;
SmallVector<uint64_t, 64> Record;
- const ValueEnumerator::ValueList &Vals = VE.getMDValues();
-
+ const SmallVector<const MDNode *, 8> &Vals = VE.getFunctionLocalMDValues();
for (unsigned i = 0, e = Vals.size(); i != e; ++i)
- if (const MDNode *N = dyn_cast<MDNode>(Vals[i].first))
+ if (const MDNode *N = Vals[i])
if (N->isFunctionLocal() && N->getFunction() == &F) {
if (!StartedMetadataBlock) {
Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
@@ -588,7 +588,7 @@ static void WriteFunctionLocalMetadata(const Function &F,
}
WriteMDNode(N, VE, Stream, Record);
}
-
+
if (StartedMetadataBlock)
Stream.ExitBlock();
}
@@ -1114,6 +1114,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
case Instruction::Alloca:
Code = bitc::FUNC_CODE_INST_ALLOCA;
Vals.push_back(VE.getTypeID(I.getType()));
+ Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
Vals.push_back(VE.getValueID(I.getOperand(0))); // size.
Vals.push_back(Log2_32(cast<AllocaInst>(I).getAlignment())+1);
break;
@@ -1134,26 +1135,25 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
Vals.push_back(cast<StoreInst>(I).isVolatile());
break;
case Instruction::Call: {
- const PointerType *PTy = cast<PointerType>(I.getOperand(0)->getType());
+ const CallInst &CI = cast<CallInst>(I);
+ const PointerType *PTy = cast<PointerType>(CI.getCalledValue()->getType());
const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
Code = bitc::FUNC_CODE_INST_CALL;
- const CallInst *CI = cast<CallInst>(&I);
- Vals.push_back(VE.getAttributeID(CI->getAttributes()));
- Vals.push_back((CI->getCallingConv() << 1) | unsigned(CI->isTailCall()));
- PushValueAndType(CI->getOperand(0), InstID, Vals, VE); // Callee
+ Vals.push_back(VE.getAttributeID(CI.getAttributes()));
+ Vals.push_back((CI.getCallingConv() << 1) | unsigned(CI.isTailCall()));
+ PushValueAndType(CI.getCalledValue(), InstID, Vals, VE); // Callee
// Emit value #'s for the fixed parameters.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- Vals.push_back(VE.getValueID(I.getOperand(i+1))); // fixed param.
+ Vals.push_back(VE.getValueID(CI.getArgOperand(i))); // fixed param.
// Emit type/value pairs for varargs params.
if (FTy->isVarArg()) {
- unsigned NumVarargs = I.getNumOperands()-1-FTy->getNumParams();
- for (unsigned i = I.getNumOperands()-NumVarargs, e = I.getNumOperands();
+ for (unsigned i = FTy->getNumParams(), e = CI.getNumArgOperands();
i != e; ++i)
- PushValueAndType(I.getOperand(i), InstID, Vals, VE); // varargs
+ PushValueAndType(CI.getArgOperand(i), InstID, Vals, VE); // varargs
}
break;
}
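The Call case above now walks arguments with getNumArgOperands()/getArgOperand() instead of hand-computed raw operand indices: fixed parameters come from the function type, anything beyond them is a vararg. A minimal sketch of the iteration scheme, header paths assumed per this era's tree:

    #include "llvm/DerivedTypes.h"
    #include "llvm/Instructions.h"
    using namespace llvm;

    void visitCallArgs(const CallInst &CI, const FunctionType *FTy) {
      for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
        (void)CI.getArgOperand(i);                       // fixed parameter
      for (unsigned i = FTy->getNumParams(),
                    e = CI.getNumArgOperands(); i != e; ++i)
        (void)CI.getArgOperand(i);                       // vararg
    }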
@@ -1662,15 +1662,8 @@ void llvm::WriteBitcodeToFile(const Module *M, raw_ostream &Out) {
WriteBitcodeToStream( M, Stream );
- // If writing to stdout, set binary mode.
- if (&llvm::outs() == &Out)
- sys::Program::ChangeStdoutToBinary();
-
// Write the generated bitstream to "Out".
Out.write((char*)&Buffer.front(), Buffer.size());
-
- // Make sure it hits disk now.
- Out.flush();
}
/// WriteBitcodeToStream - Write the specified module to the specified output
diff --git a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
index d2baec7..7fa425a 100644
--- a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -72,7 +72,7 @@ ValueEnumerator::ValueEnumerator(const Module *M) {
// Enumerate types used by the type symbol table.
EnumerateTypeSymbolTable(M->getTypeSymbolTable());
- // Insert constants and metadata that are named at module level into the slot
+ // Insert constants and metadata that are named at module level into the slot
// pool so that the module symbol table can refer to them...
EnumerateValueSymbolTable(M->getValueSymbolTable());
EnumerateMDSymbolTable(M->getMDSymbolTable());
@@ -257,6 +257,8 @@ void ValueEnumerator::EnumerateMetadata(const Value *MD) {
else
EnumerateType(Type::getVoidTy(MD->getContext()));
}
+ if (N->isFunctionLocal() && N->getFunction())
+ FunctionLocalMDs.push_back(N);
return;
}
@@ -414,7 +416,8 @@ void ValueEnumerator::incorporateFunction(const Function &F) {
FirstInstID = Values.size();
- SmallVector<MDNode *, 8> FunctionLocalMDs;
+ FunctionLocalMDs.clear();
+ SmallVector<MDNode *, 8> FnLocalMDVector;
// Add all of the instructions.
for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E; ++I) {
@@ -423,7 +426,7 @@ void ValueEnumerator::incorporateFunction(const Function &F) {
if (MDNode *MD = dyn_cast<MDNode>(*OI))
if (MD->isFunctionLocal() && MD->getFunction())
// Enumerate metadata after the instructions they might refer to.
- FunctionLocalMDs.push_back(MD);
+ FnLocalMDVector.push_back(MD);
}
if (!I->getType()->isVoidTy())
EnumerateValue(I);
@@ -431,8 +434,8 @@ void ValueEnumerator::incorporateFunction(const Function &F) {
}
// Add all of the function-local metadata.
- for (unsigned i = 0, e = FunctionLocalMDs.size(); i != e; ++i)
- EnumerateOperandType(FunctionLocalMDs[i]);
+ for (unsigned i = 0, e = FnLocalMDVector.size(); i != e; ++i)
+ EnumerateOperandType(FnLocalMDVector[i]);
}
void ValueEnumerator::purgeFunction() {
diff --git a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
index 4f8ebf5..2b9b15f 100644
--- a/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
+++ b/contrib/llvm/lib/Bitcode/Writer/ValueEnumerator.h
@@ -15,6 +15,7 @@
#define VALUE_ENUMERATOR_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Attributes.h"
#include <vector>
@@ -26,7 +27,7 @@ class Instruction;
class BasicBlock;
class Function;
class Module;
-class MetadataBase;
+class MDNode;
class NamedMDNode;
class AttrListPtr;
class TypeSymbolTable;
@@ -49,6 +50,7 @@ private:
ValueMapType ValueMap;
ValueList Values;
ValueList MDValues;
+ SmallVector<const MDNode *, 8> FunctionLocalMDs;
ValueMapType MDValueMap;
typedef DenseMap<void*, unsigned> AttributeMapType;
@@ -105,6 +107,9 @@ public:
const ValueList &getValues() const { return Values; }
const ValueList &getMDValues() const { return MDValues; }
+ const SmallVector<const MDNode *, 8> &getFunctionLocalMDValues() const {
+ return FunctionLocalMDs;
+ }
const TypeList &getTypes() const { return Types; }
const std::vector<const BasicBlock*> &getBasicBlocks() const {
return BasicBlocks;
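The enumerator now buckets function-local MDNodes as they are enumerated, so WriteFunctionLocalMetadata iterates just those instead of filtering the full MD value list once per function. The shape of that trade in miniature:

    #include <vector>

    struct Node { bool FunctionLocal; };

    // Pre-bucket once at enumeration time (new) rather than scanning and
    // filtering the mixed list on every query (old).
    std::vector<const Node *> bucketFunctionLocal(const std::vector<Node> &All) {
      std::vector<const Node *> FnLocal;
      for (size_t i = 0, e = All.size(); i != e; ++i)
        if (All[i].FunctionLocal)
          FnLocal.push_back(&All[i]);
      return FnLocal;
    }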
diff --git a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index 4008a6a..a7189ac 100644
--- a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -21,6 +21,7 @@
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -114,6 +115,7 @@ AggressiveAntiDepBreaker(MachineFunction& MFi,
TargetSubtarget::RegClassVector& CriticalPathRCs) :
AntiDepBreaker(), MF(MFi),
MRI(MF.getRegInfo()),
+ TII(MF.getTarget().getInstrInfo()),
TRI(MF.getTarget().getRegisterInfo()),
AllocatableSet(TRI->getAllocatableSet(MF)),
State(NULL) {
@@ -163,25 +165,27 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
DefIndices[AliasReg] = ~0u;
}
}
- } else {
- // In a non-return block, examine the live-in regs of all successors.
- for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+ }
+
+ // In a non-return block, examine the live-in regs of all successors.
+ // Note a return block can have successors if the return instruction is
+ // predicated.
+ for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI)
- for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
+ for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- State->UnionGroups(Reg, 0);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = ~0u;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- State->UnionGroups(AliasReg, 0);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = ~0u;
- }
+ unsigned Reg = *I;
+ State->UnionGroups(Reg, 0);
+ KillIndices[Reg] = BB->size();
+ DefIndices[Reg] = ~0u;
+ // Repeat, for all aliases.
+ for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ unsigned AliasReg = *Alias;
+ State->UnionGroups(AliasReg, 0);
+ KillIndices[AliasReg] = BB->size();
+ DefIndices[AliasReg] = ~0u;
}
- }
+ }
// Mark live-out callee-saved registers. In a return block this is
// all callee-saved registers. In non-return this is any
@@ -390,7 +394,8 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
// If MI's defs have a special allocation requirement, don't allow
// any def registers to be changed. Also assume all registers
// defined in a call must not be changed (ABI).
- if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq()) {
+ if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq() ||
+ TII->isPredicated(MI)) {
DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
State->UnionGroups(Reg, 0);
}
@@ -443,6 +448,26 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI,
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
+ // If MI's uses have special allocation requirement, don't allow
+ // any use registers to be changed. Also assume all registers
+ // used in a call must not be changed (ABI).
+  // FIXME: The issue with predicated instructions is more complex. We are
+  // being conservative here because the kill markers cannot be trusted after
+ // if-conversion:
+ // %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
+ // ...
+ // STR %R0, %R6<kill>, %reg0, 0, pred:0, pred:%CPSR; mem:ST4[%395]
+ // %R6<def> = LDR %SP, %reg0, 100, pred:0, pred:%CPSR; mem:LD4[FixedStack12]
+ // STR %R0, %R6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
+ //
+ // The first R6 kill is not really a kill since it's killed by a predicated
+ // instruction which may not be executed. The second R6 def may or may not
+ // re-define R6 so it's not safe to change it since the last R6 use cannot be
+ // changed.
+ bool Special = MI->getDesc().isCall() ||
+ MI->getDesc().hasExtraSrcRegAllocReq() ||
+ TII->isPredicated(MI);
+
// Scan the register uses for this instruction and update
// live-ranges, groups and RegRefs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -459,10 +484,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI,
// for the register.
HandleLastUse(Reg, Count, "(last-use)");
- // If MI's uses have special allocation requirement, don't allow
- // any use registers to be changed. Also assume all registers
- // used in a call must not be changed (ABI).
- if (MI->getDesc().isCall() || MI->getDesc().hasExtraSrcRegAllocReq()) {
+ if (Special) {
DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
State->UnionGroups(Reg, 0);
}
@@ -604,8 +626,12 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters(
// order. If that register is available, and the corresponding
// registers are available for the other group subregisters, then we
// can use those registers to rename.
+
+ // FIXME: Using getMinimalPhysRegClass is very conservative. We should
+ // check every use of the register and find the largest register class
+ // that can be used in all of them.
const TargetRegisterClass *SuperRC =
- TRI->getPhysicalRegisterRegClass(SuperReg, MVT::Other);
+ TRI->getMinimalPhysRegClass(SuperReg, MVT::Other);
const TargetRegisterClass::iterator RB = SuperRC->allocation_order_begin(MF);
const TargetRegisterClass::iterator RE = SuperRC->allocation_order_end(MF);
@@ -905,6 +931,19 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
AggressiveAntiDepState::RegisterReference>::iterator
Q = Range.first, QE = Range.second; Q != QE; ++Q) {
Q->second.Operand->setReg(NewReg);
+ // If the SU for the instruction being updated has debug
+ // information related to the anti-dependency register, make
+ // sure to update that as well.
+ const SUnit *SU = MISUnitMap[Q->second.Operand->getParent()];
+ if (!SU) continue;
+ for (unsigned i = 0, e = SU->DbgInstrList.size() ; i < e ; ++i) {
+ MachineInstr *DI = SU->DbgInstrList[i];
+ assert (DI->getNumOperands()==3 && DI->getOperand(0).isReg() &&
+ DI->getOperand(0).getReg()
+ && "Non register dbg_value attached to SUnit!");
+ if (DI->getOperand(0).getReg() == AntiDepReg)
+ DI->getOperand(0).setReg(NewReg);
+ }
}
// We just went back in time and modified history; the
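The last hunk above closes a debug-info hole in anti-dependency breaking: after renaming a register in the real instruction stream, DBG_VALUE instructions hanging off the defining SUnit still named the old register. The retargeting step in isolation:

    struct DbgValueRef { unsigned Reg; };

    void retargetDbgValues(DbgValueRef *List, unsigned N,
                           unsigned OldReg, unsigned NewReg) {
      for (unsigned i = 0; i != N; ++i)
        if (List[i].Reg == OldReg)
          List[i].Reg = NewReg;
    }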
diff --git a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h
index 506d43e..91ebb85 100644
--- a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h
+++ b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.h
@@ -115,6 +115,7 @@ namespace llvm {
class AggressiveAntiDepBreaker : public AntiDepBreaker {
MachineFunction& MF;
MachineRegisterInfo &MRI;
+ const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
/// AllocatableSet - The set of allocatable registers.
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 5a0c27b..db1b37a 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -178,7 +178,7 @@ bool AsmPrinter::doInitialization(Module &M) {
if (!M.getModuleInlineAsm().empty()) {
OutStreamer.AddComment("Start of file scope inline assembly");
OutStreamer.AddBlankLine();
- EmitInlineAsm(M.getModuleInlineAsm(), 0/*no loc cookie*/);
+ EmitInlineAsm(M.getModuleInlineAsm()+"\n", 0/*no loc cookie*/);
OutStreamer.AddComment("End of file scope inline assembly");
OutStreamer.AddBlankLine();
}
@@ -199,7 +199,7 @@ void AsmPrinter::EmitLinkage(unsigned Linkage, MCSymbol *GVSym) const {
case GlobalValue::LinkOnceODRLinkage:
case GlobalValue::WeakAnyLinkage:
case GlobalValue::WeakODRLinkage:
- case GlobalValue::LinkerPrivateLinkage:
+ case GlobalValue::LinkerPrivateWeakLinkage:
if (MAI->getWeakDefDirective() != 0) {
// .globl _foo
OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
@@ -225,6 +225,7 @@ void AsmPrinter::EmitLinkage(unsigned Linkage, MCSymbol *GVSym) const {
break;
case GlobalValue::PrivateLinkage:
case GlobalValue::InternalLinkage:
+ case GlobalValue::LinkerPrivateLinkage:
break;
default:
llvm_unreachable("Unknown linkage type!");
@@ -330,7 +331,6 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
else if (GVKind.isThreadData()) {
OutStreamer.SwitchSection(TheSection);
- EmitLinkage(GV->getLinkage(), MangSym);
EmitAlignment(AlignLog, GV);
OutStreamer.EmitLabel(MangSym);
@@ -353,7 +353,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
// - spare pointer, used when mapped by the runtime
// - pointer to mangled symbol above with initializer
unsigned PtrSize = TD->getPointerSizeInBits()/8;
- OutStreamer.EmitSymbolValue(GetExternalSymbolSymbol("__tlv_bootstrap"),
+ OutStreamer.EmitSymbolValue(GetExternalSymbolSymbol("_tlv_bootstrap"),
PtrSize, 0);
OutStreamer.EmitIntValue(0, PtrSize, 0);
OutStreamer.EmitSymbolValue(MangSym, PtrSize, 0);
@@ -428,20 +428,12 @@ void AsmPrinter::EmitFunctionHeader() {
// Emit pre-function debug and/or EH information.
if (DE) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(EHTimerName, DWARFGroupName);
- DE->BeginFunction(MF);
- } else {
- DE->BeginFunction(MF);
- }
+ NamedRegionTimer T(EHTimerName, DWARFGroupName, TimePassesIsEnabled);
+ DE->BeginFunction(MF);
}
if (DD) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName);
- DD->beginFunction(MF);
- } else {
- DD->beginFunction(MF);
- }
+ NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
+ DD->beginFunction(MF);
}
}
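This is the first of several hunks in this file (and in DwarfDebug.cpp below) collapsing the if (TimePassesIsEnabled) duplication: NamedRegionTimer's constructor now takes the enable flag itself, so a single RAII object covers both cases. A toy analogue of the pattern:

    #include <cstdio>

    struct ScopedTimer {
      bool Enabled;
      explicit ScopedTimer(bool E) : Enabled(E) {
        if (Enabled) std::fprintf(stderr, "timer start\n");
      }
      ~ScopedTimer() {
        if (Enabled) std::fprintf(stderr, "timer stop\n");
      }
    };

    void emitSomething(bool TimePassesIsEnabled) {
      ScopedTimer T(TimePassesIsEnabled); // times this scope only when enabled
      // ... the actual work ...
    }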
@@ -458,14 +450,11 @@ void AsmPrinter::EmitFunctionEntryLabel() {
}
-/// EmitComments - Pretty-print comments for instructions.
-static void EmitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
- const MachineFunction *MF = MI.getParent()->getParent();
- const TargetMachine &TM = MF->getTarget();
-
- DebugLoc DL = MI.getDebugLoc();
+static void EmitDebugLoc(DebugLoc DL, const MachineFunction *MF,
+ raw_ostream &CommentOS) {
+ const LLVMContext &Ctx = MF->getFunction()->getContext();
if (!DL.isUnknown()) { // Print source line info.
- DIScope Scope(DL.getScope(MF->getFunction()->getContext()));
+ DIScope Scope(DL.getScope(Ctx));
// Omit the directory, because it's likely to be long and uninteresting.
if (Scope.Verify())
CommentOS << Scope.getFilename();
@@ -474,6 +463,23 @@ static void EmitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
CommentOS << ':' << DL.getLine();
if (DL.getCol() != 0)
CommentOS << ':' << DL.getCol();
+ DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(DL.getInlinedAt(Ctx));
+ if (!InlinedAtDL.isUnknown()) {
+ CommentOS << "[ ";
+ EmitDebugLoc(InlinedAtDL, MF, CommentOS);
+ CommentOS << " ]";
+ }
+ }
+}
+
+/// EmitComments - Pretty-print comments for instructions.
+static void EmitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
+ const MachineFunction *MF = MI.getParent()->getParent();
+ const TargetMachine &TM = MF->getTarget();
+
+ DebugLoc DL = MI.getDebugLoc();
+ if (!DL.isUnknown()) { // Print source line info.
+ EmitDebugLoc(DL, MF, CommentOS);
CommentOS << '\n';
}
@@ -611,12 +617,8 @@ void AsmPrinter::EmitFunctionBody() {
}
if (ShouldPrintDebugScopes) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName);
- DD->beginScope(II);
- } else {
- DD->beginScope(II);
- }
+ NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
+ DD->beginScope(II);
}
if (isVerbose())
@@ -649,12 +651,8 @@ void AsmPrinter::EmitFunctionBody() {
}
if (ShouldPrintDebugScopes) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName);
- DD->endScope(II);
- } else {
- DD->endScope(II);
- }
+ NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
+ DD->endScope(II);
}
}
}
@@ -692,20 +690,12 @@ void AsmPrinter::EmitFunctionBody() {
// Emit post-function debug information.
if (DD) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName);
- DD->endFunction(MF);
- } else {
- DD->endFunction(MF);
- }
+ NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
+ DD->endFunction(MF);
}
if (DE) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(EHTimerName, DWARFGroupName);
- DE->EndFunction();
- } else {
- DE->EndFunction();
- }
+ NamedRegionTimer T(EHTimerName, DWARFGroupName, TimePassesIsEnabled);
+ DE->EndFunction();
}
MMI->EndFunction();
@@ -730,19 +720,15 @@ bool AsmPrinter::doFinalization(Module &M) {
// Finalize debug and EH information.
if (DE) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(EHTimerName, DWARFGroupName);
- DE->EndModule();
- } else {
+ {
+ NamedRegionTimer T(EHTimerName, DWARFGroupName, TimePassesIsEnabled);
DE->EndModule();
}
delete DE; DE = 0;
}
if (DD) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName);
- DD->endModule();
- } else {
+ {
+ NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
DD->endModule();
}
delete DD; DD = 0;
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
index ba6fed2..202d9b6 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
@@ -53,17 +53,6 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, unsigned LocCookie) const {
}
SourceMgr SrcMgr;
-
- // Ensure the buffer is newline terminated.
- char *TmpString = 0;
- if (Str.back() != '\n') {
- TmpString = new char[Str.size() + 2];
- memcpy(TmpString, Str.data(), Str.size());
- TmpString[Str.size()] = '\n';
- TmpString[Str.size() + 1] = 0;
- isNullTerminated = true;
- Str = TmpString;
- }
// If the current LLVMContext has an inline asm handler, set it in SourceMgr.
LLVMContext &LLVMCtx = MMI->getModule()->getContext();
@@ -83,7 +72,7 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, unsigned LocCookie) const {
// Tell SrcMgr about this buffer, it takes ownership of the buffer.
SrcMgr.AddNewSourceBuffer(Buffer, SMLoc());
- AsmParser Parser(SrcMgr, OutContext, OutStreamer, *MAI);
+ AsmParser Parser(TM.getTarget(), SrcMgr, OutContext, OutStreamer, *MAI);
OwningPtr<TargetAsmParser> TAP(TM.getTarget().createAsmParser(Parser));
if (!TAP)
report_fatal_error("Inline asm not supported by this streamer because"
@@ -95,9 +84,6 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, unsigned LocCookie) const {
/*NoFinalize*/ true);
if (Res && !HasDiagHandler)
report_fatal_error("Error parsing inline asm\n");
-
- if (TmpString)
- delete[] TmpString;
}
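This removal pairs with the AsmPrinter.cpp hunk earlier that appends "\n" to the module-level inline asm: the newline guarantee presumably moves to the callers, so the emitter no longer heap-copies the buffer just to terminate it. The caller-side idiom in isolation:

    #include <string>

    std::string ensureTrailingNewline(std::string S) {
      if (S.empty() || S[S.size() - 1] != '\n')
        S += '\n';
      return S;
    }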
@@ -279,7 +265,7 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const {
// Okay, we finally have a value number. Ask the target to print this
// operand!
if (CurVariant == -1 || CurVariant == AsmPrinterVariant) {
- unsigned OpNo = 1;
+ unsigned OpNo = 2;
bool Error = false;
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
index b2c70d5..21396ca 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DIE.cpp
@@ -201,6 +201,7 @@ void DIEInteger::EmitValue(AsmPrinter *Asm, unsigned Form) const {
case dwarf::DW_FORM_data8: Size = 8; break;
case dwarf::DW_FORM_udata: Asm->EmitULEB128(Integer); return;
case dwarf::DW_FORM_sdata: Asm->EmitSLEB128(Integer); return;
+ case dwarf::DW_FORM_addr: Size = Asm->getTargetData().getPointerSize(); break;
default: llvm_unreachable("DIE Value form not supported yet");
}
Asm->OutStreamer.EmitIntValue(Integer, Size, 0/*addrspace*/);
@@ -221,6 +222,7 @@ unsigned DIEInteger::SizeOf(AsmPrinter *AP, unsigned Form) const {
case dwarf::DW_FORM_data8: return sizeof(int64_t);
case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer);
case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer);
+ case dwarf::DW_FORM_addr: return AP->getTargetData().getPointerSize();
default: llvm_unreachable("DIE Value form not supported yet"); break;
}
return 0;
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 890507c..65c1d19 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -44,7 +44,8 @@ using namespace llvm;
static cl::opt<bool> PrintDbgScope("print-dbgscope", cl::Hidden,
cl::desc("Print DbgScope information for each machine instruction"));
-static cl::opt<bool> DisableDebugInfoPrinting("disable-debug-info-print", cl::Hidden,
+static cl::opt<bool> DisableDebugInfoPrinting("disable-debug-info-print",
+ cl::Hidden,
cl::desc("Disable debug info printing"));
static cl::opt<bool> UnknownLocations("use-unknown-locations", cl::Hidden,
@@ -79,15 +80,13 @@ class CompileUnit {
/// IndexTyDie - An anonymous type for index type. Owned by CUDie.
DIE *IndexTyDie;
- /// GVToDieMap - Tracks the mapping of unit level debug informaton
+  /// MDNodeToDieMap - Tracks the mapping of unit level debug information
/// variables to debug information entries.
- /// FIXME : Rename GVToDieMap -> NodeToDieMap
- DenseMap<const MDNode *, DIE *> GVToDieMap;
+ DenseMap<const MDNode *, DIE *> MDNodeToDieMap;
- /// GVToDIEEntryMap - Tracks the mapping of unit level debug informaton
+  /// MDNodeToDIEEntryMap - Tracks the mapping of unit level debug information
/// descriptors to debug information entries using a DIEEntry proxy.
- /// FIXME : Rename
- DenseMap<const MDNode *, DIEEntry *> GVToDIEEntryMap;
+ DenseMap<const MDNode *, DIEEntry *> MDNodeToDIEEntryMap;
/// Globals - A map of globally visible named entities for this unit.
///
@@ -123,25 +122,25 @@ public:
/// getDIE - Returns the debug information entry map slot for the
/// specified debug variable.
- DIE *getDIE(const MDNode *N) { return GVToDieMap.lookup(N); }
+ DIE *getDIE(const MDNode *N) { return MDNodeToDieMap.lookup(N); }
/// insertDIE - Insert DIE into the map.
void insertDIE(const MDNode *N, DIE *D) {
- GVToDieMap.insert(std::make_pair(N, D));
+ MDNodeToDieMap.insert(std::make_pair(N, D));
}
/// getDIEEntry - Returns the debug information entry for the specified
/// debug variable.
DIEEntry *getDIEEntry(const MDNode *N) {
- DenseMap<const MDNode *, DIEEntry *>::iterator I = GVToDIEEntryMap.find(N);
- if (I == GVToDIEEntryMap.end())
+ DenseMap<const MDNode *, DIEEntry *>::iterator I = MDNodeToDIEEntryMap.find(N);
+ if (I == MDNodeToDIEEntryMap.end())
return NULL;
return I->second;
}
/// insertDIEEntry - Insert debug information entry into the map.
void insertDIEEntry(const MDNode *N, DIEEntry *E) {
- GVToDIEEntryMap.insert(std::make_pair(N, E));
+ MDNodeToDIEEntryMap.insert(std::make_pair(N, E));
}
/// addDie - Adds or interns the DIE to the compile unit.
@@ -321,12 +320,12 @@ DwarfDebug::DwarfDebug(AsmPrinter *A, Module *M)
DwarfFrameSectionSym = DwarfInfoSectionSym = DwarfAbbrevSectionSym = 0;
DwarfStrSectionSym = TextSectionSym = 0;
DwarfDebugRangeSectionSym = DwarfDebugLocSectionSym = 0;
+ DwarfDebugLineSectionSym = CurrentLineSectionSym = 0;
FunctionBeginSym = FunctionEndSym = 0;
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName);
- beginModule(M);
- } else {
- beginModule(M);
+ DIEIntegerOne = new (DIEValueAllocator) DIEInteger(1);
+ {
+ NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
+ beginModule(M);
}
}
DwarfDebug::~DwarfDebug() {
@@ -378,7 +377,8 @@ DIEEntry *DwarfDebug::createDIEEntry(DIE *Entry) {
void DwarfDebug::addUInt(DIE *Die, unsigned Attribute,
unsigned Form, uint64_t Integer) {
if (!Form) Form = DIEInteger::BestForm(false, Integer);
- DIEValue *Value = new (DIEValueAllocator) DIEInteger(Integer);
+ DIEValue *Value = Integer == 1 ?
+ DIEIntegerOne : new (DIEValueAllocator) DIEInteger(Integer);
Die->addValue(Attribute, Form, Value);
}
@@ -866,6 +866,10 @@ void DwarfDebug::addToContextOwner(DIE *Die, DIDescriptor Context) {
} else if (Context.isNameSpace()) {
DIE *ContextDIE = getOrCreateNameSpace(DINameSpace(Context));
ContextDIE->addChild(Die);
+ } else if (Context.isSubprogram()) {
+ DIE *ContextDIE = createSubprogramDIE(DISubprogram(Context),
+ /*MakeDecl=*/false);
+ ContextDIE->addChild(Die);
} else if (DIE *ContextDIE = getCompileUnit(Context)->getDIE(Context))
ContextDIE->addChild(Die);
else
@@ -1055,6 +1059,10 @@ void DwarfDebug::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
if (DIDescriptor(ContainingType).isCompositeType())
addDIEEntry(&Buffer, dwarf::DW_AT_containing_type, dwarf::DW_FORM_ref4,
getOrCreateTypeDIE(DIType(ContainingType)));
+ else {
+ DIDescriptor Context = CTy.getContext();
+ addToContextOwner(&Buffer, Context);
+ }
break;
}
default:
@@ -1065,8 +1073,9 @@ void DwarfDebug::constructTypeDIE(DIE &Buffer, DICompositeType CTy) {
if (!Name.empty())
addString(&Buffer, dwarf::DW_AT_name, dwarf::DW_FORM_string, Name);
- if (Tag == dwarf::DW_TAG_enumeration_type || Tag == dwarf::DW_TAG_class_type ||
- Tag == dwarf::DW_TAG_structure_type || Tag == dwarf::DW_TAG_union_type) {
+ if (Tag == dwarf::DW_TAG_enumeration_type || Tag == dwarf::DW_TAG_class_type
+ || Tag == dwarf::DW_TAG_structure_type || Tag == dwarf::DW_TAG_union_type)
+ {
// Add size if non-zero (derived types might be zero-sized.)
if (Size)
addUInt(&Buffer, dwarf::DW_AT_byte_size, 0, Size);
@@ -1329,6 +1338,9 @@ DIE *DwarfDebug::createSubprogramDIE(const DISubprogram &SP, bool MakeDecl) {
// DW_TAG_inlined_subroutine may refer to this DIE.
SPCU->insertDIE(SP, SPDie);
+ // Add to context owner.
+ addToContextOwner(SPDie, SP.getContext());
+
return SPDie;
}
@@ -1379,6 +1391,7 @@ static bool isSubprogramContext(const MDNode *Context) {
DIE *DwarfDebug::updateSubprogramScopeDIE(const MDNode *SPNode) {
CompileUnit *SPCU = getCompileUnit(SPNode);
DIE *SPDie = SPCU->getDIE(SPNode);
+
assert(SPDie && "Unable to find subprogram DIE!");
DISubprogram SP(SPNode);
@@ -1412,6 +1425,14 @@ DIE *DwarfDebug::updateSubprogramScopeDIE(const MDNode *SPNode) {
SPCU->addDie(SPDie);
}
+ // Pick up abstract subprogram DIE.
+ if (DIE *AbsSPDIE = AbstractSPDies.lookup(SPNode)) {
+ SPDie = new DIE(dwarf::DW_TAG_subprogram);
+ addDIEEntry(SPDie, dwarf::DW_AT_abstract_origin,
+ dwarf::DW_FORM_ref4, AbsSPDIE);
+ SPCU->addDie(SPDie);
+ }
+
addLabel(SPDie, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr,
Asm->GetTempSymbol("func_begin", Asm->getFunctionNumber()));
addLabel(SPDie, dwarf::DW_AT_high_pc, dwarf::DW_FORM_addr,
@@ -1483,7 +1504,7 @@ DIE *DwarfDebug::constructInlinedScopeDIE(DbgScope *Scope) {
const MCSymbol *StartLabel = getLabelBeforeInsn(RI->first);
const MCSymbol *EndLabel = getLabelAfterInsn(RI->second);
- if (StartLabel == FunctionBeginSym || EndLabel == 0) {
+ if (StartLabel == 0 || EndLabel == 0) {
assert (0 && "Unexpected Start and End labels for a inlined scope!");
return 0;
}
@@ -1605,11 +1626,13 @@ DIE *DwarfDebug::constructVariableDIE(DbgVariable *DV, DbgScope *Scope) {
// FIXME : Handle getNumOperands != 3
if (DVInsn->getNumOperands() == 3) {
if (DVInsn->getOperand(0).isReg())
- updated = addRegisterAddress(VariableDie, DVLabel, DVInsn->getOperand(0));
+ updated =
+ addRegisterAddress(VariableDie, DVLabel, DVInsn->getOperand(0));
else if (DVInsn->getOperand(0).isImm())
updated = addConstantValue(VariableDie, DVLabel, DVInsn->getOperand(0));
else if (DVInsn->getOperand(0).isFPImm())
- updated = addConstantFPValue(VariableDie, DVLabel, DVInsn->getOperand(0));
+ updated =
+ addConstantFPValue(VariableDie, DVLabel, DVInsn->getOperand(0));
} else {
MachineLocation Location = Asm->getDebugValueLocation(DVInsn);
if (Location.getReg()) {
@@ -1682,8 +1705,13 @@ DIE *DwarfDebug::constructScopeDIE(DbgScope *Scope) {
if (Scope->getInlinedAt())
ScopeDIE = constructInlinedScopeDIE(Scope);
else if (DS.isSubprogram()) {
- if (Scope->isAbstractScope())
+ ProcessedSPNodes.insert(DS);
+ if (Scope->isAbstractScope()) {
ScopeDIE = getCompileUnit(DS)->getDIE(DS);
+ // Note down abstract DIE.
+ if (ScopeDIE)
+ AbstractSPDies.insert(std::make_pair(DS, ScopeDIE));
+ }
else
ScopeDIE = updateSubprogramScopeDIE(DS);
}
@@ -1782,11 +1810,11 @@ void DwarfDebug::constructCompileUnit(const MDNode *N) {
addString(Die, dwarf::DW_AT_name, dwarf::DW_FORM_string, FN);
// Use DW_AT_entry_pc instead of DW_AT_low_pc/DW_AT_high_pc pair. This
// simplifies debug range entries.
- addUInt(Die, dwarf::DW_AT_entry_pc, dwarf::DW_FORM_data4, 0);
+ addUInt(Die, dwarf::DW_AT_entry_pc, dwarf::DW_FORM_addr, 0);
// DW_AT_stmt_list is an offset of line number information for this
- // compile unit in debug_line section. It is always zero when only one
- // compile unit is emitted in one object file.
- addUInt(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, 0);
+ // compile unit in debug_line section. This offset is calculated
+  // during endModule().
+ addLabel(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, 0);
if (!Dir.empty())
addString(Die, dwarf::DW_AT_comp_dir, dwarf::DW_FORM_string, Dir);
@@ -1996,6 +2024,40 @@ void DwarfDebug::beginModule(Module *M) {
///
void DwarfDebug::endModule() {
if (!FirstCU) return;
+ const Module *M = MMI->getModule();
+ if (NamedMDNode *AllSPs = M->getNamedMetadata("llvm.dbg.sp")) {
+ for (unsigned SI = 0, SE = AllSPs->getNumOperands(); SI != SE; ++SI) {
+ if (ProcessedSPNodes.count(AllSPs->getOperand(SI)) != 0) continue;
+ DISubprogram SP(AllSPs->getOperand(SI));
+ if (!SP.Verify()) continue;
+
+ // Collect info for variables that were optimized out.
+ StringRef FName = SP.getLinkageName();
+ if (FName.empty())
+ FName = SP.getName();
+ NamedMDNode *NMD =
+ M->getNamedMetadata(Twine("llvm.dbg.lv.", getRealLinkageName(FName)));
+ if (!NMD) continue;
+ unsigned E = NMD->getNumOperands();
+ if (!E) continue;
+ DbgScope *Scope = new DbgScope(NULL, DIDescriptor(SP), NULL);
+ for (unsigned I = 0; I != E; ++I) {
+ DIVariable DV(NMD->getOperand(I));
+ if (!DV.Verify()) continue;
+ Scope->addVariable(new DbgVariable(DV));
+ }
+
+ // Construct subprogram DIE and add variables DIEs.
+ constructSubprogramDIE(SP);
+ DIE *ScopeDIE = getCompileUnit(SP)->getDIE(SP);
+ const SmallVector<DbgVariable *, 8> &Variables = Scope->getVariables();
+ for (unsigned i = 0, N = Variables.size(); i < N; ++i) {
+ DIE *VariableDIE = constructVariableDIE(Variables[i], Scope);
+ if (VariableDIE)
+ ScopeDIE->addChild(VariableDIE);
+ }
+ }
+ }
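The block above gives DIEs (with their variables) to subprograms that never reached codegen, pulling variable lists from per-function named metadata; collectVariableInfo below switches to the same keying. The lookup scheme, mirroring the calls in this hunk:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/Twine.h"
    #include "llvm/Module.h"
    using namespace llvm;

    // Variable lists now live under "llvm.dbg.lv.<linkage name>" instead of
    // a single flat "llvm.dbg.lv" list.
    NamedMDNode *lookupFnVariables(Module &M, StringRef LinkageName) {
      return M.getNamedMetadata(Twine("llvm.dbg.lv.", LinkageName));
    }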
// Attach DW_AT_inline attribute with inlined subprogram DIEs.
for (SmallPtrSet<DIE *, 4>::iterator AI = InlinedSubprogramDIEs.begin(),
@@ -2037,15 +2099,15 @@ void DwarfDebug::endModule() {
// Compute DIE offsets and sizes.
computeSizeAndOffsets();
+ // Emit source line correspondence into a debug line section.
+ emitDebugLines();
+
// Emit all the DIEs into a debug info section
emitDebugInfo();
// Corresponding abbreviations into a abbrev section.
emitAbbreviations();
- // Emit source line correspondence into a debug line section.
- emitDebugLines();
-
// Emit info into a debug pubnames section.
emitDebugPubNames();
@@ -2150,8 +2212,9 @@ static bool isDbgValueInDefinedReg(const MachineInstr *MI) {
}
/// collectVariableInfo - Populate DbgScope entries with variables' info.
-void DwarfDebug::collectVariableInfo(const MachineFunction *MF) {
- SmallPtrSet<const MDNode *, 16> Processed;
+void
+DwarfDebug::collectVariableInfo(const MachineFunction *MF,
+ SmallPtrSet<const MDNode *, 16> &Processed) {
/// collection info from MMI table.
collectVariableInfoFromMMITable(MF, Processed);
@@ -2180,16 +2243,23 @@ void DwarfDebug::collectVariableInfo(const MachineFunction *MF) {
if (Processed.count(DV) != 0)
continue;
+ const MachineInstr *PrevMI = MInsn;
for (SmallVector<const MachineInstr *, 8>::iterator MI = I+1,
ME = DbgValues.end(); MI != ME; ++MI) {
const MDNode *Var =
(*MI)->getOperand((*MI)->getNumOperands()-1).getMetadata();
- if (Var == DV && isDbgValueInDefinedReg(*MI))
+ if (Var == DV && isDbgValueInDefinedReg(*MI) &&
+ !PrevMI->isIdenticalTo(*MI))
MultipleValues.push_back(*MI);
+ PrevMI = *MI;
}
DbgScope *Scope = findDbgScope(MInsn);
- if (!Scope && DV.getTag() == dwarf::DW_TAG_arg_variable)
+ bool CurFnArg = false;
+ if (DV.getTag() == dwarf::DW_TAG_arg_variable &&
+ DISubprogram(DV.getContext()).describes(MF->getFunction()))
+ CurFnArg = true;
+ if (!Scope && CurFnArg)
Scope = CurrentFnDbgScope;
// If variable scope is not found then skip this variable.
if (!Scope)
@@ -2198,7 +2268,7 @@ void DwarfDebug::collectVariableInfo(const MachineFunction *MF) {
Processed.insert(DV);
DbgVariable *RegVar = new DbgVariable(DV);
Scope->addVariable(RegVar);
- if (DV.getTag() != dwarf::DW_TAG_arg_variable)
+ if (!CurFnArg)
DbgVariableLabelsMap[RegVar] = getLabelBeforeInsn(MInsn);
if (DbgVariable *AbsVar = findAbstractVariable(DV, MInsn->getDebugLoc())) {
DbgVariableToDbgInstMap[AbsVar] = MInsn;
@@ -2217,7 +2287,8 @@ void DwarfDebug::collectVariableInfo(const MachineFunction *MF) {
const MachineInstr *Begin = NULL;
const MachineInstr *End = NULL;
for (SmallVector<const MachineInstr *, 4>::iterator
- MVI = MultipleValues.begin(), MVE = MultipleValues.end(); MVI != MVE; ++MVI) {
+ MVI = MultipleValues.begin(), MVE = MultipleValues.end();
+ MVI != MVE; ++MVI) {
if (!Begin) {
Begin = *MVI;
continue;
@@ -2241,8 +2312,11 @@ void DwarfDebug::collectVariableInfo(const MachineFunction *MF) {
}
// Collect info for variables that were optimized out.
+ const Function *F = MF->getFunction();
+ const Module *M = F->getParent();
if (NamedMDNode *NMD =
- MF->getFunction()->getParent()->getNamedMetadata("llvm.dbg.lv")) {
+ M->getNamedMetadata(Twine("llvm.dbg.lv.",
+ getRealLinkageName(F->getName())))) {
for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
DIVariable DV(cast_or_null<MDNode>(NMD->getOperand(i)));
if (!DV || !Processed.insert(DV))
@@ -2319,7 +2393,8 @@ void DwarfDebug::endScope(const MachineInstr *MI) {
}
/// getOrCreateDbgScope - Create DbgScope for the scope.
-DbgScope *DwarfDebug::getOrCreateDbgScope(const MDNode *Scope, const MDNode *InlinedAt) {
+DbgScope *DwarfDebug::getOrCreateDbgScope(const MDNode *Scope,
+ const MDNode *InlinedAt) {
if (!InlinedAt) {
DbgScope *WScope = DbgScopeMap.lookup(Scope);
if (WScope)
@@ -2335,13 +2410,20 @@ DbgScope *DwarfDebug::getOrCreateDbgScope(const MDNode *Scope, const MDNode *Inl
if (!WScope->getParent()) {
StringRef SPName = DISubprogram(Scope).getLinkageName();
- if (SPName == Asm->MF->getFunction()->getName())
+ // We used to check only for a linkage name, but that fails
+ // since we began omitting the linkage name for private
+ // functions. The new way is to check for the name in metadata,
+ // but that's not supported in old .ll test cases. Ergo, we
+ // check both.
+ if (SPName == Asm->MF->getFunction()->getName() ||
+ DISubprogram(Scope).getFunction() == Asm->MF->getFunction())
CurrentFnDbgScope = WScope;
}
return WScope;
}
+ getOrCreateAbstractScope(Scope);
DbgScope *WScope = DbgScopeMap.lookup(InlinedAt);
if (WScope)
return WScope;
@@ -2355,7 +2437,6 @@ DbgScope *DwarfDebug::getOrCreateDbgScope(const MDNode *Scope, const MDNode *Inl
Parent->addScope(WScope);
ConcreteScopes[InlinedAt] = WScope;
- getOrCreateAbstractScope(Scope);
return WScope;
}
@@ -2365,8 +2446,6 @@ DbgScope *DwarfDebug::getOrCreateDbgScope(const MDNode *Scope, const MDNode *Inl
static bool hasValidLocation(LLVMContext &Ctx,
const MachineInstr *MInsn,
const MDNode *&Scope, const MDNode *&InlinedAt) {
- if (MInsn->isDebugValue())
- return false;
DebugLoc DL = MInsn->getDebugLoc();
if (DL.isUnknown()) return false;
@@ -2488,7 +2567,8 @@ bool DwarfDebug::extractScopeInformation() {
// current instruction scope does not match scope of first instruction
// in this range then create a new instruction range.
DbgRange R(RangeBeginMI, PrevMI);
- MI2ScopeMap[RangeBeginMI] = getOrCreateDbgScope(PrevScope, PrevInlinedAt);
+ MI2ScopeMap[RangeBeginMI] = getOrCreateDbgScope(PrevScope,
+ PrevInlinedAt);
MIRanges.push_back(R);
}
@@ -2565,7 +2645,6 @@ void DwarfDebug::identifyScopeMarkers() {
RE = Ranges.end(); RI != RE; ++RI) {
assert(RI->first && "DbgRange does not have first instruction!");
assert(RI->second && "DbgRange does not have second instruction!");
- InsnsBeginScopeSet.insert(RI->first);
InsnsEndScopeSet.insert(RI->second);
}
}
@@ -2616,6 +2695,9 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
recordSourceLine(Line, Col, Scope);
+ /// ProcessedArgs - Collection of arguments already processed.
+ SmallPtrSet<const MDNode *, 8> ProcessedArgs;
+
DebugLoc PrevLoc;
for (MachineFunction::const_iterator I = MF->begin(), E = MF->end();
I != E; ++I)
@@ -2624,14 +2706,19 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) {
const MachineInstr *MI = II;
DebugLoc DL = MI->getDebugLoc();
if (MI->isDebugValue()) {
- // DBG_VALUE needs a label if the variable is local variable or
- // an argument whose location is changing.
assert (MI->getNumOperands() > 1 && "Invalid machine instruction!");
DIVariable DV(MI->getOperand(MI->getNumOperands() - 1).getMetadata());
if (!DV.Verify()) continue;
- if (DV.getTag() != dwarf::DW_TAG_arg_variable)
+ // If DBG_VALUE is for a local variable then it needs a label.
+ if (DV.getTag() != dwarf::DW_TAG_arg_variable
+ && isDbgValueInUndefinedReg(MI) == false)
InsnNeedsLabel.insert(MI);
- else if (!ProcessedArgs.insert(DV))
+ // DBG_VALUE for an inlined function's argument needs a label.
+ else if (!DISubprogram(getDISubprogram(DV.getContext())).
+ describes(MF->getFunction()))
+ InsnNeedsLabel.insert(MI);
+ // DBG_VALUE indicating an argument location change needs a label.
+ else if (isDbgValueInUndefinedReg(MI) == false && !ProcessedArgs.insert(DV))
InsnNeedsLabel.insert(MI);
} else {
// If location is unknown then instruction needs a location only if
@@ -2664,7 +2751,8 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
// Assumes in correct section after the entry point.
Asm->OutStreamer.EmitLabel(FunctionEndSym);
- collectVariableInfo(MF);
+ SmallPtrSet<const MDNode *, 16> ProcessedVars;
+ collectVariableInfo(MF, ProcessedVars);
// Get function line info.
if (!Lines.empty()) {
@@ -2679,9 +2767,31 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
// Construct abstract scopes.
for (SmallVector<DbgScope *, 4>::iterator AI = AbstractScopesList.begin(),
- AE = AbstractScopesList.end(); AI != AE; ++AI)
- constructScopeDIE(*AI);
-
+ AE = AbstractScopesList.end(); AI != AE; ++AI) {
+ DISubprogram SP((*AI)->getScopeNode());
+ if (SP.Verify()) {
+ // Collect info for variables that were optimized out.
+ StringRef FName = SP.getLinkageName();
+ if (FName.empty())
+ FName = SP.getName();
+ const Module *M = MF->getFunction()->getParent();
+ if (NamedMDNode *NMD =
+ M->getNamedMetadata(Twine("llvm.dbg.lv.",
+ getRealLinkageName(FName)))) {
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
+ DIVariable DV(cast_or_null<MDNode>(NMD->getOperand(i)));
+ if (!DV || !ProcessedVars.insert(DV))
+ continue;
+ DbgScope *Scope = AbstractScopes.lookup(DV.getContext());
+ if (Scope)
+ Scope->addVariable(new DbgVariable(DV));
+ }
+ }
+ }
+ if (ProcessedSPNodes.count((*AI)->getScopeNode()) == 0)
+ constructScopeDIE(*AI);
+ }
+
DIE *CurFnDIE = constructScopeDIE(CurrentFnDbgScope);
if (!DisableFramePointerElim(*MF))
@@ -2696,13 +2806,11 @@ void DwarfDebug::endFunction(const MachineFunction *MF) {
// Clear debug info
CurrentFnDbgScope = NULL;
InsnNeedsLabel.clear();
- ProcessedArgs.clear();
DbgVariableToFrameIndexMap.clear();
VarToAbstractVarMap.clear();
DbgVariableToDbgInstMap.clear();
DbgVariableLabelsMap.clear();
DeleteContainerSeconds(DbgScopeMap);
- InsnsBeginScopeSet.clear();
InsnsEndScopeSet.clear();
ConcreteScopes.clear();
DeleteContainerSeconds(AbstractScopes);
@@ -2764,7 +2872,8 @@ DbgScope *DwarfDebug::findDbgScope(const MachineInstr *MInsn) {
/// recordSourceLine - Register a source line with debug info. Returns the
/// unique label that was emitted and which provides correspondence to
/// the source line list.
-MCSymbol *DwarfDebug::recordSourceLine(unsigned Line, unsigned Col, const MDNode *S) {
+MCSymbol *DwarfDebug::recordSourceLine(unsigned Line, unsigned Col,
+ const MDNode *S) {
StringRef Dir;
StringRef Fn;
@@ -2790,6 +2899,16 @@ MCSymbol *DwarfDebug::recordSourceLine(unsigned Line, unsigned Col, const MDNode
Src = GetOrCreateSourceID(Dir, Fn);
}
+#if 0
+ if (!Lines.empty()) {
+ SrcLineInfo lastSrcLineInfo = Lines.back();
+ // Emitting sequential line records with the same line number (but
+ // different addresses) seems to confuse GDB. Avoid this.
+ if (lastSrcLineInfo.getLine() == Line)
+ return NULL;
+ }
+#endif
+
MCSymbol *Label = MMI->getContext().CreateTempSymbol();
Lines.push_back(SrcLineInfo(Line, Col, Src, Label));
@@ -2898,7 +3017,8 @@ void DwarfDebug::EmitSectionLabels() {
if (const MCSection *MacroInfo = TLOF.getDwarfMacroInfoSection())
EmitSectionSym(Asm, MacroInfo);
- EmitSectionSym(Asm, TLOF.getDwarfLineSection());
+ DwarfDebugLineSectionSym =
+ EmitSectionSym(Asm, TLOF.getDwarfLineSection(), "section_line");
EmitSectionSym(Asm, TLOF.getDwarfLocSection());
EmitSectionSym(Asm, TLOF.getDwarfPubNamesSection());
EmitSectionSym(Asm, TLOF.getDwarfPubTypesSection());
@@ -2961,6 +3081,11 @@ void DwarfDebug::emitDIE(DIE *Die) {
4);
break;
}
+ case dwarf::DW_AT_stmt_list: {
+ Asm->EmitLabelDifference(CurrentLineSectionSym,
+ DwarfDebugLineSectionSym, 4);
+ break;
+ }
case dwarf::DW_AT_location: {
if (UseDotDebugLocEntry.count(Die) != 0) {
DIELabel *L = cast<DIELabel>(Values[i]);
@@ -3106,6 +3231,8 @@ void DwarfDebug::emitDebugLines() {
Asm->getObjFileLowering().getDwarfLineSection());
// Construct the section header.
+ CurrentLineSectionSym = Asm->GetTempSymbol("section_line_begin");
+ Asm->OutStreamer.EmitLabel(CurrentLineSectionSym);
Asm->OutStreamer.AddComment("Length of Source Line Info");
Asm->EmitLabelDifference(Asm->GetTempSymbol("line_end"),
Asm->GetTempSymbol("line_begin"), 4);
@@ -3491,8 +3618,9 @@ void DwarfDebug::emitDebugLoc() {
unsigned char Size = Asm->getTargetData().getPointerSize();
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", 0));
unsigned index = 1;
- for (SmallVector<DotDebugLocEntry, 4>::iterator I = DotDebugLocEntries.begin(),
- E = DotDebugLocEntries.end(); I != E; ++I, ++index) {
+ for (SmallVector<DotDebugLocEntry, 4>::iterator
+ I = DotDebugLocEntries.begin(), E = DotDebugLocEntries.end();
+ I != E; ++I, ++index) {
DotDebugLocEntry Entry = *I;
if (Entry.isEmpty()) {
Asm->OutStreamer.EmitIntValue(0, Size, /*addrspace*/0);
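
Two additions in this file cooperate: emitDebugLines() now drops CurrentLineSectionSym ("section_line_begin") at this unit's line table, EmitSectionLabels() keeps DwarfDebugLineSectionSym ("section_line") at the start of .debug_line, and emitDIE() writes DW_AT_stmt_list as their difference, i.e. a section-relative offset:

    // 4-byte (DWARF-32) offset of this CU's line table within .debug_line.
    Asm->EmitLabelDifference(CurrentLineSectionSym,    // unit's table begins
                             DwarfDebugLineSectionSym, // section begins
                             4);
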
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
index 0d6116f..5a281c8 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -156,6 +156,9 @@ class DwarfDebug {
/// not included in DbgScopeMap. AbstractScopes owns its DbgScope*s.
DenseMap<const MDNode *, DbgScope *> AbstractScopes;
+ /// AbstractSPDies - Collection of abstract subprogram DIEs.
+ DenseMap<const MDNode *, DIE *> AbstractSPDies;
+
/// AbstractScopesList - Tracks abstract scopes constructed while processing
/// a function. This list is cleared during endFunction().
SmallVector<DbgScope *, 4>AbstractScopesList;
@@ -210,7 +213,7 @@ class DwarfDebug {
DenseMap<DIE *, const MDNode *> ContainingTypeMap;
typedef SmallVector<DbgScope *, 2> ScopeVector;
- SmallPtrSet<const MachineInstr *, 8> InsnsBeginScopeSet;
+
SmallPtrSet<const MachineInstr *, 8> InsnsEndScopeSet;
/// InlineInfo - Keep track of inlined functions and their location. This
@@ -219,6 +222,10 @@ class DwarfDebug {
DenseMap<const MDNode *, SmallVector<InlineInfoLabels, 4> > InlineInfo;
SmallVector<const MDNode *, 4> InlinedSPNodes;
+ // ProcessedSPNodes - This is a collection of subprogram MDNodes that
+ // are processed to create DIEs.
+ SmallPtrSet<const MDNode *, 16> ProcessedSPNodes;
+
/// LabelsBeforeInsn - Maps an instruction to the label emitted before
/// that instruction.
DenseMap<const MachineInstr *, MCSymbol *> LabelsBeforeInsn;
@@ -231,9 +238,6 @@ class DwarfDebug {
/// a debugging information entity.
SmallPtrSet<const MachineInstr *, 8> InsnNeedsLabel;
- /// ProcessedArgs - Collection of arguments already processed.
- SmallPtrSet<const MDNode *, 8> ProcessedArgs;
-
SmallVector<const MCSymbol *, 8> DebugRangeSymbols;
/// Previous instruction's location information. This is used to determine
@@ -257,7 +261,10 @@ class DwarfDebug {
MCSymbol *DwarfFrameSectionSym, *DwarfInfoSectionSym, *DwarfAbbrevSectionSym;
MCSymbol *DwarfStrSectionSym, *TextSectionSym, *DwarfDebugRangeSectionSym;
MCSymbol *DwarfDebugLocSectionSym;
+ MCSymbol *DwarfDebugLineSectionSym, *CurrentLineSectionSym;
MCSymbol *FunctionBeginSym, *FunctionEndSym;
+
+ DIEInteger *DIEIntegerOne;
private:
/// getSourceDirectoryAndFileIds - Return the directory and file ids that
@@ -593,7 +600,8 @@ private:
bool extractScopeInformation();
/// collectVariableInfo - Populate DbgScope entries with variables' info.
- void collectVariableInfo(const MachineFunction *);
+ void collectVariableInfo(const MachineFunction *,
+ SmallPtrSet<const MDNode *, 16> &ProcessedVars);
/// collectVariableInfoFromMMITable - Collect variable information from
/// side table maintained by MMI.
diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
index f92127f..c8a63cf 100644
--- a/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
@@ -52,13 +52,13 @@ static void EmitCamlGlobal(const Module &M, AsmPrinter &AP, const char *Id) {
SymName.append(MId.begin(), std::find(MId.begin(), MId.end(), '.'));
SymName += "__";
SymName += Id;
-
+
// Capitalize the first letter of the module name.
SymName[Letter] = toupper(SymName[Letter]);
-
+
SmallString<128> TmpStr;
AP.Mang->getNameWithPrefix(TmpStr, SymName);
-
+
MCSymbol *Sym = AP.OutContext.GetOrCreateSymbol(TmpStr);
AP.OutStreamer.EmitSymbolAttribute(Sym, MCSA_Global);
diff --git a/contrib/llvm/lib/CodeGen/BranchFolding.cpp b/contrib/llvm/lib/CodeGen/BranchFolding.cpp
index 9dec22e..7f98df0 100644
--- a/contrib/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/contrib/llvm/lib/CodeGen/BranchFolding.cpp
@@ -358,23 +358,10 @@ static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1,
}
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
-/// after it, replacing it with an unconditional branch to NewDest. This
-/// returns true if OldInst's block is modified, false if NewDest is modified.
+/// after it, replacing it with an unconditional branch to NewDest.
void BranchFolder::ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
MachineBasicBlock *NewDest) {
- MachineBasicBlock *OldBB = OldInst->getParent();
-
- // Remove all the old successors of OldBB from the CFG.
- while (!OldBB->succ_empty())
- OldBB->removeSuccessor(OldBB->succ_begin());
-
- // Remove all the dead instructions from the end of OldBB.
- OldBB->erase(OldInst, OldBB->end());
-
- // If OldBB isn't immediately before OldBB, insert a branch to it.
- if (++MachineFunction::iterator(OldBB) != MachineFunction::iterator(NewDest))
- TII->InsertBranch(*OldBB, NewDest, 0, SmallVector<MachineOperand, 0>());
- OldBB->addSuccessor(NewDest);
+ TII->ReplaceTailWithBranchTo(OldInst, NewDest);
++NumTailMerge;
}
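
The body deleted here does not vanish: the work now routes through a TargetInstrInfo hook, so a target-independent default presumably carries the same logic. A sketch reconstructed from the removed lines, with the DebugLoc use following the InsertBranch signature change elsewhere in this patch:

    void TargetInstrInfoImpl::ReplaceTailWithBranchTo(
        MachineBasicBlock::iterator Tail, MachineBasicBlock *NewDest) const {
      MachineBasicBlock *MBB = Tail->getParent();
      DebugLoc DL = Tail->getDebugLoc(); // capture before erasing Tail
      // Remove all the old successors of MBB from the CFG.
      while (!MBB->succ_empty())
        MBB->removeSuccessor(MBB->succ_begin());
      // Remove all the dead instructions from the end of MBB.
      MBB->erase(Tail, MBB->end());
      // If NewDest is not the layout fall-through, branch to it explicitly.
      if (++MachineFunction::iterator(MBB) !=
          MachineFunction::iterator(NewDest))
        InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(), DL);
      MBB->addSuccessor(NewDest);
    }
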
@@ -383,6 +370,9 @@ void BranchFolder::ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
/// iterator. This returns the new MBB.
MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB,
MachineBasicBlock::iterator BBI1) {
+ if (!TII->isLegalToSplitMBBAt(CurMBB, BBI1))
+ return 0;
+
MachineFunction &MF = *CurMBB.getParent();
// Create the fall-through block.
@@ -443,18 +433,20 @@ static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
MachineFunction::iterator I = llvm::next(MachineFunction::iterator(CurMBB));
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 4> Cond;
+ DebugLoc dl; // FIXME: this is nowhere
if (I != MF->end() &&
!TII->AnalyzeBranch(*CurMBB, TBB, FBB, Cond, true)) {
MachineBasicBlock *NextBB = I;
if (TBB == NextBB && !Cond.empty() && !FBB) {
if (!TII->ReverseBranchCondition(Cond)) {
TII->RemoveBranch(*CurMBB);
- TII->InsertBranch(*CurMBB, SuccBB, NULL, Cond);
+ TII->InsertBranch(*CurMBB, SuccBB, NULL, Cond, dl);
return;
}
}
}
- TII->InsertBranch(*CurMBB, SuccBB, NULL, SmallVector<MachineOperand, 0>());
+ TII->InsertBranch(*CurMBB, SuccBB, NULL,
+ SmallVector<MachineOperand, 0>(), dl);
}
bool
@@ -625,9 +617,10 @@ void BranchFolder::RemoveBlocksWithHash(unsigned CurHash,
/// CreateCommonTailOnlyBlock - None of the blocks to be tail-merged consist
/// only of the common tail. Create a block that does by splitting one.
-unsigned BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
- unsigned maxCommonTailLength) {
- unsigned commonTailIndex = 0;
+bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
+ unsigned maxCommonTailLength,
+ unsigned &commonTailIndex) {
+ commonTailIndex = 0;
unsigned TimeEstimate = ~0U;
for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
// Use PredBB if possible; that doesn't require a new branch.
@@ -655,6 +648,11 @@ unsigned BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
<< maxCommonTailLength);
MachineBasicBlock *newMBB = SplitMBBAt(*MBB, BBI);
+ if (!newMBB) {
+ DEBUG(dbgs() << "... failed!");
+ return false;
+ }
+
SameTails[commonTailIndex].setBlock(newMBB);
SameTails[commonTailIndex].setTailStartPos(newMBB->begin());
@@ -662,7 +660,7 @@ unsigned BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
if (PredBB == MBB)
PredBB = newMBB;
- return commonTailIndex;
+ return true;
}
// See if any of the blocks in MergePotentials (which all have a common single
@@ -757,7 +755,11 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
!SameTails[commonTailIndex].tailIsWholeBlock())) {
// None of the blocks consist entirely of the common tail.
// Split a block so that one does.
- commonTailIndex = CreateCommonTailOnlyBlock(PredBB, maxCommonTailLength);
+ if (!CreateCommonTailOnlyBlock(PredBB,
+ maxCommonTailLength, commonTailIndex)) {
+ RemoveBlocksWithHash(CurHash, SuccBB, PredBB);
+ continue;
+ }
}
MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();
@@ -874,10 +876,11 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
}
// Remove the unconditional branch at the end, if any.
if (TBB && (Cond.empty() || FBB)) {
+ DebugLoc dl; // FIXME: this is nowhere
TII->RemoveBranch(*PBB);
if (!Cond.empty())
// reinsert conditional branch only, for now
- TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, 0, NewCond);
+ TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, 0, NewCond, dl);
}
MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(PBB), *P));
}
@@ -976,6 +979,7 @@ static bool IsBetterFallthrough(MachineBasicBlock *MBB1,
bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) {
bool MadeChange = false;
MachineFunction &MF = *MBB->getParent();
+ DebugLoc dl; // FIXME: this is nowhere
ReoptimizeBlock:
MachineFunction::iterator FallThrough = MBB;
@@ -1027,7 +1031,7 @@ ReoptimizeBlock:
TII->RemoveBranch(PrevBB);
PriorCond.clear();
if (PriorTBB != MBB)
- TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond);
+ TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1066,7 +1070,7 @@ ReoptimizeBlock:
// the condition is false, remove the uncond second branch.
if (PriorFBB == MBB) {
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond);
+ TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1079,7 +1083,7 @@ ReoptimizeBlock:
SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
if (!TII->ReverseBranchCondition(NewPriorCond)) {
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, PriorFBB, 0, NewPriorCond);
+ TII->InsertBranch(PrevBB, PriorFBB, 0, NewPriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1116,7 +1120,7 @@ ReoptimizeBlock:
<< "To make fallthrough to: " << *PriorTBB << "\n");
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, MBB, 0, NewPriorCond);
+ TII->InsertBranch(PrevBB, MBB, 0, NewPriorCond, dl);
// Move this block to the end of the function.
MBB->moveAfter(--MF.end());
@@ -1145,7 +1149,7 @@ ReoptimizeBlock:
SmallVector<MachineOperand, 4> NewCond(CurCond);
if (!TII->ReverseBranchCondition(NewCond)) {
TII->RemoveBranch(*MBB);
- TII->InsertBranch(*MBB, CurFBB, CurTBB, NewCond);
+ TII->InsertBranch(*MBB, CurFBB, CurTBB, NewCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1200,7 +1204,7 @@ ReoptimizeBlock:
PriorFBB = MBB;
}
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond);
+ TII->InsertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, dl);
}
// Iterate through all the predecessors, revectoring each in-turn.
@@ -1226,7 +1230,7 @@ ReoptimizeBlock:
if (!NewCurUnAnalyzable && NewCurTBB && NewCurTBB == NewCurFBB) {
TII->RemoveBranch(*PMBB);
NewCurCond.clear();
- TII->InsertBranch(*PMBB, NewCurTBB, 0, NewCurCond);
+ TII->InsertBranch(*PMBB, NewCurTBB, 0, NewCurCond, dl);
MadeChange = true;
++NumBranchOpts;
PMBB->CorrectExtraCFGEdges(NewCurTBB, 0, false);
@@ -1246,7 +1250,7 @@ ReoptimizeBlock:
}
// Add the branch back if the block is more than just an uncond branch.
- TII->InsertBranch(*MBB, CurTBB, 0, CurCond);
+ TII->InsertBranch(*MBB, CurTBB, 0, CurCond, dl);
}
}
@@ -1286,7 +1290,7 @@ ReoptimizeBlock:
if (CurFallsThru) {
MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
CurCond.clear();
- TII->InsertBranch(*MBB, NextBB, 0, CurCond);
+ TII->InsertBranch(*MBB, NextBB, 0, CurCond, dl);
}
MBB->moveAfter(PredBB);
MadeChange = true;
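
Every InsertBranch call in this file now threads a DebugLoc through; the repeated "FIXME: this is nowhere" notes that callers only have a default-constructed (unknown) location to pass. The hook's declaration accordingly grows a trailing parameter, roughly (inferred from the call sites, not quoted from a header):

    virtual unsigned InsertBranch(MachineBasicBlock &MBB,
                                  MachineBasicBlock *TBB,
                                  MachineBasicBlock *FBB,
                                  const SmallVectorImpl<MachineOperand> &Cond,
                                  DebugLoc DL) const;
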
diff --git a/contrib/llvm/lib/CodeGen/BranchFolding.h b/contrib/llvm/lib/CodeGen/BranchFolding.h
index b087395..15dfa7f 100644
--- a/contrib/llvm/lib/CodeGen/BranchFolding.h
+++ b/contrib/llvm/lib/CodeGen/BranchFolding.h
@@ -102,8 +102,9 @@ namespace llvm {
MachineBasicBlock *PredBB);
void RemoveBlocksWithHash(unsigned CurHash, MachineBasicBlock* SuccBB,
MachineBasicBlock* PredBB);
- unsigned CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
- unsigned maxCommonTailLength);
+ bool CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
+ unsigned maxCommonTailLength,
+ unsigned &commonTailIndex);
bool OptimizeBranches(MachineFunction &MF);
bool OptimizeBlock(MachineBasicBlock *MBB);
diff --git a/contrib/llvm/lib/CodeGen/CMakeLists.txt b/contrib/llvm/lib/CodeGen/CMakeLists.txt
index 3e38872..ffeff1e 100644
--- a/contrib/llvm/lib/CodeGen/CMakeLists.txt
+++ b/contrib/llvm/lib/CodeGen/CMakeLists.txt
@@ -1,19 +1,20 @@
add_llvm_library(LLVMCodeGen
- Analysis.cpp
AggressiveAntiDepBreaker.cpp
+ Analysis.cpp
BranchFolding.cpp
CalcSpillWeights.cpp
+ CallingConvLower.cpp
CodePlacementOpt.cpp
CriticalAntiDepBreaker.cpp
DeadMachineInstructionElim.cpp
DwarfEHPrepare.cpp
ELFCodeEmitter.cpp
ELFWriter.cpp
- ExactHazardRecognizer.cpp
GCMetadata.cpp
GCMetadataPrinter.cpp
GCStrategy.cpp
IfConversion.cpp
+ InlineSpiller.cpp
IntrinsicLowering.cpp
LLVMTargetMachine.cpp
LatencyPriorityQueue.cpp
@@ -45,6 +46,7 @@ add_llvm_library(LLVMCodeGen
OptimizePHIs.cpp
PHIElimination.cpp
Passes.cpp
+ PostRAHazardRecognizer.cpp
PostRASchedulerList.cpp
PreAllocSplitting.cpp
ProcessImplicitDefs.cpp
@@ -52,7 +54,6 @@ add_llvm_library(LLVMCodeGen
PseudoSourceValue.cpp
RegAllocFast.cpp
RegAllocLinearScan.cpp
- RegAllocLocal.cpp
RegAllocPBQP.cpp
RegisterCoalescer.cpp
RegisterScavenging.cpp
diff --git a/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp b/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
index a328d0e..240a7b9 100644
--- a/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
+++ b/contrib/llvm/lib/CodeGen/CalcSpillWeights.cpp
@@ -116,7 +116,7 @@ bool CalculateSpillWeights::runOnMachineFunction(MachineFunction &fn) {
SmallVector<LiveInterval*, 4> spillIs;
if (lis->isReMaterializable(li, spillIs, isLoad)) {
// If all of the definitions of the interval are re-materializable,
- // it is a preferred candidate for spilling. If non of the defs are
+ // it is a preferred candidate for spilling. If none of the defs are
// loads, then it's potentially very cheap to re-materialize.
// FIXME: this gets much more complicated once we support non-trivial
// re-materialization.
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp b/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
index 4e6c1fc..62ad817 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/CallingConvLower.cpp
+++ b/contrib/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -80,13 +80,12 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
/// CheckReturn - Analyze the return values of a function, returning true if
/// the return can be performed without sret-demotion, and false otherwise.
-bool CCState::CheckReturn(const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
+bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
// Determine which register each value should be copied into.
- for (unsigned i = 0, e = OutTys.size(); i != e; ++i) {
- EVT VT = OutTys[i];
- ISD::ArgFlagsTy ArgFlags = ArgsFlags[i];
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ EVT VT = Outs[i].VT;
+ ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
return false;
}
@@ -99,7 +98,7 @@ void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
// Determine which register each value should be copied into.
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- EVT VT = Outs[i].Val.getValueType();
+ EVT VT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
@@ -111,14 +110,13 @@ void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
}
}
-
/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
unsigned NumOps = Outs.size();
for (unsigned i = 0; i != NumOps; ++i) {
- EVT ArgVT = Outs[i].Val.getValueType();
+ EVT ArgVT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
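
CheckReturn previously took parallel arrays of value types and flags; it now consumes the ISD::OutputArg list directly, and the .Val.getValueType() to .VT edits show OutputArg now carries a cached EVT instead of an SDValue. A caller sketch, with the CCState constructor arguments and the RetCC_MyTarget assignment function assumed for illustration:

    SmallVector<ISD::OutputArg, 4> Outs;  // populated from the return values
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CallConv, isVarArg, TM, RVLocs, Context);
    // True when every return value fits the convention without sret demotion.
    bool FitsInRegs = CCInfo.CheckReturn(Outs, RetCC_MyTarget);
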
diff --git a/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp b/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp
index 3ff2a04..e0e315c 100644
--- a/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp
+++ b/contrib/llvm/lib/CodeGen/CodePlacementOpt.cpp
@@ -178,6 +178,8 @@ bool CodePlacementOpt::EliminateUnconditionalJumpsToTop(MachineFunction &MF,
continue;
// Move the block.
+ DEBUG(dbgs() << "CGP: Moving blocks starting at BB#" << Pred->getNumber()
+ << " to top of loop.\n");
Changed = true;
// Move it and all the blocks that can reach it via fallthrough edges
@@ -297,6 +299,8 @@ bool CodePlacementOpt::MoveDiscontiguousLoopBlocks(MachineFunction &MF,
continue;
// Move the block.
+ DEBUG(dbgs() << "CGP: Moving blocks starting at BB#" << BB->getNumber()
+ << " to be contiguous with loop.\n");
Changed = true;
// Process this block and all loop blocks contiguous with it, to keep
diff --git a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
index fd957b1..e3746a9 100644
--- a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -18,6 +18,7 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -29,6 +30,7 @@ CriticalAntiDepBreaker::
CriticalAntiDepBreaker(MachineFunction& MFi) :
AntiDepBreaker(), MF(MFi),
MRI(MF.getRegInfo()),
+ TII(MF.getTarget().getInstrInfo()),
TRI(MF.getTarget().getRegisterInfo()),
AllocatableSet(TRI->getAllocatableSet(MF))
{
@@ -71,25 +73,27 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
DefIndices[AliasReg] = ~0u;
}
}
- } else {
- // In a non-return block, examine the live-in regs of all successors.
- for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+ }
+
+ // In a non-return block, examine the live-in regs of all successors.
+ // Note that a return block can have successors if the return instruction
+ // is predicated.
+ for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI)
- for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
+ for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = ~0u;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = ~0u;
- }
+ unsigned Reg = *I;
+ Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ KillIndices[Reg] = BB->size();
+ DefIndices[Reg] = ~0u;
+ // Repeat, for all aliases.
+ for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ unsigned AliasReg = *Alias;
+ Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ KillIndices[AliasReg] = BB->size();
+ DefIndices[AliasReg] = ~0u;
}
- }
+ }
// Mark live-out callee-saved registers. In a return block this is
// all callee-saved registers. In non-return this is any
@@ -164,6 +168,26 @@ static const SDep *CriticalPathStep(const SUnit *SU) {
}
void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
+ // It's not safe to change register allocation for source operands of
+ // instructions that have special allocation requirements. Also assume all
+ // registers used in a call must not be changed (ABI).
+ // FIXME: The issue with predicated instructions is more complex. We are
+ // being conservative here because the kill markers cannot be trusted after
+ // if-conversion:
+ // %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
+ // ...
+ // STR %R0, %R6<kill>, %reg0, 0, pred:0, pred:%CPSR; mem:ST4[%395]
+ // %R6<def> = LDR %SP, %reg0, 100, pred:0, pred:%CPSR; mem:LD4[FixedStack12]
+ // STR %R0, %R6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
+ //
+ // The first R6 kill is not really a kill since it's killed by a predicated
+ // instruction which may not be executed. The second R6 def may or may not
+ // re-define R6 so it's not safe to change it since the last R6 use cannot be
+ // changed.
+ bool Special = MI->getDesc().isCall() ||
+ MI->getDesc().hasExtraSrcRegAllocReq() ||
+ TII->isPredicated(MI);
+
// Scan the register operands for this instruction and update
// Classes and RegRefs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -199,9 +223,7 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
RegRefs.insert(std::make_pair(Reg, &MO));
- // It's not safe to change register allocation for source operands of
- // that have special allocation requirements.
- if (MO.isUse() && MI->getDesc().hasExtraSrcRegAllocReq()) {
+ if (MO.isUse() && Special) {
if (KeepRegs.insert(Reg)) {
for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg)
@@ -216,38 +238,43 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
// Update liveness.
// Proceeding upwards, registers that are defined but not used in this
// instruction are now dead.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0) continue;
- if (!MO.isDef()) continue;
- // Ignore two-addr defs.
- if (MI->isRegTiedToUseOperand(i)) continue;
-
- DefIndices[Reg] = Count;
- KillIndices[Reg] = ~0u;
- assert(((KillIndices[Reg] == ~0u) !=
- (DefIndices[Reg] == ~0u)) &&
- "Kill and Def maps aren't consistent for Reg!");
- KeepRegs.erase(Reg);
- Classes[Reg] = 0;
- RegRefs.erase(Reg);
- // Repeat, for all subregs.
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- unsigned SubregReg = *Subreg;
- DefIndices[SubregReg] = Count;
- KillIndices[SubregReg] = ~0u;
- KeepRegs.erase(SubregReg);
- Classes[SubregReg] = 0;
- RegRefs.erase(SubregReg);
- }
- // Conservatively mark super-registers as unusable.
- for (const unsigned *Super = TRI->getSuperRegisters(Reg);
- *Super; ++Super) {
- unsigned SuperReg = *Super;
- Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
+
+ if (!TII->isPredicated(MI)) {
+ // Predicated defs are modeled as read + write, i.e. similar to
+ // two-address updates.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0) continue;
+ if (!MO.isDef()) continue;
+ // Ignore two-addr defs.
+ if (MI->isRegTiedToUseOperand(i)) continue;
+
+ DefIndices[Reg] = Count;
+ KillIndices[Reg] = ~0u;
+ assert(((KillIndices[Reg] == ~0u) !=
+ (DefIndices[Reg] == ~0u)) &&
+ "Kill and Def maps aren't consistent for Reg!");
+ KeepRegs.erase(Reg);
+ Classes[Reg] = 0;
+ RegRefs.erase(Reg);
+ // Repeat, for all subregs.
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ unsigned SubregReg = *Subreg;
+ DefIndices[SubregReg] = Count;
+ KillIndices[SubregReg] = ~0u;
+ KeepRegs.erase(SubregReg);
+ Classes[SubregReg] = 0;
+ RegRefs.erase(SubregReg);
+ }
+ // Conservatively mark super-registers as unusable.
+ for (const unsigned *Super = TRI->getSuperRegisters(Reg);
+ *Super; ++Super) {
+ unsigned SuperReg = *Super;
+ Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ }
}
}
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -334,10 +361,15 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
// so just duck out immediately if the block is empty.
if (SUnits.empty()) return 0;
+ // Keep a map of the MachineInstr*'s back to the SUnit representing them.
+ // This is used for updating debug information.
+ DenseMap<MachineInstr*,const SUnit*> MISUnitMap;
+
// Find the node at the bottom of the critical path.
const SUnit *Max = 0;
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
const SUnit *SU = &SUnits[i];
+ MISUnitMap[SU->getInstr()] = SU;
if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
Max = SU;
}
@@ -473,7 +505,11 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
PrescanInstruction(MI);
- if (MI->getDesc().hasExtraDefRegAllocReq())
+ // If MI's defs have a special allocation requirement, don't allow
+ // any def registers to be changed. Also assume all registers
+ // defined in a call must not be changed (ABI).
+ if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq() ||
+ TII->isPredicated(MI))
// If this instruction's defs have special allocation requirements, don't
// break this anti-dependency.
AntiDepReg = 0;
@@ -485,7 +521,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
- if (MO.isUse() && AntiDepReg == Reg) {
+ if (MO.isUse() && TRI->regsOverlap(AntiDepReg, Reg)) {
AntiDepReg = 0;
break;
}
@@ -519,8 +555,22 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
std::multimap<unsigned, MachineOperand *>::iterator>
Range = RegRefs.equal_range(AntiDepReg);
for (std::multimap<unsigned, MachineOperand *>::iterator
- Q = Range.first, QE = Range.second; Q != QE; ++Q)
+ Q = Range.first, QE = Range.second; Q != QE; ++Q) {
Q->second->setReg(NewReg);
+ // If the SU for the instruction being updated has debug information
+ // related to the anti-dependency register, make sure to update that
+ // as well.
+ const SUnit *SU = MISUnitMap[Q->second->getParent()];
+ if (!SU) continue;
+ for (unsigned i = 0, e = SU->DbgInstrList.size() ; i < e ; ++i) {
+ MachineInstr *DI = SU->DbgInstrList[i];
+ assert (DI->getNumOperands()==3 && DI->getOperand(0).isReg() &&
+ DI->getOperand(0).getReg()
+ && "Non register dbg_value attached to SUnit!");
+ if (DI->getOperand(0).getReg() == AntiDepReg)
+ DI->getOperand(0).setReg(NewReg);
+ }
+ }
// We just went back in time and modified history; the
// liveness information for the anti-dependence reg is now
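
The MISUnitMap/DbgInstrList pass-through exists because renaming the anti-dependence register would otherwise leave any attached DBG_VALUE naming the stale register. The assert documents the expected shape: exactly three operands with a register first. A construction sketch in the BuildMI style of this codebase (the insertion point and variable node are placeholders):

    // DBG_VALUE <reg>, <offset>, <variable> -- the 3-operand form whose
    // operand 0 the loop above retargets from AntiDepReg to NewReg.
    BuildMI(MBB, InsertPos, DL, TII->get(TargetOpcode::DBG_VALUE))
      .addReg(Reg)            // operand 0: location being described
      .addImm(0)              // operand 1: offset
      .addMetadata(Variable); // operand 2: DIVariable metadata node
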
diff --git a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
index cc42dd2..5406300 100644
--- a/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
+++ b/contrib/llvm/lib/CodeGen/CriticalAntiDepBreaker.h
@@ -22,15 +22,18 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include <map>
namespace llvm {
+class TargetInstrInfo;
+class TargetRegisterInfo;
+
class CriticalAntiDepBreaker : public AntiDepBreaker {
MachineFunction& MF;
MachineRegisterInfo &MRI;
+ const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
/// AllocatableSet - The set of allocatable registers.
diff --git a/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp b/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
index f6739f4..01b31b4 100644
--- a/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/DwarfEHPrepare.cpp
@@ -22,6 +22,7 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
@@ -87,10 +88,13 @@ namespace {
/// CleanupSelectors - Any remaining eh.selector intrinsic calls which still
/// use the ".llvm.eh.catch.all.value" call need to convert to using its
/// initializer instead.
- bool CleanupSelectors();
+ bool CleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels);
+
+ bool HasCatchAllInSelector(IntrinsicInst *);
/// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
- void FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels);
+ void FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels,
+ SmallPtrSet<IntrinsicInst*, 32> &CatchAllSels);
/// FindAllURoRInvokes - Find all URoR invokes in the function.
void FindAllURoRInvokes(SmallPtrSet<InvokeInst*, 32> &URoRInvokes);
@@ -150,7 +154,7 @@ namespace {
Changed = true;
}
- return false;
+ return Changed;
}
public:
@@ -186,25 +190,32 @@ FunctionPass *llvm::createDwarfEHPass(const TargetMachine *tm, bool fast) {
return new DwarfEHPrepare(tm, fast);
}
+/// HasCatchAllInSelector - Return true if the intrinsic instruction has a
+/// catch-all.
+bool DwarfEHPrepare::HasCatchAllInSelector(IntrinsicInst *II) {
+ if (!EHCatchAllValue) return false;
+
+ unsigned ArgIdx = II->getNumArgOperands() - 1;
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(II->getArgOperand(ArgIdx));
+ return GV == EHCatchAllValue;
+}
+
/// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
void DwarfEHPrepare::
-FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels) {
+FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels,
+ SmallPtrSet<IntrinsicInst*, 32> &CatchAllSels) {
for (Value::use_iterator
I = SelectorIntrinsic->use_begin(),
E = SelectorIntrinsic->use_end(); I != E; ++I) {
- IntrinsicInst *SI = cast<IntrinsicInst>(I);
- if (!SI || SI->getParent()->getParent() != F) continue;
-
- unsigned NumOps = SI->getNumOperands();
- if (NumOps > 4) continue;
- bool IsCleanUp = (NumOps == 3);
+ IntrinsicInst *II = cast<IntrinsicInst>(I);
- if (!IsCleanUp)
- if (ConstantInt *CI = dyn_cast<ConstantInt>(SI->getOperand(3)))
- IsCleanUp = (CI->getZExtValue() == 0);
+ if (II->getParent()->getParent() != F)
+ continue;
- if (IsCleanUp)
- Sels.insert(SI);
+ if (!HasCatchAllInSelector(II))
+ Sels.insert(II);
+ else
+ CatchAllSels.insert(II);
}
}
@@ -222,7 +233,7 @@ FindAllURoRInvokes(SmallPtrSet<InvokeInst*, 32> &URoRInvokes) {
/// CleanupSelectors - Any remaining eh.selector intrinsic calls which still use
/// the ".llvm.eh.catch.all.value" call need to convert to using its
/// initializer instead.
-bool DwarfEHPrepare::CleanupSelectors() {
+bool DwarfEHPrepare::CleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels) {
if (!EHCatchAllValue) return false;
if (!SelectorIntrinsic) {
@@ -232,17 +243,15 @@ bool DwarfEHPrepare::CleanupSelectors() {
}
bool Changed = false;
- for (Value::use_iterator
- I = SelectorIntrinsic->use_begin(),
- E = SelectorIntrinsic->use_end(); I != E; ++I) {
- IntrinsicInst *Sel = dyn_cast<IntrinsicInst>(I);
- if (!Sel || Sel->getParent()->getParent() != F) continue;
+ for (SmallPtrSet<IntrinsicInst*, 32>::iterator
+ I = Sels.begin(), E = Sels.end(); I != E; ++I) {
+ IntrinsicInst *Sel = *I;
// Index of the ".llvm.eh.catch.all.value" variable.
- unsigned OpIdx = Sel->getNumOperands() - 1;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(Sel->getOperand(OpIdx));
+ unsigned OpIdx = Sel->getNumArgOperands() - 1;
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(Sel->getArgOperand(OpIdx));
if (GV != EHCatchAllValue) continue;
- Sel->setOperand(OpIdx, EHCatchAllValue->getInitializer());
+ Sel->setArgOperand(OpIdx, EHCatchAllValue->getInitializer());
Changed = true;
}
@@ -293,8 +302,6 @@ DwarfEHPrepare::FindSelectorAndURoR(Instruction *Inst, bool &URoRInvoke,
/// function. This is a candidate to merge the selector associated with the URoR
/// invoke with the one from the URoR's landing pad.
bool DwarfEHPrepare::HandleURoRInvokes() {
- if (!DT) return CleanupSelectors(); // We require DominatorTree information.
-
if (!EHCatchAllValue) {
EHCatchAllValue =
F->getParent()->getNamedGlobal(".llvm.eh.catch.all.value");
@@ -307,14 +314,20 @@ bool DwarfEHPrepare::HandleURoRInvokes() {
if (!SelectorIntrinsic) return false;
}
+ SmallPtrSet<IntrinsicInst*, 32> Sels;
+ SmallPtrSet<IntrinsicInst*, 32> CatchAllSels;
+ FindAllCleanupSelectors(Sels, CatchAllSels);
+
+ if (!DT)
+ // We require DominatorTree information.
+ return CleanupSelectors(CatchAllSels);
+
if (!URoR) {
URoR = F->getParent()->getFunction("_Unwind_Resume_or_Rethrow");
- if (!URoR) return CleanupSelectors();
+ if (!URoR) return CleanupSelectors(CatchAllSels);
}
- SmallPtrSet<IntrinsicInst*, 32> Sels;
SmallPtrSet<InvokeInst*, 32> URoRInvokes;
- FindAllCleanupSelectors(Sels);
FindAllURoRInvokes(URoRInvokes);
SmallPtrSet<IntrinsicInst*, 32> SelsToConvert;
@@ -340,7 +353,8 @@ bool DwarfEHPrepare::HandleURoRInvokes() {
if (!ExceptionValueIntrinsic) {
ExceptionValueIntrinsic =
Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_exception);
- if (!ExceptionValueIntrinsic) return CleanupSelectors();
+ if (!ExceptionValueIntrinsic)
+ return CleanupSelectors(CatchAllSels);
}
for (Value::use_iterator
@@ -360,21 +374,9 @@ bool DwarfEHPrepare::HandleURoRInvokes() {
// an eh.selector intrinsic call. If the eh.selector is a 'clean-up', we
// need to convert it to a 'catch-all'.
for (SmallPtrSet<IntrinsicInst*, 8>::iterator
- SI = SelCalls.begin(), SE = SelCalls.end(); SI != SE; ++SI) {
- IntrinsicInst *II = *SI;
- unsigned NumOps = II->getNumOperands();
-
- if (NumOps <= 4) {
- bool IsCleanUp = (NumOps == 3);
-
- if (!IsCleanUp)
- if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(3)))
- IsCleanUp = (CI->getZExtValue() == 0);
-
- if (IsCleanUp)
- SelsToConvert.insert(II);
- }
- }
+ SI = SelCalls.begin(), SE = SelCalls.end(); SI != SE; ++SI)
+ if (!HasCatchAllInSelector(*SI))
+ SelsToConvert.insert(*SI);
}
}
}
@@ -388,12 +390,22 @@ bool DwarfEHPrepare::HandleURoRInvokes() {
SI = SelsToConvert.begin(), SE = SelsToConvert.end();
SI != SE; ++SI) {
IntrinsicInst *II = *SI;
- SmallVector<Value*, 8> Args;
// Use the exception object pointer and the personality function
// from the original selector.
- Args.push_back(II->getOperand(1)); // Exception object pointer.
- Args.push_back(II->getOperand(2)); // Personality function.
+ CallSite CS(II);
+ IntrinsicInst::op_iterator I = CS.arg_begin();
+ IntrinsicInst::op_iterator E = CS.arg_end();
+ IntrinsicInst::op_iterator B = prior(E);
+
+ // Exclude last argument if it is an integer.
+ if (isa<ConstantInt>(B)) E = B;
+
+ // Add exception object pointer (front).
+ // Add personality function (next).
+ // Add in any filter IDs (rest).
+ SmallVector<Value*, 8> Args(I, E);
+
Args.push_back(EHCatchAllValue->getInitializer()); // Catch-all indicator.
CallInst *NewSelector =
@@ -409,7 +421,7 @@ bool DwarfEHPrepare::HandleURoRInvokes() {
}
}
- Changed |= CleanupSelectors();
+ Changed |= CleanupSelectors(CatchAllSels);
return Changed;
}
diff --git a/contrib/llvm/lib/CodeGen/ELFCodeEmitter.cpp b/contrib/llvm/lib/CodeGen/ELFCodeEmitter.cpp
index 8416d3b..36b0e65 100644
--- a/contrib/llvm/lib/CodeGen/ELFCodeEmitter.cpp
+++ b/contrib/llvm/lib/CodeGen/ELFCodeEmitter.cpp
@@ -90,7 +90,7 @@ bool ELFCodeEmitter::finishFunction(MachineFunction &MF) {
for (std::vector<MachineRelocation>::iterator MRI = JTRelocations.begin(),
MRE = JTRelocations.end(); MRI != MRE; ++MRI) {
MachineRelocation &MR = *MRI;
- unsigned MBBOffset = getMachineBasicBlockAddress(MR.getBasicBlock());
+ uintptr_t MBBOffset = getMachineBasicBlockAddress(MR.getBasicBlock());
MR.setResultPointer((void*)MBBOffset);
MR.setConstantVal(ES->SectionIdx);
JTSection.addRelocation(MR);
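
The single ELFCodeEmitter change is a 64-bit correctness fix: the block address is pointer-sized, and parking it in unsigned truncates it before the (void*) cast. In miniature (standalone, LP64 assumed):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uintptr_t Addr = 0x100000000ULL;     // an offset above 4 GiB
      unsigned Truncated = (unsigned)Addr; // the old bug: high bits dropped
      std::printf("%p vs %p\n", (void *)Addr, (void *)(uintptr_t)Truncated);
      return 0;                            // prints 0x100000000 vs (nil)
    }
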
diff --git a/contrib/llvm/lib/CodeGen/ExactHazardRecognizer.h b/contrib/llvm/lib/CodeGen/ExactHazardRecognizer.h
deleted file mode 100644
index 91c81a9..0000000
--- a/contrib/llvm/lib/CodeGen/ExactHazardRecognizer.h
+++ /dev/null
@@ -1,86 +0,0 @@
-//=- llvm/CodeGen/ExactHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the ExactHazardRecognizer class, which
-// implements hazard-avoidance heuristics for scheduling, based on the
-// scheduling itineraries specified for the target.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_EXACTHAZARDRECOGNIZER_H
-#define LLVM_CODEGEN_EXACTHAZARDRECOGNIZER_H
-
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/Target/TargetInstrItineraries.h"
-
-namespace llvm {
- class ExactHazardRecognizer : public ScheduleHazardRecognizer {
- // ScoreBoard to track function unit usage. ScoreBoard[0] is a
- // mask of the FUs in use in the cycle currently being
- // schedule. ScoreBoard[1] is a mask for the next cycle. The
- // ScoreBoard is used as a circular buffer with the current cycle
- // indicated by Head.
- class ScoreBoard {
- unsigned *Data;
-
- // The maximum number of cycles monitored by the Scoreboard. This
- // value is determined based on the target itineraries to ensure
- // that all hazards can be tracked.
- size_t Depth;
- // Indices into the Scoreboard that represent the current cycle.
- size_t Head;
- public:
- ScoreBoard():Data(NULL), Depth(0), Head(0) { }
- ~ScoreBoard() {
- delete[] Data;
- }
-
- size_t getDepth() const { return Depth; }
- unsigned& operator[](size_t idx) const {
- assert(Depth && "ScoreBoard was not initialized properly!");
-
- return Data[(Head + idx) % Depth];
- }
-
- void reset(size_t d = 1) {
- if (Data == NULL) {
- Depth = d;
- Data = new unsigned[Depth];
- }
-
- memset(Data, 0, Depth * sizeof(Data[0]));
- Head = 0;
- }
-
- void advance() {
- Head = (Head + 1) % Depth;
- }
-
- // Print the scoreboard.
- void dump() const;
- };
-
- // Itinerary data for the target.
- const InstrItineraryData &ItinData;
-
- ScoreBoard ReservedScoreboard;
- ScoreBoard RequiredScoreboard;
-
- public:
- ExactHazardRecognizer(const InstrItineraryData &ItinData);
-
- virtual HazardType getHazardType(SUnit *SU);
- virtual void Reset();
- virtual void EmitInstruction(SUnit *SU);
- virtual void AdvanceCycle();
- };
-}
-
-#endif
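
This deleted header is not lost functionality: the CMakeLists.txt hunk earlier drops ExactHazardRecognizer.cpp and adds PostRAHazardRecognizer.cpp, so the scoreboard presumably moves under the new name. Its circular-buffer indexing is the part worth remembering: the slot for idx cycles past the current one is Data[(Head + idx) % Depth]. For instance:

    unsigned FUMask = 0x4;              // some functional unit's bit
    unsigned Data[4] = {0, 0, 0, 0};    // Depth = 4
    size_t Depth = 4, Head = 2;         // window has already wrapped once
    Data[(Head + 1) % Depth] |= FUMask; // reserve the unit in Data[3]
    Head = (Head + 1) % Depth;          // advance(): Head is now 3
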
diff --git a/contrib/llvm/lib/CodeGen/GCStrategy.cpp b/contrib/llvm/lib/CodeGen/GCStrategy.cpp
index 790cb21..71506cc 100644
--- a/contrib/llvm/lib/CodeGen/GCStrategy.cpp
+++ b/contrib/llvm/lib/CodeGen/GCStrategy.cpp
@@ -271,7 +271,7 @@ bool LowerIntrinsics::PerformDefaultLowering(Function &F, GCStrategy &S) {
case Intrinsic::gcwrite:
if (LowerWr) {
// Replace a write barrier with a simple store.
- Value *St = new StoreInst(CI->getOperand(1), CI->getOperand(3), CI);
+ Value *St = new StoreInst(CI->getArgOperand(0), CI->getArgOperand(2), CI);
CI->replaceAllUsesWith(St);
CI->eraseFromParent();
}
@@ -279,7 +279,7 @@ bool LowerIntrinsics::PerformDefaultLowering(Function &F, GCStrategy &S) {
case Intrinsic::gcread:
if (LowerRd) {
// Replace a read barrier with a simple load.
- Value *Ld = new LoadInst(CI->getOperand(2), "", CI);
+ Value *Ld = new LoadInst(CI->getArgOperand(1), "", CI);
Ld->takeName(CI);
CI->replaceAllUsesWith(Ld);
CI->eraseFromParent();
@@ -290,7 +290,7 @@ bool LowerIntrinsics::PerformDefaultLowering(Function &F, GCStrategy &S) {
// Initialize the GC root, but do not delete the intrinsic. The
// backend needs the intrinsic to flag the stack slot.
Roots.push_back(cast<AllocaInst>(
- CI->getOperand(1)->stripPointerCasts()));
+ CI->getArgOperand(0)->stripPointerCasts()));
}
break;
default:
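
These GCStrategy edits belong to a wider API migration: call arguments used to be addressed through raw CallInst operand indices (with one slot taken by the callee), while getArgOperand(i) names argument i directly, so every index here shifts down by one. Illustration (CI is a CallInst):

    Value *StoreVal = CI->getArgOperand(0); // was CI->getOperand(1)
    Value *SlotPtr  = CI->getArgOperand(2); // was CI->getOperand(3)
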
diff --git a/contrib/llvm/lib/CodeGen/IfConversion.cpp b/contrib/llvm/lib/CodeGen/IfConversion.cpp
index c61fd17..6b445e0 100644
--- a/contrib/llvm/lib/CodeGen/IfConversion.cpp
+++ b/contrib/llvm/lib/CodeGen/IfConversion.cpp
@@ -20,6 +20,7 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -33,20 +34,22 @@ using namespace llvm;
static cl::opt<int> IfCvtFnStart("ifcvt-fn-start", cl::init(-1), cl::Hidden);
static cl::opt<int> IfCvtFnStop("ifcvt-fn-stop", cl::init(-1), cl::Hidden);
static cl::opt<int> IfCvtLimit("ifcvt-limit", cl::init(-1), cl::Hidden);
-static cl::opt<bool> DisableSimple("disable-ifcvt-simple",
+static cl::opt<bool> DisableSimple("disable-ifcvt-simple",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableSimpleF("disable-ifcvt-simple-false",
+static cl::opt<bool> DisableSimpleF("disable-ifcvt-simple-false",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableTriangle("disable-ifcvt-triangle",
+static cl::opt<bool> DisableTriangle("disable-ifcvt-triangle",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableTriangleR("disable-ifcvt-triangle-rev",
+static cl::opt<bool> DisableTriangleR("disable-ifcvt-triangle-rev",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableTriangleF("disable-ifcvt-triangle-false",
+static cl::opt<bool> DisableTriangleF("disable-ifcvt-triangle-false",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableTriangleFR("disable-ifcvt-triangle-false-rev",
+static cl::opt<bool> DisableTriangleFR("disable-ifcvt-triangle-false-rev",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableDiamond("disable-ifcvt-diamond",
+static cl::opt<bool> DisableDiamond("disable-ifcvt-diamond",
cl::init(false), cl::Hidden);
+static cl::opt<bool> IfCvtBranchFold("ifcvt-branch-fold",
+ cl::init(true), cl::Hidden);
STATISTIC(NumSimple, "Number of simple if-conversions performed");
STATISTIC(NumSimpleFalse, "Number of simple (F) if-conversions performed");
@@ -115,7 +118,7 @@ namespace {
BB(0), TrueBB(0), FalseBB(0) {}
};
- /// IfcvtToken - Record information about pending if-conversions to attemp:
+ /// IfcvtToken - Record information about pending if-conversions to attempt:
/// BBI - Corresponding BBInfo.
/// Kind - Type of block. See IfcvtKind.
/// NeedSubsumption - True if the to-be-predicated BB has already been
@@ -146,6 +149,7 @@ namespace {
const TargetLowering *TLI;
const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
bool MadeChange;
int FnNum;
public:
@@ -167,8 +171,7 @@ namespace {
std::vector<IfcvtToken*> &Tokens);
bool FeasibilityAnalysis(BBInfo &BBI, SmallVectorImpl<MachineOperand> &Cond,
bool isTriangle = false, bool RevBranch = false);
- bool AnalyzeBlocks(MachineFunction &MF,
- std::vector<IfcvtToken*> &Tokens);
+ void AnalyzeBlocks(MachineFunction &MF, std::vector<IfcvtToken*> &Tokens);
void InvalidatePreds(MachineBasicBlock *BB);
void RemoveExtraEdges(BBInfo &BBI);
bool IfConvertSimple(BBInfo &BBI, IfcvtKind Kind);
@@ -177,14 +180,22 @@ namespace {
unsigned NumDups1, unsigned NumDups2);
void PredicateBlock(BBInfo &BBI,
MachineBasicBlock::iterator E,
- SmallVectorImpl<MachineOperand> &Cond);
+ SmallVectorImpl<MachineOperand> &Cond,
+ SmallSet<unsigned, 4> &Redefs);
void CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
SmallVectorImpl<MachineOperand> &Cond,
+ SmallSet<unsigned, 4> &Redefs,
bool IgnoreBr = false);
- void MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI);
+ void MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges = true);
- bool MeetIfcvtSizeLimit(unsigned Size) const {
- return Size > 0 && Size <= TLI->getIfCvtBlockSizeLimit();
+ bool MeetIfcvtSizeLimit(MachineBasicBlock &BB, unsigned Size) const {
+ return Size > 0 && TII->isProfitableToIfCvt(BB, Size);
+ }
+
+ bool MeetIfcvtSizeLimit(MachineBasicBlock &TBB, unsigned TSize,
+ MachineBasicBlock &FBB, unsigned FSize) const {
+ return TSize > 0 && FSize > 0 &&
+ TII->isProfitableToIfCvt(TBB, TSize, FBB, FSize);
}
// blockAlwaysFallThrough - Block ends without a terminator.
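
The fixed TLI limits (getIfCvtBlockSizeLimit, getIfCvtDupBlockSizeLimit) give way to per-block TargetInstrInfo queries, letting a target weigh the actual block rather than a global size cap. A sketch of an override, with the virtual's signature inferred from the call sites above and the policy purely illustrative:

    bool MyTargetInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                                unsigned NumInstrs) const {
      return NumInstrs <= 4; // assumed policy: only predicate short blocks
    }
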
@@ -227,8 +238,15 @@ FunctionPass *llvm::createIfConverterPass() { return new IfConverter(); }
bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
TLI = MF.getTarget().getTargetLowering();
TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getRegisterInfo();
if (!TII) return false;
+ // Tail merging tends to expose more if-conversion opportunities.
+ BranchFolder BF(true);
+ bool BFChange = BF.OptimizeFunction(MF, TII,
+ MF.getTarget().getRegisterInfo(),
+ getAnalysisIfAvailable<MachineModuleInfo>());
+
DEBUG(dbgs() << "\nIfcvt: function (" << ++FnNum << ") \'"
<< MF.getFunction()->getName() << "\'");
@@ -253,7 +271,8 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
while (IfCvtLimit == -1 || (int)NumIfCvts < IfCvtLimit) {
// Do an initial analysis for each basic block and find all the potential
// candidates to perform if-conversion.
- bool Change = AnalyzeBlocks(MF, Tokens);
+ bool Change = false;
+ AnalyzeBlocks(MF, Tokens);
while (!Tokens.empty()) {
IfcvtToken *Token = Tokens.back();
Tokens.pop_back();
@@ -281,7 +300,8 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
case ICSimpleFalse: {
bool isFalse = Kind == ICSimpleFalse;
if ((isFalse && DisableSimpleF) || (!isFalse && DisableSimple)) break;
- DEBUG(dbgs() << "Ifcvt (Simple" << (Kind == ICSimpleFalse ? " false" :"")
+ DEBUG(dbgs() << "Ifcvt (Simple" << (Kind == ICSimpleFalse ?
+ " false" : "")
<< "): BB#" << BBI.BB->getNumber() << " ("
<< ((Kind == ICSimpleFalse)
? BBI.FalseBB->getNumber()
@@ -289,8 +309,8 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
RetVal = IfConvertSimple(BBI, Kind);
DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) {
- if (isFalse) NumSimpleFalse++;
- else NumSimple++;
+ if (isFalse) ++NumSimpleFalse;
+ else ++NumSimple;
}
break;
}
@@ -316,11 +336,11 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) {
if (isFalse) {
- if (isRev) NumTriangleFRev++;
- else NumTriangleFalse++;
+ if (isRev) ++NumTriangleFRev;
+ else ++NumTriangleFalse;
} else {
- if (isRev) NumTriangleRev++;
- else NumTriangle++;
+ if (isRev) ++NumTriangleRev;
+ else ++NumTriangle;
}
}
break;
@@ -332,7 +352,7 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
<< BBI.FalseBB->getNumber() << ") ");
RetVal = IfConvertDiamond(BBI, Kind, NumDups, NumDups2);
DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
- if (RetVal) NumDiamonds++;
+ if (RetVal) ++NumDiamonds;
break;
}
}
@@ -361,13 +381,14 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
Roots.clear();
BBAnalysis.clear();
- if (MadeChange) {
+ if (MadeChange && IfCvtBranchFold) {
BranchFolder BF(false);
BF.OptimizeFunction(MF, TII,
MF.getTarget().getRegisterInfo(),
getAnalysisIfAvailable<MachineModuleInfo>());
}
+ MadeChange |= BFChange;
return MadeChange;
}
@@ -387,9 +408,10 @@ static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
/// ReverseBranchCondition - Reverse the condition of the end of the block
/// branch. Swap block's 'true' and 'false' successors.
bool IfConverter::ReverseBranchCondition(BBInfo &BBI) {
+ DebugLoc dl; // FIXME: this is nowhere
if (!TII->ReverseBranchCondition(BBI.BrCond)) {
TII->RemoveBranch(*BBI.BB);
- TII->InsertBranch(*BBI.BB, BBI.FalseBB, BBI.TrueBB, BBI.BrCond);
+ TII->InsertBranch(*BBI.BB, BBI.FalseBB, BBI.TrueBB, BBI.BrCond, dl);
std::swap(BBI.TrueBB, BBI.FalseBB);
return true;
}
@@ -420,7 +442,7 @@ bool IfConverter::ValidSimple(BBInfo &TrueBBI, unsigned &Dups) const {
if (TrueBBI.BB->pred_size() > 1) {
if (TrueBBI.CannotBeCopied ||
- TrueBBI.NonPredSize > TLI->getIfCvtDupBlockSizeLimit())
+ !TII->isProfitableToDupForIfCvt(*TrueBBI.BB, TrueBBI.NonPredSize))
return false;
Dups = TrueBBI.NonPredSize;
}
@@ -431,7 +453,7 @@ bool IfConverter::ValidSimple(BBInfo &TrueBBI, unsigned &Dups) const {
/// ValidTriangle - Returns true if the 'true' and 'false' blocks (along
/// with their common predecessor) forms a valid triangle shape for ifcvt.
/// If 'FalseBranch' is true, it checks if 'true' block's false branch
-/// branches to the false branch rather than the other way around. It also
+/// branches to the 'false' block rather than the other way around. It also
/// returns the number of instructions that the ifcvt would need to duplicate
/// if performed in 'Dups'.
bool IfConverter::ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
@@ -457,7 +479,7 @@ bool IfConverter::ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
++Size;
}
}
- if (Size > TLI->getIfCvtDupBlockSizeLimit())
+ if (!TII->isProfitableToDupForIfCvt(*TrueBBI.BB, Size))
return false;
Dups = Size;
}
@@ -514,7 +536,27 @@ bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
MachineBasicBlock::iterator TI = TrueBBI.BB->begin();
MachineBasicBlock::iterator FI = FalseBBI.BB->begin();
- while (TI != TrueBBI.BB->end() && FI != FalseBBI.BB->end()) {
+ MachineBasicBlock::iterator TIE = TrueBBI.BB->end();
+ MachineBasicBlock::iterator FIE = FalseBBI.BB->end();
+ // Skip dbg_value instructions
+ while (TI != TIE && TI->isDebugValue())
+ ++TI;
+ while (FI != FIE && FI->isDebugValue())
+ ++FI;
+ while (TI != TIE && FI != FIE) {
+ // Skip dbg_value instructions. These do not count.
+ if (TI->isDebugValue()) {
+ while (TI != TIE && TI->isDebugValue())
+ ++TI;
+ if (TI == TIE)
+ break;
+ }
+ if (FI->isDebugValue()) {
+ while (FI != FIE && FI->isDebugValue())
+ ++FI;
+ if (FI == FIE)
+ break;
+ }
if (!TI->isIdenticalTo(FI))
break;
++Dups1;
@@ -524,7 +566,27 @@ bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
TI = firstNonBranchInst(TrueBBI.BB, TII);
FI = firstNonBranchInst(FalseBBI.BB, TII);
- while (TI != TrueBBI.BB->begin() && FI != FalseBBI.BB->begin()) {
+ MachineBasicBlock::iterator TIB = TrueBBI.BB->begin();
+ MachineBasicBlock::iterator FIB = FalseBBI.BB->begin();
+ // Skip dbg_value instructions at the end of the BBs.
+ while (TI != TIB && TI->isDebugValue())
+ --TI;
+ while (FI != FIB && FI->isDebugValue())
+ --FI;
+ while (TI != TIB && FI != FIB) {
+ // Skip dbg_value instructions. These do not count.
+ if (TI->isDebugValue()) {
+ while (TI != TIB && TI->isDebugValue())
+ --TI;
+ if (TI == TIB)
+ break;
+ }
+ if (FI->isDebugValue()) {
+ while (FI != FIB && FI->isDebugValue())
+ --FI;
+ if (FI == FIB)
+ break;
+ }
if (!TI->isIdenticalTo(FI))
break;
++Dups2;
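[Editor's note] The two hunks above apply the same motif twice: scan forward from the block tops (and backward from the bottoms) while treating dbg_value instructions as transparent, so debug info cannot change how many duplicated instructions Dups1/Dups2 report. A minimal standalone sketch of the forward scan, using a hypothetical Instr record in place of MachineInstr:

    #include <cstdio>
    #include <vector>

    struct Instr {
      bool IsDebugValue;
      int Opcode;  // stands in for full instruction identity
    };

    // Count how many leading instructions two blocks share, ignoring
    // dbg_value entries on either side -- the same walk ValidDiamond
    // performs with MachineBasicBlock iterators.
    static unsigned countCommonPrefix(const std::vector<Instr> &T,
                                      const std::vector<Instr> &F) {
      unsigned Dups = 0;
      size_t TI = 0, FI = 0;
      while (TI != T.size() && FI != F.size()) {
        while (TI != T.size() && T[TI].IsDebugValue) ++TI;  // skip dbg_value
        while (FI != F.size() && F[FI].IsDebugValue) ++FI;  // skip dbg_value
        if (TI == T.size() || FI == F.size())
          break;
        if (T[TI].Opcode != F[FI].Opcode)  // stand-in for isIdenticalTo
          break;
        ++Dups; ++TI; ++FI;
      }
      return Dups;
    }

    int main() {
      std::vector<Instr> T = {{false, 1}, {true, 0}, {false, 2}, {false, 3}};
      std::vector<Instr> F = {{true, 0}, {false, 1}, {false, 2}, {false, 9}};
      std::printf("%u\n", countCommonPrefix(T, F));  // prints 2
    }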
@@ -556,7 +618,7 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
// No false branch. This BB must end with a conditional branch and a
// fallthrough.
if (!BBI.FalseBB)
- BBI.FalseBB = findFalseBlock(BBI.BB, BBI.TrueBB);
+ BBI.FalseBB = findFalseBlock(BBI.BB, BBI.TrueBB);
if (!BBI.FalseBB) {
// Malformed bcc? True and false blocks are the same?
BBI.IsUnpredicable = true;
@@ -569,6 +631,9 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
BBI.ClobbersPred = false;
for (MachineBasicBlock::iterator I = BBI.BB->begin(), E = BBI.BB->end();
I != E; ++I) {
+ if (I->isDebugValue())
+ continue;
+
const TargetInstrDesc &TID = I->getDesc();
if (TID.isNotDuplicable())
BBI.CannotBeCopied = true;
@@ -702,8 +767,8 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
bool FNeedSub = FalseBBI.Predicate.size() > 0;
bool Enqueued = false;
if (CanRevCond && ValidDiamond(TrueBBI, FalseBBI, Dups, Dups2) &&
- MeetIfcvtSizeLimit(TrueBBI.NonPredSize - (Dups + Dups2)) &&
- MeetIfcvtSizeLimit(FalseBBI.NonPredSize - (Dups + Dups2)) &&
+ MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize - (Dups + Dups2),
+ *FalseBBI.BB, FalseBBI.NonPredSize - (Dups + Dups2)) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond) &&
FeasibilityAnalysis(FalseBBI, RevCond)) {
// Diamond:
@@ -720,7 +785,7 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
}
if (ValidTriangle(TrueBBI, FalseBBI, false, Dups) &&
- MeetIfcvtSizeLimit(TrueBBI.NonPredSize) &&
+ MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond, true)) {
// Triangle:
// EBB
@@ -732,23 +797,23 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
Tokens.push_back(new IfcvtToken(BBI, ICTriangle, TNeedSub, Dups));
Enqueued = true;
}
-
+
if (ValidTriangle(TrueBBI, FalseBBI, true, Dups) &&
- MeetIfcvtSizeLimit(TrueBBI.NonPredSize) &&
+ MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond, true, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleRev, TNeedSub, Dups));
Enqueued = true;
}
if (ValidSimple(TrueBBI, Dups) &&
- MeetIfcvtSizeLimit(TrueBBI.NonPredSize) &&
+ MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond)) {
// Simple (split, no rejoin):
// EBB
// | \_
// | |
// | TBB---> exit
- // |
+ // |
// FBB
Tokens.push_back(new IfcvtToken(BBI, ICSimple, TNeedSub, Dups));
Enqueued = true;
@@ -757,21 +822,21 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
if (CanRevCond) {
// Try the other path...
if (ValidTriangle(FalseBBI, TrueBBI, false, Dups) &&
- MeetIfcvtSizeLimit(FalseBBI.NonPredSize) &&
+ MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
FeasibilityAnalysis(FalseBBI, RevCond, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleFalse, FNeedSub, Dups));
Enqueued = true;
}
if (ValidTriangle(FalseBBI, TrueBBI, true, Dups) &&
- MeetIfcvtSizeLimit(FalseBBI.NonPredSize) &&
+ MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
FeasibilityAnalysis(FalseBBI, RevCond, true, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleFRev, FNeedSub, Dups));
Enqueued = true;
}
if (ValidSimple(FalseBBI, Dups) &&
- MeetIfcvtSizeLimit(FalseBBI.NonPredSize) &&
+ MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
FeasibilityAnalysis(FalseBBI, RevCond)) {
Tokens.push_back(new IfcvtToken(BBI, ICSimpleFalse, FNeedSub, Dups));
Enqueued = true;
@@ -785,11 +850,9 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
}
/// AnalyzeBlocks - Analyze all blocks and find entries for all if-conversion
-/// candidates. It returns true if any CFG restructuring is done to expose more
-/// if-conversion opportunities.
-bool IfConverter::AnalyzeBlocks(MachineFunction &MF,
+/// candidates.
+void IfConverter::AnalyzeBlocks(MachineFunction &MF,
std::vector<IfcvtToken*> &Tokens) {
- bool Change = false;
std::set<MachineBasicBlock*> Visited;
for (unsigned i = 0, e = Roots.size(); i != e; ++i) {
for (idf_ext_iterator<MachineBasicBlock*> I=idf_ext_begin(Roots[i],Visited),
@@ -801,20 +864,23 @@ bool IfConverter::AnalyzeBlocks(MachineFunction &MF,
// Sort to favor more complex ifcvt scheme.
std::stable_sort(Tokens.begin(), Tokens.end(), IfcvtTokenCmp);
-
- return Change;
}
/// canFallThroughTo - Returns true if ToBB is the next block after BB, or
/// if all the intervening blocks are empty (given that BB can fall through
/// to its next block).
static bool canFallThroughTo(MachineBasicBlock *BB, MachineBasicBlock *ToBB) {
- MachineFunction::iterator I = BB;
+ MachineFunction::iterator PI = BB;
+ MachineFunction::iterator I = llvm::next(PI);
MachineFunction::iterator TI = ToBB;
MachineFunction::iterator E = BB->getParent()->end();
- while (++I != TI)
- if (I == E || !I->empty())
+ while (I != TI) {
+ // Check isSuccessor to avoid the case where the next block is empty, but
+ // it's not a successor.
+ if (I == E || !I->empty() || !PI->isSuccessor(I))
return false;
+ PI = I++;
+ }
return true;
}
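[Editor's note] The rewritten loop adds one requirement: each empty block skipped over must actually be a successor of the block before it, so an empty but unreachable block no longer counts as fall-through. A self-contained model of the walk, with a hypothetical Block type standing in for MachineBasicBlock:

    #include <set>
    #include <vector>

    struct Block {
      bool Empty;
      std::set<const Block *> Succs;  // successor edges in the CFG
    };

    // Can control fall from Fn[From] to Fn[To]? Every intervening block must
    // be empty *and* be a successor of the previous one, exactly as the
    // patched loop above checks with isSuccessor().
    static bool canFallThroughTo(const std::vector<Block> &Fn,
                                 size_t From, size_t To) {
      size_t PI = From;
      for (size_t I = From + 1; I != To; PI = I++) {
        if (I >= Fn.size() || !Fn[I].Empty || !Fn[PI].Succs.count(&Fn[I]))
          return false;
      }
      return true;
    }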
@@ -836,8 +902,9 @@ void IfConverter::InvalidatePreds(MachineBasicBlock *BB) {
///
static void InsertUncondBranch(MachineBasicBlock *BB, MachineBasicBlock *ToBB,
const TargetInstrInfo *TII) {
+ DebugLoc dl; // FIXME: no DebugLoc is available here
SmallVector<MachineOperand, 0> NoCond;
- TII->InsertBranch(*BB, ToBB, NULL, NoCond);
+ TII->InsertBranch(*BB, ToBB, NULL, NoCond, dl);
}
/// RemoveExtraEdges - Remove true / false edges if either / both are no longer
@@ -849,6 +916,66 @@ void IfConverter::RemoveExtraEdges(BBInfo &BBI) {
BBI.BB->CorrectExtraCFGEdges(TBB, FBB, !Cond.empty());
}
+/// InitPredRedefs / UpdatePredRedefs - Defs by predicated instructions are
+/// modeled as read + write (sort of like two-address instructions). These
+/// routines track register liveness and add implicit uses to if-converted
+/// instructions to conform to the model.
+static void InitPredRedefs(MachineBasicBlock *BB, SmallSet<unsigned,4> &Redefs,
+ const TargetRegisterInfo *TRI) {
+ for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
+ E = BB->livein_end(); I != E; ++I) {
+ unsigned Reg = *I;
+ Redefs.insert(Reg);
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg)
+ Redefs.insert(*Subreg);
+ }
+}
+
+static void UpdatePredRedefs(MachineInstr *MI, SmallSet<unsigned,4> &Redefs,
+ const TargetRegisterInfo *TRI,
+ bool AddImpUse = false) {
+ SmallVector<unsigned, 4> Defs;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ if (MO.isDef())
+ Defs.push_back(Reg);
+ else if (MO.isKill()) {
+ Redefs.erase(Reg);
+ for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
+ Redefs.erase(*SR);
+ }
+ }
+ for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+ unsigned Reg = Defs[i];
+ if (Redefs.count(Reg)) {
+ if (AddImpUse)
+ // Treat predicated update as read + write.
+ MI->addOperand(MachineOperand::CreateReg(Reg, false/*IsDef*/,
+ true/*IsImp*/,false/*IsKill*/));
+ } else {
+ Redefs.insert(Reg);
+ for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
+ Redefs.insert(*SR);
+ }
+ }
+}
+
+static void UpdatePredRedefs(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator E,
+ SmallSet<unsigned,4> &Redefs,
+ const TargetRegisterInfo *TRI) {
+ while (I != E) {
+ UpdatePredRedefs(I, Redefs, TRI);
+ ++I;
+ }
+}
+
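[Editor's note] InitPredRedefs seeds the set with the block's live-in registers, and UpdatePredRedefs then tracks liveness instruction by instruction: a kill removes a register, and a def of a register still in the set is the read-plus-write case that needs an implicit use. A simplified sketch of that bookkeeping with plain unsigned registers, leaving out the sub-register expansion the real code does through TRI->getSubRegisters:

    #include <set>
    #include <vector>

    // One register operand: Reg 0 means "none"; kills end liveness, and a
    // def of a register still in Redefs is the read + write case.
    struct Operand {
      unsigned Reg;
      bool IsDef;
      bool IsKill;
    };

    // Returns true when the instruction needs an implicit use added, i.e. it
    // redefines a register that was live coming in -- a simplified analogue
    // of UpdatePredRedefs with AddImpUse, minus sub-register handling.
    static bool updatePredRedefs(const std::vector<Operand> &MI,
                                 std::set<unsigned> &Redefs) {
      bool NeedsImplicitUse = false;
      std::vector<unsigned> Defs;
      for (const Operand &MO : MI) {
        if (!MO.Reg)
          continue;
        if (MO.IsDef)
          Defs.push_back(MO.Reg);
        else if (MO.IsKill)
          Redefs.erase(MO.Reg);
      }
      for (unsigned Reg : Defs) {
        if (Redefs.count(Reg))
          NeedsImplicitUse = true;  // live-before def: model as read + write
        else
          Redefs.insert(Reg);
      }
      return NeedsImplicitUse;
    }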
/// IfConvertSimple - If convert a simple (split, no rejoin) sub-CFG.
///
bool IfConverter::IfConvertSimple(BBInfo &BBI, IfcvtKind Kind) {
@@ -873,13 +1000,19 @@ bool IfConverter::IfConvertSimple(BBInfo &BBI, IfcvtKind Kind) {
if (TII->ReverseBranchCondition(Cond))
assert(false && "Unable to reverse branch condition!");
+ // Initialize liveins to the first BB. These are potentially redefined by
+ // predicated instructions.
+ SmallSet<unsigned, 4> Redefs;
+ InitPredRedefs(CvtBBI->BB, Redefs, TRI);
+ InitPredRedefs(NextBBI->BB, Redefs, TRI);
+
if (CvtBBI->BB->pred_size() > 1) {
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
// Copy instructions in the true block, predicate them, and add them to
// the entry block.
- CopyAndPredicateBlock(BBI, *CvtBBI, Cond);
+ CopyAndPredicateBlock(BBI, *CvtBBI, Cond, Redefs);
} else {
- PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond);
+ PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond, Redefs);
// Merge converted block into entry block.
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
@@ -922,6 +1055,7 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
BBInfo &FalseBBI = BBAnalysis[BBI.FalseBB->getNumber()];
BBInfo *CvtBBI = &TrueBBI;
BBInfo *NextBBI = &FalseBBI;
+ DebugLoc dl; // FIXME: no DebugLoc is available here
SmallVector<MachineOperand, 4> Cond(BBI.BrCond.begin(), BBI.BrCond.end());
if (Kind == ICTriangleFalse || Kind == ICTriangleFRev)
@@ -957,21 +1091,26 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
}
}
+ // Initialize liveins to the first BB. These are potentially redefined by
+ // predicated instructions.
+ SmallSet<unsigned, 4> Redefs;
+ InitPredRedefs(CvtBBI->BB, Redefs, TRI);
+ InitPredRedefs(NextBBI->BB, Redefs, TRI);
+
bool HasEarlyExit = CvtBBI->FalseBB != NULL;
- bool DupBB = CvtBBI->BB->pred_size() > 1;
- if (DupBB) {
+ if (CvtBBI->BB->pred_size() > 1) {
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
// Copy instructions in the true block, predicate them, and add them to
// the entry block.
- CopyAndPredicateBlock(BBI, *CvtBBI, Cond, true);
+ CopyAndPredicateBlock(BBI, *CvtBBI, Cond, Redefs, true);
} else {
// Predicate the 'true' block after removing its branch.
CvtBBI->NonPredSize -= TII->RemoveBranch(*CvtBBI->BB);
- PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond);
+ PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond, Redefs);
// Now merge the entry of the triangle with the true block.
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
- MergeBlocks(BBI, *CvtBBI);
+ MergeBlocks(BBI, *CvtBBI, false);
}
// If 'true' block has a 'false' successor, add an exit branch to it.
@@ -980,7 +1119,7 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
CvtBBI->BrCond.end());
if (TII->ReverseBranchCondition(RevCond))
assert(false && "Unable to reverse branch condition!");
- TII->InsertBranch(*BBI.BB, CvtBBI->FalseBB, NULL, RevCond);
+ TII->InsertBranch(*BBI.BB, CvtBBI->FalseBB, NULL, RevCond, dl);
BBI.BB->addSuccessor(CvtBBI->FalseBB);
}
@@ -1009,7 +1148,7 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
RemoveExtraEdges(BBI);
// Update block info. BB can be iteratively if-converted.
- if (!IterIfcvt)
+ if (!IterIfcvt)
BBI.IsDone = true;
InvalidatePreds(BBI.BB);
CvtBBI->IsDone = true;
@@ -1044,9 +1183,9 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
return false;
}
- // Merge the 'true' and 'false' blocks by copying the instructions
- // from the 'false' block to the 'true' block. That is, unless the true
- // block would clobber the predicate, in that case, do the opposite.
+ // Put the predicated instructions from the 'true' block before the
+ // instructions from the 'false' block, unless the true block would clobber
+ // the predicate, in which case, do the opposite.
BBInfo *BBI1 = &TrueBBI;
BBInfo *BBI2 = &FalseBBI;
SmallVector<MachineOperand, 4> RevCond(BBI.BrCond.begin(), BBI.BrCond.end());
@@ -1071,39 +1210,72 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
// Remove the conditional branch from entry to the blocks.
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
+ // Initialize liveins to the first BB. These are potentially redefined by
+ // predicated instructions.
+ SmallSet<unsigned, 4> Redefs;
+ InitPredRedefs(BBI1->BB, Redefs, TRI);
+
// Remove the duplicated instructions at the beginnings of both paths.
MachineBasicBlock::iterator DI1 = BBI1->BB->begin();
MachineBasicBlock::iterator DI2 = BBI2->BB->begin();
+ MachineBasicBlock::iterator DIE1 = BBI1->BB->end();
+ MachineBasicBlock::iterator DIE2 = BBI2->BB->end();
+ // Skip dbg_value instructions
+ while (DI1 != DIE1 && DI1->isDebugValue())
+ ++DI1;
+ while (DI2 != DIE2 && DI2->isDebugValue())
+ ++DI2;
BBI1->NonPredSize -= NumDups1;
BBI2->NonPredSize -= NumDups1;
+
+ // Skip past the dups on each side separately since there may be
+ // differing dbg_value entries.
+ for (unsigned i = 0; i < NumDups1; ++DI1) {
+ if (!DI1->isDebugValue())
+ ++i;
+ }
while (NumDups1 != 0) {
- ++DI1;
++DI2;
- --NumDups1;
+ if (!DI2->isDebugValue())
+ --NumDups1;
}
+
+ UpdatePredRedefs(BBI1->BB->begin(), DI1, Redefs, TRI);
BBI.BB->splice(BBI.BB->end(), BBI1->BB, BBI1->BB->begin(), DI1);
BBI2->BB->erase(BBI2->BB->begin(), DI2);
// Predicate the 'true' block after removing its branch.
BBI1->NonPredSize -= TII->RemoveBranch(*BBI1->BB);
DI1 = BBI1->BB->end();
- for (unsigned i = 0; i != NumDups2; ++i)
+ for (unsigned i = 0; i != NumDups2; ) {
+ // NumDups2 only counted non-dbg_value instructions, so this won't
+ // run off the head of the list.
+ assert (DI1 != BBI1->BB->begin());
--DI1;
+ // Skip dbg_value instructions.
+ if (!DI1->isDebugValue())
+ ++i;
+ }
BBI1->BB->erase(DI1, BBI1->BB->end());
- PredicateBlock(*BBI1, BBI1->BB->end(), *Cond1);
+ PredicateBlock(*BBI1, BBI1->BB->end(), *Cond1, Redefs);
// Predicate the 'false' block.
BBI2->NonPredSize -= TII->RemoveBranch(*BBI2->BB);
DI2 = BBI2->BB->end();
while (NumDups2 != 0) {
+ // NumDups2 only counted non-dbg_value instructions, so this won't
+ // run off the head of the list.
+ assert (DI2 != BBI2->BB->begin());
--DI2;
- --NumDups2;
+ // Skip dbg_value instructions.
+ if (!DI2->isDebugValue())
+ --NumDups2;
}
- PredicateBlock(*BBI2, DI2, *Cond2);
+ PredicateBlock(*BBI2, DI2, *Cond2, Redefs);
// Merge the true block into the entry of the diamond.
- MergeBlocks(BBI, *BBI1);
- MergeBlocks(BBI, *BBI2);
+ MergeBlocks(BBI, *BBI1, TailBB == 0);
+ MergeBlocks(BBI, *BBI2, TailBB == 0);
// If the if-converted block falls through or unconditionally branches into
// the tail block, and the tail block does not have other predecessors, then
@@ -1111,16 +1283,32 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
// tail, add an unconditional branch to it.
if (TailBB) {
BBInfo TailBBI = BBAnalysis[TailBB->getNumber()];
- if (TailBB->pred_size() == 1 && !TailBBI.HasFallThrough) {
- BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
+ bool CanMergeTail = !TailBBI.HasFallThrough;
+ // There may still be a fall-through edge from BBI1 or BBI2 to TailBB;
+ // check if there are any other predecessors besides those.
+ unsigned NumPreds = TailBB->pred_size();
+ if (NumPreds > 1)
+ CanMergeTail = false;
+ else if (NumPreds == 1 && CanMergeTail) {
+ MachineBasicBlock::pred_iterator PI = TailBB->pred_begin();
+ if (*PI != BBI1->BB && *PI != BBI2->BB)
+ CanMergeTail = false;
+ }
+ if (CanMergeTail) {
MergeBlocks(BBI, TailBBI);
TailBBI.IsDone = true;
} else {
+ BBI.BB->addSuccessor(TailBB);
InsertUncondBranch(BBI.BB, TailBB, TII);
BBI.HasFallThrough = false;
}
}
+ // RemoveExtraEdges won't work if the block has an unanalyzable branch,
+ // which can happen here if TailBB is unanalyzable and is merged, so
+ // explicitly remove BBI1 and BBI2 as successors.
+ BBI.BB->removeSuccessor(BBI1->BB);
+ BBI.BB->removeSuccessor(BBI2->BB);
RemoveExtraEdges(BBI);
// Update block info.
@@ -1135,9 +1323,10 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
/// specified end with the specified condition.
void IfConverter::PredicateBlock(BBInfo &BBI,
MachineBasicBlock::iterator E,
- SmallVectorImpl<MachineOperand> &Cond) {
+ SmallVectorImpl<MachineOperand> &Cond,
+ SmallSet<unsigned, 4> &Redefs) {
for (MachineBasicBlock::iterator I = BBI.BB->begin(); I != E; ++I) {
- if (TII->isPredicated(I))
+ if (I->isDebugValue() || TII->isPredicated(I))
continue;
if (!TII->PredicateInstruction(I, Cond)) {
#ifndef NDEBUG
@@ -1145,6 +1334,10 @@ void IfConverter::PredicateBlock(BBInfo &BBI,
#endif
llvm_unreachable(0);
}
+
+ // If the predicated instruction now redefines a register as the result of
+ // if-conversion, add an implicit kill.
+ UpdatePredRedefs(I, Redefs, TRI, true);
}
std::copy(Cond.begin(), Cond.end(), std::back_inserter(BBI.Predicate));
@@ -1152,48 +1345,55 @@ void IfConverter::PredicateBlock(BBInfo &BBI,
BBI.IsAnalyzed = false;
BBI.NonPredSize = 0;
- NumIfConvBBs++;
+ ++NumIfConvBBs;
}
/// CopyAndPredicateBlock - Copy and predicate instructions from source BB to
/// the destination block. Skip end of block branches if IgnoreBr is true.
void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
SmallVectorImpl<MachineOperand> &Cond,
+ SmallSet<unsigned, 4> &Redefs,
bool IgnoreBr) {
MachineFunction &MF = *ToBBI.BB->getParent();
for (MachineBasicBlock::iterator I = FromBBI.BB->begin(),
E = FromBBI.BB->end(); I != E; ++I) {
const TargetInstrDesc &TID = I->getDesc();
- bool isPredicated = TII->isPredicated(I);
// Do not copy the end of the block branches.
- if (IgnoreBr && !isPredicated && TID.isBranch())
+ if (IgnoreBr && TID.isBranch())
break;
MachineInstr *MI = MF.CloneMachineInstr(I);
ToBBI.BB->insert(ToBBI.BB->end(), MI);
ToBBI.NonPredSize++;
- if (!isPredicated)
+ if (!TII->isPredicated(I) && !MI->isDebugValue()) {
if (!TII->PredicateInstruction(MI, Cond)) {
#ifndef NDEBUG
dbgs() << "Unable to predicate " << *I << "!\n";
#endif
llvm_unreachable(0);
}
+ }
+
+ // If the predicated instruction now redefines a register as the result of
+ // if-conversion, add an implicit kill.
+ UpdatePredRedefs(MI, Redefs, TRI, true);
}
- std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
- FromBBI.BB->succ_end());
- MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
- MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : NULL;
+ if (!IgnoreBr) {
+ std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
+ FromBBI.BB->succ_end());
+ MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
+ MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : NULL;
- for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
- MachineBasicBlock *Succ = Succs[i];
- // Fallthrough edge can't be transferred.
- if (Succ == FallThrough)
- continue;
- ToBBI.BB->addSuccessor(Succ);
+ for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
+ MachineBasicBlock *Succ = Succs[i];
+ // Fallthrough edge can't be transferred.
+ if (Succ == FallThrough)
+ continue;
+ ToBBI.BB->addSuccessor(Succ);
+ }
}
std::copy(FromBBI.Predicate.begin(), FromBBI.Predicate.end(),
@@ -1203,25 +1403,18 @@ void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
ToBBI.ClobbersPred |= FromBBI.ClobbersPred;
ToBBI.IsAnalyzed = false;
- NumDupBBs++;
+ ++NumDupBBs;
}
/// MergeBlocks - Move all instructions from FromBB to the end of ToBB.
-///
-void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI) {
+/// This will leave FromBB as an empty block, so remove all of its
+/// successor edges except for the fall-through edge. If AddEdges is true,
+/// i.e., when FromBBI's branch is being moved, add those successor edges to
+/// ToBBI.
+void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges) {
ToBBI.BB->splice(ToBBI.BB->end(),
FromBBI.BB, FromBBI.BB->begin(), FromBBI.BB->end());
- // Redirect all branches to FromBB to ToBB.
- std::vector<MachineBasicBlock *> Preds(FromBBI.BB->pred_begin(),
- FromBBI.BB->pred_end());
- for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
- MachineBasicBlock *Pred = Preds[i];
- if (Pred == ToBBI.BB)
- continue;
- Pred->ReplaceUsesOfBlockWith(FromBBI.BB, ToBBI.BB);
- }
-
std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
FromBBI.BB->succ_end());
MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
@@ -1233,7 +1426,8 @@ void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI) {
if (Succ == FallThrough)
continue;
FromBBI.BB->removeSuccessor(Succ);
- ToBBI.BB->addSuccessor(Succ);
+ if (AddEdges)
+ ToBBI.BB->addSuccessor(Succ);
}
// Now FromBBI always falls through to the next block!
diff --git a/contrib/llvm/lib/CodeGen/InlineSpiller.cpp b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
new file mode 100644
index 0000000..12adcaa
--- /dev/null
+++ b/contrib/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -0,0 +1,408 @@
+//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The inline spiller modifies the machine function directly instead of
+// inserting spills and restores in VirtRegMap.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "spiller"
+#include "Spiller.h"
+#include "VirtRegMap.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+class InlineSpiller : public Spiller {
+ MachineFunction &mf_;
+ LiveIntervals &lis_;
+ VirtRegMap &vrm_;
+ MachineFrameInfo &mfi_;
+ MachineRegisterInfo &mri_;
+ const TargetInstrInfo &tii_;
+ const TargetRegisterInfo &tri_;
+ const BitVector reserved_;
+
+ // Variables that are valid during spill(), but used by multiple methods.
+ LiveInterval *li_;
+ std::vector<LiveInterval*> *newIntervals_;
+ const TargetRegisterClass *rc_;
+ int stackSlot_;
+ const SmallVectorImpl<LiveInterval*> *spillIs_;
+
+ // Values of the current interval that can potentially remat.
+ SmallPtrSet<VNInfo*, 8> reMattable_;
+
+ // Values in reMattable_ that failed to remat at some point.
+ SmallPtrSet<VNInfo*, 8> usedValues_;
+
+ ~InlineSpiller() {}
+
+public:
+ InlineSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
+ : mf_(*mf), lis_(*lis), vrm_(*vrm),
+ mfi_(*mf->getFrameInfo()),
+ mri_(mf->getRegInfo()),
+ tii_(*mf->getTarget().getInstrInfo()),
+ tri_(*mf->getTarget().getRegisterInfo()),
+ reserved_(tri_.getReservedRegs(mf_)) {}
+
+ void spill(LiveInterval *li,
+ std::vector<LiveInterval*> &newIntervals,
+ SmallVectorImpl<LiveInterval*> &spillIs,
+ SlotIndex *earliestIndex);
+
+private:
+ bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
+ SlotIndex UseIdx);
+ bool reMaterializeFor(MachineBasicBlock::iterator MI);
+ void reMaterializeAll();
+
+ bool foldMemoryOperand(MachineBasicBlock::iterator MI,
+ const SmallVectorImpl<unsigned> &Ops);
+ void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
+ void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
+};
+}
+
+namespace llvm {
+Spiller *createInlineSpiller(MachineFunction *mf,
+ LiveIntervals *lis,
+ const MachineLoopInfo *mli,
+ VirtRegMap *vrm) {
+ return new InlineSpiller(mf, lis, vrm);
+}
+}
+
+/// allUsesAvailableAt - Return true if all registers used by OrigMI at
+/// OrigIdx are also available with the same value at UseIdx.
+bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
+ SlotIndex OrigIdx,
+ SlotIndex UseIdx) {
+ OrigIdx = OrigIdx.getUseIndex();
+ UseIdx = UseIdx.getUseIndex();
+ for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = OrigMI->getOperand(i);
+ if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
+ continue;
+ // Reserved registers are OK.
+ if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
+ continue;
+ // We don't want to move any defs.
+ if (MO.isDef())
+ return false;
+ // We cannot depend on virtual registers in spillIs_. They will be spilled.
+ for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
+ if ((*spillIs_)[si]->reg == MO.getReg())
+ return false;
+
+ LiveInterval &LI = lis_.getInterval(MO.getReg());
+ const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
+ if (!OVNI)
+ continue;
+ if (OVNI != LI.getVNInfoAt(UseIdx))
+ return false;
+ }
+ return true;
+}
+
+/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
+/// reloading it.
+bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
+ SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
+ VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
+ if (!OrigVNI) {
+ DEBUG(dbgs() << "\tadding <undef> flags: ");
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
+ MO.setIsUndef();
+ }
+ DEBUG(dbgs() << UseIdx << '\t' << *MI);
+ return true;
+ }
+ if (!reMattable_.count(OrigVNI)) {
+ DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
+ << UseIdx << '\t' << *MI);
+ return false;
+ }
+ MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
+ if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
+ usedValues_.insert(OrigVNI);
+ DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
+ return false;
+ }
+
+ // If the instruction also writes li_->reg, it had better not require the same
+ // register for uses and defs.
+ bool Reads, Writes;
+ SmallVector<unsigned, 8> Ops;
+ tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
+ if (Writes) {
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(Ops[i]);
+ if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
+ usedValues_.insert(OrigVNI);
+ DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
+ return false;
+ }
+ }
+ }
+
+ // Allocate a new register for the remat.
+ unsigned NewVReg = mri_.createVirtualRegister(rc_);
+ vrm_.grow();
+ LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
+ NewLI.markNotSpillable();
+ newIntervals_->push_back(&NewLI);
+
+ // Finally we can rematerialize OrigMI before MI.
+ MachineBasicBlock &MBB = *MI->getParent();
+ tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
+ MachineBasicBlock::iterator RematMI = MI;
+ SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
+ DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' << *RematMI);
+
+ // Replace operands
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(Ops[i]);
+ if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
+ MO.setReg(NewVReg);
+ MO.setIsKill();
+ }
+ }
+ DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);
+
+ VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
+ lis_.getVNInfoAllocator());
+ NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
+ DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
+ return true;
+}
+
+/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
+/// and trim the live ranges after.
+void InlineSpiller::reMaterializeAll() {
+ // Do a quick scan of the interval values to find if any are remattable.
+ reMattable_.clear();
+ usedValues_.clear();
+ for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
+ E = li_->vni_end(); I != E; ++I) {
+ VNInfo *VNI = *I;
+ if (VNI->isUnused() || !VNI->isDefAccurate())
+ continue;
+ MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
+ if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
+ continue;
+ reMattable_.insert(VNI);
+ }
+
+ // Often, no defs are remattable.
+ if (reMattable_.empty())
+ return;
+
+ // Try to remat before all uses of li_->reg.
+ bool anyRemat = false;
+ for (MachineRegisterInfo::use_nodbg_iterator
+ RI = mri_.use_nodbg_begin(li_->reg);
+ MachineInstr *MI = RI.skipInstruction();)
+ anyRemat |= reMaterializeFor(MI);
+
+ if (!anyRemat)
+ return;
+
+ // Remove any values that were completely rematted.
+ bool anyRemoved = false;
+ for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
+ E = reMattable_.end(); I != E; ++I) {
+ VNInfo *VNI = *I;
+ if (VNI->hasPHIKill() || usedValues_.count(VNI))
+ continue;
+ MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
+ DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
+ lis_.RemoveMachineInstrFromMaps(DefMI);
+ vrm_.RemoveMachineInstrFromMaps(DefMI);
+ DefMI->eraseFromParent();
+ li_->removeValNo(VNI);
+ anyRemoved = true;
+ }
+
+ if (!anyRemoved)
+ return;
+
+ // Removing values may leave debug uses at points where li_ is no longer live.
+ for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
+ MachineInstr *MI = RI.skipInstruction();) {
+ if (!MI->isDebugValue())
+ continue;
+ // Try to preserve the debug value if li_ is live immediately after it.
+ MachineBasicBlock::iterator NextMI = MI;
+ ++NextMI;
+ if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
+ SlotIndex NearIdx = lis_.getInstructionIndex(NextMI);
+ if (li_->liveAt(NearIdx))
+ continue;
+ }
+ DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
+ MI->eraseFromParent();
+ }
+}
+
+/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
+/// Return true on success, and MI will be erased.
+bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
+ const SmallVectorImpl<unsigned> &Ops) {
+ // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
+ // operands.
+ SmallVector<unsigned, 8> FoldOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ unsigned Idx = Ops[i];
+ MachineOperand &MO = MI->getOperand(Idx);
+ if (MO.isImplicit())
+ continue;
+ // FIXME: Teach targets to deal with subregs.
+ if (MO.getSubReg())
+ return false;
+ // Tied use operands should not be passed to foldMemoryOperand.
+ if (!MI->isRegTiedToDefOperand(Idx))
+ FoldOps.push_back(Idx);
+ }
+
+ MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
+ if (!FoldMI)
+ return false;
+ lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
+ vrm_.addSpillSlotUse(stackSlot_, FoldMI);
+ MI->eraseFromParent();
+ DEBUG(dbgs() << "\tfolded: " << *FoldMI);
+ return true;
+}
+
+/// insertReload - Insert a reload of NewLI.reg before MI.
+void InlineSpiller::insertReload(LiveInterval &NewLI,
+ MachineBasicBlock::iterator MI) {
+ MachineBasicBlock &MBB = *MI->getParent();
+ SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
+ tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
+ --MI; // Point to load instruction.
+ SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
+ vrm_.addSpillSlotUse(stackSlot_, MI);
+ DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
+ VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
+ lis_.getVNInfoAllocator());
+ NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
+}
+
+/// insertSpill - Insert a spill of NewLI.reg after MI.
+void InlineSpiller::insertSpill(LiveInterval &NewLI,
+ MachineBasicBlock::iterator MI) {
+ MachineBasicBlock &MBB = *MI->getParent();
+ SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
+ tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
+ --MI; // Point to store instruction.
+ SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
+ vrm_.addSpillSlotUse(stackSlot_, MI);
+ DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
+ VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
+ lis_.getVNInfoAllocator());
+ NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
+}
+
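[Editor's note] Taken together, insertReload and insertSpill bracket a single instruction with a tiny live range for the fresh virtual register: the reload range covers [LoadIdx, Idx) up to the use, and the spill range covers [Idx, StoreIdx) from the def. Schematically, with hypothetical slot numbers and stack slot ss#N:

    // 20: %newvreg = load <ss#N>       ; inserted reload, NewLI += [20, 24)
    // 24: ... use/def %newvreg ...     ; the rewritten instruction at Idx
    // 28: store %newvreg, <ss#N>       ; inserted spill,  NewLI += [24, 28)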
+void InlineSpiller::spill(LiveInterval *li,
+ std::vector<LiveInterval*> &newIntervals,
+ SmallVectorImpl<LiveInterval*> &spillIs,
+ SlotIndex *earliestIndex) {
+ DEBUG(dbgs() << "Inline spilling " << *li << "\n");
+ assert(li->isSpillable() && "Attempting to spill already spilled value.");
+ assert(!li->isStackSlot() && "Trying to spill a stack slot.");
+
+ li_ = li;
+ newIntervals_ = &newIntervals;
+ rc_ = mri_.getRegClass(li->reg);
+ spillIs_ = &spillIs;
+
+ reMaterializeAll();
+
+ // Remat may handle everything.
+ if (li_->empty())
+ return;
+
+ stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);
+
+ // Iterate over the instructions using the register.
+ for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
+ MachineInstr *MI = RI.skipInstruction();) {
+
+ // Debug values are not allowed to affect codegen.
+ if (MI->isDebugValue()) {
+ // Modify DBG_VALUE now that the value is in a spill slot.
+ uint64_t Offset = MI->getOperand(1).getImm();
+ const MDNode *MDPtr = MI->getOperand(2).getMetadata();
+ DebugLoc DL = MI->getDebugLoc();
+ if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
+ Offset, MDPtr, DL)) {
+ DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
+ MachineBasicBlock *MBB = MI->getParent();
+ MBB->insert(MBB->erase(MI), NewDV);
+ } else {
+ DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
+ MI->eraseFromParent();
+ }
+ continue;
+ }
+
+ // Analyze instruction.
+ bool Reads, Writes;
+ SmallVector<unsigned, 8> Ops;
+ tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);
+
+ // Attempt to fold memory ops.
+ if (foldMemoryOperand(MI, Ops))
+ continue;
+
+ // Allocate interval around instruction.
+ // FIXME: Infer regclass from instruction alone.
+ unsigned NewVReg = mri_.createVirtualRegister(rc_);
+ vrm_.grow();
+ LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
+ NewLI.markNotSpillable();
+
+ if (Reads)
+ insertReload(NewLI, MI);
+
+ // Rewrite instruction operands.
+ bool hasLiveDef = false;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(Ops[i]);
+ MO.setReg(NewVReg);
+ if (MO.isUse()) {
+ if (!MI->isRegTiedToDefOperand(Ops[i]))
+ MO.setIsKill();
+ } else {
+ if (!MO.isDead())
+ hasLiveDef = true;
+ }
+ }
+
+ // FIXME: Use a second vreg if instruction has no tied ops.
+ if (Writes && hasLiveDef)
+ insertSpill(NewLI, MI);
+
+ DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
+ newIntervals.push_back(&NewLI);
+ }
+}
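[Editor's note] For orientation, this is how a register allocator would drive the new spiller through the factory declared above. A sketch only, assuming MF, LIS, MLI, VRM and the LiveInterval to spill are already in hand:

    // Obtain a spiller; the signatures match the declarations in this file.
    Spiller *spiller = createInlineSpiller(&MF, &LIS, &MLI, &VRM);

    std::vector<LiveInterval*> newIntervals;  // receives ranges created per use/def
    SmallVector<LiveInterval*, 8> spillIs;    // other intervals queued for spilling
    spiller->spill(&interval, newIntervals, spillIs, 0 /*earliestIndex*/);
    delete spiller;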
diff --git a/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp b/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
index 63bb5f2..03ae214 100644
--- a/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/IntrinsicLowering.cpp
@@ -16,6 +16,7 @@
#include "llvm/Module.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
@@ -314,21 +315,22 @@ static Value *LowerCTLZ(LLVMContext &Context, Value *V, Instruction *IP) {
static void ReplaceFPIntrinsicWithCall(CallInst *CI, const char *Fname,
const char *Dname,
const char *LDname) {
- switch (CI->getOperand(1)->getType()->getTypeID()) {
+ CallSite CS(CI);
+ switch (CI->getArgOperand(0)->getType()->getTypeID()) {
default: llvm_unreachable("Invalid type in intrinsic");
case Type::FloatTyID:
- ReplaceCallWith(Fname, CI, CI->op_begin() + 1, CI->op_end(),
+ ReplaceCallWith(Fname, CI, CS.arg_begin(), CS.arg_end(),
Type::getFloatTy(CI->getContext()));
break;
case Type::DoubleTyID:
- ReplaceCallWith(Dname, CI, CI->op_begin() + 1, CI->op_end(),
+ ReplaceCallWith(Dname, CI, CS.arg_begin(), CS.arg_end(),
Type::getDoubleTy(CI->getContext()));
break;
case Type::X86_FP80TyID:
case Type::FP128TyID:
case Type::PPC_FP128TyID:
- ReplaceCallWith(LDname, CI, CI->op_begin() + 1, CI->op_end(),
- CI->getOperand(1)->getType());
+ ReplaceCallWith(LDname, CI, CS.arg_begin(), CS.arg_end(),
+ CI->getArgOperand(0)->getType());
break;
}
}
@@ -340,6 +342,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
const Function *Callee = CI->getCalledFunction();
assert(Callee && "Cannot lower an indirect call!");
+ CallSite CS(CI);
switch (Callee->getIntrinsicID()) {
case Intrinsic::not_intrinsic:
report_fatal_error("Cannot lower a call to a non-intrinsic function '"+
@@ -353,7 +356,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
// by the lowerinvoke pass. In both cases, the right thing to do is to
// convert the call to an explicit setjmp or longjmp call.
case Intrinsic::setjmp: {
- Value *V = ReplaceCallWith("setjmp", CI, CI->op_begin() + 1, CI->op_end(),
+ Value *V = ReplaceCallWith("setjmp", CI, CS.arg_begin(), CS.arg_end(),
Type::getInt32Ty(Context));
if (!CI->getType()->isVoidTy())
CI->replaceAllUsesWith(V);
@@ -365,32 +368,32 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
case Intrinsic::longjmp: {
- ReplaceCallWith("longjmp", CI, CI->op_begin() + 1, CI->op_end(),
+ ReplaceCallWith("longjmp", CI, CS.arg_begin(), CS.arg_end(),
Type::getVoidTy(Context));
break;
}
case Intrinsic::siglongjmp: {
// Insert the call to abort
- ReplaceCallWith("abort", CI, CI->op_end(), CI->op_end(),
+ ReplaceCallWith("abort", CI, CS.arg_end(), CS.arg_end(),
Type::getVoidTy(Context));
break;
}
case Intrinsic::ctpop:
- CI->replaceAllUsesWith(LowerCTPOP(Context, CI->getOperand(1), CI));
+ CI->replaceAllUsesWith(LowerCTPOP(Context, CI->getArgOperand(0), CI));
break;
case Intrinsic::bswap:
- CI->replaceAllUsesWith(LowerBSWAP(Context, CI->getOperand(1), CI));
+ CI->replaceAllUsesWith(LowerBSWAP(Context, CI->getArgOperand(0), CI));
break;
case Intrinsic::ctlz:
- CI->replaceAllUsesWith(LowerCTLZ(Context, CI->getOperand(1), CI));
+ CI->replaceAllUsesWith(LowerCTLZ(Context, CI->getArgOperand(0), CI));
break;
case Intrinsic::cttz: {
// cttz(x) -> ctpop(~X & (X-1))
- Value *Src = CI->getOperand(1);
+ Value *Src = CI->getArgOperand(0);
Value *NotSrc = Builder.CreateNot(Src);
NotSrc->setName(Src->getName() + ".not");
Value *SrcM1 = ConstantInt::get(Src->getType(), 1);
@@ -451,37 +454,37 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
case Intrinsic::memcpy: {
const IntegerType *IntPtr = TD.getIntPtrType(Context);
- Value *Size = Builder.CreateIntCast(CI->getOperand(3), IntPtr,
+ Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
- Ops[0] = CI->getOperand(1);
- Ops[1] = CI->getOperand(2);
+ Ops[0] = CI->getArgOperand(0);
+ Ops[1] = CI->getArgOperand(1);
Ops[2] = Size;
- ReplaceCallWith("memcpy", CI, Ops, Ops+3, CI->getOperand(1)->getType());
+ ReplaceCallWith("memcpy", CI, Ops, Ops+3, CI->getArgOperand(0)->getType());
break;
}
case Intrinsic::memmove: {
const IntegerType *IntPtr = TD.getIntPtrType(Context);
- Value *Size = Builder.CreateIntCast(CI->getOperand(3), IntPtr,
+ Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
- Ops[0] = CI->getOperand(1);
- Ops[1] = CI->getOperand(2);
+ Ops[0] = CI->getArgOperand(0);
+ Ops[1] = CI->getArgOperand(1);
Ops[2] = Size;
- ReplaceCallWith("memmove", CI, Ops, Ops+3, CI->getOperand(1)->getType());
+ ReplaceCallWith("memmove", CI, Ops, Ops+3, CI->getArgOperand(0)->getType());
break;
}
case Intrinsic::memset: {
const IntegerType *IntPtr = TD.getIntPtrType(Context);
- Value *Size = Builder.CreateIntCast(CI->getOperand(3), IntPtr,
+ Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
- Ops[0] = CI->getOperand(1);
+ Ops[0] = CI->getArgOperand(0);
// Extend the amount to i32.
- Ops[1] = Builder.CreateIntCast(CI->getOperand(2), Type::getInt32Ty(Context),
+ Ops[1] = Builder.CreateIntCast(CI->getArgOperand(1), Type::getInt32Ty(Context),
/* isSigned */ false);
Ops[2] = Size;
- ReplaceCallWith("memset", CI, Ops, Ops+3, CI->getOperand(1)->getType());
+ ReplaceCallWith("memset", CI, Ops, Ops+3, CI->getArgOperand(0)->getType());
break;
}
case Intrinsic::sqrt: {
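[Editor's note] Every change in this file is the same index shift: at this point in the tree a CallInst still stores the callee in operand 0, so argument i lives at operand i+1, and the CallSite/getArgOperand interface simply hides that bias. Illustratively, for a hypothetical memcpy call:

    // call void @llvm.memcpy(i8* %d, i8* %s, i64 %n, ...)
    //   CI->getArgOperand(0) == CI->getOperand(1)   // %d
    //   CI->getArgOperand(1) == CI->getOperand(2)   // %s
    //   CI->getArgOperand(2) == CI->getOperand(3)   // %n
    //   CS.arg_begin()       == CI->op_begin() + 1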
diff --git a/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp b/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
index b584704..bf3137e 100644
--- a/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/contrib/llvm/lib/CodeGen/LLVMTargetMachine.cpp
@@ -329,12 +329,15 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
if (OptLevel != CodeGenOpt::None)
PM.add(createOptimizePHIsPass());
- // Delete dead machine instructions regardless of optimization level.
- PM.add(createDeadMachineInstructionElimPass());
- printAndVerify(PM, "After codegen DCE pass",
- /* allowDoubleDefs= */ true);
-
if (OptLevel != CodeGenOpt::None) {
+ // With optimization, dead code should already be eliminated. However
+ // there is one known exception: lowered code for arguments that are only
+ // used by tail calls, where the tail calls reuse the incoming stack
+ // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
+ PM.add(createDeadMachineInstructionElimPass());
+ printAndVerify(PM, "After codegen DCE pass",
+ /* allowDoubleDefs= */ true);
+
PM.add(createOptimizeExtsPass());
if (!DisableMachineLICM)
PM.add(createMachineLICMPass());
@@ -358,7 +361,7 @@ bool LLVMTargetMachine::addCommonCodeGenPasses(PassManagerBase &PM,
/* allowDoubleDefs= */ true);
// Perform register allocation.
- PM.add(createRegisterAllocator());
+ PM.add(createRegisterAllocator(OptLevel));
printAndVerify(PM, "After Register Allocation");
// Perform stack slot coloring and post-ra machine LICM.
diff --git a/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp b/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
index 03b4eab..b9527fa 100644
--- a/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
+++ b/contrib/llvm/lib/CodeGen/LatencyPriorityQueue.cpp
@@ -118,7 +118,7 @@ void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
SUnit *LatencyPriorityQueue::pop() {
if (empty()) return NULL;
std::vector<SUnit *>::iterator Best = Queue.begin();
- for (std::vector<SUnit *>::iterator I = next(Queue.begin()),
+ for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
E = Queue.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
diff --git a/contrib/llvm/lib/CodeGen/LiveInterval.cpp b/contrib/llvm/lib/CodeGen/LiveInterval.cpp
index 025ad05..ad57284 100644
--- a/contrib/llvm/lib/CodeGen/LiveInterval.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveInterval.cpp
@@ -68,6 +68,37 @@ bool LiveInterval::liveBeforeAndAt(SlotIndex I) const {
return r->end == I;
}
+/// killedAt - Return true if a live range ends at index. Note that the kill
+/// point is not contained in the half-open live range. It is usually the
+/// getDefIndex() slot following its last use.
+bool LiveInterval::killedAt(SlotIndex I) const {
+ Ranges::const_iterator r = std::lower_bound(ranges.begin(), ranges.end(), I);
+
+ // Now r points to the first interval with start >= I, or ranges.end().
+ if (r == ranges.begin())
+ return false;
+
+ --r;
+ // Now r points to the last interval with end <= I.
+ // r->end is the kill point.
+ return r->end == I;
+}
+
+/// killedInRange - Return true if the interval has kills in [Start,End).
+bool LiveInterval::killedInRange(SlotIndex Start, SlotIndex End) const {
+ Ranges::const_iterator r =
+ std::lower_bound(ranges.begin(), ranges.end(), End);
+
+ // Now r points to the first interval with start >= End, or ranges.end().
+ if (r == ranges.begin())
+ return false;
+
+ --r;
+ // Now r points to the last interval with end <= End.
+ // r->end is the kill point.
+ return r->end >= Start && r->end < End;
+}
+
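[Editor's note] Both predicates lean on live ranges being half-open [start, end): the kill point is the end slot itself, which the range does not contain. A self-contained model of the lookup, with plain ints standing in for SlotIndex:

    #include <algorithm>
    #include <vector>

    struct Range { int start, end; };              // half-open [start, end)
    static bool operator<(const Range &R, int I) { return R.start < I; }

    // Model of LiveInterval::killedAt over sorted, non-overlapping ranges:
    // a value is killed at I exactly when some range ends at I.
    static bool killedAt(const std::vector<Range> &Rs, int I) {
      std::vector<Range>::const_iterator r =
          std::lower_bound(Rs.begin(), Rs.end(), I);  // first start >= I
      if (r == Rs.begin())
        return false;
      --r;                                           // last range with start < I
      return r->end == I;
    }

    // E.g. for ranges {[0,4), [6,10)}: killedAt(4) is true, killedAt(8) false.

killedInRange is the same probe with End as the search key, accepting any range end that falls inside [Start, End).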
// overlaps - Return true if the intersection of the two live intervals is
// not empty.
//
@@ -88,6 +119,7 @@ bool LiveInterval::liveBeforeAndAt(SlotIndex I) const {
//
bool LiveInterval::overlapsFrom(const LiveInterval& other,
const_iterator StartPos) const {
+ assert(!empty() && "empty interval");
const_iterator i = begin();
const_iterator ie = end();
const_iterator j = StartPos;
@@ -130,16 +162,8 @@ bool LiveInterval::overlapsFrom(const LiveInterval& other,
/// by [Start, End).
bool LiveInterval::overlaps(SlotIndex Start, SlotIndex End) const {
assert(Start < End && "Invalid range");
- const_iterator I = begin();
- const_iterator E = end();
- const_iterator si = std::upper_bound(I, E, Start);
- const_iterator ei = std::upper_bound(I, E, End);
- if (si != ei)
- return true;
- if (si == I)
- return false;
- --si;
- return si->contains(Start);
+ const_iterator I = std::lower_bound(begin(), end(), End);
+ return I != begin() && (--I)->end > Start;
}
/// extendIntervalEndTo - This method is used when we want to extend the range
@@ -149,7 +173,6 @@ bool LiveInterval::overlaps(SlotIndex Start, SlotIndex End) const {
void LiveInterval::extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd) {
assert(I != ranges.end() && "Not a valid interval!");
VNInfo *ValNo = I->valno;
- SlotIndex OldEnd = I->end;
// Search for the first interval that we can't merge with.
Ranges::iterator MergeTo = next(I);
@@ -163,9 +186,6 @@ void LiveInterval::extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd) {
// Erase any dead ranges.
ranges.erase(next(I), MergeTo);
- // Update kill info.
- ValNo->removeKills(OldEnd, I->end.getPrevSlot());
-
// If the newly formed range now touches the range after it and if they have
// the same value number, merge the two ranges into one range.
Ranges::iterator Next = next(I);
@@ -245,9 +265,6 @@ LiveInterval::addRangeFrom(LiveRange LR, iterator From) {
// endpoint as well.
if (End > it->end)
extendIntervalEndTo(it, End);
- else if (End < it->end)
- // Overlapping intervals, there might have been a kill here.
- it->valno->removeKill(End);
return it;
}
} else {
@@ -288,7 +305,6 @@ void LiveInterval::removeRange(SlotIndex Start, SlotIndex End,
VNInfo *ValNo = I->valno;
if (I->start == Start) {
if (I->end == End) {
- ValNo->removeKills(Start, End);
if (RemoveDeadValNo) {
// Check if val# is dead.
bool isDead = true;
@@ -296,7 +312,7 @@ void LiveInterval::removeRange(SlotIndex Start, SlotIndex End,
if (II != I && II->valno == ValNo) {
isDead = false;
break;
- }
+ }
if (isDead) {
// Now that ValNo is dead, remove it. If it is the largest value
// number, just nuke it (and any other deleted values neighboring it),
@@ -320,7 +336,6 @@ void LiveInterval::removeRange(SlotIndex Start, SlotIndex End,
// Otherwise if the span we are removing is at the end of the LiveRange,
// adjust the other way.
if (I->end == End) {
- ValNo->removeKills(Start, End);
I->end = Start;
return;
}
@@ -529,6 +544,7 @@ void LiveInterval::MergeValueInAsValue(
SmallVector<VNInfo*, 4> ReplacedValNos;
iterator IP = begin();
for (const_iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
+ assert(I->valno == RHS.getValNumInfo(I->valno->id) && "Bad VNInfo");
if (I->valno != RHSValNo)
continue;
SlotIndex Start = I->start, End = I->end;
@@ -823,10 +839,12 @@ void LiveInterval::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
else {
OS << " = ";
for (LiveInterval::Ranges::const_iterator I = ranges.begin(),
- E = ranges.end(); I != E; ++I)
- OS << *I;
+ E = ranges.end(); I != E; ++I) {
+ OS << *I;
+ assert(I->valno == getValNumInfo(I->valno->id) && "Bad VNInfo");
+ }
}
-
+
// Print value number info.
if (getNumValNums()) {
OS << " ";
@@ -843,21 +861,10 @@ void LiveInterval::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
OS << "?";
else
OS << vni->def;
- unsigned ee = vni->kills.size();
- if (ee || vni->hasPHIKill()) {
- OS << "-(";
- for (unsigned j = 0; j != ee; ++j) {
- OS << vni->kills[j];
- if (j != ee-1)
- OS << " ";
- }
- if (vni->hasPHIKill()) {
- if (ee)
- OS << " ";
- OS << "phi";
- }
- OS << ")";
- }
+ if (vni->hasPHIKill())
+ OS << "-phikill";
+ if (vni->hasRedefByEC())
+ OS << "-ec";
}
}
}
diff --git a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
index a6d38ad..194d03d 100644
--- a/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -50,9 +50,6 @@ using namespace llvm;
static cl::opt<bool> DisableReMat("disable-rematerialization",
cl::init(false), cl::Hidden);
-static cl::opt<bool> EnableFastSpilling("fast-spill",
- cl::init(false), cl::Hidden);
-
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds , "Number of loads/stores folded into instructions");
STATISTIC(numSplits , "Number of intervals split");
@@ -90,8 +87,8 @@ void LiveIntervals::releaseMemory() {
r2iMap_.clear();
- // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
- VNInfoAllocator.DestroyAll();
+ // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
+ VNInfoAllocator.Reset();
while (!CloneMIs.empty()) {
MachineInstr *MI = CloneMIs.back();
CloneMIs.pop_back();
@@ -195,6 +192,10 @@ bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
if (SrcReg == li.reg || DstReg == li.reg)
continue;
+ if (MI.isCopy())
+ if (MI.getOperand(0).getReg() == li.reg ||
+ MI.getOperand(1).getReg() == li.reg)
+ continue;
// Check for operands using reg
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
@@ -218,10 +219,7 @@ bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
return false;
}
-/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
-/// it checks for sub-register reference and it can check use as well.
-bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
- unsigned Reg, bool CheckUse,
+bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
@@ -239,12 +237,11 @@ bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
MachineOperand& MO = MI->getOperand(i);
if (!MO.isReg())
continue;
- if (MO.isUse() && !CheckUse)
- continue;
unsigned PhysReg = MO.getReg();
- if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
+ if (PhysReg == 0 || PhysReg == Reg ||
+ TargetRegisterInfo::isVirtualRegister(PhysReg))
continue;
- if (tri_->isSubRegister(Reg, PhysReg))
+ if (tri_->regsOverlap(Reg, PhysReg))
return true;
}
}
@@ -272,7 +269,7 @@ bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
if (MO.getReg() == Reg && MO.isDef()) {
assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
MI.getOperand(MOIdx).getSubReg() &&
- MO.getSubReg());
+ (MO.getSubReg() || MO.isImplicit()));
return true;
}
}
@@ -328,9 +325,10 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
MachineInstr *CopyMI = NULL;
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
- tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
+ if (mi->isCopyLike() ||
+ tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
CopyMI = mi;
+ }
VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
VNInfoAllocator);
@@ -356,7 +354,6 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
LiveRange LR(defIndex, killIdx, ValNo);
interval.addRange(LR);
DEBUG(dbgs() << " +" << LR << "\n");
- ValNo->addKill(killIdx);
return;
}
}
@@ -376,7 +373,6 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// valno in the killing blocks.
assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
DEBUG(dbgs() << " phi-join");
- ValNo->addKill(indexes_->getTerminatorGap(mbb));
ValNo->setHasPHIKill(true);
} else {
// Iterate over all of the blocks that the variable is completely
@@ -407,7 +403,6 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
}
LiveRange LR(Start, killIdx, ValNo);
interval.addRange(LR);
- ValNo->addKill(killIdx);
DEBUG(dbgs() << " +" << LR);
}
@@ -434,11 +429,6 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// are actually two values in the live interval. Because of this we
// need to take the LiveRegion that defines this register and split it
// into two values.
- // Two-address vregs should always only be redefined once. This means
- // that at this point, there should be exactly one value number in it.
- assert((PartReDef || interval.containsOneValue()) &&
- "Unexpected 2-addr liveint!");
- SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
SlotIndex RedefIndex = MIIdx.getDefIndex();
if (MO.isEarlyClobber())
RedefIndex = MIIdx.getUseIndex();
@@ -446,8 +436,9 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
const LiveRange *OldLR =
interval.getLiveRangeContaining(RedefIndex.getUseIndex());
VNInfo *OldValNo = OldLR->valno;
+ SlotIndex DefIndex = OldValNo->def.getDefIndex();
- // Delete the initial value, which should be short and continuous,
+ // Delete the previous value, which should be short and continuous,
// because the 2-addr copy must be in the same MBB as the redef.
interval.removeRange(DefIndex, RedefIndex);
@@ -464,15 +455,14 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (PartReDef &&
- tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
+ if (PartReDef && (mi->isCopyLike() ||
+ tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)))
OldValNo->setCopy(&*mi);
// Add the new live interval which replaces the range for the input copy.
LiveRange LR(DefIndex, RedefIndex, ValNo);
DEBUG(dbgs() << " replace range with " << LR);
interval.addRange(LR);
- ValNo->addKill(RedefIndex);
// If this redefinition is dead, we need to add a dummy unit live
// range covering the def slot.
@@ -496,7 +486,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
VNInfo *ValNo;
MachineInstr *CopyMI = NULL;
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
+ if (mi->isCopyLike() ||
tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
CopyMI = mi;
ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
@@ -504,7 +494,6 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
SlotIndex killIndex = getMBBEndIdx(mbb);
LiveRange LR(defIndex, killIndex, ValNo);
interval.addRange(LR);
- ValNo->addKill(indexes_->getTerminatorGap(mbb));
ValNo->setHasPHIKill(true);
DEBUG(dbgs() << " phi-join +" << LR);
} else {
@@ -600,7 +589,6 @@ exit:
ValNo->setHasRedefByEC(true);
LiveRange LR(start, end, ValNo);
interval.addRange(LR);
- LR.valno->addKill(end);
DEBUG(dbgs() << " +" << LR << '\n');
}
@@ -615,7 +603,7 @@ void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
else if (allocatableRegs_[MO.getReg()]) {
MachineInstr *CopyMI = NULL;
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
+ if (MI->isCopyLike() ||
tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
CopyMI = MI;
handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
@@ -701,7 +689,6 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
LiveRange LR(start, end, vni);
interval.addRange(LR);
- LR.valno->addKill(end);
DEBUG(dbgs() << " +" << LR << '\n');
}
@@ -787,37 +774,6 @@ LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
return NewLI;
}
-/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
-/// copy field and returns the source register that defines it.
-unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
- if (!VNI->getCopy())
- return 0;
-
- if (VNI->getCopy()->isExtractSubreg()) {
- // If it's extracting out of a physical register, return the sub-register.
- unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
- unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
- if (SrcSubReg == DstSubReg)
- // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
- // reg1034 can still be coalesced to EDX.
- return Reg;
- assert(DstSubReg == 0);
- Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
- }
- return Reg;
- } else if (VNI->getCopy()->isInsertSubreg() ||
- VNI->getCopy()->isSubregToReg())
- return VNI->getCopy()->getOperand(2).getReg();
-
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
- return SrcReg;
- llvm_unreachable("Unrecognized copy instruction!");
- return 0;
-}
-
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
@@ -991,22 +947,22 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
if (DefMI && (MRInfo & VirtRegMap::isMod))
return false;
- MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
- : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
+ MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
+ : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
if (fmi) {
// Remember this instruction uses the spill slot.
if (isSS) vrm.addSpillSlotUse(Slot, fmi);
// Attempt to fold the memory reference into the instruction. If
// we can do this, we don't need to insert spill code.
- MachineBasicBlock &MBB = *MI->getParent();
if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
vrm.transferSpillPts(MI, fmi);
vrm.transferRestorePts(MI, fmi);
vrm.transferEmergencySpills(MI, fmi);
ReplaceMachineInstrInMaps(MI, fmi);
- MI = MBB.insert(MBB.erase(MI), fmi);
+ MI->eraseFromParent();
+ MI = fmi;
++numFolds;
return true;
}
@@ -1098,7 +1054,6 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
if (!mop.isReg())
continue;
unsigned Reg = mop.getReg();
- unsigned RegI = Reg;
if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
if (Reg != li.reg)
@@ -1140,26 +1095,8 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
//
// Keep track of whether we replace a use and/or def so that we can
// create the spill interval with the appropriate range.
-
- HasUse = mop.isUse();
- HasDef = mop.isDef();
SmallVector<unsigned, 2> Ops;
- Ops.push_back(i);
- for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
- const MachineOperand &MOj = MI->getOperand(j);
- if (!MOj.isReg())
- continue;
- unsigned RegJ = MOj.getReg();
- if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
- continue;
- if (RegJ == RegI) {
- Ops.push_back(j);
- if (!MOj.isUndef()) {
- HasUse |= MOj.isUse();
- HasDef |= MOj.isDef();
- }
- }
- }
+ tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
// Create a new virtual register for the spill interval.
// Create the new register now so we can map the fold instruction
@@ -1294,16 +1231,7 @@ bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
const VNInfo *VNI,
MachineBasicBlock *MBB,
SlotIndex Idx) const {
- SlotIndex End = getMBBEndIdx(MBB);
- for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
- if (VNI->kills[j].isPHI())
- continue;
-
- SlotIndex KillIdx = VNI->kills[j];
- if (KillIdx > Idx && KillIdx <= End)
- return true;
- }
- return false;
+ return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}
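The rewritten query above no longer walks a per-value kill list; it asks the interval whether any live range ends inside the span from Idx to the block's end. A minimal sketch of such a range-based kill test, assuming a vector of half-open ranges (types simplified, not LLVM's):

    #include <utility>
    #include <vector>

    typedef unsigned SlotIdx;
    typedef std::pair<SlotIdx, SlotIdx> Range; // half-open [start, end)

    // Return true if any live range's end point falls inside [Start, End),
    // i.e. the value dies somewhere in that span.
    bool killedInRange(const std::vector<Range> &Ranges,
                       SlotIdx Start, SlotIdx End) {
      for (std::vector<Range>::const_iterator I = Ranges.begin(),
             E = Ranges.end(); I != E; ++I)
        if (I->second >= Start && I->second < End)
          return true;
      return false;
    }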
/// RewriteInfo - Keep track of machine instrs that will be rewritten
@@ -1312,10 +1240,7 @@ namespace {
struct RewriteInfo {
SlotIndex Index;
MachineInstr *MI;
- bool HasUse;
- bool HasDef;
- RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
- : Index(i), MI(mi), HasUse(u), HasDef(d) {}
+ RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
};
struct RewriteInfoCompare {
@@ -1394,7 +1319,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
// easily see a situation where both registers are reloaded before
// the INSERT_SUBREG and both target registers that would overlap.
continue;
- RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
+ RewriteMIs.push_back(RewriteInfo(index, MI));
}
std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
@@ -1404,18 +1329,11 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
RewriteInfo &rwi = RewriteMIs[i];
++i;
SlotIndex index = rwi.Index;
- bool MIHasUse = rwi.HasUse;
- bool MIHasDef = rwi.HasDef;
MachineInstr *MI = rwi.MI;
// If MI def and/or use the same register multiple times, then there
// are multiple entries.
- unsigned NumUses = MIHasUse;
while (i != e && RewriteMIs[i].MI == MI) {
assert(RewriteMIs[i].Index == index);
- bool isUse = RewriteMIs[i].HasUse;
- if (isUse) ++NumUses;
- MIHasUse |= isUse;
- MIHasDef |= RewriteMIs[i].HasDef;
++i;
}
MachineBasicBlock *MBB = MI->getParent();
@@ -1440,7 +1358,8 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
// = use
// It's better to start a new interval to avoid artificially
// extending the new interval.
- if (MIHasDef && !MIHasUse) {
+ if (MI->readsWritesVirtualRegister(li.reg) ==
+ std::make_pair(false,true)) {
MBBVRegsMap.erase(MBB->getNumber());
ThisVReg = 0;
}
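The readsWritesVirtualRegister calls above (both here and in rewriteInstructionForSpills) collapse the old hand-rolled operand scans into one query returning a (reads, writes) pair, optionally collecting the matching operand indices. A rough sketch of the idea with simplified types, not the real MachineInstr API:

    #include <utility>
    #include <vector>

    struct Op { unsigned Reg; bool IsUse, IsDef, IsUndef; };

    // Scan all operands once; report whether Reg is read and/or written,
    // optionally collecting the matching operand indices.
    std::pair<bool, bool>
    readsWrites(const std::vector<Op> &Ops, unsigned Reg,
                std::vector<unsigned> *Indices = 0) {
      bool Use = false, Def = false;
      for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
        if (Ops[i].Reg != Reg)
          continue;
        if (Indices)
          Indices->push_back(i);
        if (!Ops[i].IsUndef) {
          Use |= Ops[i].IsUse;
          Def |= Ops[i].IsDef;
        }
      }
      return std::make_pair(Use, Def);
    }

The comparison against std::make_pair(false, true) above then reads naturally as "writes the register without reading it".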
@@ -1652,103 +1571,9 @@ LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
}
std::vector<LiveInterval*> LiveIntervals::
-addIntervalsForSpillsFast(const LiveInterval &li,
- const MachineLoopInfo *loopInfo,
- VirtRegMap &vrm) {
- unsigned slot = vrm.assignVirt2StackSlot(li.reg);
-
- std::vector<LiveInterval*> added;
-
- assert(li.isSpillable() && "attempt to spill already spilled interval!");
-
- DEBUG({
- dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
- li.dump();
- dbgs() << '\n';
- });
-
- const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
-
- MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
- while (RI != mri_->reg_end()) {
- MachineInstr* MI = &*RI;
-
- SmallVector<unsigned, 2> Indices;
- bool HasUse = false;
- bool HasDef = false;
-
- for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
- MachineOperand& mop = MI->getOperand(i);
- if (!mop.isReg() || mop.getReg() != li.reg) continue;
-
- HasUse |= MI->getOperand(i).isUse();
- HasDef |= MI->getOperand(i).isDef();
-
- Indices.push_back(i);
- }
-
- if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
- Indices, true, slot, li.reg)) {
- unsigned NewVReg = mri_->createVirtualRegister(rc);
- vrm.grow();
- vrm.assignVirt2StackSlot(NewVReg, slot);
-
- // create a new register for this spill
- LiveInterval &nI = getOrCreateInterval(NewVReg);
- nI.markNotSpillable();
-
- // Rewrite register operands to use the new vreg.
- for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
- E = Indices.end(); I != E; ++I) {
- MI->getOperand(*I).setReg(NewVReg);
-
- if (MI->getOperand(*I).isUse())
- MI->getOperand(*I).setIsKill(true);
- }
-
- // Fill in the new live interval.
- SlotIndex index = getInstructionIndex(MI);
- if (HasUse) {
- LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
- nI.getNextValue(SlotIndex(), 0, false,
- getVNInfoAllocator()));
- DEBUG(dbgs() << " +" << LR);
- nI.addRange(LR);
- vrm.addRestorePoint(NewVReg, MI);
- }
- if (HasDef) {
- LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
- nI.getNextValue(SlotIndex(), 0, false,
- getVNInfoAllocator()));
- DEBUG(dbgs() << " +" << LR);
- nI.addRange(LR);
- vrm.addSpillPoint(NewVReg, true, MI);
- }
-
- added.push_back(&nI);
-
- DEBUG({
- dbgs() << "\t\t\t\tadded new interval: ";
- nI.dump();
- dbgs() << '\n';
- });
- }
-
-
- RI = mri_->reg_begin(li.reg);
- }
-
- return added;
-}
-
-std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
SmallVectorImpl<LiveInterval*> &SpillIs,
const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
-
- if (EnableFastSpilling)
- return addIntervalsForSpillsFast(li, loopInfo, vrm);
-
assert(li.isSpillable() && "attempt to spill already spilled interval!");
DEBUG({
@@ -2184,7 +2009,6 @@ LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
SlotIndex(getInstructionIndex(startInst).getDefIndex()),
startInst, true, getVNInfoAllocator());
VN->setHasPHIKill(true);
- VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
LiveRange LR(
SlotIndex(getInstructionIndex(startInst).getDefIndex()),
getMBBEndIdx(startInst->getParent()), VN);
diff --git a/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp b/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp
index 798b9b9..709e2c6 100644
--- a/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveStackAnalysis.cpp
@@ -35,8 +35,8 @@ void LiveStacks::getAnalysisUsage(AnalysisUsage &AU) const {
}
void LiveStacks::releaseMemory() {
- // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
- VNInfoAllocator.DestroyAll();
+ // Release VNInfo memory regions; VNInfo objects don't need to be dtor'd.
+ VNInfoAllocator.Reset();
S2IMap.clear();
S2RCMap.clear();
}
diff --git a/contrib/llvm/lib/CodeGen/LiveVariables.cpp b/contrib/llvm/lib/CodeGen/LiveVariables.cpp
index 079684e..41b891d 100644
--- a/contrib/llvm/lib/CodeGen/LiveVariables.cpp
+++ b/contrib/llvm/lib/CodeGen/LiveVariables.cpp
@@ -286,7 +286,7 @@ MachineInstr *LiveVariables::FindLastRefOrPartRef(unsigned Reg) {
MachineInstr *LastDef = PhysRegDef[Reg];
MachineInstr *LastUse = PhysRegUse[Reg];
if (!LastDef && !LastUse)
- return false;
+ return 0;
MachineInstr *LastRefOrPartRef = LastUse ? LastUse : LastDef;
unsigned LastRefOrPartRefDist = DistanceMap[LastRefOrPartRef];
@@ -609,7 +609,12 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) {
// Finally, if the last instruction in the block is a return, make sure to
// mark it as using all of the live-out values in the function.
- if (!MBB->empty() && MBB->back().getDesc().isReturn()) {
+ // Things marked both call and return are tail calls; do not do this for
+ // them. A tail callee need not take as inputs the same registers it
+ // produces as outputs, and dependencies for its input registers are
+ // recorded elsewhere.
+ if (!MBB->empty() && MBB->back().getDesc().isReturn()
+ && !MBB->back().getDesc().isCall()) {
MachineInstr *Ret = &MBB->back();
for (MachineRegisterInfo::liveout_iterator
diff --git a/contrib/llvm/lib/CodeGen/LowerSubregs.cpp b/contrib/llvm/lib/CodeGen/LowerSubregs.cpp
index b0348a5..dfd4eae 100644
--- a/contrib/llvm/lib/CodeGen/LowerSubregs.cpp
+++ b/contrib/llvm/lib/CodeGen/LowerSubregs.cpp
@@ -53,15 +53,15 @@ namespace {
bool runOnMachineFunction(MachineFunction&);
private:
- bool LowerExtract(MachineInstr *MI);
- bool LowerInsert(MachineInstr *MI);
bool LowerSubregToReg(MachineInstr *MI);
+ bool LowerCopy(MachineInstr *MI);
void TransferDeadFlag(MachineInstr *MI, unsigned DstReg,
const TargetRegisterInfo *TRI);
void TransferKillFlag(MachineInstr *MI, unsigned SrcReg,
const TargetRegisterInfo *TRI,
bool AddIfNotFound = false);
+ void TransferImplicitDefs(MachineInstr *MI);
};
char LowerSubregsInstructionPass::ID = 0;
@@ -83,7 +83,7 @@ LowerSubregsInstructionPass::TransferDeadFlag(MachineInstr *MI,
if (MII->addRegisterDead(DstReg, TRI))
break;
assert(MII != MI->getParent()->begin() &&
- "copyRegToReg output doesn't reference destination register!");
+ "copyPhysReg output doesn't reference destination register!");
}
}
@@ -100,64 +100,24 @@ LowerSubregsInstructionPass::TransferKillFlag(MachineInstr *MI,
if (MII->addRegisterKilled(SrcReg, TRI, AddIfNotFound))
break;
assert(MII != MI->getParent()->begin() &&
- "copyRegToReg output doesn't reference source register!");
+ "copyPhysReg output doesn't reference source register!");
}
}
-bool LowerSubregsInstructionPass::LowerExtract(MachineInstr *MI) {
- MachineBasicBlock *MBB = MI->getParent();
-
- assert(MI->getOperand(0).isReg() && MI->getOperand(0).isDef() &&
- MI->getOperand(1).isReg() && MI->getOperand(1).isUse() &&
- MI->getOperand(2).isImm() && "Malformed extract_subreg");
-
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned SuperReg = MI->getOperand(1).getReg();
- unsigned SubIdx = MI->getOperand(2).getImm();
- unsigned SrcReg = TRI->getSubReg(SuperReg, SubIdx);
-
- assert(TargetRegisterInfo::isPhysicalRegister(SuperReg) &&
- "Extract supperg source must be a physical register");
- assert(TargetRegisterInfo::isPhysicalRegister(DstReg) &&
- "Extract destination must be in a physical register");
- assert(SrcReg && "invalid subregister index for register");
-
- DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);
-
- if (SrcReg == DstReg) {
- // No need to insert an identity copy instruction.
- if (MI->getOperand(1).isKill()) {
- // We must make sure the super-register gets killed. Replace the
- // instruction with KILL.
- MI->setDesc(TII->get(TargetOpcode::KILL));
- MI->RemoveOperand(2); // SubIdx
- DEBUG(dbgs() << "subreg: replace by: " << *MI);
- return true;
- }
-
- DEBUG(dbgs() << "subreg: eliminated!");
- } else {
- // Insert copy
- const TargetRegisterClass *TRCS = TRI->getPhysicalRegisterRegClass(DstReg);
- const TargetRegisterClass *TRCD = TRI->getPhysicalRegisterRegClass(SrcReg);
- bool Emitted = TII->copyRegToReg(*MBB, MI, DstReg, SrcReg, TRCD, TRCS,
- MI->getDebugLoc());
- (void)Emitted;
- assert(Emitted && "Subreg and Dst must be of compatible register class");
- // Transfer the kill/dead flags, if needed.
- if (MI->getOperand(0).isDead())
- TransferDeadFlag(MI, DstReg, TRI);
- if (MI->getOperand(1).isKill())
- TransferKillFlag(MI, SuperReg, TRI, true);
- DEBUG({
- MachineBasicBlock::iterator dMI = MI;
- dbgs() << "subreg: " << *(--dMI);
- });
+/// TransferImplicitDefs - MI is a pseudo-instruction, and the lowered
+/// replacement instructions immediately precede it. Copy any implicit-def
+/// operands from MI to the replacement instruction.
+void
+LowerSubregsInstructionPass::TransferImplicitDefs(MachineInstr *MI) {
+ MachineBasicBlock::iterator CopyMI = MI;
+ --CopyMI;
+
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isImplicit() || MO.isUse())
+ continue;
+ CopyMI->addOperand(MachineOperand::CreateReg(MO.getReg(), true, true));
}
-
- DEBUG(dbgs() << '\n');
- MBB->erase(MI);
- return true;
}
bool LowerSubregsInstructionPass::LowerSubregToReg(MachineInstr *MI) {
@@ -166,10 +126,10 @@ bool LowerSubregsInstructionPass::LowerSubregToReg(MachineInstr *MI) {
MI->getOperand(1).isImm() &&
(MI->getOperand(2).isReg() && MI->getOperand(2).isUse()) &&
MI->getOperand(3).isImm() && "Invalid subreg_to_reg");
-
+
unsigned DstReg = MI->getOperand(0).getReg();
unsigned InsReg = MI->getOperand(2).getReg();
- unsigned InsSIdx = MI->getOperand(2).getSubReg();
+ assert(!MI->getOperand(2).getSubReg() && "SubIdx on physreg?");
unsigned SubIdx = MI->getOperand(3).getImm();
assert(SubIdx != 0 && "Invalid index for insert_subreg");
@@ -182,27 +142,25 @@ bool LowerSubregsInstructionPass::LowerSubregToReg(MachineInstr *MI) {
DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);
- if (DstSubReg == InsReg && InsSIdx == 0) {
+ if (DstSubReg == InsReg) {
// No need to insert an identity copy instruction.
// Watch out for a case like this:
- // %RAX<def> = ...
- // %RAX<def> = SUBREG_TO_REG 0, %EAX:3<kill>, 3
- // The first def is defining RAX, not EAX so the top bits were not
- // zero extended.
+ // %RAX<def> = SUBREG_TO_REG 0, %EAX<kill>, 3
+ // We must leave %RAX live.
+ if (DstReg != InsReg) {
+ MI->setDesc(TII->get(TargetOpcode::KILL));
+ MI->RemoveOperand(3); // SubIdx
+ MI->RemoveOperand(1); // Imm
+ DEBUG(dbgs() << "subreg: replace by: " << *MI);
+ return true;
+ }
DEBUG(dbgs() << "subreg: eliminated!");
} else {
- // Insert sub-register copy
- const TargetRegisterClass *TRC0= TRI->getPhysicalRegisterRegClass(DstSubReg);
- const TargetRegisterClass *TRC1= TRI->getPhysicalRegisterRegClass(InsReg);
- bool Emitted = TII->copyRegToReg(*MBB, MI, DstSubReg, InsReg, TRC0, TRC1,
- MI->getDebugLoc());
- (void)Emitted;
- assert(Emitted && "Subreg and Dst must be of compatible register class");
+ TII->copyPhysReg(*MBB, MI, MI->getDebugLoc(), DstSubReg, InsReg,
+ MI->getOperand(2).isKill());
// Transfer the kill/dead flags, if needed.
if (MI->getOperand(0).isDead())
TransferDeadFlag(MI, DstSubReg, TRI);
- if (MI->getOperand(2).isKill())
- TransferKillFlag(MI, InsReg, TRI);
DEBUG({
MachineBasicBlock::iterator dMI = MI;
dbgs() << "subreg: " << *(--dMI);
@@ -214,87 +172,39 @@ bool LowerSubregsInstructionPass::LowerSubregToReg(MachineInstr *MI) {
return true;
}
-bool LowerSubregsInstructionPass::LowerInsert(MachineInstr *MI) {
- MachineBasicBlock *MBB = MI->getParent();
- assert((MI->getOperand(0).isReg() && MI->getOperand(0).isDef()) &&
- (MI->getOperand(1).isReg() && MI->getOperand(1).isUse()) &&
- (MI->getOperand(2).isReg() && MI->getOperand(2).isUse()) &&
- MI->getOperand(3).isImm() && "Invalid insert_subreg");
-
- unsigned DstReg = MI->getOperand(0).getReg();
-#ifndef NDEBUG
- unsigned SrcReg = MI->getOperand(1).getReg();
-#endif
- unsigned InsReg = MI->getOperand(2).getReg();
- unsigned SubIdx = MI->getOperand(3).getImm();
+bool LowerSubregsInstructionPass::LowerCopy(MachineInstr *MI) {
+ MachineOperand &DstMO = MI->getOperand(0);
+ MachineOperand &SrcMO = MI->getOperand(1);
- assert(DstReg == SrcReg && "insert_subreg not a two-address instruction?");
- assert(SubIdx != 0 && "Invalid index for insert_subreg");
- unsigned DstSubReg = TRI->getSubReg(DstReg, SubIdx);
- assert(DstSubReg && "invalid subregister index for register");
- assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- "Insert superreg source must be in a physical register");
- assert(TargetRegisterInfo::isPhysicalRegister(InsReg) &&
- "Inserted value must be in a physical register");
-
- DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);
-
- if (DstSubReg == InsReg) {
- // No need to insert an identity copy instruction. If the SrcReg was
- // <undef>, we need to make sure it is alive by inserting a KILL
- if (MI->getOperand(1).isUndef() && !MI->getOperand(0).isDead()) {
- MachineInstrBuilder MIB = BuildMI(*MBB, MI, MI->getDebugLoc(),
- TII->get(TargetOpcode::KILL), DstReg);
- if (MI->getOperand(2).isUndef())
- MIB.addReg(InsReg, RegState::Undef);
- else
- MIB.addReg(InsReg, RegState::Kill);
- } else {
- DEBUG(dbgs() << "subreg: eliminated!\n");
- MBB->erase(MI);
+ if (SrcMO.getReg() == DstMO.getReg()) {
+ DEBUG(dbgs() << "identity copy: " << *MI);
+ // No need to insert an identity copy instruction, but replace with a KILL
+ // if liveness is changed.
+ if (DstMO.isDead() || SrcMO.isUndef() || MI->getNumOperands() > 2) {
+ // We must make sure the super-register gets killed. Replace the
+ // instruction with KILL.
+ MI->setDesc(TII->get(TargetOpcode::KILL));
+ DEBUG(dbgs() << "replaced by: " << *MI);
return true;
}
- } else {
- // Insert sub-register copy
- const TargetRegisterClass *TRC0= TRI->getPhysicalRegisterRegClass(DstSubReg);
- const TargetRegisterClass *TRC1= TRI->getPhysicalRegisterRegClass(InsReg);
- if (MI->getOperand(2).isUndef())
- // If the source register being inserted is undef, then this becomes a
- // KILL.
- BuildMI(*MBB, MI, MI->getDebugLoc(),
- TII->get(TargetOpcode::KILL), DstSubReg);
- else {
- bool Emitted = TII->copyRegToReg(*MBB, MI, DstSubReg, InsReg, TRC0, TRC1,
- MI->getDebugLoc());
- (void)Emitted;
- assert(Emitted && "Subreg and Dst must be of compatible register class");
- }
- MachineBasicBlock::iterator CopyMI = MI;
- --CopyMI;
-
- // INSERT_SUBREG is a two-address instruction so it implicitly kills SrcReg.
- if (!MI->getOperand(1).isUndef())
- CopyMI->addOperand(MachineOperand::CreateReg(DstReg, false, true, true));
-
- // Transfer the kill/dead flags, if needed.
- if (MI->getOperand(0).isDead()) {
- TransferDeadFlag(MI, DstSubReg, TRI);
- } else {
- // Make sure the full DstReg is live after this replacement.
- CopyMI->addOperand(MachineOperand::CreateReg(DstReg, true, true));
- }
-
- // Make sure the inserted register gets killed
- if (MI->getOperand(2).isKill() && !MI->getOperand(2).isUndef())
- TransferKillFlag(MI, InsReg, TRI);
+ // Vanilla identity copy.
+ MI->eraseFromParent();
+ return true;
}
- DEBUG({
- MachineBasicBlock::iterator dMI = MI;
- dbgs() << "subreg: " << *(--dMI) << "\n";
- });
+ DEBUG(dbgs() << "real copy: " << *MI);
+ TII->copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(),
+ DstMO.getReg(), SrcMO.getReg(), SrcMO.isKill());
- MBB->erase(MI);
+ if (DstMO.isDead())
+ TransferDeadFlag(MI, DstMO.getReg(), TRI);
+ if (MI->getNumOperands() > 2)
+ TransferImplicitDefs(MI);
+ DEBUG({
+ MachineBasicBlock::iterator dMI = MI;
+ dbgs() << "replaced by: " << *(--dMI);
+ });
+ MI->eraseFromParent();
return true;
}
@@ -317,12 +227,13 @@ bool LowerSubregsInstructionPass::runOnMachineFunction(MachineFunction &MF) {
mi != me;) {
MachineBasicBlock::iterator nmi = llvm::next(mi);
MachineInstr *MI = mi;
- if (MI->isExtractSubreg()) {
- MadeChange |= LowerExtract(MI);
- } else if (MI->isInsertSubreg()) {
- MadeChange |= LowerInsert(MI);
- } else if (MI->isSubregToReg()) {
+ assert(!MI->isInsertSubreg() && "INSERT_SUBREG should no longer appear");
+ assert(MI->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
+ "EXTRACT_SUBREG should no longer appear");
+ if (MI->isSubregToReg()) {
MadeChange |= LowerSubregToReg(MI);
+ } else if (MI->isCopy()) {
+ MadeChange |= LowerCopy(MI);
}
mi = nmi;
}
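After this change the pass only ever sees SUBREG_TO_REG and COPY. The LowerCopy logic above boils down to a three-way decision; a self-contained sketch with toy types (the real pass calls TII->copyPhysReg for the last case):

    struct Operand { unsigned Reg; bool Dead, Undef; };
    struct CopyInstr { Operand Dst, Src; unsigned NumOperands; };

    enum Action { EraseIt, ReplaceWithKill, ExpandToTargetCopy };

    Action lowerCopy(const CopyInstr &MI) {
      if (MI.Src.Reg == MI.Dst.Reg) {
        // Identity copy: keep a KILL only if the instruction carries
        // liveness information (dead def, undef use, or extra implicit
        // operands).
        if (MI.Dst.Dead || MI.Src.Undef || MI.NumOperands > 2)
          return ReplaceWithKill;
        return EraseIt;
      }
      return ExpandToTargetCopy;
    }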
diff --git a/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp b/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
index eaaa1f8..a27ee47 100644
--- a/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -13,7 +13,10 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/BasicBlock.h"
+#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -136,6 +139,13 @@ void ilist_traits<MachineInstr>::deleteNode(MachineInstr* MI) {
Parent->getParent()->DeleteMachineInstr(MI);
}
+MachineBasicBlock::iterator MachineBasicBlock::getFirstNonPHI() {
+ iterator I = begin();
+ while (I != end() && I->isPHI())
+ ++I;
+ return I;
+}
+
MachineBasicBlock::iterator MachineBasicBlock::getFirstTerminator() {
iterator I = end();
while (I != begin() && (--I)->getDesc().isTerminator())
@@ -245,6 +255,7 @@ void MachineBasicBlock::updateTerminator() {
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 4> Cond;
+ DebugLoc dl; // FIXME: this is nowhere
bool B = TII->AnalyzeBranch(*this, TBB, FBB, Cond);
(void) B;
assert(!B && "UpdateTerminators requires analyzable predecessors!");
@@ -259,7 +270,7 @@ void MachineBasicBlock::updateTerminator() {
// its layout successor, insert a branch.
TBB = *succ_begin();
if (!isLayoutSuccessor(TBB))
- TII->InsertBranch(*this, TBB, 0, Cond);
+ TII->InsertBranch(*this, TBB, 0, Cond, dl);
}
} else {
if (FBB) {
@@ -270,10 +281,10 @@ void MachineBasicBlock::updateTerminator() {
if (TII->ReverseBranchCondition(Cond))
return;
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, FBB, 0, Cond);
+ TII->InsertBranch(*this, FBB, 0, Cond, dl);
} else if (isLayoutSuccessor(FBB)) {
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, TBB, 0, Cond);
+ TII->InsertBranch(*this, TBB, 0, Cond, dl);
}
} else {
// The block has a fallthrough conditional branch.
@@ -284,14 +295,14 @@ void MachineBasicBlock::updateTerminator() {
if (TII->ReverseBranchCondition(Cond)) {
// We can't reverse the condition, add an unconditional branch.
Cond.clear();
- TII->InsertBranch(*this, MBBA, 0, Cond);
+ TII->InsertBranch(*this, MBBA, 0, Cond, dl);
return;
}
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, MBBA, 0, Cond);
+ TII->InsertBranch(*this, MBBA, 0, Cond, dl);
} else if (!isLayoutSuccessor(MBBA)) {
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, TBB, MBBA, Cond);
+ TII->InsertBranch(*this, TBB, MBBA, Cond, dl);
}
}
}
@@ -331,12 +342,32 @@ void MachineBasicBlock::transferSuccessors(MachineBasicBlock *fromMBB) {
if (this == fromMBB)
return;
- for (MachineBasicBlock::succ_iterator I = fromMBB->succ_begin(),
- E = fromMBB->succ_end(); I != E; ++I)
- addSuccessor(*I);
+ while (!fromMBB->succ_empty()) {
+ MachineBasicBlock *Succ = *fromMBB->succ_begin();
+ addSuccessor(Succ);
+ fromMBB->removeSuccessor(Succ);
+ }
+}
+
+void
+MachineBasicBlock::transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB) {
+ if (this == fromMBB)
+ return;
- while (!fromMBB->succ_empty())
- fromMBB->removeSuccessor(fromMBB->succ_begin());
+ while (!fromMBB->succ_empty()) {
+ MachineBasicBlock *Succ = *fromMBB->succ_begin();
+ addSuccessor(Succ);
+ fromMBB->removeSuccessor(Succ);
+
+ // Fix up any PHI nodes in the successor.
+ for (MachineBasicBlock::iterator MI = Succ->begin(), ME = Succ->end();
+ MI != ME && MI->isPHI(); ++MI)
+ for (unsigned i = 2, e = MI->getNumOperands()+1; i != e; i += 2) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.getMBB() == fromMBB)
+ MO.setMBB(this);
+ }
+ }
}
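The PHI fix-up loop above relies on the machine-level PHI operand layout: operand 0 is the def, and the uses follow as (value, predecessor-block) pairs, so the block operands sit at even indices. A restatement of that loop with a plainer bound (FromMBB and ToMBB are placeholder names for the blocks being swapped):

    // Machine PHIs look like:  %dst = PHI %v1, <bb#1>, %v2, <bb#2>, ...
    // Operand 0 is the def; uses come in (value, predecessor-block)
    // pairs, so the block operands sit at indices 2, 4, 6, ...
    void rewritePHIPreds(MachineInstr *MI, MachineBasicBlock *FromMBB,
                         MachineBasicBlock *ToMBB) {
      for (unsigned i = 2, e = MI->getNumOperands(); i < e; i += 2)
        if (MI->getOperand(i).getMBB() == FromMBB)
          MI->getOperand(i).setMBB(ToMBB);
    }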
bool MachineBasicBlock::isSuccessor(const MachineBasicBlock *MBB) const {
@@ -395,6 +426,82 @@ bool MachineBasicBlock::canFallThrough() {
return FBB == 0;
}
+MachineBasicBlock *
+MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
+ MachineFunction *MF = getParent();
+ DebugLoc dl; // FIXME: this is nowhere
+
+ // We may need to update this block's terminator, but we can't do that if
+ // AnalyzeBranch fails. If this block uses a jump table, we won't touch it.
+ const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+ MachineBasicBlock *TBB = 0, *FBB = 0;
+ SmallVector<MachineOperand, 4> Cond;
+ if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
+ return NULL;
+
+ MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
+ MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB);
+ DEBUG(dbgs() << "PHIElimination splitting critical edge:"
+ " BB#" << getNumber()
+ << " -- BB#" << NMBB->getNumber()
+ << " -- BB#" << Succ->getNumber() << '\n');
+
+ ReplaceUsesOfBlockWith(Succ, NMBB);
+ updateTerminator();
+
+ // Insert unconditional "jump Succ" instruction in NMBB if necessary.
+ NMBB->addSuccessor(Succ);
+ if (!NMBB->isLayoutSuccessor(Succ)) {
+ Cond.clear();
+ MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, Succ, NULL, Cond, dl);
+ }
+
+ // Fix PHI nodes in Succ so they refer to NMBB instead of this
+ for (MachineBasicBlock::iterator i = Succ->begin(), e = Succ->end();
+ i != e && i->isPHI(); ++i)
+ for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
+ if (i->getOperand(ni+1).getMBB() == this)
+ i->getOperand(ni+1).setMBB(NMBB);
+
+ if (LiveVariables *LV =
+ P->getAnalysisIfAvailable<LiveVariables>())
+ LV->addNewBlock(NMBB, this, Succ);
+
+ if (MachineDominatorTree *MDT =
+ P->getAnalysisIfAvailable<MachineDominatorTree>())
+ MDT->addNewBlock(NMBB, this);
+
+ if (MachineLoopInfo *MLI =
+ P->getAnalysisIfAvailable<MachineLoopInfo>())
+ if (MachineLoop *TIL = MLI->getLoopFor(this)) {
+ // If one or the other blocks were not in a loop, the new block is not
+ // either, and thus LI doesn't need to be updated.
+ if (MachineLoop *DestLoop = MLI->getLoopFor(Succ)) {
+ if (TIL == DestLoop) {
+ // Both blocks are in the same loop, so NMBB joins that loop.
+ DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
+ } else if (TIL->contains(DestLoop)) {
+ // Edge from an outer loop to an inner loop. Add to the outer loop.
+ TIL->addBasicBlockToLoop(NMBB, MLI->getBase());
+ } else if (DestLoop->contains(TIL)) {
+ // Edge from an inner loop to an outer loop. Add to the outer loop.
+ DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
+ } else {
+ // Edge from two loops with no containment relation. Because these
+ // are natural loops, we know that the destination block must be the
+ // header of its loop (adding a branch into a loop elsewhere would
+ // create an irreducible loop).
+ assert(DestLoop->getHeader() == Succ &&
+ "Should not create irreducible loops!");
+ if (MachineLoop *P = DestLoop->getParentLoop())
+ P->addBasicBlockToLoop(NMBB, MLI->getBase());
+ }
+ }
+ }
+
+ return NMBB;
+}
+
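SplitCriticalEdge above inserts a fresh block NMBB on the this -> Succ edge, then patches branches, PHIs, and any available analyses (LiveVariables, dominators, loop info). The underlying CFG notion is easy to state; a generic sketch, independent of the LLVM types:

    // An edge Pred -> Succ is critical when Pred has several successors
    // and Succ has several predecessors: code placed in either block runs
    // on other paths too, so only a new block on the edge itself executes
    // exactly when control flows Pred -> Succ.
    bool isCriticalEdge(unsigned NumSuccsOfPred, unsigned NumPredsOfSucc) {
      return NumSuccsOfPred > 1 && NumPredsOfSucc > 1;
    }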
/// removeFromParent - This method unlinks 'this' from the containing function,
/// and returns it, but does not delete it.
MachineBasicBlock *MachineBasicBlock::removeFromParent() {
diff --git a/contrib/llvm/lib/CodeGen/MachineCSE.cpp b/contrib/llvm/lib/CodeGen/MachineCSE.cpp
index 6f4f7a8..833cc00 100644
--- a/contrib/llvm/lib/CodeGen/MachineCSE.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineCSE.cpp
@@ -30,9 +30,7 @@ using namespace llvm;
STATISTIC(NumCoalesces, "Number of copies coalesced");
STATISTIC(NumCSEs, "Number of common subexpression eliminated");
-
-static cl::opt<bool> CSEPhysDef("machine-cse-phys-defs",
- cl::init(false), cl::Hidden);
+STATISTIC(NumPhysCSEs, "Number of physreg-defining common subexprs eliminated");
namespace {
class MachineCSE : public MachineFunctionPass {
@@ -128,6 +126,28 @@ bool MachineCSE::PerformTrivialCoalescing(MachineInstr *MI,
++NumCoalesces;
Changed = true;
}
+
+ if (!DefMI->isCopy())
+ continue;
+ SrcReg = DefMI->getOperand(1).getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(SrcReg))
+ continue;
+ if (DefMI->getOperand(0).getSubReg() || DefMI->getOperand(1).getSubReg())
+ continue;
+ const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
+ const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+ const TargetRegisterClass *NewRC = getCommonSubClass(RC, SRC);
+ if (!NewRC)
+ continue;
+ DEBUG(dbgs() << "Coalescing: " << *DefMI);
+ DEBUG(dbgs() << "*** to: " << *MI);
+ MO.setReg(SrcReg);
+ MRI->clearKillFlags(SrcReg);
+ if (NewRC != SRC)
+ MRI->setRegClass(SrcReg, NewRC);
+ DefMI->eraseFromParent();
+ ++NumCoalesces;
+ Changed = true;
}
return Changed;
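The new coalescing block above forwards a plain COPY: a use of Reg may read SrcReg directly only in a register class both registers can belong to, which is what getCommonSubClass guarantees. A toy model of that compatibility check (real targets use generated tables, not set intersection):

    #include <set>
    #include <string>

    typedef std::set<std::string> RegClass; // registers in the class

    // Largest common subclass in this toy model is the intersection.
    RegClass commonSubClass(const RegClass &A, const RegClass &B) {
      RegClass R;
      for (RegClass::const_iterator I = A.begin(), E = A.end(); I != E; ++I)
        if (B.count(*I))
          R.insert(*I);
      return R; // empty result means the copy cannot be forwarded
    }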
@@ -172,7 +192,8 @@ MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
/// hasLivePhysRegDefUse - Return true if the specified instruction reads / writes
/// physical registers (except for dead defs of physical registers). It also
-/// returns the physical register def by reference if it's the only one.
+/// returns the physical register def by reference if it's the only one and the
+/// instruction does not use a physical register.
bool MachineCSE::hasLivePhysRegDefUse(const MachineInstr *MI,
const MachineBasicBlock *MBB,
unsigned &PhysDef) const {
@@ -186,9 +207,11 @@ bool MachineCSE::hasLivePhysRegDefUse(const MachineInstr *MI,
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
continue;
- if (MO.isUse())
+ if (MO.isUse()) {
// Can't touch anything that reads a physical register.
+ PhysDef = 0;
return true;
+ }
if (MO.isDead())
// If the def is dead, it's ok.
continue;
@@ -240,8 +263,8 @@ bool MachineCSE::PhysRegDefReaches(MachineInstr *CSMI, MachineInstr *MI,
static bool isCopy(const MachineInstr *MI, const TargetInstrInfo *TII) {
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- return TII->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) ||
- MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg();
+ return MI->isCopyLike() ||
+ TII->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx);
}
bool MachineCSE::isCSECandidate(MachineInstr *MI) {
@@ -356,6 +379,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
if (!isCSECandidate(MI))
continue;
+ bool DefPhys = false;
bool FoundCSE = VNT.count(MI);
if (!FoundCSE) {
// Look for trivial copy coalescing opportunities.
@@ -376,11 +400,13 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
// ... Unless the CS is local and it also defines the physical register
// which is not clobbered in between.
- if (PhysDef && CSEPhysDef) {
+ if (PhysDef) {
unsigned CSVN = VNT.lookup(MI);
MachineInstr *CSMI = Exps[CSVN];
- if (PhysRegDefReaches(CSMI, MI, PhysDef))
+ if (PhysRegDefReaches(CSMI, MI, PhysDef)) {
FoundCSE = true;
+ DefPhys = true;
+ }
}
}
@@ -426,6 +452,8 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
}
MI->eraseFromParent();
++NumCSEs;
+ if (DefPhys)
+ ++NumPhysCSEs;
} else {
DEBUG(dbgs() << "*** Not profitable, avoid CSE!\n");
VNT.insert(MI, CurrVN++);
diff --git a/contrib/llvm/lib/CodeGen/MachineDominators.cpp b/contrib/llvm/lib/CodeGen/MachineDominators.cpp
index 4088739..b5f8fbb 100644
--- a/contrib/llvm/lib/CodeGen/MachineDominators.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineDominators.cpp
@@ -46,7 +46,6 @@ MachineDominatorTree::MachineDominatorTree()
}
MachineDominatorTree::~MachineDominatorTree() {
- DT->releaseMemory();
delete DT;
}
diff --git a/contrib/llvm/lib/CodeGen/MachineFunction.cpp b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
index a38c881..666120f 100644
--- a/contrib/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineFunction.cpp
@@ -378,7 +378,7 @@ void MachineFunction::viewCFG() const
#ifndef NDEBUG
ViewGraph(this, "mf" + getFunction()->getNameStr());
#else
- errs() << "SelectionDAG::viewGraph is only available in debug builds on "
+ errs() << "MachineFunction::viewCFG is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
@@ -388,7 +388,7 @@ void MachineFunction::viewCFGOnly() const
#ifndef NDEBUG
ViewGraph(this, "mf" + getFunction()->getNameStr(), true);
#else
- errs() << "SelectionDAG::viewGraph is only available in debug builds on "
+ errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
<< "systems with Graphviz or gv!\n";
#endif // NDEBUG
}
@@ -438,10 +438,16 @@ MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
/// index with a negative value.
///
int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
- bool Immutable, bool isSS) {
+ bool Immutable) {
assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
- Objects.insert(Objects.begin(), StackObject(Size, 1, SPOffset, Immutable,
- isSS));
+ // The alignment of the frame index can be determined from its offset from
+ // the incoming frame position. If the frame object is at offset 32 and
+ // the stack is guaranteed to be 16-byte aligned, then we know that the
+ // object is 16-byte aligned.
+ unsigned StackAlign = TFI.getStackAlignment();
+ unsigned Align = MinAlign(SPOffset, StackAlign);
+ Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
+ /*isSS*/false));
return -++NumFixedObjects;
}
diff --git a/contrib/llvm/lib/CodeGen/MachineInstr.cpp b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
index e54cd5c..6b2e985 100644
--- a/contrib/llvm/lib/CodeGen/MachineInstr.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineInstr.cpp
@@ -111,6 +111,26 @@ void MachineOperand::setReg(unsigned Reg) {
Contents.Reg.RegNo = Reg;
}
+void MachineOperand::substVirtReg(unsigned Reg, unsigned SubIdx,
+ const TargetRegisterInfo &TRI) {
+ assert(TargetRegisterInfo::isVirtualRegister(Reg));
+ if (SubIdx && getSubReg())
+ SubIdx = TRI.composeSubRegIndices(SubIdx, getSubReg());
+ setReg(Reg);
+ if (SubIdx)
+ setSubReg(SubIdx);
+}
+
+void MachineOperand::substPhysReg(unsigned Reg, const TargetRegisterInfo &TRI) {
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ if (getSubReg()) {
+ Reg = TRI.getSubReg(Reg, getSubReg());
+ assert(Reg && "Invalid SubReg for physical register");
+ setSubReg(0);
+ }
+ setReg(Reg);
+}
+
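substVirtReg above composes sub-register indices when the operand already carries one: taking sub-index B of a value that is itself sub-index A of Reg is a single composed index. A toy illustration (the indices and the table are made up; real targets generate composition tables from their .td files):

    #include <assert.h>

    enum SubIdx { NoSub = 0, Lo32 = 1, Lo16 = 2 };

    // E.g. the low 16 bits of the low 32 bits are just the low 16 bits.
    unsigned composeSubRegIndices(unsigned A, unsigned B) {
      if (A == NoSub) return B;
      if (B == NoSub) return A;
      assert(A == Lo32 && B == Lo16 && "toy table covers one pair only");
      return Lo16;
    }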
/// ChangeToImmediate - Replace this operand with a new immediate operand of
/// the specified value. If an operand is known to be an immediate already,
/// the setImm method should be used.
@@ -861,14 +881,14 @@ int MachineInstr::findFirstPredOperandIdx() const {
bool MachineInstr::
isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
if (isInlineAsm()) {
- assert(DefOpIdx >= 2);
+ assert(DefOpIdx >= 3);
const MachineOperand &MO = getOperand(DefOpIdx);
if (!MO.isReg() || !MO.isDef() || MO.getReg() == 0)
return false;
// Determine the actual operand index that corresponds to this index.
unsigned DefNo = 0;
unsigned DefPart = 0;
- for (unsigned i = 1, e = getNumOperands(); i < e; ) {
+ for (unsigned i = 2, e = getNumOperands(); i < e; ) {
const MachineOperand &FMO = getOperand(i);
// After the normal asm operands there may be additional imp-def regs.
if (!FMO.isImm())
@@ -883,7 +903,7 @@ isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
}
++DefNo;
}
- for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
+ for (unsigned i = 2, e = getNumOperands(); i != e; ++i) {
const MachineOperand &FMO = getOperand(i);
if (!FMO.isImm())
continue;
@@ -926,7 +946,7 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
// Find the flag operand corresponding to UseOpIdx
unsigned FlagIdx, NumOps=0;
- for (FlagIdx = 1; FlagIdx < UseOpIdx; FlagIdx += NumOps+1) {
+ for (FlagIdx = 2; FlagIdx < UseOpIdx; FlagIdx += NumOps+1) {
const MachineOperand &UFMO = getOperand(FlagIdx);
// After the normal asm operands there may be additional imp-def regs.
if (!UFMO.isImm())
@@ -944,9 +964,9 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
if (!DefOpIdx)
return true;
- unsigned DefIdx = 1;
- // Remember to adjust the index. First operand is asm string, then there
- // is a flag for each.
+ unsigned DefIdx = 2;
+ // Remember to adjust the index. First operand is asm string, second is
+ // the AlignStack bit, then there is a flag for each.
while (DefNo) {
const MachineOperand &FMO = getOperand(DefIdx);
assert(FMO.isImm());
@@ -1017,6 +1037,29 @@ void MachineInstr::copyPredicates(const MachineInstr *MI) {
}
}
+void MachineInstr::substituteRegister(unsigned FromReg,
+ unsigned ToReg,
+ unsigned SubIdx,
+ const TargetRegisterInfo &RegInfo) {
+ if (TargetRegisterInfo::isPhysicalRegister(ToReg)) {
+ if (SubIdx)
+ ToReg = RegInfo.getSubReg(ToReg, SubIdx);
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = getOperand(i);
+ if (!MO.isReg() || MO.getReg() != FromReg)
+ continue;
+ MO.substPhysReg(ToReg, RegInfo);
+ }
+ } else {
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = getOperand(i);
+ if (!MO.isReg() || MO.getReg() != FromReg)
+ continue;
+ MO.substVirtReg(ToReg, SubIdx, RegInfo);
+ }
+ }
+}
+
/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
@@ -1168,6 +1211,28 @@ void MachineInstr::dump() const {
dbgs() << " " << *this;
}
+static void printDebugLoc(DebugLoc DL, const MachineFunction *MF,
+ raw_ostream &CommentOS) {
+ const LLVMContext &Ctx = MF->getFunction()->getContext();
+ if (!DL.isUnknown()) { // Print source line info.
+ DIScope Scope(DL.getScope(Ctx));
+ // Omit the directory, because it's likely to be long and uninteresting.
+ if (Scope.Verify())
+ CommentOS << Scope.getFilename();
+ else
+ CommentOS << "<unknown>";
+ CommentOS << ':' << DL.getLine();
+ if (DL.getCol() != 0)
+ CommentOS << ':' << DL.getCol();
+ DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(DL.getInlinedAt(Ctx));
+ if (!InlinedAtDL.isUnknown()) {
+ CommentOS << " @[ ";
+ printDebugLoc(InlinedAtDL, MF, CommentOS);
+ CommentOS << " ]";
+ }
+ }
+}
+
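printDebugLoc above prints the innermost location first and then recurses through the inlined-at chain, yielding output like a.c:10:3 @[ b.c:42 ]. A standalone sketch of the same shape over a toy location type:

    #include <iostream>
    #include <string>

    struct Loc {
      std::string File;
      unsigned Line, Col;
      const Loc *InlinedAt; // call site this location was inlined at
    };

    void printLoc(const Loc &L, std::ostream &OS) {
      OS << L.File << ':' << L.Line;
      if (L.Col)
        OS << ':' << L.Col;
      if (L.InlinedAt) {
        OS << " @[ ";
        printLoc(*L.InlinedAt, OS);
        OS << " ]";
      }
    }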
void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
// We can be a bit tidier if we know the TargetMachine and/or MachineFunction.
const MachineFunction *MF = 0;
@@ -1240,6 +1305,8 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
OS << "!\"" << MDS->getString() << '\"';
else
MO.print(OS, TM);
+ } else if (TM && (isInsertSubreg() || isRegSequence()) && MO.isImm()) {
+ OS << TM->getRegisterInfo()->getSubRegIndexName(MO.getImm());
} else
MO.print(OS, TM);
}
@@ -1265,19 +1332,8 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
if (!debugLoc.isUnknown() && MF) {
if (!HaveSemi) OS << ";";
-
- // TODO: print InlinedAtLoc information
-
- DIScope Scope(debugLoc.getScope(MF->getFunction()->getContext()));
OS << " dbg:";
- // Omit the directory, since it's usually long and uninteresting.
- if (Scope.Verify())
- OS << Scope.getFilename();
- else
- OS << "<unknown>";
- OS << ':' << debugLoc.getLine();
- if (debugLoc.getCol() != 0)
- OS << ':' << debugLoc.getCol();
+ printDebugLoc(debugLoc, MF, OS);
}
OS << "\n";
@@ -1418,6 +1474,25 @@ void MachineInstr::addRegisterDefined(unsigned IncomingReg,
true /*IsImp*/));
}
+void MachineInstr::setPhysRegsDeadExcept(const SmallVectorImpl<unsigned> &UsedRegs,
+ const TargetRegisterInfo &TRI) {
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = getOperand(i);
+ if (!MO.isReg() || !MO.isDef()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0) continue;
+ bool Dead = true;
+ for (SmallVectorImpl<unsigned>::const_iterator I = UsedRegs.begin(),
+ E = UsedRegs.end(); I != E; ++I)
+ if (TRI.regsOverlap(*I, Reg)) {
+ Dead = false;
+ break;
+ }
+ // If there are no uses, including partial uses, the def is dead.
+ if (Dead) MO.setIsDead();
+ }
+}
+
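setPhysRegsDeadExcept above must test overlap, not equality, because physical registers alias through sub/super-register relationships. A toy model of the regsOverlap test (register names and alias sets are illustrative):

    #include <map>
    #include <set>
    #include <string>

    typedef std::string Reg;
    typedef std::map<Reg, std::set<Reg> > AliasMap;

    bool regsOverlap(const AliasMap &Aliases, const Reg &A, const Reg &B) {
      AliasMap::const_iterator I = Aliases.find(A);
      return I != Aliases.end() && I->second.count(B) != 0;
    }
    // With {"AX", "EAX", "RAX"} aliasing one another, a def of RAX is not
    // dead while AX is still used.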
unsigned
MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
unsigned Hash = MI->getOpcode() * 37;
diff --git a/contrib/llvm/lib/CodeGen/MachineLICM.cpp b/contrib/llvm/lib/CodeGen/MachineLICM.cpp
index 6120617..4c054f5 100644
--- a/contrib/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineLICM.cpp
@@ -62,6 +62,7 @@ namespace {
// State that is updated as we process loops
bool Changed; // True if a loop is changed.
+ bool FirstInLoop; // True if it's the first LICM in the loop.
MachineLoop *CurLoop; // The current loop we are working on.
MachineBasicBlock *CurPreheader; // The preheader for CurLoop.
@@ -82,7 +83,6 @@ namespace {
const char *getPassName() const { return "Machine Instruction LICM"; }
- // FIXME: Loop preheaders?
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<MachineLoopInfo>();
@@ -127,8 +127,8 @@ namespace {
void AddToLiveIns(unsigned Reg);
/// IsLICMCandidate - Returns true if the instruction may be a suitable
- /// candidate for LICM. e.g. If the instruction is a call, then it's obviously
- /// not safe to hoist it.
+ /// candidate for LICM. e.g. If the instruction is a call, then it's
+ /// obviously not safe to hoist it.
bool IsLICMCandidate(MachineInstr &I);
/// IsLoopInvariantInst - Returns true if the instruction is loop
@@ -181,6 +181,10 @@ namespace {
/// current loop preheader that may become duplicates of instructions that
/// are hoisted out of the loop.
void InitCSEMap(MachineBasicBlock *BB);
+
+ /// getCurPreheader - Get the preheader for the current loop, splitting
+ /// a critical edge if needed.
+ MachineBasicBlock *getCurPreheader();
};
} // end anonymous namespace
@@ -192,12 +196,17 @@ FunctionPass *llvm::createMachineLICMPass(bool PreRegAlloc) {
return new MachineLICM(PreRegAlloc);
}
-/// LoopIsOuterMostWithPreheader - Test if the given loop is the outer-most
-/// loop that has a preheader.
-static bool LoopIsOuterMostWithPreheader(MachineLoop *CurLoop) {
+/// LoopIsOuterMostWithPredecessor - Test if the given loop is the outer-most
+/// loop that has a unique predecessor.
+static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
+ // Check whether this loop even has a unique predecessor.
+ if (!CurLoop->getLoopPredecessor())
+ return false;
+ // Ok, now check to see if any of its outer loops do.
for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
- if (L->getLoopPreheader())
+ if (L->getLoopPredecessor())
return false;
+ // None of them did, so this is the outermost with a unique predecessor.
return true;
}
@@ -207,7 +216,7 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
else
DEBUG(dbgs() << "******** Post-regalloc Machine LICM ********\n");
- Changed = false;
+ Changed = FirstInLoop = false;
TM = &MF.getTarget();
TII = TM->getInstrInfo();
TRI = TM->getRegisterInfo();
@@ -220,23 +229,17 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
DT = &getAnalysis<MachineDominatorTree>();
AA = &getAnalysis<AliasAnalysis>();
- for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end(); I != E; ++I){
- CurLoop = *I;
+ SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
+ while (!Worklist.empty()) {
+ CurLoop = Worklist.pop_back_val();
+ CurPreheader = 0;
// If this is done before regalloc, only visit outer-most loops that
// have a unique predecessor.
- if (PreRegAlloc && !LoopIsOuterMostWithPreheader(CurLoop))
- continue;
-
- // Determine the block to which to hoist instructions. If we can't find a
- // suitable loop preheader, we can't do any hoisting.
- //
- // FIXME: We are only hoisting if the basic block coming into this loop
- // has only one successor. This isn't the case in general because we haven't
- // broken critical edges or added preheaders.
- CurPreheader = CurLoop->getLoopPreheader();
- if (!CurPreheader)
+ if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
+ Worklist.append(CurLoop->begin(), CurLoop->end());
continue;
+ }
if (!PreRegAlloc)
HoistRegionPostRA();
@@ -244,6 +247,7 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
// CSEMap is initialized for loop header when the first instruction is
// being hoisted.
MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
+ FirstInLoop = true;
HoistRegion(N);
CSEMap.clear();
}
@@ -436,13 +440,16 @@ void MachineLICM::AddToLiveIns(unsigned Reg) {
/// operands that is safe to hoist, this instruction is called to do the
/// dirty work.
void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
+ MachineBasicBlock *Preheader = getCurPreheader();
+ if (!Preheader) return;
+
// Now move the instructions to the predecessor, inserting it before any
// terminator instructions.
DEBUG({
dbgs() << "Hoisting " << *MI;
- if (CurPreheader->getBasicBlock())
+ if (Preheader->getBasicBlock())
dbgs() << " to MachineBasicBlock "
- << CurPreheader->getName();
+ << Preheader->getName();
if (MI->getParent()->getBasicBlock())
dbgs() << " from MachineBasicBlock "
<< MI->getParent()->getName();
@@ -451,7 +458,7 @@ void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
// Splice the instruction to the preheader.
MachineBasicBlock *MBB = MI->getParent();
- CurPreheader->splice(CurPreheader->getFirstTerminator(), MBB, MI);
+ Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);
// Add register to livein list to all the BBs in the current loop since a
// loop invariant must be kept live throughout the whole loop. This is
@@ -490,26 +497,11 @@ void MachineLICM::HoistRegion(MachineDomTreeNode *N) {
/// candidate for LICM. e.g. If the instruction is a call, then it's obviously
/// not safe to hoist it.
bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
- if (I.isImplicitDef())
+ // Check if it's safe to move the instruction.
+ bool DontMoveAcrossStore = true;
+ if (!I.isSafeToMove(TII, AA, DontMoveAcrossStore))
return false;
-
- const TargetInstrDesc &TID = I.getDesc();
- // Ignore stuff that we obviously can't hoist.
- if (TID.mayStore() || TID.isCall() || TID.isTerminator() ||
- TID.hasUnmodeledSideEffects())
- return false;
-
- if (TID.mayLoad()) {
- // Okay, this instruction does a load. As a refinement, we allow the target
- // to decide whether the loaded value is actually a constant. If so, we can
- // actually use it as a load.
- if (!I.isInvariantLoad(AA))
- // FIXME: we should be able to hoist loads with no other side effects if
- // there are no other instructions which can change memory in this loop.
- // This is a trivial form of alias analysis.
- return false;
- }
return true;
}
@@ -720,7 +712,9 @@ MachineLICM::LookForDuplicate(const MachineInstr *MI,
bool MachineLICM::EliminateCSE(MachineInstr *MI,
DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI) {
- if (CI == CSEMap.end())
+ // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
+ // the undef property onto uses.
+ if (CI == CSEMap.end() || MI->isImplicitDef())
return false;
if (const MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
@@ -754,6 +748,9 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
/// that are safe to hoist, this instruction is called to do the dirty work.
///
void MachineLICM::Hoist(MachineInstr *MI) {
+ MachineBasicBlock *Preheader = getCurPreheader();
+ if (!Preheader) return;
+
// First check whether we should hoist this instruction.
if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
// If not, try unfolding a hoistable load.
@@ -765,9 +762,9 @@ void MachineLICM::Hoist(MachineInstr *MI) {
// terminator instructions.
DEBUG({
dbgs() << "Hoisting " << *MI;
- if (CurPreheader->getBasicBlock())
+ if (Preheader->getBasicBlock())
dbgs() << " to MachineBasicBlock "
- << CurPreheader->getName();
+ << Preheader->getName();
if (MI->getParent()->getBasicBlock())
dbgs() << " from MachineBasicBlock "
<< MI->getParent()->getName();
@@ -776,7 +773,10 @@ void MachineLICM::Hoist(MachineInstr *MI) {
// If this is the first instruction being hoisted to the preheader,
// initialize the CSE map with potential common expressions.
- InitCSEMap(CurPreheader);
+ if (FirstInLoop) {
+ InitCSEMap(Preheader);
+ FirstInLoop = false;
+ }
// Look for opportunity to CSE the hoisted instruction.
unsigned Opcode = MI->getOpcode();
@@ -784,7 +784,7 @@ void MachineLICM::Hoist(MachineInstr *MI) {
CI = CSEMap.find(Opcode);
if (!EliminateCSE(MI, CI)) {
// Otherwise, splice the instruction to the preheader.
- CurPreheader->splice(CurPreheader->getFirstTerminator(),MI->getParent(),MI);
+ Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);
// Clear the kill flags of any register this instruction defines,
// since they may need to be live throughout the entire loop
@@ -808,3 +808,30 @@ void MachineLICM::Hoist(MachineInstr *MI) {
++NumHoisted;
Changed = true;
}
+
+MachineBasicBlock *MachineLICM::getCurPreheader() {
+ // Determine the block to which to hoist instructions. If we can't find a
+ // suitable loop predecessor, we can't do any hoisting.
+
+ // If we've tried to get a preheader and failed, don't try again.
+ if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
+ return 0;
+
+ if (!CurPreheader) {
+ CurPreheader = CurLoop->getLoopPreheader();
+ if (!CurPreheader) {
+ MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
+ if (!Pred) {
+ CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
+ return 0;
+ }
+
+ CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), this);
+ if (!CurPreheader) {
+ CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
+ return 0;
+ }
+ }
+ }
+ return CurPreheader;
+}
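getCurPreheader above caches three states in one pointer: null means not computed yet, the -1 sentinel means computed-and-failed, and anything else is the answer, so a loop with no usable preheader is not re-split on every hoist attempt. The pattern in isolation, with toy types:

    struct Block;
    static Block *const Failed = reinterpret_cast<Block *>(-1);

    Block *Cached = 0;

    Block *getOrCompute(Block *Compute()) {
      if (Cached == Failed)
        return 0;                 // already tried and failed, don't retry
      if (!Cached) {
        Cached = Compute();
        if (!Cached) {
          Cached = Failed;        // remember the failure
          return 0;
        }
      }
      return Cached;
    }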
diff --git a/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp b/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
index 25284d6..15778b4 100644
--- a/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineModuleInfo.cpp
@@ -563,3 +563,26 @@ unsigned MachineModuleInfo::getPersonalityIndex() const {
return 0;
}
+namespace {
+ /// VariableDebugSorter - Comparison to sort the VariableDbgInfo map
+ /// by source location, to avoid depending on the arbitrary order that
+ /// instruction selection visits variables in.
+ struct VariableDebugSorter {
+ bool operator()(const MachineModuleInfo::VariableDbgInfoMapTy::value_type &A,
+ const MachineModuleInfo::VariableDbgInfoMapTy::value_type &B)
+ const {
+ if (A.second.second.getLine() != B.second.second.getLine())
+ return A.second.second.getLine() < B.second.second.getLine();
+ if (A.second.second.getCol() != B.second.second.getCol())
+ return A.second.second.getCol() < B.second.second.getCol();
+ return false;
+ }
+ };
+}
+
+MachineModuleInfo::VariableDbgInfoMapTy &
+MachineModuleInfo::getVariableDbgInfo() {
+ std::stable_sort(VariableDbgInfo.begin(), VariableDbgInfo.end(),
+ VariableDebugSorter());
+ return VariableDbgInfo;
+}
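getVariableDbgInfo above makes the map's order deterministic by stable-sorting on source position; a stable sort preserves the input order for entries at the same (line, column). A minimal sketch over a toy record type:

    #include <algorithm>
    #include <vector>

    struct VarDbg { unsigned Line, Col; /* variable payload */ };

    struct ByLoc {
      bool operator()(const VarDbg &A, const VarDbg &B) const {
        if (A.Line != B.Line) return A.Line < B.Line;
        return A.Col < B.Col;
      }
    };

    void sortVars(std::vector<VarDbg> &V) {
      std::stable_sort(V.begin(), V.end(), ByLoc());
    }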
diff --git a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
index 70bf7e5..5d852f2 100644
--- a/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineRegisterInfo.cpp
@@ -20,7 +20,7 @@ using namespace llvm;
MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI) {
VRegInfo.reserve(256);
RegAllocHints.reserve(256);
- RegClass2VRegMap.resize(TRI.getNumRegClasses()+1); // RC ID starts at 1.
+ RegClass2VRegMap = new std::vector<unsigned>[TRI.getNumRegClasses()];
UsedPhysRegs.resize(TRI.getNumRegs());
// Create the physreg use/def lists.
@@ -37,6 +37,7 @@ MachineRegisterInfo::~MachineRegisterInfo() {
"PhysRegUseDefLists has entries after all instructions are deleted");
#endif
delete [] PhysRegUseDefLists;
+ delete [] RegClass2VRegMap;
}
/// setRegClass - Set the register class of the specified virtual register.
@@ -52,7 +53,7 @@ MachineRegisterInfo::setRegClass(unsigned Reg, const TargetRegisterClass *RC) {
// Remove from old register class's vregs list. This may be slow but
// fortunately this operation is rarely needed.
std::vector<unsigned> &VRegs = RegClass2VRegMap[OldRC->getID()];
- std::vector<unsigned>::iterator I=std::find(VRegs.begin(), VRegs.end(), VR);
+ std::vector<unsigned>::iterator I = std::find(VRegs.begin(), VRegs.end(), VR);
VRegs.erase(I);
// Add to new register class's vregs list.
@@ -174,115 +175,36 @@ unsigned MachineRegisterInfo::getLiveInVirtReg(unsigned PReg) const {
return 0;
}
-static cl::opt<bool>
-SchedLiveInCopies("schedule-livein-copies", cl::Hidden,
- cl::desc("Schedule copies of livein registers"),
- cl::init(false));
-
-/// EmitLiveInCopy - Emit a copy for a live in physical register. If the
-/// physical register has only a single copy use, then coalesced the copy
-/// if possible.
-static void EmitLiveInCopy(MachineBasicBlock *MBB,
- MachineBasicBlock::iterator &InsertPos,
- unsigned VirtReg, unsigned PhysReg,
- const TargetRegisterClass *RC,
- DenseMap<MachineInstr*, unsigned> &CopyRegMap,
- const MachineRegisterInfo &MRI,
- const TargetRegisterInfo &TRI,
- const TargetInstrInfo &TII) {
- unsigned NumUses = 0;
- MachineInstr *UseMI = NULL;
- for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(VirtReg),
- UE = MRI.use_end(); UI != UE; ++UI) {
- UseMI = &*UI;
- if (++NumUses > 1)
- break;
- }
-
- // If the number of uses is not one, or the use is not a move instruction,
- // don't coalesce. Also, only coalesce away a virtual register to virtual
- // register copy.
- bool Coalesced = false;
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (NumUses == 1 &&
- TII.isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
- TargetRegisterInfo::isVirtualRegister(DstReg)) {
- VirtReg = DstReg;
- Coalesced = true;
- }
-
- // Now find an ideal location to insert the copy.
- MachineBasicBlock::iterator Pos = InsertPos;
- while (Pos != MBB->begin()) {
- MachineInstr *PrevMI = prior(Pos);
- DenseMap<MachineInstr*, unsigned>::iterator RI = CopyRegMap.find(PrevMI);
- // copyRegToReg might emit multiple instructions to do a copy.
- unsigned CopyDstReg = (RI == CopyRegMap.end()) ? 0 : RI->second;
- if (CopyDstReg && !TRI.regsOverlap(CopyDstReg, PhysReg))
- // This is what the BB looks like right now:
- // r1024 = mov r0
- // ...
- // r1 = mov r1024
- //
- // We want to insert "r1025 = mov r1". Inserting this copy below the
- // move to r1024 makes it impossible for that move to be coalesced.
- //
- // r1025 = mov r1
- // r1024 = mov r0
- // ...
- // r1 = mov 1024
- // r2 = mov 1025
- break; // Woot! Found a good location.
- --Pos;
- }
-
- bool Emitted = TII.copyRegToReg(*MBB, Pos, VirtReg, PhysReg, RC, RC,
- DebugLoc());
- assert(Emitted && "Unable to issue a live-in copy instruction!\n");
- (void) Emitted;
-
- CopyRegMap.insert(std::make_pair(prior(Pos), VirtReg));
- if (Coalesced) {
- if (&*InsertPos == UseMI) ++InsertPos;
- MBB->erase(UseMI);
- }
-}
-
/// EmitLiveInCopies - Emit copies to initialize livein virtual registers
/// into the given entry block.
void
MachineRegisterInfo::EmitLiveInCopies(MachineBasicBlock *EntryMBB,
const TargetRegisterInfo &TRI,
const TargetInstrInfo &TII) {
- if (SchedLiveInCopies) {
- // Emit the copies at a heuristically-determined location in the block.
- DenseMap<MachineInstr*, unsigned> CopyRegMap;
- MachineBasicBlock::iterator InsertPos = EntryMBB->begin();
- for (MachineRegisterInfo::livein_iterator LI = livein_begin(),
- E = livein_end(); LI != E; ++LI)
- if (LI->second) {
- const TargetRegisterClass *RC = getRegClass(LI->second);
- EmitLiveInCopy(EntryMBB, InsertPos, LI->second, LI->first,
- RC, CopyRegMap, *this, TRI, TII);
+ // Emit the copies into the top of the block.
+ for (unsigned i = 0, e = LiveIns.size(); i != e; ++i)
+ if (LiveIns[i].second) {
+ if (use_empty(LiveIns[i].second)) {
+ // The livein has no uses. Drop it.
+ //
+ // It would be preferable to have isel avoid creating live-in
+ // records for unused arguments in the first place, but it's
+ // complicated by the debug info code for arguments.
+ LiveIns.erase(LiveIns.begin() + i);
+ --i; --e;
+ } else {
+ // Emit a copy.
+ BuildMI(*EntryMBB, EntryMBB->begin(), DebugLoc(),
+ TII.get(TargetOpcode::COPY), LiveIns[i].second)
+ .addReg(LiveIns[i].first);
+
+ // Add the register to the entry block live-in set.
+ EntryMBB->addLiveIn(LiveIns[i].first);
}
- } else {
- // Emit the copies into the top of the block.
- for (MachineRegisterInfo::livein_iterator LI = livein_begin(),
- E = livein_end(); LI != E; ++LI)
- if (LI->second) {
- const TargetRegisterClass *RC = getRegClass(LI->second);
- bool Emitted = TII.copyRegToReg(*EntryMBB, EntryMBB->begin(),
- LI->second, LI->first, RC, RC,
- DebugLoc());
- assert(Emitted && "Unable to issue a live-in copy instruction!\n");
- (void) Emitted;
- }
- }
-
- // Add function live-ins to entry block live-in set.
- for (MachineRegisterInfo::livein_iterator I = livein_begin(),
- E = livein_end(); I != E; ++I)
- EntryMBB->addLiveIn(I->first);
+ } else {
+ // Add the register to the entry block live-in set.
+ EntryMBB->addLiveIn(LiveIns[i].first);
+ }
}
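The unused-live-in branch above erases from the vector while indexing it, stepping both the index and the cached end back after each erase so the element shifted into the freed slot is not skipped. The same pattern in isolation:

    #include <vector>

    void dropZeros(std::vector<int> &V) {
      for (unsigned i = 0, e = V.size(); i != e; ++i)
        if (V[i] == 0) {
          V.erase(V.begin() + i);
          --i; --e; // re-examine the element shifted into slot i
        }
    }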
void MachineRegisterInfo::closePhysRegsUsed(const TargetRegisterInfo &TRI) {
diff --git a/contrib/llvm/lib/CodeGen/MachineSink.cpp b/contrib/llvm/lib/CodeGen/MachineSink.cpp
index 1610e6c..61334fc 100644
--- a/contrib/llvm/lib/CodeGen/MachineSink.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineSink.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This pass moves instructions into successor blocks, when possible, so that
+// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
@@ -45,9 +45,9 @@ namespace {
public:
static char ID; // Pass identification
MachineSinking() : MachineFunctionPass(&ID) {}
-
+
virtual bool runOnMachineFunction(MachineFunction &MF);
-
+
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
@@ -63,7 +63,7 @@ namespace {
bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB) const;
};
} // end anonymous namespace
-
+
char MachineSinking::ID = 0;
static RegisterPass<MachineSinking>
X("machine-sink", "Machine code sinking");
@@ -72,7 +72,7 @@ FunctionPass *llvm::createMachineSinkingPass() { return new MachineSinking(); }
/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block.
-bool MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
+bool MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
MachineBasicBlock *MBB) const {
assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
"Only makes sense for vregs");
@@ -80,27 +80,30 @@ bool MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
// This may leave a referencing dbg_value in the original block, before
// the definition of the vreg. The DWARF generator handles this, although
// the user might not get the right info at runtime.
- for (MachineRegisterInfo::use_nodbg_iterator I =
- RegInfo->use_nodbg_begin(Reg),
- E = RegInfo->use_nodbg_end(); I != E; ++I) {
+ for (MachineRegisterInfo::use_nodbg_iterator
+ I = RegInfo->use_nodbg_begin(Reg), E = RegInfo->use_nodbg_end();
+ I != E; ++I) {
// Determine the block of the use.
MachineInstr *UseInst = &*I;
MachineBasicBlock *UseBlock = UseInst->getParent();
+
if (UseInst->isPHI()) {
// PHI nodes use the operand in the predecessor block, not the block with
// the PHI.
UseBlock = UseInst->getOperand(I.getOperandNo()+1).getMBB();
}
+
// Check that it dominates.
if (!DT->dominates(MBB, UseBlock))
return false;
}
+
return true;
}
bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
DEBUG(dbgs() << "******** Machine Sinking ********\n");
-
+
const TargetMachine &TM = MF.getTarget();
TII = TM.getInstrInfo();
TRI = TM.getRegisterInfo();
@@ -111,19 +114,19 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
AllocatableSet = TRI->getAllocatableSet(MF);
bool EverMadeChange = false;
-
+
while (1) {
bool MadeChange = false;
// Process all basic blocks.
- for (MachineFunction::iterator I = MF.begin(), E = MF.end();
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end();
I != E; ++I)
MadeChange |= ProcessBlock(*I);
-
+
// If this iteration over the code changed anything, keep iterating.
if (!MadeChange) break;
EverMadeChange = true;
- }
+ }
return EverMadeChange;
}
@@ -132,8 +135,8 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
if (MBB.succ_size() <= 1 || MBB.empty()) return false;
// Don't bother sinking code out of unreachable blocks. In addition to being
- // unprofitable, it can also lead to infinite looping, because in an unreachable
- // loop there may be nowhere to stop.
+ // unprofitable, it can also lead to infinite looping, because in an
+ // unreachable loop there may be nowhere to stop.
if (!DT->isReachableFromEntry(&MBB)) return false;
bool MadeChange = false;
@@ -144,7 +147,7 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
bool ProcessedBegin, SawStore = false;
do {
MachineInstr *MI = I; // The instruction to sink.
-
+
// Predecrement I (if it's not begin) so that it isn't invalidated by
// sinking.
ProcessedBegin = I == MBB.begin();
@@ -156,10 +159,10 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
if (SinkInstruction(MI, SawStore))
++NumSunk, MadeChange = true;
-
+
// If we just processed the first instruction in the block, we're done.
} while (!ProcessedBegin);
-
+
return MadeChange;
}
@@ -169,7 +172,7 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// Check if it's safe to move the instruction.
if (!MI->isSafeToMove(TII, AA, SawStore))
return false;
-
+
// FIXME: This should include support for sinking instructions within the
// block they are currently in to shorten the live ranges. We often get
// instructions sunk into the top of a large block, but it would be better to
@@ -177,22 +180,22 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// be careful not to *increase* register pressure though, e.g. sinking
// "x = y + z" down if it kills y and z would increase the live ranges of y
// and z and only shrink the live range of x.
-
+
// Loop over all the operands of the specified instruction. If there is
// anything we can't handle, bail out.
MachineBasicBlock *ParentBlock = MI->getParent();
-
+
// SuccToSinkTo - This is the successor to sink this instruction to, once we
// decide.
MachineBasicBlock *SuccToSinkTo = 0;
-
+
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue; // Ignore non-register operands.
-
+
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
-
+
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
if (MO.isUse()) {
// If the physreg has no defs anywhere, it's just an ambient register
@@ -200,13 +203,16 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// it could get allocated to something with a def during allocation.
if (!RegInfo->def_empty(Reg))
return false;
+
if (AllocatableSet.test(Reg))
return false;
+
// Check for a def among the register's aliases too.
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
unsigned AliasReg = *Alias;
if (!RegInfo->def_empty(AliasReg))
return false;
+
if (AllocatableSet.test(AliasReg))
return false;
}
@@ -221,28 +227,31 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// If it's not safe to move defs of the register class, then abort.
if (!TII->isSafeToMoveRegClassDefs(RegInfo->getRegClass(Reg)))
return false;
-
+
// FIXME: This picks a successor to sink into based on having one
// successor that dominates all the uses. However, there are cases where
// sinking can happen but where the sink point isn't a successor. For
// example:
+ //
// x = computation
// if () {} else {}
// use x
- // the instruction could be sunk over the whole diamond for the
+ //
+ // the instruction could be sunk over the whole diamond for the
// if/then/else (or loop, etc), allowing it to be sunk into other blocks
// after that.
-
+
// Virtual register defs can only be sunk if all their uses are in blocks
// dominated by one of the successors.
if (SuccToSinkTo) {
// If a previous operand picked a block to sink to, then this operand
// must be sinkable to the same block.
- if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo))
+ if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo))
return false;
+
continue;
}
-
+
// Otherwise, we should look at all the successors and decide which one
// we should sink to.
for (MachineBasicBlock::succ_iterator SI = ParentBlock->succ_begin(),
@@ -252,13 +261,13 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
break;
}
}
-
+
// If we couldn't find a block to sink to, ignore this instruction.
if (SuccToSinkTo == 0)
return false;
}
}
-
+
// If there are no outputs, it must have side-effects.
if (SuccToSinkTo == 0)
return false;
@@ -267,15 +276,26 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// landing pad is implicitly defined.
if (SuccToSinkTo->isLandingPad())
return false;
-
+
// It is not possible to sink an instruction into its own block. This can
// happen with loops.
if (MI->getParent() == SuccToSinkTo)
return false;
-
- DEBUG(dbgs() << "Sink instr " << *MI);
- DEBUG(dbgs() << "to block " << *SuccToSinkTo);
-
+
+ // If the instruction to move defines a dead physical register which is live
+ // when leaving the basic block, don't move it because it could turn into a
+ // "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ if (SuccToSinkTo->isLiveIn(Reg))
+ return false;
+ }
+
+ DEBUG(dbgs() << "Sink instr " << *MI << "\tinto block " << *SuccToSinkTo);
+
// If the block has multiple predecessors, this would introduce computation
// on a path where it doesn't already exist. We could split the critical edge,
// but for now we just punt.
@@ -305,18 +325,18 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
// Otherwise we are OK with sinking along a critical edge.
DEBUG(dbgs() << "Sinking along critical edge.\n");
}
-
- // Determine where to insert into. Skip phi nodes.
+
+ // Determine where to insert into. Skip phi nodes.
MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
++InsertPos;
-
+
// Move the instruction.
SuccToSinkTo->splice(InsertPos, ParentBlock, MI,
++MachineBasicBlock::iterator(MI));
- // Conservatively, clear any kill flags, since it's possible that
- // they are no longer correct.
+ // Conservatively, clear any kill flags, since it's possible that they are no
+ // longer correct.
MI->clearKillInfo();
return true;
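[editor's note] The sinking decision above boils down to: find one successor that dominates every use of each virtual register the instruction defines. A toy, LLVM-free model of that selection rule (all names and types are ours, for illustration only):

    #include <vector>

    // Dominates(A, B) answers "does block A dominate block B?".
    typedef bool (*DomFn)(int, int);

    // Return the first successor dominating all use blocks, or -1 if none
    // exists, in which case SinkInstruction gives up on the instruction.
    static int pickSinkSuccessor(const std::vector<int> &Succs,
                                 const std::vector<int> &UseBlocks,
                                 DomFn Dominates) {
      for (unsigned s = 0, se = Succs.size(); s != se; ++s) {
        bool DomAll = true;
        for (unsigned u = 0, ue = UseBlocks.size(); u != ue; ++u)
          if (!Dominates(Succs[s], UseBlocks[u])) {
            DomAll = false;
            break;
          }
        if (DomAll)
          return Succs[s];
      }
      return -1;
    }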
diff --git a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
index 8baf01c..2297c90 100644
--- a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -390,7 +390,8 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
report("MBB exits via unconditional fall-through but its successor "
"differs from its CFG successor!", MBB);
}
- if (!MBB->empty() && MBB->back().getDesc().isBarrier()) {
+ if (!MBB->empty() && MBB->back().getDesc().isBarrier() &&
+ !TII->isPredicated(&MBB->back())) {
report("MBB exits via unconditional fall-through but ends with a "
"barrier instruction!", MBB);
}
diff --git a/contrib/llvm/lib/CodeGen/OptimizeExts.cpp b/contrib/llvm/lib/CodeGen/OptimizeExts.cpp
index 41fc204..dcdc243 100644
--- a/contrib/llvm/lib/CodeGen/OptimizeExts.cpp
+++ b/contrib/llvm/lib/CodeGen/OptimizeExts.cpp
@@ -118,6 +118,26 @@ bool OptimizeExts::OptimizeInstr(MachineInstr *MI, MachineBasicBlock *MBB,
continue;
}
+ // It's an error to translate this:
+ //
+ // %reg1025 = <sext> %reg1024
+ // ...
+ // %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
+ //
+ // into this:
+ //
+ // %reg1025 = <sext> %reg1024
+ // ...
+ // %reg1027 = COPY %reg1025:4
+ // %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
+ //
+ // The problem here is that SUBREG_TO_REG is there to assert that an
+ // implicit zext occurs. It doesn't insert a zext instruction. If we allow
+ // the COPY here, it will give us the value after the <sext>,
+ // not the original value of %reg1024 before <sext>.
+ if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
+ continue;
+
MachineBasicBlock *UseMBB = UseMI->getParent();
if (UseMBB == MBB) {
// Local uses that come after the extension.
@@ -165,8 +185,8 @@ bool OptimizeExts::OptimizeInstr(MachineInstr *MI, MachineBasicBlock *MBB,
continue;
unsigned NewVR = MRI->createVirtualRegister(RC);
BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
- TII->get(TargetOpcode::EXTRACT_SUBREG), NewVR)
- .addReg(DstReg).addImm(SubIdx);
+ TII->get(TargetOpcode::COPY), NewVR)
+ .addReg(DstReg, 0, SubIdx);
UseMO->setReg(NewVR);
++NumReuse;
Changed = true;
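[editor's note] For readers new to SUBREG_TO_REG: it is an assertion about a value, not an instruction that computes one. A hypothetical x86-64-flavored illustration of the invariant the new bail-out protects (subreg index 4 as in the hunk):

    //   %reg1024 = MOV32rm <mem>                ; 32-bit load: the containing
    //                                           ; 64-bit reg's high bits are 0
    //   %reg1026 = SUBREG_TO_REG 0, %reg1024, 4 ; records that fact; emits no
    //                                           ; machine code at all
    //
    // Feeding SUBREG_TO_REG a COPY of the sign-extended value instead would
    // hand it a register whose high bits may be set, silently breaking the
    // "implicit zext" guarantee the opcode asserts.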
diff --git a/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp b/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp
index 2717d4d..1613fe2 100644
--- a/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp
+++ b/contrib/llvm/lib/CodeGen/OptimizePHIs.cpp
@@ -107,6 +107,11 @@ bool OptimizePHIs::IsSingleValuePHICycle(MachineInstr *MI,
SrcSubIdx == 0 && DstSubIdx == 0 &&
TargetRegisterInfo::isVirtualRegister(MvSrcReg))
SrcMI = MRI->getVRegDef(MvSrcReg);
+ else if (SrcMI && SrcMI->isCopy() &&
+ !SrcMI->getOperand(0).getSubReg() &&
+ !SrcMI->getOperand(1).getSubReg() &&
+ TargetRegisterInfo::isVirtualRegister(SrcMI->getOperand(1).getReg()))
+ SrcMI = MRI->getVRegDef(SrcMI->getOperand(1).getReg());
if (!SrcMI)
return false;
diff --git a/contrib/llvm/lib/CodeGen/PBQP/HeuristicSolver.h b/contrib/llvm/lib/CodeGen/PBQP/HeuristicSolver.h
index bd18b52..02938df 100644
--- a/contrib/llvm/lib/CodeGen/PBQP/HeuristicSolver.h
+++ b/contrib/llvm/lib/CodeGen/PBQP/HeuristicSolver.h
@@ -406,7 +406,7 @@ namespace PBQP {
// Create node data objects.
for (Graph::NodeItr nItr = g.nodesBegin(), nEnd = g.nodesEnd();
- nItr != nEnd; ++nItr) {
+ nItr != nEnd; ++nItr) {
nodeDataList.push_back(NodeData());
g.setNodeData(nItr, &nodeDataList.back());
}
diff --git a/contrib/llvm/lib/CodeGen/PBQP/Heuristics/Briggs.h b/contrib/llvm/lib/CodeGen/PBQP/Heuristics/Briggs.h
index 30d34d9..4c1ce11 100644
--- a/contrib/llvm/lib/CodeGen/PBQP/Heuristics/Briggs.h
+++ b/contrib/llvm/lib/CodeGen/PBQP/Heuristics/Briggs.h
@@ -18,7 +18,6 @@
#ifndef LLVM_CODEGEN_PBQP_HEURISTICS_BRIGGS_H
#define LLVM_CODEGEN_PBQP_HEURISTICS_BRIGGS_H
-#include "llvm/Support/Compiler.h"
#include "../HeuristicSolver.h"
#include "../HeuristicBase.h"
@@ -267,8 +266,8 @@ namespace PBQP {
if (!nd.isHeuristic)
return;
- EdgeData &ed ATTRIBUTE_UNUSED = getHeuristicEdgeData(eItr);
-
+ EdgeData &ed = getHeuristicEdgeData(eItr);
+ (void)ed;
assert(ed.isUpToDate && "Edge data is not up to date.");
// Update node.
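[editor's note] The Briggs.h hunk swaps ATTRIBUTE_UNUSED for an explicit (void) cast, which also works on compilers that ignore the GCC attribute. The pattern in isolation (standalone sketch, ours):

    #include <cassert>

    void consume(int *p) {
      int v = p ? *p : 0;
      (void)v; // keeps -Wunused-variable quiet in NDEBUG builds,
               // where the assert below compiles away entirely
      assert(v >= 0 && "value must be non-negative");
    }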
diff --git a/contrib/llvm/lib/CodeGen/PHIElimination.cpp b/contrib/llvm/lib/CodeGen/PHIElimination.cpp
index edbc13f..ea6b094 100644
--- a/contrib/llvm/lib/CodeGen/PHIElimination.cpp
+++ b/contrib/llvm/lib/CodeGen/PHIElimination.cpp
@@ -34,7 +34,6 @@
using namespace llvm;
STATISTIC(NumAtomic, "Number of atomic phis lowered");
-STATISTIC(NumSplits, "Number of critical edges split on demand");
STATISTIC(NumReused, "Number of reused lowered phis");
char PHIElimination::ID = 0;
@@ -184,7 +183,6 @@ void llvm::PHIElimination::LowerAtomicPHINode(
// Create a new register for the incoming PHI arguments.
MachineFunction &MF = *MBB.getParent();
- const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(DestReg);
unsigned IncomingReg = 0;
bool reusedIncoming = false; // Is IncomingReg reused from an earlier PHI?
@@ -208,10 +206,12 @@ void llvm::PHIElimination::LowerAtomicPHINode(
++NumReused;
DEBUG(dbgs() << "Reusing %reg" << IncomingReg << " for " << *MPhi);
} else {
+ const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(DestReg);
entry = IncomingReg = MF.getRegInfo().createVirtualRegister(RC);
}
- TII->copyRegToReg(MBB, AfterPHIsIt, DestReg, IncomingReg, RC, RC,
- MPhi->getDebugLoc());
+ BuildMI(MBB, AfterPHIsIt, MPhi->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), DestReg)
+ .addReg(IncomingReg);
}
// Update live variable information if there is any.
@@ -293,8 +293,8 @@ void llvm::PHIElimination::LowerAtomicPHINode(
// Insert the copy.
if (!reusedIncoming && IncomingReg)
- TII->copyRegToReg(opBlock, InsertPos, IncomingReg, SrcReg, RC, RC,
- MPhi->getDebugLoc());
+ BuildMI(opBlock, InsertPos, MPhi->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), IncomingReg).addReg(SrcReg);
// Now update live variable information if we have it. Otherwise we're done
if (!LV) continue;
@@ -391,57 +391,8 @@ bool llvm::PHIElimination::SplitPHIEdges(MachineFunction &MF,
// (not considering PHI nodes). If the register is live in to this block
// anyway, we would gain nothing from splitting.
if (!LV.isLiveIn(Reg, MBB) && LV.isLiveOut(Reg, *PreMBB))
- SplitCriticalEdge(PreMBB, &MBB);
+ PreMBB->SplitCriticalEdge(&MBB, this);
}
}
return true;
}
-
-MachineBasicBlock *PHIElimination::SplitCriticalEdge(MachineBasicBlock *A,
- MachineBasicBlock *B) {
- assert(A && B && "Missing MBB end point");
-
- MachineFunction *MF = A->getParent();
-
- // We may need to update A's terminator, but we can't do that if AnalyzeBranch
- // fails. If A uses a jump table, we won't touch it.
- const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
- MachineBasicBlock *TBB = 0, *FBB = 0;
- SmallVector<MachineOperand, 4> Cond;
- if (TII->AnalyzeBranch(*A, TBB, FBB, Cond))
- return NULL;
-
- ++NumSplits;
-
- MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
- MF->insert(llvm::next(MachineFunction::iterator(A)), NMBB);
- DEBUG(dbgs() << "PHIElimination splitting critical edge:"
- " BB#" << A->getNumber()
- << " -- BB#" << NMBB->getNumber()
- << " -- BB#" << B->getNumber() << '\n');
-
- A->ReplaceUsesOfBlockWith(B, NMBB);
- A->updateTerminator();
-
- // Insert unconditional "jump B" instruction in NMBB if necessary.
- NMBB->addSuccessor(B);
- if (!NMBB->isLayoutSuccessor(B)) {
- Cond.clear();
- MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, B, NULL, Cond);
- }
-
- // Fix PHI nodes in B so they refer to NMBB instead of A
- for (MachineBasicBlock::iterator i = B->begin(), e = B->end();
- i != e && i->isPHI(); ++i)
- for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
- if (i->getOperand(ni+1).getMBB() == A)
- i->getOperand(ni+1).setMBB(NMBB);
-
- if (LiveVariables *LV=getAnalysisIfAvailable<LiveVariables>())
- LV->addNewBlock(NMBB, A, B);
-
- if (MachineDominatorTree *MDT=getAnalysisIfAvailable<MachineDominatorTree>())
- MDT->addNewBlock(NMBB, A);
-
- return NMBB;
-}
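[editor's note] With copyRegToReg gone, PHI lowering above emits plain COPY pseudo-instructions, and on-demand edge splitting moves to the new MachineBasicBlock::SplitCriticalEdge. A schematic of the lowered shape (hypothetical register numbers, not from the patch):

    //   BB3: %reg1027 = PHI [%reg1025, BB1], [%reg1026, BB2]
    //
    // lowers to
    //
    //   BB1: ...
    //        %reg1028 = COPY %reg1025   ; bottom of each predecessor
    //   BB2: ...
    //        %reg1028 = COPY %reg1026
    //   BB3: %reg1027 = COPY %reg1028   ; after any remaining PHIs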
diff --git a/contrib/llvm/lib/CodeGen/Passes.cpp b/contrib/llvm/lib/CodeGen/Passes.cpp
index 5ea2941..3489db2 100644
--- a/contrib/llvm/lib/CodeGen/Passes.cpp
+++ b/contrib/llvm/lib/CodeGen/Passes.cpp
@@ -24,6 +24,11 @@ using namespace llvm;
//===---------------------------------------------------------------------===//
MachinePassRegistry RegisterRegAlloc::Registry;
+static FunctionPass *createDefaultRegisterAllocator() { return 0; }
+static RegisterRegAlloc
+defaultRegAlloc("default",
+ "pick register allocator based on -O option",
+ createDefaultRegisterAllocator);
//===---------------------------------------------------------------------===//
///
@@ -33,8 +38,8 @@ MachinePassRegistry RegisterRegAlloc::Registry;
static cl::opt<RegisterRegAlloc::FunctionPassCtor, false,
RegisterPassParser<RegisterRegAlloc> >
RegAlloc("regalloc",
- cl::init(&createLinearScanRegisterAllocator),
- cl::desc("Register allocator to use (default=linearscan)"));
+ cl::init(&createDefaultRegisterAllocator),
+ cl::desc("Register allocator to use"));
//===---------------------------------------------------------------------===//
@@ -42,13 +47,22 @@ RegAlloc("regalloc",
/// createRegisterAllocator - choose the appropriate register allocator.
///
//===---------------------------------------------------------------------===//
-FunctionPass *llvm::createRegisterAllocator() {
+FunctionPass *llvm::createRegisterAllocator(CodeGenOpt::Level OptLevel) {
RegisterRegAlloc::FunctionPassCtor Ctor = RegisterRegAlloc::getDefault();
-
+
if (!Ctor) {
Ctor = RegAlloc;
RegisterRegAlloc::setDefault(RegAlloc);
}
-
- return Ctor();
+
+ if (Ctor != createDefaultRegisterAllocator)
+ return Ctor();
+
+ // When the 'default' allocator is requested, pick one based on OptLevel.
+ switch (OptLevel) {
+ case CodeGenOpt::None:
+ return createFastRegisterAllocator();
+ default:
+ return createLinearScanRegisterAllocator();
+ }
}
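[editor's note] The net effect of the Passes.cpp change, summarized (behavior per the switch above; "pbqp" is just one example of an explicit choice):

    // llc -O0 ...            -> createFastRegisterAllocator()
    // llc -O1/-O2/-O3 ...    -> createLinearScanRegisterAllocator()
    // llc -regalloc=pbqp ... -> that allocator, regardless of -O level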
diff --git a/contrib/llvm/lib/CodeGen/ExactHazardRecognizer.cpp b/contrib/llvm/lib/CodeGen/PostRAHazardRecognizer.cpp
index af5f289..cbde2b0 100644
--- a/contrib/llvm/lib/CodeGen/ExactHazardRecognizer.cpp
+++ b/contrib/llvm/lib/CodeGen/PostRAHazardRecognizer.cpp
@@ -1,4 +1,4 @@
-//===----- ExactHazardRecognizer.cpp - hazard recognizer -------- ---------===//
+//===----- PostRAHazardRecognizer.cpp - hazard recognizer ------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,8 +13,8 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "post-RA-sched"
-#include "ExactHazardRecognizer.h"
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/CodeGen/PostRAHazardRecognizer.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -22,10 +22,9 @@
using namespace llvm;
-ExactHazardRecognizer::
-ExactHazardRecognizer(const InstrItineraryData &LItinData) :
- ScheduleHazardRecognizer(), ItinData(LItinData)
-{
+PostRAHazardRecognizer::
+PostRAHazardRecognizer(const InstrItineraryData &LItinData) :
+ ScheduleHazardRecognizer(), ItinData(LItinData) {
// Determine the maximum depth of any itinerary. This determines the
// depth of the scoreboard. We always make the scoreboard at least 1
// cycle deep to avoid dealing with the boundary condition.
@@ -48,16 +47,16 @@ ExactHazardRecognizer(const InstrItineraryData &LItinData) :
ReservedScoreboard.reset(ScoreboardDepth);
RequiredScoreboard.reset(ScoreboardDepth);
- DEBUG(dbgs() << "Using exact hazard recognizer: ScoreboardDepth = "
+ DEBUG(dbgs() << "Using post-ra hazard recognizer: ScoreboardDepth = "
<< ScoreboardDepth << '\n');
}
-void ExactHazardRecognizer::Reset() {
+void PostRAHazardRecognizer::Reset() {
RequiredScoreboard.reset();
ReservedScoreboard.reset();
}
-void ExactHazardRecognizer::ScoreBoard::dump() const {
+void PostRAHazardRecognizer::ScoreBoard::dump() const {
dbgs() << "Scoreboard:\n";
unsigned last = Depth - 1;
@@ -73,7 +72,8 @@ void ExactHazardRecognizer::ScoreBoard::dump() const {
}
}
-ExactHazardRecognizer::HazardType ExactHazardRecognizer::getHazardType(SUnit *SU) {
+ScheduleHazardRecognizer::HazardType
+PostRAHazardRecognizer::getHazardType(SUnit *SU) {
if (ItinData.isEmpty())
return NoHazard;
@@ -120,7 +120,7 @@ ExactHazardRecognizer::HazardType ExactHazardRecognizer::getHazardType(SUnit *SU
return NoHazard;
}
-void ExactHazardRecognizer::EmitInstruction(SUnit *SU) {
+void PostRAHazardRecognizer::EmitInstruction(SUnit *SU) {
if (ItinData.isEmpty())
return;
@@ -174,7 +174,7 @@ void ExactHazardRecognizer::EmitInstruction(SUnit *SU) {
DEBUG(RequiredScoreboard.dump());
}
-void ExactHazardRecognizer::AdvanceCycle() {
+void PostRAHazardRecognizer::AdvanceCycle() {
ReservedScoreboard[0] = 0; ReservedScoreboard.advance();
RequiredScoreboard[0] = 0; RequiredScoreboard.advance();
}
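[editor's note] The renamed recognizer keeps its cycle-indexed scoreboards. A self-contained model of that data structure (plain C++, no LLVM; the real class sizes the ring from the maximum itinerary depth):

    #include <vector>

    class Scoreboard {
      std::vector<unsigned> Data; // one functional-unit mask per cycle
      unsigned Head;              // ring index of the current cycle
    public:
      explicit Scoreboard(unsigned Depth) : Data(Depth, 0), Head(0) {}

      // Access the reservation mask "Cycle" cycles from now.
      unsigned &operator[](unsigned Cycle) {
        return Data[(Head + Cycle) % Data.size()];
      }

      // Retire the current cycle; callers clear slot 0 first, exactly as
      // AdvanceCycle() above does: SB[0] = 0; SB.advance();
      void advance() { Head = (Head + 1) % Data.size(); }
    };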
diff --git a/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp b/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
index 9714ea6..4af8e07 100644
--- a/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
+++ b/contrib/llvm/lib/CodeGen/PostRASchedulerList.cpp
@@ -22,8 +22,6 @@
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
-#include "ExactHazardRecognizer.h"
-#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
@@ -65,10 +63,6 @@ EnableAntiDepBreaking("break-anti-dependencies",
cl::desc("Break post-RA scheduling anti-dependencies: "
"\"critical\", \"all\", or \"none\""),
cl::init("none"), cl::Hidden);
-static cl::opt<bool>
-EnablePostRAHazardAvoidance("avoid-hazards",
- cl::desc("Enable exact hazard avoidance"),
- cl::init(true), cl::Hidden);
// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
@@ -85,6 +79,7 @@ AntiDepBreaker::~AntiDepBreaker() { }
namespace {
class PostRAScheduler : public MachineFunctionPass {
AliasAnalysis *AA;
+ const TargetInstrInfo *TII;
CodeGenOpt::Level OptLevel;
public:
@@ -187,30 +182,9 @@ namespace {
};
}
-/// isSchedulingBoundary - Test if the given instruction should be
-/// considered a scheduling boundary. This primarily includes labels
-/// and terminators.
-///
-static bool isSchedulingBoundary(const MachineInstr *MI,
- const MachineFunction &MF) {
- // Terminators and labels can't be scheduled around.
- if (MI->getDesc().isTerminator() || MI->isLabel())
- return true;
-
- // Don't attempt to schedule around any instruction that defines
- // a stack-oriented pointer, as it's unlikely to be profitable. This
- // saves compile time, because it doesn't require every single
- // stack slot reference to depend on the instruction that does the
- // modification.
- const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
- if (MI->definesRegister(TLI.getStackPointerRegisterToSaveRestore()))
- return true;
-
- return false;
-}
-
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
AA = &getAnalysis<AliasAnalysis>();
+ TII = Fn.getTarget().getInstrInfo();
// Check for explicit enable/disable of post-ra scheduling.
TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
@@ -237,10 +211,10 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
- const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
- ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
- (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
- (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
+ const TargetMachine &TM = Fn.getTarget();
+ const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
+ ScheduleHazardRecognizer *HR =
+ TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins);
AntiDepBreaker *ADB =
((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
(AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, CriticalPathRCs) :
@@ -271,8 +245,8 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
MachineBasicBlock::iterator Current = MBB->end();
unsigned Count = MBB->size(), CurrentCount = Count;
for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
- MachineInstr *MI = prior(I);
- if (isSchedulingBoundary(MI, Fn)) {
+ MachineInstr *MI = llvm::prior(I);
+ if (TII->isSchedulingBoundary(MI, MBB, Fn)) {
Scheduler.Run(MBB, I, Current, CurrentCount);
Scheduler.EmitSchedule();
Current = MI;
@@ -680,15 +654,6 @@ void SchedulePostRATDList::ListScheduleTopDown() {
ScheduleNodeTopDown(FoundSUnit, CurCycle);
HazardRec->EmitInstruction(FoundSUnit);
CycleHasInsts = true;
-
- // If we are using the target-specific hazards, then don't
- // advance the cycle time just because we schedule a node. If
- // the target allows it we can schedule multiple nodes in the
- // same cycle.
- if (!EnablePostRAHazardAvoidance) {
- if (FoundSUnit->Latency) // Don't increment CurCycle for pseudo-ops!
- ++CurCycle;
- }
} else {
if (CycleHasInsts) {
DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
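[editor's note] Hazard-recognizer selection is now a target hook instead of the -avoid-hazards flag. A hypothetical target override, assuming the hook signature implied by the call site above:

    ScheduleHazardRecognizer *
    MyTargetInstrInfo::CreateTargetPostRAHazardRecognizer(
        const InstrItineraryData &II) const {
      // Default to the generic scoreboard recognizer; a target with unusual
      // hazards would return its own subclass here instead.
      return new PostRAHazardRecognizer(II);
    }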
diff --git a/contrib/llvm/lib/CodeGen/PreAllocSplitting.cpp b/contrib/llvm/lib/CodeGen/PreAllocSplitting.cpp
index 96e7327..fb2f909 100644
--- a/contrib/llvm/lib/CodeGen/PreAllocSplitting.cpp
+++ b/contrib/llvm/lib/CodeGen/PreAllocSplitting.cpp
@@ -512,9 +512,6 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
LI->addRange(LiveRange(UseIndex, EndIndex, RetVNI));
// FIXME: Need to set kills properly for inter-block stuff.
- if (RetVNI->isKill(UseIndex)) RetVNI->removeKill(UseIndex);
- if (IsIntraBlock)
- RetVNI->addKill(EndIndex);
} else if (ContainsDefs && ContainsUses) {
SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];
@@ -556,12 +553,6 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
NewVNs, LiveOut, Phis, false, true);
LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));
-
- if (foundUse && RetVNI->isKill(StartIndex))
- RetVNI->removeKill(StartIndex);
- if (IsIntraBlock) {
- RetVNI->addKill(EndIndex);
- }
}
// Memoize results so we don't have to recompute them.
@@ -636,9 +627,6 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
I->second->setHasPHIKill(true);
- SlotIndex KillIndex(LIs->getMBBEndIdx(I->first), true);
- if (!I->second->isKill(KillIndex))
- I->second->addKill(KillIndex);
}
}
@@ -648,8 +636,6 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
} else
EndIndex = LIs->getMBBEndIdx(MBB);
LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));
- if (IsIntraBlock)
- RetVNI->addKill(EndIndex);
// Memoize results so we don't have to recompute them.
if (!IsIntraBlock)
@@ -691,10 +677,12 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
// If the def is a move, set the copy field.
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (TII->isMoveInstr(*DI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
+ if (TII->isMoveInstr(*DI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
if (DstReg == LI->reg)
NewVN->setCopy(&*DI);
-
+ } else if (DI->isCopyLike() && DI->getOperand(0).getReg() == LI->reg)
+ NewVN->setCopy(&*DI);
+
NewVNs[&*DI] = NewVN;
}
@@ -725,25 +713,6 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
VNInfo* DeadVN = NewVNs[&*DI];
LI->addRange(LiveRange(DefIdx, DefIdx.getNextSlot(), DeadVN));
- DeadVN->addKill(DefIdx);
- }
-
- // Update kill markers.
- for (LiveInterval::vni_iterator VI = LI->vni_begin(), VE = LI->vni_end();
- VI != VE; ++VI) {
- VNInfo* VNI = *VI;
- for (unsigned i = 0, e = VNI->kills.size(); i != e; ++i) {
- SlotIndex KillIdx = VNI->kills[i];
- if (KillIdx.isPHI())
- continue;
- MachineInstr *KillMI = LIs->getInstructionFromIndex(KillIdx);
- if (KillMI) {
- MachineOperand *KillMO = KillMI->findRegisterUseOperand(CurrLI->reg);
- if (KillMO)
- // It could be a dead def.
- KillMO->setIsKill();
- }
- }
}
}
@@ -773,19 +742,14 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
VNsToCopy.push_back(OldVN);
// Locate two-address redefinitions
- for (VNInfo::KillSet::iterator KI = OldVN->kills.begin(),
- KE = OldVN->kills.end(); KI != KE; ++KI) {
- assert(!KI->isPHI() &&
- "VN previously reported having no PHI kills.");
- MachineInstr* MI = LIs->getInstructionFromIndex(*KI);
- unsigned DefIdx = MI->findRegisterDefOperandIdx(CurrLI->reg);
- if (DefIdx == ~0U) continue;
- if (MI->isRegTiedToUseOperand(DefIdx)) {
- VNInfo* NextVN =
- CurrLI->findDefinedVNInfoForRegInt(KI->getDefIndex());
- if (NextVN == OldVN) continue;
+ for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(CurrLI->reg),
+ DE = MRI->def_end(); DI != DE; ++DI) {
+ if (!DI->isRegTiedToUseOperand(DI.getOperandNo())) continue;
+ SlotIndex DefIdx = LIs->getInstructionIndex(&*DI).getDefIndex();
+ VNInfo* NextVN = CurrLI->findDefinedVNInfoForRegInt(DefIdx);
+ if (std::find(VNsToCopy.begin(), VNsToCopy.end(), NextVN) !=
+ VNsToCopy.end())
Stack.push_back(NextVN);
- }
}
}
@@ -836,7 +800,7 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
if (IntervalSSMap.count(CurrLI->reg))
IntervalSSMap[NewVReg] = IntervalSSMap[CurrLI->reg];
- NumRenumbers++;
+ ++NumRenumbers;
}
bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
@@ -854,7 +818,7 @@ bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
if (KillPt == DefMI->getParent()->end())
return false;
- TII->reMaterialize(MBB, RestorePt, VReg, 0, DefMI, TRI);
+ TII->reMaterialize(MBB, RestorePt, VReg, 0, DefMI, *TRI);
SlotIndex RematIdx = LIs->InsertMachineInstrInMaps(prior(RestorePt));
ReconstructLiveInterval(CurrLI);
@@ -899,12 +863,11 @@ MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
SS = MFI->CreateSpillStackObject(RC->getSize(), RC->getAlignment());
}
- MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
- FoldPt, Ops, SS);
+ MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);
if (FMI) {
LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
- FMI = MBB->insert(MBB->erase(FoldPt), FMI);
+ FoldPt->eraseFromParent();
++NumFolds;
IntervalSSMap[vreg] = SS;
@@ -980,12 +943,11 @@ MachineInstr* PreAllocSplitting::FoldRestore(unsigned vreg,
if (!TII->canFoldMemoryOperand(FoldPt, Ops))
return 0;
- MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
- FoldPt, Ops, SS);
+ MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);
if (FMI) {
LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
- FMI = MBB->insert(MBB->erase(FoldPt), FMI);
+ FoldPt->eraseFromParent();
++NumRestoreFolds;
}
@@ -1192,7 +1154,7 @@ unsigned PreAllocSplitting::getNumberOfNonSpills(
int StoreFrameIndex;
unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
if (StoreVReg != Reg || StoreFrameIndex != FrameIndex)
- NonSpills++;
+ ++NonSpills;
int DefIdx = (*UI)->findRegisterDefOperandIdx(Reg);
if (DefIdx != -1 && (*UI)->isRegTiedToUseOperand(DefIdx))
@@ -1255,7 +1217,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
(*LI)->removeValNo(CurrVN);
DefMI->eraseFromParent();
VNUseCount.erase(CurrVN);
- NumDeadSpills++;
+ ++NumDeadSpills;
changed = true;
continue;
}
@@ -1291,9 +1253,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
Ops.push_back(OpIdx);
if (!TII->canFoldMemoryOperand(use, Ops)) continue;
- MachineInstr* NewMI =
- TII->foldMemoryOperand(*use->getParent()->getParent(),
- use, Ops, FrameIndex);
+ MachineInstr* NewMI = TII->foldMemoryOperand(use, Ops, FrameIndex);
if (!NewMI) continue;
@@ -1303,10 +1263,9 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
(*LI)->removeValNo(CurrVN);
DefMI->eraseFromParent();
- MachineBasicBlock* MBB = use->getParent();
- NewMI = MBB->insert(MBB->erase(use), NewMI);
+ use->eraseFromParent();
VNUseCount[CurrVN].erase(use);
-
+
// Remove deleted instructions. Note that we need to remove them from
// the VNInfo->use map as well, just to be safe.
for (SmallPtrSet<MachineInstr*, 4>::iterator II =
@@ -1328,7 +1287,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
if (VI->second.erase(use))
VI->second.insert(NewMI);
- NumDeadSpills++;
+ ++NumDeadSpills;
changed = true;
continue;
}
@@ -1350,7 +1309,7 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
LIs->RemoveMachineInstrFromMaps(DefMI);
(*LI)->removeValNo(CurrVN);
DefMI->eraseFromParent();
- NumDeadSpills++;
+ ++NumDeadSpills;
changed = true;
}
}
diff --git a/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp b/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
index 62f525f..2e31908 100644
--- a/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
+++ b/contrib/llvm/lib/CodeGen/ProcessImplicitDefs.cpp
@@ -41,18 +41,48 @@ void ProcessImplicitDefs::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
-bool ProcessImplicitDefs::CanTurnIntoImplicitDef(MachineInstr *MI,
- unsigned Reg, unsigned OpIdx,
- const TargetInstrInfo *tii_) {
+bool
+ProcessImplicitDefs::CanTurnIntoImplicitDef(MachineInstr *MI,
+ unsigned Reg, unsigned OpIdx,
+ const TargetInstrInfo *tii_,
+ SmallSet<unsigned, 8> &ImpDefRegs) {
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
- Reg == SrcReg && SrcSubReg == 0 && DstSubReg == 0)
+ Reg == SrcReg &&
+ (DstSubReg == 0 || ImpDefRegs.count(DstReg)))
return true;
- if (OpIdx == 2 && MI->isSubregToReg())
- return true;
- if (OpIdx == 1 && MI->isExtractSubreg())
- return true;
+ switch(OpIdx) {
+ case 1:
+ return MI->isCopy() && (MI->getOperand(0).getSubReg() == 0 ||
+ ImpDefRegs.count(MI->getOperand(0).getReg()));
+ case 2:
+ return MI->isSubregToReg() && (MI->getOperand(0).getSubReg() == 0 ||
+ ImpDefRegs.count(MI->getOperand(0).getReg()));
+ default: return false;
+ }
+}
+
+static bool isUndefCopy(MachineInstr *MI, unsigned Reg,
+ const TargetInstrInfo *tii_,
+ SmallSet<unsigned, 8> &ImpDefRegs) {
+ if (MI->isCopy()) {
+ MachineOperand &MO0 = MI->getOperand(0);
+ MachineOperand &MO1 = MI->getOperand(1);
+ if (MO1.getReg() != Reg)
+ return false;
+ if (!MO0.getSubReg() || ImpDefRegs.count(MO0.getReg()))
+ return true;
+ return false;
+ }
+
+ unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
+ if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
+ if (Reg != SrcReg)
+ return false;
+ if (DstSubReg == 0 || ImpDefRegs.count(DstReg))
+ return true;
+ }
return false;
}
@@ -101,11 +131,10 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
continue;
}
- if (MI->isInsertSubreg()) {
- MachineOperand &MO = MI->getOperand(2);
- if (ImpDefRegs.count(MO.getReg())) {
- // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
- // This is an identity copy, eliminate it now.
+ // Eliminate %reg1032:sub<def> = COPY undef.
+ if (MI->isCopy() && MI->getOperand(0).getSubReg()) {
+ MachineOperand &MO = MI->getOperand(1);
+ if (MO.isUndef() || ImpDefRegs.count(MO.getReg())) {
if (MO.isKill()) {
LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
vi.removeKill(MI);
@@ -119,7 +148,7 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
bool ChangedToImpDef = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand& MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isUse() || MO.isUndef())
+ if (!MO.isReg() || (MO.isDef() && !MO.getSubReg()) || MO.isUndef())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
@@ -127,7 +156,7 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
if (!ImpDefRegs.count(Reg))
continue;
// Use is a copy, just turn it into an implicit_def.
- if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
+ if (CanTurnIntoImplicitDef(MI, Reg, i, tii_, ImpDefRegs)) {
bool isKill = MO.isKill();
MI->setDesc(tii_->get(TargetOpcode::IMPLICIT_DEF));
for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
@@ -144,6 +173,12 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
Changed = true;
MO.setIsUndef();
+ // This is a partial register redef of an implicit def.
+ // Make sure the whole register is defined by the instruction.
+ if (MO.isDef()) {
+ MI->addRegisterDefined(Reg);
+ continue;
+ }
if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
// Make sure other uses of the same register are marked undef as well.
for (unsigned j = i+1; j != e; ++j) {
@@ -218,9 +253,7 @@ bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
MachineInstr *RMI = RUses[i];
// Turn a copy use into an implicit_def.
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
- Reg == SrcReg && SrcSubReg == 0 && DstSubReg == 0) {
+ if (isUndefCopy(RMI, Reg, tii_, ImpDefRegs)) {
RMI->setDesc(tii_->get(TargetOpcode::IMPLICIT_DEF));
bool isKill = false;
diff --git a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index e778024..3843b25 100644
--- a/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/contrib/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -158,9 +158,9 @@ void PEI::calculateCallsInformation(MachineFunction &Fn) {
AdjustsStack = true;
FrameSDOps.push_back(I);
} else if (I->isInlineAsm()) {
- // An InlineAsm might be a call; assume it is to get the stack frame
- // aligned correctly for calls.
- AdjustsStack = true;
+ // Some inline asm's need a stack frame, as indicated by operand 1.
+ if (I->getOperand(1).getImm())
+ AdjustsStack = true;
}
MFI->setAdjustsStack(AdjustsStack);
@@ -202,22 +202,17 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
if (Fn.getFunction()->hasFnAttr(Attribute::Naked))
return;
- // Figure out which *callee saved* registers are modified by the current
- // function, thus needing to be saved and restored in the prolog/epilog.
- const TargetRegisterClass * const *CSRegClasses =
- RegInfo->getCalleeSavedRegClasses(&Fn);
-
std::vector<CalleeSavedInfo> CSI;
for (unsigned i = 0; CSRegs[i]; ++i) {
unsigned Reg = CSRegs[i];
if (Fn.getRegInfo().isPhysRegUsed(Reg)) {
// If the reg is modified, save it!
- CSI.push_back(CalleeSavedInfo(Reg, CSRegClasses[i]));
+ CSI.push_back(CalleeSavedInfo(Reg));
} else {
for (const unsigned *AliasSet = RegInfo->getAliasSet(Reg);
*AliasSet; ++AliasSet) { // Check alias registers too.
if (Fn.getRegInfo().isPhysRegUsed(*AliasSet)) {
- CSI.push_back(CalleeSavedInfo(Reg, CSRegClasses[i]));
+ CSI.push_back(CalleeSavedInfo(Reg));
break;
}
}
@@ -236,7 +231,7 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
for (std::vector<CalleeSavedInfo>::iterator
I = CSI.begin(), E = CSI.end(); I != E; ++I) {
unsigned Reg = I->getReg();
- const TargetRegisterClass *RC = I->getRegClass();
+ const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
int FrameIdx;
if (RegInfo->hasReservedSpillSlot(Fn, Reg, FrameIdx)) {
@@ -265,8 +260,7 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
} else {
// Spill it to the stack where we must.
- FrameIdx = MFI->CreateFixedObject(RC->getSize(), FixedSlot->Offset,
- true, false);
+ FrameIdx = MFI->CreateFixedObject(RC->getSize(), FixedSlot->Offset, true);
}
I->setFrameIdx(FrameIdx);
@@ -303,8 +297,10 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
EntryBlock->addLiveIn(CSI[i].getReg());
// Insert the spill to the stack frame.
- TII.storeRegToStackSlot(*EntryBlock, I, CSI[i].getReg(), true,
- CSI[i].getFrameIdx(), CSI[i].getRegClass(),TRI);
+ unsigned Reg = CSI[i].getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(*EntryBlock, I, Reg, true,
+ CSI[i].getFrameIdx(), RC, TRI);
}
}
@@ -328,9 +324,11 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
// terminators that precede it.
if (!TII.restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- TII.loadRegFromStackSlot(*MBB, I, CSI[i].getReg(),
+ unsigned Reg = CSI[i].getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(*MBB, I, Reg,
CSI[i].getFrameIdx(),
- CSI[i].getRegClass(), TRI);
+ RC, TRI);
assert(I != MBB->begin() &&
"loadRegFromStackSlot didn't insert any code!");
// Insert in reverse order. loadRegFromStackSlot can insert
@@ -374,10 +372,12 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
MBB->addLiveIn(blockCSI[i].getReg());
// Insert the spill to the stack frame.
- TII.storeRegToStackSlot(*MBB, I, blockCSI[i].getReg(),
+ unsigned Reg = blockCSI[i].getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.storeRegToStackSlot(*MBB, I, Reg,
true,
blockCSI[i].getFrameIdx(),
- blockCSI[i].getRegClass(), TRI);
+ RC, TRI);
}
}
@@ -423,9 +423,11 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
// Restore all registers immediately before the return and any
// terminators that precede it.
for (unsigned i = 0, e = blockCSI.size(); i != e; ++i) {
- TII.loadRegFromStackSlot(*MBB, I, blockCSI[i].getReg(),
+ unsigned Reg = blockCSI[i].getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(*MBB, I, Reg,
blockCSI[i].getFrameIdx(),
- blockCSI[i].getRegClass(), TRI);
+ RC, TRI);
assert(I != MBB->begin() &&
"loadRegFromStackSlot didn't insert any code!");
// Insert in reverse order. loadRegFromStackSlot can insert
@@ -639,6 +641,9 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
for (MachineFunction::iterator BB = Fn.begin(),
E = Fn.end(); BB != E; ++BB) {
+#ifndef NDEBUG
+ int SPAdjCount = 0; // frame setup / destroy count.
+#endif
int SPAdj = 0; // SP offset due to call frame setup / destroy.
if (RS && !FrameIndexVirtualScavenging) RS->enterBasicBlock(BB);
@@ -646,6 +651,10 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
if (I->getOpcode() == FrameSetupOpcode ||
I->getOpcode() == FrameDestroyOpcode) {
+#ifndef NDEBUG
+ // Track whether we see even pairs of them
+ SPAdjCount += I->getOpcode() == FrameSetupOpcode ? 1 : -1;
+#endif
// Remember how much SP has been adjusted to create the call
// frame.
int Size = I->getOperand(0).getImm();
@@ -712,7 +721,13 @@ void PEI::replaceFrameIndices(MachineFunction &Fn) {
if (RS && !FrameIndexVirtualScavenging && MI) RS->forward(MI);
}
- assert(SPAdj == 0 && "Unbalanced call frame setup / destroy pairs?");
+ // If we have evenly matched pairs of frame setup / destroy instructions,
+ // make sure the adjustments come out to zero. If we don't have matched
+ // pairs, we can't be sure the missing bit isn't in another basic block
+ // due to a custom inserter playing tricks, so just asserting SPAdj==0
+ // isn't sufficient. See tMOVCC on Thumb1, for example.
+ assert((SPAdjCount || SPAdj == 0) &&
+ "Unbalanced call frame setup / destroy pairs?");
}
}
@@ -870,11 +885,7 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) {
// Scavenge a new scratch register
CurrentVirtReg = Reg;
const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(Reg);
- CurrentScratchReg = RS->FindUnusedReg(RC);
- if (CurrentScratchReg == 0)
- // No register is "free". Scavenge a register.
- CurrentScratchReg = RS->scavengeRegister(RC, I, SPAdj);
-
+ CurrentScratchReg = RS->scavengeRegister(RC, I, SPAdj);
PrevValue = Value;
}
// replace this reference to the virtual register with the
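[editor's note] CalleeSavedInfo no longer caches a register class, so the spill/restore code above recomputes one from the physical register each time. The recurring pattern, extracted (same 2.8-era API; the helper name is ours):

    // Sketch: derive the spill register class on demand from the physreg.
    static void spillCSR(const TargetInstrInfo &TII,
                         const TargetRegisterInfo *TRI,
                         MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator I,
                         const CalleeSavedInfo &CS) {
      unsigned Reg = CS.getReg();
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      TII.storeRegToStackSlot(MBB, I, Reg, /*isKill=*/true,
                              CS.getFrameIdx(), RC, TRI);
    }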
diff --git a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
index b3b5760..f44478e 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocFast.cpp
@@ -110,6 +110,11 @@ namespace {
// Allocatable - vector of allocatable physical registers.
BitVector Allocatable;
+ // SkippedInstrs - Descriptors of instructions whose clobber list was ignored
+ // because all registers were spilled. It is still necessary to mark all the
+ // clobbered registers as used by the function.
+ SmallPtrSet<const TargetInstrDesc*, 4> SkippedInstrs;
+
// isBulkSpilling - This flag is set when LiveRegMap will be cleared
// completely after spilling all live registers. LiveRegMap entries should
// not be erased.
@@ -135,6 +140,8 @@ namespace {
private:
bool runOnMachineFunction(MachineFunction &Fn);
void AllocateBasicBlock();
+ void handleThroughOperands(MachineInstr *MI,
+ SmallVectorImpl<unsigned> &VirtDead);
int getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC);
bool isLastUseOfLocalReg(MachineOperand&);
@@ -508,27 +515,20 @@ RAFast::defineVirtReg(MachineInstr *MI, unsigned OpNum,
bool New;
tie(LRI, New) = LiveVirtRegs.insert(std::make_pair(VirtReg, LiveReg()));
LiveReg &LR = LRI->second;
- bool PartialRedef = MI->getOperand(OpNum).getSubReg();
if (New) {
// If there is no hint, peek at the only use of this register.
if ((!Hint || !TargetRegisterInfo::isPhysicalRegister(Hint)) &&
MRI->hasOneNonDBGUse(VirtReg)) {
+ const MachineInstr &UseMI = *MRI->use_nodbg_begin(VirtReg);
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
// It's a copy, use the destination register as a hint.
- if (TII->isMoveInstr(*MRI->use_nodbg_begin(VirtReg),
- SrcReg, DstReg, SrcSubReg, DstSubReg))
+ if (UseMI.isCopyLike())
+ Hint = UseMI.getOperand(0).getReg();
+ else if (TII->isMoveInstr(UseMI, SrcReg, DstReg, SrcSubReg, DstSubReg))
Hint = DstReg;
}
allocVirtReg(MI, *LRI, Hint);
- // If this is only a partial redefinition, we must reload the other parts.
- if (PartialRedef && MI->readsVirtualRegister(VirtReg)) {
- const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
- int FI = getStackSpaceFor(VirtReg, RC);
- DEBUG(dbgs() << "Reloading for partial redef: %reg" << VirtReg << "\n");
- TII->loadRegFromStackSlot(*MBB, MI, LR.PhysReg, FI, RC, TRI);
- ++NumLoads;
- }
- } else if (LR.LastUse && !PartialRedef) {
+ } else if (LR.LastUse) {
// Redefining a live register - kill at the last use, unless it is this
// instruction defining VirtReg multiple times.
if (LR.LastUse != MI || LR.LastUse->getOperand(LR.LastOpNum).isUse())
@@ -564,10 +564,16 @@ RAFast::reloadVirtReg(MachineInstr *MI, unsigned OpNum,
} else if (LR.Dirty) {
if (isLastUseOfLocalReg(MO)) {
DEBUG(dbgs() << "Killing last use: " << MO << "\n");
- MO.setIsKill();
+ if (MO.isUse())
+ MO.setIsKill();
+ else
+ MO.setIsDead();
} else if (MO.isKill()) {
DEBUG(dbgs() << "Clearing dubious kill: " << MO << "\n");
MO.setIsKill(false);
+ } else if (MO.isDead()) {
+ DEBUG(dbgs() << "Clearing dubious dead: " << MO << "\n");
+ MO.setIsDead(false);
}
} else if (MO.isKill()) {
// We must remove kill flags from uses of reloaded registers because the
@@ -576,6 +582,9 @@ RAFast::reloadVirtReg(MachineInstr *MI, unsigned OpNum,
// This would cause a second reload of %x into a different register.
DEBUG(dbgs() << "Clearing clean kill: " << MO << "\n");
MO.setIsKill(false);
+ } else if (MO.isDead()) {
+ DEBUG(dbgs() << "Clearing clean dead: " << MO << "\n");
+ MO.setIsDead(false);
}
assert(LR.PhysReg && "Register not assigned");
LR.LastUse = MI;
@@ -607,6 +616,91 @@ bool RAFast::setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg) {
return MO.isDead();
}
+// Handle special instruction operands like early clobbers and tied ops when
+// there are additional physreg defines.
+void RAFast::handleThroughOperands(MachineInstr *MI,
+ SmallVectorImpl<unsigned> &VirtDead) {
+ DEBUG(dbgs() << "Scanning for through registers:");
+ SmallSet<unsigned, 8> ThroughRegs;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ if (MO.isEarlyClobber() || MI->isRegTiedToDefOperand(i) ||
+ (MO.getSubReg() && MI->readsVirtualRegister(Reg))) {
+ if (ThroughRegs.insert(Reg))
+ DEBUG(dbgs() << " %reg" << Reg);
+ }
+ }
+
+ // If any physreg defines collide with preallocated through registers,
+ // we must spill and reallocate.
+ DEBUG(dbgs() << "\nChecking for physdef collisions.\n");
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isDef()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ UsedInInstr.set(Reg);
+ if (ThroughRegs.count(PhysRegState[Reg]))
+ definePhysReg(MI, Reg, regFree);
+ for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
+ UsedInInstr.set(*AS);
+ if (ThroughRegs.count(PhysRegState[*AS]))
+ definePhysReg(MI, *AS, regFree);
+ }
+ }
+
+ SmallVector<unsigned, 8> PartialDefs;
+ DEBUG(dbgs() << "Allocating tied uses and early clobbers.\n");
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ if (MO.isUse()) {
+ unsigned DefIdx = 0;
+ if (!MI->isRegTiedToDefOperand(i, &DefIdx)) continue;
+ DEBUG(dbgs() << "Operand " << i << "("<< MO << ") is tied to operand "
+ << DefIdx << ".\n");
+ LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0);
+ unsigned PhysReg = LRI->second.PhysReg;
+ setPhysReg(MI, i, PhysReg);
+ // Note: we don't update the def operand yet. That would cause the normal
+ // def-scan to attempt spilling.
+ } else if (MO.getSubReg() && MI->readsVirtualRegister(Reg)) {
+ DEBUG(dbgs() << "Partial redefine: " << MO << "\n");
+ // Reload the register, but don't assign to the operand just yet.
+ // That would confuse the later phys-def processing pass.
+ LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0);
+ PartialDefs.push_back(LRI->second.PhysReg);
+ } else if (MO.isEarlyClobber()) {
+ // Note: defineVirtReg may invalidate MO.
+ LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, 0);
+ unsigned PhysReg = LRI->second.PhysReg;
+ if (setPhysReg(MI, i, PhysReg))
+ VirtDead.push_back(Reg);
+ }
+ }
+
+ // Restore UsedInInstr to a state usable for allocating normal virtual uses.
+ UsedInInstr.reset();
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ UsedInInstr.set(Reg);
+ for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+ UsedInInstr.set(*AS);
+ }
+
+ // Also mark PartialDefs as used to avoid reallocation.
+ for (unsigned i = 0, e = PartialDefs.size(); i != e; ++i)
+ UsedInInstr.set(PartialDefs[i]);
+}
+
void RAFast::AllocateBasicBlock() {
DEBUG(dbgs() << "\nAllocating " << *MBB);
@@ -620,7 +714,7 @@ void RAFast::AllocateBasicBlock() {
E = MBB->livein_end(); I != E; ++I)
definePhysReg(MII, *I, regReserved);
- SmallVector<unsigned, 8> PhysECs, VirtDead;
+ SmallVector<unsigned, 8> VirtDead;
SmallVector<MachineInstr*, 32> Coalesced;
// Otherwise, sequentially allocate each instruction in the MBB.
@@ -670,8 +764,25 @@ void RAFast::AllocateBasicBlock() {
LiveRegMap::iterator LRI = LiveVirtRegs.find(Reg);
if (LRI != LiveVirtRegs.end())
setPhysReg(MI, i, LRI->second.PhysReg);
- else
- MO.setReg(0); // We can't allocate a physreg for a DebugValue, sorry!
+ else {
+ int SS = StackSlotForVirtReg[Reg];
+ if (SS == -1)
+ MO.setReg(0); // We can't allocate a physreg for a DebugValue, sorry!
+ else {
+ // Modify DBG_VALUE now that the value is in a spill slot.
+ uint64_t Offset = MI->getOperand(1).getImm();
+ const MDNode *MDPtr =
+ MI->getOperand(MI->getNumOperands()-1).getMetadata();
+ DebugLoc DL = MI->getDebugLoc();
+ if (MachineInstr *NewDV =
+ TII->emitFrameIndexDebugValue(*MF, SS, Offset, MDPtr, DL)) {
+ DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
+ MachineBasicBlock *MBB = MI->getParent();
+ MBB->insert(MBB->erase(MI), NewDV);
+ } else
+ MO.setReg(0); // We can't allocate a physreg for a DebugValue, sorry!
+ }
+ }
}
// Next instruction.
continue;
@@ -679,17 +790,25 @@ void RAFast::AllocateBasicBlock() {
// If this is a copy, we may be able to coalesce.
unsigned CopySrc, CopyDst, CopySrcSub, CopyDstSub;
- if (!TII->isMoveInstr(*MI, CopySrc, CopyDst, CopySrcSub, CopyDstSub))
+ if (MI->isCopy()) {
+ CopyDst = MI->getOperand(0).getReg();
+ CopySrc = MI->getOperand(1).getReg();
+ CopyDstSub = MI->getOperand(0).getSubReg();
+ CopySrcSub = MI->getOperand(1).getSubReg();
+ } else if (!TII->isMoveInstr(*MI, CopySrc, CopyDst, CopySrcSub, CopyDstSub))
CopySrc = CopyDst = 0;
// Track registers used by instruction.
UsedInInstr.reset();
- PhysECs.clear();
// First scan.
// Mark physreg uses and early clobbers as used.
// Find the end of the virtreg operands
unsigned VirtOpEnd = 0;
+ bool hasTiedOps = false;
+ bool hasEarlyClobbers = false;
+ bool hasPartialRedefs = false;
+ bool hasPhysDefs = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
@@ -697,20 +816,44 @@ void RAFast::AllocateBasicBlock() {
if (!Reg) continue;
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
VirtOpEnd = i+1;
+ if (MO.isUse()) {
+ hasTiedOps = hasTiedOps ||
+ TID.getOperandConstraint(i, TOI::TIED_TO) != -1;
+ } else {
+ if (MO.isEarlyClobber())
+ hasEarlyClobbers = true;
+ if (MO.getSubReg() && MI->readsVirtualRegister(Reg))
+ hasPartialRedefs = true;
+ }
continue;
}
if (!Allocatable.test(Reg)) continue;
if (MO.isUse()) {
usePhysReg(MO);
} else if (MO.isEarlyClobber()) {
- definePhysReg(MI, Reg, MO.isDead() ? regFree : regReserved);
- PhysECs.push_back(Reg);
- }
+ definePhysReg(MI, Reg, (MO.isImplicit() || MO.isDead()) ?
+ regFree : regReserved);
+ hasEarlyClobbers = true;
+ } else
+ hasPhysDefs = true;
+ }
+
+ // The instruction may have virtual register operands that must be allocated
+ // the same register at use-time and def-time: early clobbers and tied
+ // operands. If there are also physical defs, these registers must avoid
+ // both physical defs and uses, making them more constrained than normal
+ // operands.
+ // We didn't detect inline asm tied operands above, so just make this extra
+ // pass for all inline asm.
+ if (MI->isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
+ (hasTiedOps && hasPhysDefs)) {
+ handleThroughOperands(MI, VirtDead);
+ // Don't attempt coalescing when we have funny stuff going on.
+ CopyDst = 0;
}
// Second scan.
- // Allocate virtreg uses and early clobbers.
- // Collect VirtKills
+ // Allocate virtreg uses.
for (unsigned i = 0; i != VirtOpEnd; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
@@ -722,12 +865,6 @@ void RAFast::AllocateBasicBlock() {
CopySrc = (CopySrc == Reg || CopySrc == PhysReg) ? PhysReg : 0;
if (setPhysReg(MI, i, PhysReg))
killVirtReg(LRI);
- } else if (MO.isEarlyClobber()) {
- // Note: defineVirtReg may invalidate MO.
- LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, 0);
- unsigned PhysReg = LRI->second.PhysReg;
- setPhysReg(MI, i, PhysReg);
- PhysECs.push_back(PhysReg);
}
}
@@ -735,12 +872,16 @@ void RAFast::AllocateBasicBlock() {
// Track registers defined by instruction - early clobbers at this point.
UsedInInstr.reset();
- for (unsigned i = 0, e = PhysECs.size(); i != e; ++i) {
- unsigned PhysReg = PhysECs[i];
- UsedInInstr.set(PhysReg);
- for (const unsigned *AS = TRI->getAliasSet(PhysReg);
- unsigned Alias = *AS; ++AS)
- UsedInInstr.set(Alias);
+ if (hasEarlyClobbers) {
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isDef()) continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ UsedInInstr.set(Reg);
+ for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+ UsedInInstr.set(*AS);
+ }
}
unsigned DefOpEnd = MI->getNumOperands();
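TargetRegisterInfo::getAliasSet returns a pointer to a zero-terminated array of register numbers, which is why the loops in this file test *AS instead of comparing against an end iterator. The marking step above, restated as a helper:

  // Sketch: mark Reg plus every register aliasing it (e.g. AH, AX, EAX
  // on x86) in a bit vector indexed by physical register number.
  static void markRegAndAliases(BitVector &UsedInInstr, unsigned Reg,
                                const TargetRegisterInfo *TRI) {
    UsedInInstr.set(Reg);
    for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
      UsedInInstr.set(*AS);       // the list is terminated by a 0 entry
  }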
@@ -752,13 +893,18 @@ void RAFast::AllocateBasicBlock() {
DefOpEnd = VirtOpEnd;
DEBUG(dbgs() << " Spilling remaining registers before call.\n");
spillAll(MI);
+
+ // The imp-defs are skipped below, but we still need to mark those
+ // registers as used by the function.
+ SkippedInstrs.insert(&TID);
}
// Third scan.
// Allocate defs and collect dead defs.
for (unsigned i = 0; i != DefOpEnd; ++i) {
MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isDef() || !MO.getReg()) continue;
+ if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
+ continue;
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
@@ -837,6 +983,14 @@ bool RAFast::runOnMachineFunction(MachineFunction &Fn) {
// Make sure the set of used physregs is closed under subreg operations.
MRI->closePhysRegsUsed(*TRI);
+ // Add the clobber lists for all the instructions we skipped earlier.
+ for (SmallPtrSet<const TargetInstrDesc*, 4>::const_iterator
+ I = SkippedInstrs.begin(), E = SkippedInstrs.end(); I != E; ++I)
+ if (const unsigned *Defs = (*I)->getImplicitDefs())
+ while (*Defs)
+ MRI->setPhysRegUsed(*Defs++);
+
+ SkippedInstrs.clear();
StackSlotForVirtReg.clear();
return true;
}
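Deferring the imp-defs of skipped call instructions to one pass over SkippedInstrs keeps the per-instruction path cheap; the implicit-def list hanging off a TargetInstrDesc is zero-terminated just like an alias set. The replay step in isolation:

  // Sketch: record every implicit def of one skipped opcode as used, so
  // later passes still see the clobbered registers as touched.
  static void markImplicitDefsUsed(const TargetInstrDesc &TID,
                                   MachineRegisterInfo &MRI) {
    if (const unsigned *Defs = TID.getImplicitDefs())
      while (*Defs)
        MRI.setPhysRegUsed(*Defs++);
  }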
diff --git a/contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp b/contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp
index bc331f0..044672d 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocLinearScan.cpp
@@ -83,7 +83,8 @@ namespace {
// pressure, it can cause fewer GPRs to be held in the queue.
static cl::opt<unsigned>
NumRecentlyUsedRegs("linearscan-skip-count",
- cl::desc("Number of registers for linearscan to remember to skip."),
+ cl::desc("Number of registers for linearscan to remember"
+ "to skip."),
cl::init(0),
cl::Hidden);
@@ -421,9 +422,10 @@ unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
if (vni->def != SlotIndex() && vni->isDefAccurate() &&
(CopyMI = li_->getInstructionFromIndex(vni->def)) &&
- tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubReg, DstSubReg))
+ (CopyMI->isCopy() ||
+ tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubReg, DstSubReg)))
// Defined by a copy, try to extend SrcReg forward
- CandReg = SrcReg;
+ CandReg = CopyMI->isCopy() ? CopyMI->getOperand(1).getReg() : SrcReg;
else if (TrivCoalesceEnds &&
(CopyMI =
li_->getInstructionFromIndex(range.end.getBaseIndex())) &&
@@ -992,6 +994,24 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
if (Reg && allocatableRegs_[Reg] && RC->contains(Reg))
mri_->setRegAllocationHint(cur->reg, 0, Reg);
}
+ } else if (CopyMI && CopyMI->isCopy()) {
+ DstReg = CopyMI->getOperand(0).getReg();
+ DstSubReg = CopyMI->getOperand(0).getSubReg();
+ SrcReg = CopyMI->getOperand(1).getReg();
+ SrcSubReg = CopyMI->getOperand(1).getSubReg();
+ unsigned Reg = 0;
+ if (TargetRegisterInfo::isPhysicalRegister(SrcReg))
+ Reg = SrcReg;
+ else if (vrm_->isAssignedReg(SrcReg))
+ Reg = vrm_->getPhys(SrcReg);
+ if (Reg) {
+ if (SrcSubReg)
+ Reg = tri_->getSubReg(Reg, SrcSubReg);
+ if (DstSubReg)
+ Reg = tri_->getMatchingSuperReg(Reg, DstSubReg, RC);
+ if (Reg && allocatableRegs_[Reg] && RC->contains(Reg))
+ mri_->setRegAllocationHint(cur->reg, 0, Reg);
+ }
}
}
}
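The new else-if derives an allocation hint straight from a COPY instruction: when the copy's source already denotes (or is assigned) a physical register, that register is translated through the copy's sub-register indices and suggested for the destination. The translation is the subtle part; isolated, it is a sketch of the two calls used above:

  // Sketch: map a physreg known for the copy source to the register the
  // copy destination would have to occupy. Returns 0 when nothing matches.
  static unsigned translateCopyHint(unsigned PhysReg, unsigned SrcSubReg,
                                    unsigned DstSubReg,
                                    const TargetRegisterClass *RC,
                                    const TargetRegisterInfo *TRI) {
    if (SrcSubReg)                    // copy reads only a piece of PhysReg
      PhysReg = TRI->getSubReg(PhysReg, SrcSubReg);
    if (PhysReg && DstSubReg)         // copy writes only a piece of the dest
      PhysReg = TRI->getMatchingSuperReg(PhysReg, DstSubReg, RC);
    return PhysReg;
  }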
@@ -1206,8 +1226,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
DEBUG(dbgs() << "\t\t\tspilling(c): " << *cur << '\n');
SmallVector<LiveInterval*, 8> spillIs;
std::vector<LiveInterval*> added;
-
- added = spiller_->spill(cur, spillIs);
+ spiller_->spill(cur, added, spillIs);
std::sort(added.begin(), added.end(), LISorter());
addStackInterval(cur, ls_, li_, mri_, *vrm_);
@@ -1285,10 +1304,8 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
if (sli->beginIndex() < earliestStart)
earliestStart = sli->beginIndex();
- std::vector<LiveInterval*> newIs;
- newIs = spiller_->spill(sli, spillIs, &earliestStart);
+ spiller_->spill(sli, added, spillIs, &earliestStart);
addStackInterval(sli, ls_, li_, mri_, *vrm_);
- std::copy(newIs.begin(), newIs.end(), std::back_inserter(added));
spilled.insert(sli->reg);
}
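Both hunks switch the spiller to an out-parameter: new intervals are appended to the caller's `added` vector instead of being returned and copied, so one vector can accumulate results across repeated spills. The shape of the entry point these call sites imply (a sketch; the real declaration lives in Spiller.h, and the earliestIndex parameter is threaded through unchanged from the old interface):

  // Sketch of the updated Spiller interface as used above. Intervals
  // created while spilling li are appended to newIntervals.
  virtual void spill(LiveInterval *li,
                     std::vector<LiveInterval*> &newIntervals,
                     SmallVectorImpl<LiveInterval*> &spillIs,
                     SlotIndex *earliestIndex = 0) = 0;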
diff --git a/contrib/llvm/lib/CodeGen/RegAllocLocal.cpp b/contrib/llvm/lib/CodeGen/RegAllocLocal.cpp
deleted file mode 100644
index 321ae12..0000000
--- a/contrib/llvm/lib/CodeGen/RegAllocLocal.cpp
+++ /dev/null
@@ -1,1254 +0,0 @@
-//===-- RegAllocLocal.cpp - A BasicBlock generic register allocator -------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This register allocator allocates registers to a basic block at a time,
-// attempting to keep values in registers and reusing registers as appropriate.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "regalloc"
-#include "llvm/BasicBlock.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/RegAllocRegistry.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/IndexedMap.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
-#include <algorithm>
-using namespace llvm;
-
-STATISTIC(NumStores, "Number of stores added");
-STATISTIC(NumLoads , "Number of loads added");
-STATISTIC(NumCopies, "Number of copies coalesced");
-
-static RegisterRegAlloc
- localRegAlloc("local", "local register allocator",
- createLocalRegisterAllocator);
-
-namespace {
- class RALocal : public MachineFunctionPass {
- public:
- static char ID;
- RALocal() : MachineFunctionPass(&ID), StackSlotForVirtReg(-1) {}
- private:
- const TargetMachine *TM;
- MachineFunction *MF;
- MachineRegisterInfo *MRI;
- const TargetRegisterInfo *TRI;
- const TargetInstrInfo *TII;
-
- // StackSlotForVirtReg - Maps virtual regs to the frame index where these
- // values are spilled.
- IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
-
- // Virt2PhysRegMap - This map contains entries for each virtual register
- // that is currently available in a physical register.
- IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2PhysRegMap;
-
- unsigned &getVirt2PhysRegMapSlot(unsigned VirtReg) {
- return Virt2PhysRegMap[VirtReg];
- }
-
- // PhysRegsUsed - This array is effectively a map, containing entries for
- // each physical register that currently has a value (ie, it is in
- // Virt2PhysRegMap). The value mapped to is the virtual register
- // corresponding to the physical register (the inverse of the
- // Virt2PhysRegMap), or 0. The value is set to 0 if this register is pinned
- // because it is used by a future instruction, and to -2 if it is not
- // allocatable. If the entry for a physical register is -1, then the
- // physical register is "not in the map".
- //
- std::vector<int> PhysRegsUsed;
-
- // PhysRegsUseOrder - This contains a list of the physical registers that
- // currently have a virtual register value in them. This list provides an
- // ordering of registers, imposing a reallocation order. This list is only
- // used if all registers are allocated and we have to spill one, in which
- // case we spill the least recently used register. Entries at the front of
- // the list are the least recently used registers, entries at the back are
- // the most recently used.
- //
- std::vector<unsigned> PhysRegsUseOrder;
-
- // Virt2LastUseMap - This maps each virtual register to its last use
- // (MachineInstr*, operand index pair).
- IndexedMap<std::pair<MachineInstr*, unsigned>, VirtReg2IndexFunctor>
- Virt2LastUseMap;
-
- std::pair<MachineInstr*,unsigned>& getVirtRegLastUse(unsigned Reg) {
- assert(TargetRegisterInfo::isVirtualRegister(Reg) && "Illegal VirtReg!");
- return Virt2LastUseMap[Reg];
- }
-
- // VirtRegModified - This bitset contains information about which virtual
- // registers need to be spilled back to memory when their registers are
- // scavenged. If a virtual register has simply been rematerialized, there
- // is no reason to spill it to memory when we need the register back.
- //
- BitVector VirtRegModified;
-
- // UsedInMultipleBlocks - Tracks whether a particular register is used in
- // more than one block.
- BitVector UsedInMultipleBlocks;
-
- void markVirtRegModified(unsigned Reg, bool Val = true) {
- assert(TargetRegisterInfo::isVirtualRegister(Reg) && "Illegal VirtReg!");
- Reg -= TargetRegisterInfo::FirstVirtualRegister;
- if (Val)
- VirtRegModified.set(Reg);
- else
- VirtRegModified.reset(Reg);
- }
-
- bool isVirtRegModified(unsigned Reg) const {
- assert(TargetRegisterInfo::isVirtualRegister(Reg) && "Illegal VirtReg!");
- assert(Reg - TargetRegisterInfo::FirstVirtualRegister <
- VirtRegModified.size() && "Illegal virtual register!");
- return VirtRegModified[Reg - TargetRegisterInfo::FirstVirtualRegister];
- }
-
- void AddToPhysRegsUseOrder(unsigned Reg) {
- std::vector<unsigned>::iterator It =
- std::find(PhysRegsUseOrder.begin(), PhysRegsUseOrder.end(), Reg);
- if (It != PhysRegsUseOrder.end())
- PhysRegsUseOrder.erase(It);
- PhysRegsUseOrder.push_back(Reg);
- }
-
- void MarkPhysRegRecentlyUsed(unsigned Reg) {
- if (PhysRegsUseOrder.empty() ||
- PhysRegsUseOrder.back() == Reg) return; // Already most recently used
-
- for (unsigned i = PhysRegsUseOrder.size(); i != 0; --i) {
- unsigned RegMatch = PhysRegsUseOrder[i-1]; // remove from middle
- if (!areRegsEqual(Reg, RegMatch)) continue;
-
- PhysRegsUseOrder.erase(PhysRegsUseOrder.begin()+i-1);
- // Add it to the end of the list
- PhysRegsUseOrder.push_back(RegMatch);
- if (RegMatch == Reg)
- return; // Found an exact match, exit early
- }
- }
-
- public:
- virtual const char *getPassName() const {
- return "Local Register Allocator";
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addRequiredID(PHIEliminationID);
- AU.addRequiredID(TwoAddressInstructionPassID);
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-
- private:
- /// runOnMachineFunction - Register allocate the whole function
- bool runOnMachineFunction(MachineFunction &Fn);
-
- /// AllocateBasicBlock - Register allocate the specified basic block.
- void AllocateBasicBlock(MachineBasicBlock &MBB);
-
-
- /// areRegsEqual - This method returns true if the specified registers are
- /// related to each other. To do this, it checks to see if they are equal
- /// or if the first register is in the alias set of the second register.
- ///
- bool areRegsEqual(unsigned R1, unsigned R2) const {
- if (R1 == R2) return true;
- for (const unsigned *AliasSet = TRI->getAliasSet(R2);
- *AliasSet; ++AliasSet) {
- if (*AliasSet == R1) return true;
- }
- return false;
- }
-
- /// getStackSpaceFor - This returns the frame index of the specified virtual
- /// register on the stack, allocating space if necessary.
- int getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC);
-
- /// removePhysReg - This method marks the specified physical register as no
- /// longer being in use.
- ///
- void removePhysReg(unsigned PhysReg);
-
- void storeVirtReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned VirtReg, unsigned PhysReg, bool isKill);
-
- /// spillVirtReg - This method spills the value specified by PhysReg into
- /// the virtual register slot specified by VirtReg. It then updates the RA
- /// data structures to indicate the fact that PhysReg is now available.
- ///
- void spillVirtReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- unsigned VirtReg, unsigned PhysReg);
-
- /// spillPhysReg - This method spills the specified physical register into
- /// the virtual register slot associated with it. If OnlyVirtRegs is set to
- /// true, then the request is ignored if the physical register does not
- /// contain a virtual register.
- ///
- void spillPhysReg(MachineBasicBlock &MBB, MachineInstr *I,
- unsigned PhysReg, bool OnlyVirtRegs = false);
-
- /// assignVirtToPhysReg - This method updates local state so that we know
- /// that PhysReg is the proper container for VirtReg now. The physical
- /// register must not be used for anything else when this is called.
- ///
- void assignVirtToPhysReg(unsigned VirtReg, unsigned PhysReg);
-
- /// isPhysRegAvailable - Return true if the specified physical register is
- /// free and available for use. This also includes checking to see if
- /// aliased registers are all free...
- ///
- bool isPhysRegAvailable(unsigned PhysReg) const;
-
- /// getFreeReg - Look to see if there is a free register available in the
- /// specified register class. If not, return 0.
- ///
- unsigned getFreeReg(const TargetRegisterClass *RC);
-
- /// getReg - Find a physical register to hold the specified virtual
- /// register. If all compatible physical registers are used, this method
- /// spills the last used virtual register to the stack, and uses that
- /// register. If NoFree is true, that means the caller knows there isn't
- /// a free register, do not call getFreeReg().
- unsigned getReg(MachineBasicBlock &MBB, MachineInstr *MI,
- unsigned VirtReg, bool NoFree = false);
-
- /// reloadVirtReg - This method transforms the specified virtual
- /// register use to refer to a physical register. This method may do this
- /// in one of several ways: if the register is available in a physical
- /// register already, it uses that physical register. If the value is not
- /// in a physical register, and if there are physical registers available,
- /// it loads it into a register: PhysReg if that is an available physical
- /// register, otherwise any physical register of the right class.
- /// If register pressure is high, and it is possible, it tries to fold the
- /// load of the virtual register into the instruction itself. It avoids
- /// doing this if register pressure is low to improve the chance that
- /// subsequent instructions can use the reloaded value. This method
- /// returns the modified instruction.
- ///
- MachineInstr *reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
- unsigned OpNum, SmallSet<unsigned, 4> &RRegs,
- unsigned PhysReg);
-
- /// ComputeLocalLiveness - Computes liveness of registers within a basic
- /// block, setting the killed/dead flags as appropriate.
- void ComputeLocalLiveness(MachineBasicBlock& MBB);
-
- void reloadPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I,
- unsigned PhysReg);
- };
- char RALocal::ID = 0;
-}
-
-/// getStackSpaceFor - This allocates space for the specified virtual register
-/// to be held on the stack.
-int RALocal::getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC) {
- // Find the location Reg would belong...
- int SS = StackSlotForVirtReg[VirtReg];
- if (SS != -1)
- return SS; // Already has space allocated?
-
- // Allocate a new stack object for this spill location...
- int FrameIdx = MF->getFrameInfo()->CreateSpillStackObject(RC->getSize(),
- RC->getAlignment());
-
- // Assign the slot.
- StackSlotForVirtReg[VirtReg] = FrameIdx;
- return FrameIdx;
-}
-
-
-/// removePhysReg - This method marks the specified physical register as no
-/// longer being in use.
-///
-void RALocal::removePhysReg(unsigned PhysReg) {
- PhysRegsUsed[PhysReg] = -1; // PhysReg no longer used
-
- std::vector<unsigned>::iterator It =
- std::find(PhysRegsUseOrder.begin(), PhysRegsUseOrder.end(), PhysReg);
- if (It != PhysRegsUseOrder.end())
- PhysRegsUseOrder.erase(It);
-}
-
-/// storeVirtReg - Store a virtual register to its assigned stack slot.
-void RALocal::storeVirtReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned VirtReg, unsigned PhysReg,
- bool isKill) {
- const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(VirtReg);
- int FrameIndex = getStackSpaceFor(VirtReg, RC);
- DEBUG(dbgs() << " to stack slot #" << FrameIndex);
- TII->storeRegToStackSlot(MBB, I, PhysReg, isKill, FrameIndex, RC, TRI);
- ++NumStores; // Update statistics
-
- // Mark the spill instruction as last use if we're not killing the register.
- if (!isKill) {
- MachineInstr *Spill = llvm::prior(I);
- int OpNum = Spill->findRegisterUseOperandIdx(PhysReg);
- if (OpNum < 0)
- getVirtRegLastUse(VirtReg) = std::make_pair((MachineInstr*)0, 0);
- else
- getVirtRegLastUse(VirtReg) = std::make_pair(Spill, OpNum);
- }
-}
-
-/// spillVirtReg - This method spills the value specified by PhysReg into the
-/// virtual register slot specified by VirtReg. It then updates the RA data
-/// structures to indicate the fact that PhysReg is now available.
-///
-void RALocal::spillVirtReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned VirtReg, unsigned PhysReg) {
- assert(VirtReg && "Spilling a physical register is illegal!"
- " Must not have appropriate kill for the register or use exists beyond"
- " the intended one.");
- DEBUG(dbgs() << " Spilling register " << TRI->getName(PhysReg)
- << " containing %reg" << VirtReg);
-
- if (!isVirtRegModified(VirtReg)) {
- DEBUG(dbgs() << " which has not been modified, so no store necessary!");
- std::pair<MachineInstr*, unsigned> &LastUse = getVirtRegLastUse(VirtReg);
- if (LastUse.first)
- LastUse.first->getOperand(LastUse.second).setIsKill();
- } else {
- // Otherwise, there is a virtual register corresponding to this physical
- // register. We only need to spill it into its stack slot if it has been
- // modified.
- // If the instruction reads the register that's spilled, (e.g. this can
- // happen if it is a move to a physical register), then the spill
- // instruction is not a kill.
- bool isKill = !(I != MBB.end() && I->readsRegister(PhysReg));
- storeVirtReg(MBB, I, VirtReg, PhysReg, isKill);
- }
-
- getVirt2PhysRegMapSlot(VirtReg) = 0; // VirtReg no longer available
-
- DEBUG(dbgs() << '\n');
- removePhysReg(PhysReg);
-}
-
-
-/// spillPhysReg - This method spills the specified physical register into the
-/// virtual register slot associated with it. If OnlyVirtRegs is set to true,
-/// then the request is ignored if the physical register does not contain a
-/// virtual register.
-///
-void RALocal::spillPhysReg(MachineBasicBlock &MBB, MachineInstr *I,
- unsigned PhysReg, bool OnlyVirtRegs) {
- if (PhysRegsUsed[PhysReg] != -1) { // Only spill it if it's used!
- assert(PhysRegsUsed[PhysReg] != -2 && "Non allocable reg used!");
- if (PhysRegsUsed[PhysReg] || !OnlyVirtRegs)
- spillVirtReg(MBB, I, PhysRegsUsed[PhysReg], PhysReg);
- return;
- }
-
- // If the selected register aliases any other registers, we must make
- // sure that one of the aliases isn't alive.
- for (const unsigned *AliasSet = TRI->getAliasSet(PhysReg);
- *AliasSet; ++AliasSet) {
- if (PhysRegsUsed[*AliasSet] == -1 || // Spill aliased register.
- PhysRegsUsed[*AliasSet] == -2) // If allocatable.
- continue;
-
- if (PhysRegsUsed[*AliasSet])
- spillVirtReg(MBB, I, PhysRegsUsed[*AliasSet], *AliasSet);
- }
-}
-
-
-/// assignVirtToPhysReg - This method updates local state so that we know
-/// that PhysReg is the proper container for VirtReg now. The physical
-/// register must not be used for anything else when this is called.
-///
-void RALocal::assignVirtToPhysReg(unsigned VirtReg, unsigned PhysReg) {
- assert(PhysRegsUsed[PhysReg] == -1 && "Phys reg already assigned!");
- // Update information to note the fact that this register was just used, and
- // it holds VirtReg.
- PhysRegsUsed[PhysReg] = VirtReg;
- getVirt2PhysRegMapSlot(VirtReg) = PhysReg;
- AddToPhysRegsUseOrder(PhysReg); // New use of PhysReg
-}
-
-
-/// isPhysRegAvailable - Return true if the specified physical register is free
-/// and available for use. This also includes checking to see if aliased
-/// registers are all free...
-///
-bool RALocal::isPhysRegAvailable(unsigned PhysReg) const {
- if (PhysRegsUsed[PhysReg] != -1) return false;
-
- // If the selected register aliases any other allocated registers, it is
- // not free!
- for (const unsigned *AliasSet = TRI->getAliasSet(PhysReg);
- *AliasSet; ++AliasSet)
- if (PhysRegsUsed[*AliasSet] >= 0) // Aliased register in use?
- return false; // Can't use this reg then.
- return true;
-}
-
-
-/// getFreeReg - Look to see if there is a free register available in the
-/// specified register class. If not, return 0.
-///
-unsigned RALocal::getFreeReg(const TargetRegisterClass *RC) {
- // Get iterators defining the range of registers that are valid to allocate in
- // this class, which also specifies the preferred allocation order.
- TargetRegisterClass::iterator RI = RC->allocation_order_begin(*MF);
- TargetRegisterClass::iterator RE = RC->allocation_order_end(*MF);
-
- for (; RI != RE; ++RI)
- if (isPhysRegAvailable(*RI)) { // Is reg unused?
- assert(*RI != 0 && "Cannot use register!");
- return *RI; // Found an unused register!
- }
- return 0;
-}
-
-
-/// getReg - Find a physical register to hold the specified virtual
-/// register. If all compatible physical registers are used, this method spills
-/// the last used virtual register to the stack, and uses that register.
-///
-unsigned RALocal::getReg(MachineBasicBlock &MBB, MachineInstr *I,
- unsigned VirtReg, bool NoFree) {
- const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(VirtReg);
-
- // First check to see if we have a free register of the requested type...
- unsigned PhysReg = NoFree ? 0 : getFreeReg(RC);
-
- if (PhysReg != 0) {
- // Assign the register.
- assignVirtToPhysReg(VirtReg, PhysReg);
- return PhysReg;
- }
-
- // If we didn't find an unused register, scavenge one now!
- assert(!PhysRegsUseOrder.empty() && "No allocated registers??");
-
- // Loop over all of the preallocated registers from the least recently used
- // to the most recently used. When we find one that is capable of holding
- // our register, use it.
- for (unsigned i = 0; PhysReg == 0; ++i) {
- assert(i != PhysRegsUseOrder.size() &&
- "Couldn't find a register of the appropriate class!");
-
- unsigned R = PhysRegsUseOrder[i];
-
- // We can only use this register if it holds a virtual register (ie, it
- // can be spilled). Do not use it if it is an explicitly allocated
- // physical register!
- assert(PhysRegsUsed[R] != -1 &&
- "PhysReg in PhysRegsUseOrder, but is not allocated?");
- if (PhysRegsUsed[R] && PhysRegsUsed[R] != -2) {
- // If the current register is compatible, use it.
- if (RC->contains(R)) {
- PhysReg = R;
- break;
- }
-
- // If one of the registers aliased to the current register is
- // compatible, use it.
- for (const unsigned *AliasIt = TRI->getAliasSet(R);
- *AliasIt; ++AliasIt) {
- if (!RC->contains(*AliasIt)) continue;
-
- // If this is pinned down for some reason, don't use it. For
- // example, if CL is pinned, and we run across CH, don't use
- // CH as justification for scavenging ECX (which will
- // fail).
- if (PhysRegsUsed[*AliasIt] == 0) continue;
-
- // Make sure the register is allocatable. Don't allocate SIL on
- // x86-32.
- if (PhysRegsUsed[*AliasIt] == -2) continue;
-
- PhysReg = *AliasIt; // Take an aliased register
- break;
- }
- }
- }
-
- assert(PhysReg && "Physical register not assigned!?!?");
-
- // At this point PhysRegsUseOrder[i] is the least recently used register of
- // compatible register class. Spill it to memory and reap its remains.
- spillPhysReg(MBB, I, PhysReg);
-
- // Now that we know which register we need to assign this to, do it now!
- assignVirtToPhysReg(VirtReg, PhysReg);
- return PhysReg;
-}
-
-
-/// reloadVirtReg - This method transforms the specified virtual
-/// register use to refer to a physical register. This method may do this in
-/// one of several ways: if the register is available in a physical register
-/// already, it uses that physical register. If the value is not in a physical
-/// register, and if there are physical registers available, it loads it into a
-/// register: PhysReg if that is an available physical register, otherwise any
-/// register. If register pressure is high, and it is possible, it tries to
-/// fold the load of the virtual register into the instruction itself. It
-/// avoids doing this if register pressure is low to improve the chance that
-/// subsequent instructions can use the reloaded value. This method returns
-/// the modified instruction.
-///
-MachineInstr *RALocal::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
- unsigned OpNum,
- SmallSet<unsigned, 4> &ReloadedRegs,
- unsigned PhysReg) {
- unsigned VirtReg = MI->getOperand(OpNum).getReg();
- unsigned SubIdx = MI->getOperand(OpNum).getSubReg();
-
- // If the virtual register is already available, just update the instruction
- // and return.
- if (unsigned PR = getVirt2PhysRegMapSlot(VirtReg)) {
- if (SubIdx) {
- PR = TRI->getSubReg(PR, SubIdx);
- MI->getOperand(OpNum).setSubReg(0);
- }
- MI->getOperand(OpNum).setReg(PR); // Assign the input register
- if (!MI->isDebugValue()) {
- // Do not do these for DBG_VALUE as they can affect codegen.
- MarkPhysRegRecentlyUsed(PR); // Already have this value available!
- getVirtRegLastUse(VirtReg) = std::make_pair(MI, OpNum);
- }
- return MI;
- }
-
- // Otherwise, we need to fold it into the current instruction, or reload it.
- // If we have registers available to hold the value, use them.
- const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(VirtReg);
- // If we already have a PhysReg (this happens when the instruction is a
- // reg-to-reg copy with a PhysReg destination) use that.
- if (!PhysReg || !TargetRegisterInfo::isPhysicalRegister(PhysReg) ||
- !isPhysRegAvailable(PhysReg))
- PhysReg = getFreeReg(RC);
- int FrameIndex = getStackSpaceFor(VirtReg, RC);
-
- if (PhysReg) { // Register is available, allocate it!
- assignVirtToPhysReg(VirtReg, PhysReg);
- } else { // No registers available.
- // Force some poor hapless value out of the register file to
- // make room for the new register, and reload it.
- PhysReg = getReg(MBB, MI, VirtReg, true);
- }
-
- markVirtRegModified(VirtReg, false); // Note that this reg was just reloaded
-
- DEBUG(dbgs() << " Reloading %reg" << VirtReg << " into "
- << TRI->getName(PhysReg) << "\n");
-
- // Add move instruction(s)
- TII->loadRegFromStackSlot(MBB, MI, PhysReg, FrameIndex, RC, TRI);
- ++NumLoads; // Update statistics
-
- MF->getRegInfo().setPhysRegUsed(PhysReg);
- // Assign the input register.
- if (SubIdx) {
- MI->getOperand(OpNum).setSubReg(0);
- MI->getOperand(OpNum).setReg(TRI->getSubReg(PhysReg, SubIdx));
- } else
- MI->getOperand(OpNum).setReg(PhysReg); // Assign the input register
- getVirtRegLastUse(VirtReg) = std::make_pair(MI, OpNum);
-
- if (!ReloadedRegs.insert(PhysReg)) {
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Ran out of registers during register allocation!";
- if (MI->isInlineAsm()) {
- Msg << "\nPlease check your inline asm statement for invalid "
- << "constraints:\n";
- MI->print(Msg, TM);
- }
- report_fatal_error(Msg.str());
- }
- for (const unsigned *SubRegs = TRI->getSubRegisters(PhysReg);
- *SubRegs; ++SubRegs) {
- if (ReloadedRegs.insert(*SubRegs)) continue;
-
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Ran out of registers during register allocation!";
- if (MI->isInlineAsm()) {
- Msg << "\nPlease check your inline asm statement for invalid "
- << "constraints:\n";
- MI->print(Msg, TM);
- }
- report_fatal_error(Msg.str());
- }
-
- return MI;
-}
-
-/// isReadModWriteImplicitKill - True if this is an implicit kill for a
-/// read/mod/write register, i.e. update partial register.
-static bool isReadModWriteImplicitKill(MachineInstr *MI, unsigned Reg) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() == Reg && MO.isImplicit() &&
- MO.isDef() && !MO.isDead())
- return true;
- }
- return false;
-}
-
-/// isReadModWriteImplicitDef - True if this is an implicit def for a
-/// read/mod/write register, i.e. update partial register.
-static bool isReadModWriteImplicitDef(MachineInstr *MI, unsigned Reg) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() == Reg && MO.isImplicit() &&
- !MO.isDef() && MO.isKill())
- return true;
- }
- return false;
-}
-
-// precedes - Helper function to determine whether MachineInstr A
-// precedes MachineInstr B within the same MBB.
-static bool precedes(MachineBasicBlock::iterator A,
- MachineBasicBlock::iterator B) {
- if (A == B)
- return false;
-
- MachineBasicBlock::iterator I = A->getParent()->begin();
- while (I != A->getParent()->end()) {
- if (I == A)
- return true;
- else if (I == B)
- return false;
-
- ++I;
- }
-
- return false;
-}
-
-/// ComputeLocalLiveness - Computes liveness of registers within a basic
-/// block, setting the killed/dead flags as appropriate.
-void RALocal::ComputeLocalLiveness(MachineBasicBlock& MBB) {
- // Keep track of the most recently seen previous use or def of each reg,
- // so that we can update them with dead/kill markers.
- DenseMap<unsigned, std::pair<MachineInstr*, unsigned> > LastUseDef;
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I) {
- if (I->isDebugValue())
- continue;
-
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = I->getOperand(i);
- // Uses don't trigger any flags, but we need to save
- // them for later. Also, we have to process these
- // _before_ processing the defs, since an instr
- // uses regs before it defs them.
- if (!MO.isReg() || !MO.getReg() || !MO.isUse())
- continue;
-
- // Ignore helpful kill flags from earlier passes.
- MO.setIsKill(false);
-
- LastUseDef[MO.getReg()] = std::make_pair(I, i);
-
- if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) continue;
-
- const unsigned *Aliases = TRI->getAliasSet(MO.getReg());
- if (Aliases == 0)
- continue;
-
- while (*Aliases) {
- DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
- alias = LastUseDef.find(*Aliases);
-
- if (alias != LastUseDef.end() && alias->second.first != I)
- LastUseDef[*Aliases] = std::make_pair(I, i);
-
- ++Aliases;
- }
- }
-
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = I->getOperand(i);
- // Defs other than 2-addr redefs _do_ trigger flag changes:
- // - A def followed by a def is dead
- // - A use followed by a def is a kill
- if (!MO.isReg() || !MO.getReg() || !MO.isDef()) continue;
-
- unsigned SubIdx = MO.getSubReg();
- DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
- last = LastUseDef.find(MO.getReg());
- if (last != LastUseDef.end()) {
- // Check if this is a two address instruction. If so, then
- // the def does not kill the use.
- if (last->second.first == I && I->isRegTiedToUseOperand(i))
- continue;
-
- MachineOperand &lastUD =
- last->second.first->getOperand(last->second.second);
- if (SubIdx && lastUD.getSubReg() != SubIdx)
- // Partial re-def, the last def is not dead.
- // %reg1024:5<def> =
- // %reg1024:6<def> =
- // or
- // %reg1024:5<def> = op %reg1024, 5
- continue;
-
- if (lastUD.isDef())
- lastUD.setIsDead(true);
- else
- lastUD.setIsKill(true);
- }
-
- LastUseDef[MO.getReg()] = std::make_pair(I, i);
- }
- }
-
- // Live-out (of the function) registers contain return values of the function,
- // so we need to make sure they are alive at return time.
- MachineBasicBlock::iterator Ret = MBB.getFirstTerminator();
- bool BBEndsInReturn = (Ret != MBB.end() && Ret->getDesc().isReturn());
-
- if (BBEndsInReturn)
- for (MachineRegisterInfo::liveout_iterator
- I = MF->getRegInfo().liveout_begin(),
- E = MF->getRegInfo().liveout_end(); I != E; ++I)
- if (!Ret->readsRegister(*I)) {
- Ret->addOperand(MachineOperand::CreateReg(*I, false, true));
- LastUseDef[*I] = std::make_pair(Ret, Ret->getNumOperands()-1);
- }
-
- // Finally, loop over the final use/def of each reg
- // in the block and determine if it is dead.
- for (DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
- I = LastUseDef.begin(), E = LastUseDef.end(); I != E; ++I) {
- MachineInstr *MI = I->second.first;
- unsigned idx = I->second.second;
- MachineOperand &MO = MI->getOperand(idx);
-
- bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(MO.getReg());
-
- // A crude approximation of "live-out" calculation
- bool usedOutsideBlock = isPhysReg ? false :
- UsedInMultipleBlocks.test(MO.getReg() -
- TargetRegisterInfo::FirstVirtualRegister);
-
- // If the machine BB ends in a return instruction, then the value isn't used
- // outside of the BB.
- if (!isPhysReg && (!usedOutsideBlock || BBEndsInReturn)) {
- // DBG_VALUE complicates this: if the only refs of a register outside
- // this block are DBG_VALUE, we can't keep the reg live just for that,
- // as it will cause the reg to be spilled at the end of this block when
- // it wouldn't have been otherwise. Nullify the DBG_VALUEs when that
- // happens.
- bool UsedByDebugValueOnly = false;
- for (MachineRegisterInfo::reg_iterator UI = MRI->reg_begin(MO.getReg()),
- UE = MRI->reg_end(); UI != UE; ++UI) {
- // Two cases:
- // - used in another block
- // - used in the same block before it is defined (loop)
- if (UI->getParent() == &MBB &&
- !(MO.isDef() && UI.getOperand().isUse() && precedes(&*UI, MI)))
- continue;
-
- if (UI->isDebugValue()) {
- UsedByDebugValueOnly = true;
- continue;
- }
-
- // A non-DBG_VALUE use means we can leave DBG_VALUE uses alone.
- UsedInMultipleBlocks.set(MO.getReg() -
- TargetRegisterInfo::FirstVirtualRegister);
- usedOutsideBlock = true;
- UsedByDebugValueOnly = false;
- break;
- }
-
- if (UsedByDebugValueOnly)
- for (MachineRegisterInfo::reg_iterator UI = MRI->reg_begin(MO.getReg()),
- UE = MRI->reg_end(); UI != UE; ++UI)
- if (UI->isDebugValue() &&
- (UI->getParent() != &MBB ||
- (MO.isDef() && precedes(&*UI, MI))))
- UI.getOperand().setReg(0U);
- }
-
- // Physical registers and those that are not live-out of the block are
- // killed/dead at their last use/def within this block.
- if (isPhysReg || !usedOutsideBlock || BBEndsInReturn) {
- if (MO.isUse()) {
- // Don't mark uses that are tied to defs as kills.
- if (!MI->isRegTiedToDefOperand(idx))
- MO.setIsKill(true);
- } else {
- MO.setIsDead(true);
- }
- }
- }
-}
-
-void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) {
- // loop over each instruction
- MachineBasicBlock::iterator MII = MBB.begin();
-
- DEBUG({
- const BasicBlock *LBB = MBB.getBasicBlock();
- if (LBB)
- dbgs() << "\nStarting RegAlloc of BB: " << LBB->getName();
- });
-
- // Add live-in registers as active.
- for (MachineBasicBlock::livein_iterator I = MBB.livein_begin(),
- E = MBB.livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- MF->getRegInfo().setPhysRegUsed(Reg);
- PhysRegsUsed[Reg] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(Reg);
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] == -2) continue;
-
- AddToPhysRegsUseOrder(*SubRegs);
- PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
- MF->getRegInfo().setPhysRegUsed(*SubRegs);
- }
- }
-
- ComputeLocalLiveness(MBB);
-
- // Otherwise, sequentially allocate each instruction in the MBB.
- while (MII != MBB.end()) {
- MachineInstr *MI = MII++;
- const TargetInstrDesc &TID = MI->getDesc();
- DEBUG({
- dbgs() << "\nStarting RegAlloc of: " << *MI;
- dbgs() << " Regs have values: ";
- for (unsigned i = 0; i != TRI->getNumRegs(); ++i)
- if (PhysRegsUsed[i] != -1 && PhysRegsUsed[i] != -2) {
- if (PhysRegsUsed[i] && isVirtRegModified(PhysRegsUsed[i]))
- dbgs() << "*";
- dbgs() << "[" << TRI->getName(i)
- << ",%reg" << PhysRegsUsed[i] << "] ";
- }
- dbgs() << '\n';
- });
-
- // Determine whether this is a copy instruction. The cases where the
- // source or destination are phys regs are handled specially.
- unsigned SrcCopyReg, DstCopyReg, SrcCopySubReg, DstCopySubReg;
- unsigned SrcCopyPhysReg = 0U;
- bool isCopy = TII->isMoveInstr(*MI, SrcCopyReg, DstCopyReg,
- SrcCopySubReg, DstCopySubReg) &&
- SrcCopySubReg == DstCopySubReg;
- if (isCopy && TargetRegisterInfo::isVirtualRegister(SrcCopyReg))
- SrcCopyPhysReg = getVirt2PhysRegMapSlot(SrcCopyReg);
-
- // Loop over the implicit uses, making sure that they are at the head of the
- // use order list, so they don't get reallocated.
- if (TID.ImplicitUses) {
- for (const unsigned *ImplicitUses = TID.ImplicitUses;
- *ImplicitUses; ++ImplicitUses)
- MarkPhysRegRecentlyUsed(*ImplicitUses);
- }
-
- SmallVector<unsigned, 8> Kills;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isKill()) continue;
-
- if (!MO.isImplicit())
- Kills.push_back(MO.getReg());
- else if (!isReadModWriteImplicitKill(MI, MO.getReg()))
- // These are extra physical register kills when a sub-register
- // is defined (def of a sub-register is a read/mod/write of the
- // larger registers). Ignore.
- Kills.push_back(MO.getReg());
- }
-
- // If any physical regs are earlyclobber, spill any value they might
- // have in them, then mark them unallocatable.
- // If any virtual regs are earlyclobber, allocate them now (before
- // freeing inputs that are killed).
- if (MI->isInlineAsm()) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isDef() || !MO.isEarlyClobber() ||
- !MO.getReg())
- continue;
-
- if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
- unsigned DestVirtReg = MO.getReg();
- unsigned DestPhysReg;
-
- // If DestVirtReg already has a value, use it.
- if (!(DestPhysReg = getVirt2PhysRegMapSlot(DestVirtReg)))
- DestPhysReg = getReg(MBB, MI, DestVirtReg);
- MF->getRegInfo().setPhysRegUsed(DestPhysReg);
- markVirtRegModified(DestVirtReg);
- getVirtRegLastUse(DestVirtReg) =
- std::make_pair((MachineInstr*)0, 0);
- DEBUG(dbgs() << " Assigning " << TRI->getName(DestPhysReg)
- << " to %reg" << DestVirtReg << "\n");
- if (unsigned DestSubIdx = MO.getSubReg()) {
- MO.setSubReg(0);
- DestPhysReg = TRI->getSubReg(DestPhysReg, DestSubIdx);
- }
- MO.setReg(DestPhysReg); // Assign the earlyclobber register
- } else {
- unsigned Reg = MO.getReg();
- if (PhysRegsUsed[Reg] == -2) continue; // Something like ESP.
- // These are extra physical register defs when a sub-register
- // is defined (def of a sub-register is a read/mod/write of the
- // larger registers). Ignore.
- if (isReadModWriteImplicitDef(MI, MO.getReg())) continue;
-
- MF->getRegInfo().setPhysRegUsed(Reg);
- spillPhysReg(MBB, MI, Reg, true); // Spill any existing value in reg
- PhysRegsUsed[Reg] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(Reg);
-
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] == -2) continue;
- MF->getRegInfo().setPhysRegUsed(*SubRegs);
- PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(*SubRegs);
- }
- }
- }
- }
-
- // If a DBG_VALUE says something is located in a spilled register,
- // change the DBG_VALUE to be undef, which prevents the register
- // from being reloaded here. Doing that would change the generated
- // code, unless another use immediately follows this instruction.
- if (MI->isDebugValue() &&
- MI->getNumOperands()==3 && MI->getOperand(0).isReg()) {
- unsigned VirtReg = MI->getOperand(0).getReg();
- if (VirtReg && TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- !getVirt2PhysRegMapSlot(VirtReg))
- MI->getOperand(0).setReg(0U);
- }
-
- // Get the used operands into registers. This has the potential to spill
- // incoming values if we are out of registers. Note that we completely
- // ignore physical register uses here. We assume that if an explicit
- // physical register is referenced by the instruction, that it is guaranteed
- // to be live-in, or the input is badly hosed.
- //
- SmallSet<unsigned, 4> ReloadedRegs;
- for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
- MachineOperand &MO = MI->getOperand(i);
- // here we are looking for only used operands (never def&use)
- if (MO.isReg() && !MO.isDef() && MO.getReg() && !MO.isImplicit() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg()))
- MI = reloadVirtReg(MBB, MI, i, ReloadedRegs,
- isCopy ? DstCopyReg : 0);
- }
-
- // If this instruction is the last user of this register, kill the
- // value, freeing the register being used, so it doesn't need to be
- // spilled to memory.
- //
- for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
- unsigned VirtReg = Kills[i];
- unsigned PhysReg = VirtReg;
- if (TargetRegisterInfo::isVirtualRegister(VirtReg)) {
- // If the virtual register was never materialized into a register, it
- // might not be in the map, but it won't hurt to zero it out anyway.
- unsigned &PhysRegSlot = getVirt2PhysRegMapSlot(VirtReg);
- PhysReg = PhysRegSlot;
- PhysRegSlot = 0;
- } else if (PhysRegsUsed[PhysReg] == -2) {
- // Unallocatable register dead, ignore.
- continue;
- } else {
- assert((!PhysRegsUsed[PhysReg] || PhysRegsUsed[PhysReg] == -1) &&
- "Silently clearing a virtual register?");
- }
-
- if (!PhysReg) continue;
-
- DEBUG(dbgs() << " Last use of " << TRI->getName(PhysReg)
- << "[%reg" << VirtReg <<"], removing it from live set\n");
- removePhysReg(PhysReg);
- for (const unsigned *SubRegs = TRI->getSubRegisters(PhysReg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] != -2) {
- DEBUG(dbgs() << " Last use of "
- << TRI->getName(*SubRegs) << "[%reg" << VirtReg
- <<"], removing it from live set\n");
- removePhysReg(*SubRegs);
- }
- }
- }
-
- // Loop over all of the operands of the instruction, spilling registers that
- // are defined, and marking explicit destinations in the PhysRegsUsed map.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isDef() || MO.isImplicit() || !MO.getReg() ||
- MO.isEarlyClobber() ||
- !TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
- continue;
-
- unsigned Reg = MO.getReg();
- if (PhysRegsUsed[Reg] == -2) continue; // Something like ESP.
- // These are extra physical register defs when a sub-register
- // is defined (def of a sub-register is a read/mod/write of the
- // larger registers). Ignore.
- if (isReadModWriteImplicitDef(MI, MO.getReg())) continue;
-
- MF->getRegInfo().setPhysRegUsed(Reg);
- spillPhysReg(MBB, MI, Reg, true); // Spill any existing value in reg
- PhysRegsUsed[Reg] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(Reg);
-
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] == -2) continue;
-
- MF->getRegInfo().setPhysRegUsed(*SubRegs);
- PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(*SubRegs);
- }
- }
-
- // Loop over the implicit defs, spilling them as well.
- if (TID.ImplicitDefs) {
- for (const unsigned *ImplicitDefs = TID.ImplicitDefs;
- *ImplicitDefs; ++ImplicitDefs) {
- unsigned Reg = *ImplicitDefs;
- if (PhysRegsUsed[Reg] != -2) {
- spillPhysReg(MBB, MI, Reg, true);
- AddToPhysRegsUseOrder(Reg);
- PhysRegsUsed[Reg] = 0; // It is free and reserved now
- }
- MF->getRegInfo().setPhysRegUsed(Reg);
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] == -2) continue;
-
- AddToPhysRegsUseOrder(*SubRegs);
- PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
- MF->getRegInfo().setPhysRegUsed(*SubRegs);
- }
- }
- }
-
- SmallVector<unsigned, 8> DeadDefs;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.isDead())
- DeadDefs.push_back(MO.getReg());
- }
-
- // Okay, we have allocated all of the source operands and spilled any values
- // that would be destroyed by defs of this instruction. Loop over the
- // explicit defs and assign them to a register, spilling incoming values if
- // we need to scavenge a register.
- //
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isDef() || !MO.getReg() ||
- MO.isEarlyClobber() ||
- !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
- continue;
-
- unsigned DestVirtReg = MO.getReg();
- unsigned DestPhysReg;
-
- // If DestVirtReg already has a value, use it.
- if (!(DestPhysReg = getVirt2PhysRegMapSlot(DestVirtReg))) {
- // If this is a copy try to reuse the input as the output;
- // that will make the copy go away.
- // If this is a copy, the source reg is a phys reg, and
- // that reg is available, use that phys reg for DestPhysReg.
- // If this is a copy, the source reg is a virtual reg, and
- // the phys reg that was assigned to that virtual reg is now
- // available, use that phys reg for DestPhysReg. (If it's now
- // available that means this was the last use of the source.)
- if (isCopy &&
- TargetRegisterInfo::isPhysicalRegister(SrcCopyReg) &&
- isPhysRegAvailable(SrcCopyReg)) {
- DestPhysReg = SrcCopyReg;
- assignVirtToPhysReg(DestVirtReg, DestPhysReg);
- } else if (isCopy &&
- TargetRegisterInfo::isVirtualRegister(SrcCopyReg) &&
- SrcCopyPhysReg && isPhysRegAvailable(SrcCopyPhysReg) &&
- MF->getRegInfo().getRegClass(DestVirtReg)->
- contains(SrcCopyPhysReg)) {
- DestPhysReg = SrcCopyPhysReg;
- assignVirtToPhysReg(DestVirtReg, DestPhysReg);
- } else
- DestPhysReg = getReg(MBB, MI, DestVirtReg);
- }
- MF->getRegInfo().setPhysRegUsed(DestPhysReg);
- markVirtRegModified(DestVirtReg);
- getVirtRegLastUse(DestVirtReg) = std::make_pair((MachineInstr*)0, 0);
- DEBUG(dbgs() << " Assigning " << TRI->getName(DestPhysReg)
- << " to %reg" << DestVirtReg << "\n");
-
- if (unsigned DestSubIdx = MO.getSubReg()) {
- MO.setSubReg(0);
- DestPhysReg = TRI->getSubReg(DestPhysReg, DestSubIdx);
- }
- MO.setReg(DestPhysReg); // Assign the output register
- }
-
- // If this instruction defines any registers that are immediately dead,
- // kill them now.
- //
- for (unsigned i = 0, e = DeadDefs.size(); i != e; ++i) {
- unsigned VirtReg = DeadDefs[i];
- unsigned PhysReg = VirtReg;
- if (TargetRegisterInfo::isVirtualRegister(VirtReg)) {
- unsigned &PhysRegSlot = getVirt2PhysRegMapSlot(VirtReg);
- PhysReg = PhysRegSlot;
- assert(PhysReg != 0);
- PhysRegSlot = 0;
- } else if (PhysRegsUsed[PhysReg] == -2) {
- // Unallocatable register dead, ignore.
- continue;
- } else if (!PhysReg)
- continue;
-
- DEBUG(dbgs() << " Register " << TRI->getName(PhysReg)
- << " [%reg" << VirtReg
- << "] is never used, removing it from live set\n");
- removePhysReg(PhysReg);
- for (const unsigned *AliasSet = TRI->getAliasSet(PhysReg);
- *AliasSet; ++AliasSet) {
- if (PhysRegsUsed[*AliasSet] != -2) {
- DEBUG(dbgs() << " Register " << TRI->getName(*AliasSet)
- << " [%reg" << *AliasSet
- << "] is never used, removing it from live set\n");
- removePhysReg(*AliasSet);
- }
- }
- }
-
- // If this instruction is a call, make sure there are no dirty registers. The
- // call might throw an exception, and the landing pad expects to find all
- // registers in stack slots.
- if (TID.isCall())
- for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i) {
- if (PhysRegsUsed[i] <= 0) continue;
- unsigned VirtReg = PhysRegsUsed[i];
- if (!isVirtRegModified(VirtReg)) continue;
- DEBUG(dbgs() << " Storing dirty %reg" << VirtReg);
- storeVirtReg(MBB, MI, VirtReg, i, false);
- markVirtRegModified(VirtReg, false);
- DEBUG(dbgs() << " because the call might throw\n");
- }
-
- // Finally, if this is a noop copy instruction, zap it. (Except that if
- // the copy is dead, it must be kept to avoid messing up liveness info for
- // the register scavenger. See pr4100.)
- if (TII->isMoveInstr(*MI, SrcCopyReg, DstCopyReg,
- SrcCopySubReg, DstCopySubReg) &&
- SrcCopyReg == DstCopyReg && SrcCopySubReg == DstCopySubReg &&
- DeadDefs.empty()) {
- ++NumCopies;
- MBB.erase(MI);
- }
- }
-
- MachineBasicBlock::iterator MI = MBB.getFirstTerminator();
-
- // Spill all physical registers holding virtual registers now.
- for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i)
- if (PhysRegsUsed[i] != -1 && PhysRegsUsed[i] != -2) {
- if (unsigned VirtReg = PhysRegsUsed[i])
- spillVirtReg(MBB, MI, VirtReg, i);
- else
- removePhysReg(i);
- }
-
-#if 0
- // This checking code is very expensive.
- bool AllOk = true;
- for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
- e = MF->getRegInfo().getLastVirtReg(); i <= e; ++i)
- if (unsigned PR = Virt2PhysRegMap[i]) {
- cerr << "Register still mapped: " << i << " -> " << PR << "\n";
- AllOk = false;
- }
- assert(AllOk && "Virtual registers still in phys regs?");
-#endif
-
- // Clear any physical register which appear live at the end of the basic
- // block, but which do not hold any virtual registers. e.g., the stack
- // pointer.
- PhysRegsUseOrder.clear();
-}
-
-/// runOnMachineFunction - Register allocate the whole function
-///
-bool RALocal::runOnMachineFunction(MachineFunction &Fn) {
- DEBUG(dbgs() << "Machine Function\n");
- MF = &Fn;
- MRI = &Fn.getRegInfo();
- TM = &Fn.getTarget();
- TRI = TM->getRegisterInfo();
- TII = TM->getInstrInfo();
-
- PhysRegsUsed.assign(TRI->getNumRegs(), -1);
-
- // At various places we want to efficiently check to see whether a register
- // is allocatable. To handle this, we mark all unallocatable registers as
- // being pinned down, permanently.
- {
- BitVector Allocable = TRI->getAllocatableSet(Fn);
- for (unsigned i = 0, e = Allocable.size(); i != e; ++i)
- if (!Allocable[i])
- PhysRegsUsed[i] = -2; // Mark the reg unallocable.
- }
-
- // initialize the virtual->physical register map to have a 'null'
- // mapping for all virtual registers
- unsigned LastVirtReg = MF->getRegInfo().getLastVirtReg();
- StackSlotForVirtReg.grow(LastVirtReg);
- Virt2PhysRegMap.grow(LastVirtReg);
- Virt2LastUseMap.grow(LastVirtReg);
- VirtRegModified.resize(LastVirtReg+1 -
- TargetRegisterInfo::FirstVirtualRegister);
- UsedInMultipleBlocks.resize(LastVirtReg+1 -
- TargetRegisterInfo::FirstVirtualRegister);
-
- // Loop over all of the basic blocks, eliminating virtual register references
- for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
- MBB != MBBe; ++MBB)
- AllocateBasicBlock(*MBB);
-
- StackSlotForVirtReg.clear();
- PhysRegsUsed.clear();
- VirtRegModified.clear();
- UsedInMultipleBlocks.clear();
- Virt2PhysRegMap.clear();
- Virt2LastUseMap.clear();
- return true;
-}
-
-FunctionPass *llvm::createLocalRegisterAllocator() {
- return new RALocal();
-}
diff --git a/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp b/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
index 4fafd28..7e61a12 100644
--- a/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
+++ b/contrib/llvm/lib/CodeGen/RegAllocPBQP.cpp
@@ -396,28 +396,23 @@ PBQPRegAlloc::CoalesceMap PBQPRegAlloc::findCoalesces() {
if (srcRegIsPhysical && dstRegIsPhysical)
continue;
- // If it's a copy that includes a virtual register but the source and
- // destination classes differ then we can't coalesce, so continue with
- // the next instruction.
- const TargetRegisterClass *srcRegClass = srcRegIsPhysical ?
- tri->getPhysicalRegisterRegClass(srcReg) : mri->getRegClass(srcReg);
-
- const TargetRegisterClass *dstRegClass = dstRegIsPhysical ?
- tri->getPhysicalRegisterRegClass(dstReg) : mri->getRegClass(dstReg);
-
- if (srcRegClass != dstRegClass)
+ // If it's a copy between two virtual registers but the source and
+ // destination classes differ, then we can't coalesce.
+ if (!srcRegIsPhysical && !dstRegIsPhysical &&
+ mri->getRegClass(srcReg) != mri->getRegClass(dstReg))
continue;
- // We also need any physical regs to be allocable, coalescing with
- // a non-allocable register is invalid.
- if (srcRegIsPhysical) {
+ // If one is physical and one is virtual, check that the physical is
+ // allocatable in the class of the virtual.
+ if (srcRegIsPhysical && !dstRegIsPhysical) {
+ const TargetRegisterClass *dstRegClass = mri->getRegClass(dstReg);
if (std::find(dstRegClass->allocation_order_begin(*mf),
dstRegClass->allocation_order_end(*mf), srcReg) ==
dstRegClass->allocation_order_end(*mf))
continue;
}
-
- if (dstRegIsPhysical) {
+ if (!srcRegIsPhysical && dstRegIsPhysical) {
+ const TargetRegisterClass *srcRegClass = mri->getRegClass(srcReg);
if (std::find(srcRegClass->allocation_order_begin(*mf),
srcRegClass->allocation_order_end(*mf), dstReg) ==
srcRegClass->allocation_order_end(*mf))
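Both directions reduce to the same question: is the physical register an allowed allocation for the virtual register's class? The check used above, as a standalone helper:

  // Sketch: PhysReg is usable for virtual registers of class RC iff it
  // appears in RC's allocation order for this function.
  static bool isAllocatableIn(const TargetRegisterClass *RC,
                              unsigned PhysReg, const MachineFunction &MF) {
    return std::find(RC->allocation_order_begin(MF),   // <algorithm>
                     RC->allocation_order_end(MF),
                     PhysReg) != RC->allocation_order_end(MF);
  }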
diff --git a/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp b/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 1131e3d..ab0bc2d 100644
--- a/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -16,6 +16,8 @@
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Pass.h"
@@ -33,6 +35,160 @@ char RegisterCoalescer::ID = 0;
//
RegisterCoalescer::~RegisterCoalescer() {}
+unsigned CoalescerPair::compose(unsigned a, unsigned b) const {
+ if (!a) return b;
+ if (!b) return a;
+ return tri_.composeSubRegIndices(a, b);
+}
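compose treats index 0 as the identity, so only genuinely nested indices reach composeSubRegIndices. With hypothetical ARM-style NEON indices, the behavior is:

  // Sketch of compose() semantics (register/index names are illustrative):
  //   compose(0, ssub_1)      == ssub_1    identity on the left
  //   compose(dsub_1, 0)      == dsub_1    identity on the right
  //   compose(dsub_1, ssub_0) == ssub_2    the S register at the start of
  //                                        the second D register of a Q reg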
+
+bool CoalescerPair::isMoveInstr(const MachineInstr *MI,
+ unsigned &Src, unsigned &Dst,
+ unsigned &SrcSub, unsigned &DstSub) const {
+ if (MI->isCopy()) {
+ Dst = MI->getOperand(0).getReg();
+ DstSub = MI->getOperand(0).getSubReg();
+ Src = MI->getOperand(1).getReg();
+ SrcSub = MI->getOperand(1).getSubReg();
+ } else if (MI->isSubregToReg()) {
+ Dst = MI->getOperand(0).getReg();
+ DstSub = compose(MI->getOperand(0).getSubReg(), MI->getOperand(3).getImm());
+ Src = MI->getOperand(2).getReg();
+ SrcSub = MI->getOperand(2).getSubReg();
+ } else if (!tii_.isMoveInstr(*MI, Src, Dst, SrcSub, DstSub)) {
+ return false;
+ }
+ return true;
+}
+
+bool CoalescerPair::setRegisters(const MachineInstr *MI) {
+ srcReg_ = dstReg_ = subIdx_ = 0;
+ newRC_ = 0;
+ flipped_ = crossClass_ = false;
+
+ unsigned Src, Dst, SrcSub, DstSub;
+ if (!isMoveInstr(MI, Src, Dst, SrcSub, DstSub))
+ return false;
+ partial_ = SrcSub || DstSub;
+
+ // If one register is a physreg, it must be Dst.
+ if (TargetRegisterInfo::isPhysicalRegister(Src)) {
+ if (TargetRegisterInfo::isPhysicalRegister(Dst))
+ return false;
+ std::swap(Src, Dst);
+ std::swap(SrcSub, DstSub);
+ flipped_ = true;
+ }
+
+ const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+
+ if (TargetRegisterInfo::isPhysicalRegister(Dst)) {
+ // Eliminate DstSub on a physreg.
+ if (DstSub) {
+ Dst = tri_.getSubReg(Dst, DstSub);
+ if (!Dst) return false;
+ DstSub = 0;
+ }
+
+ // Eliminate SrcSub by picking a corresponding Dst superregister.
+ if (SrcSub) {
+ Dst = tri_.getMatchingSuperReg(Dst, SrcSub, MRI.getRegClass(Src));
+ if (!Dst) return false;
+ SrcSub = 0;
+ } else if (!MRI.getRegClass(Src)->contains(Dst)) {
+ return false;
+ }
+ } else {
+ // Both registers are virtual.
+
+ // Both registers have subreg indices.
+ if (SrcSub && DstSub) {
+ // For now we only handle the case of identical indices in commensurate
+ // registers: Dreg:ssub_1 + Dreg:ssub_1 -> Dreg
+ // FIXME: Handle Qreg:ssub_3 + Dreg:ssub_1 as QReg:dsub_1 + Dreg.
+ if (SrcSub != DstSub)
+ return false;
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
+ const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
+ if (!getCommonSubClass(DstRC, SrcRC))
+ return false;
+ SrcSub = DstSub = 0;
+ }
+
+ // There can be no SrcSub.
+ if (SrcSub) {
+ std::swap(Src, Dst);
+ DstSub = SrcSub;
+ SrcSub = 0;
+ assert(!flipped_ && "Unexpected flip");
+ flipped_ = true;
+ }
+
+ // Find the new register class.
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
+ const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
+ if (DstSub)
+ newRC_ = tri_.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
+ else
+ newRC_ = getCommonSubClass(DstRC, SrcRC);
+ if (!newRC_)
+ return false;
+ crossClass_ = newRC_ != DstRC || newRC_ != SrcRC;
+ }
+ // Check our invariants
+ assert(TargetRegisterInfo::isVirtualRegister(Src) && "Src must be virtual");
+ assert(!(TargetRegisterInfo::isPhysicalRegister(Dst) && DstSub) &&
+ "Cannot have a physical SubIdx");
+ srcReg_ = Src;
+ dstReg_ = Dst;
+ subIdx_ = DstSub;
+ return true;
+}
+
+bool CoalescerPair::flip() {
+ if (subIdx_ || TargetRegisterInfo::isPhysicalRegister(dstReg_))
+ return false;
+ std::swap(srcReg_, dstReg_);
+ flipped_ = !flipped_;
+ return true;
+}
+
+bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
+ if (!MI)
+ return false;
+ unsigned Src, Dst, SrcSub, DstSub;
+ if (!isMoveInstr(MI, Src, Dst, SrcSub, DstSub))
+ return false;
+
+ // Find the virtual register that is srcReg_.
+ if (Dst == srcReg_) {
+ std::swap(Src, Dst);
+ std::swap(SrcSub, DstSub);
+ } else if (Src != srcReg_) {
+ return false;
+ }
+
+ // Now check that Dst matches dstReg_.
+ if (TargetRegisterInfo::isPhysicalRegister(dstReg_)) {
+ if (!TargetRegisterInfo::isPhysicalRegister(Dst))
+ return false;
+ assert(!subIdx_ && "Inconsistent CoalescerPair state.");
+ // DstSub could be set for a physreg from INSERT_SUBREG.
+ if (DstSub)
+ Dst = tri_.getSubReg(Dst, DstSub);
+ // Full copy of Src.
+ if (!SrcSub)
+ return dstReg_ == Dst;
+ // This is a partial register copy. Check that the parts match.
+ return tri_.getSubReg(dstReg_, SrcSub) == Dst;
+ } else {
+ // dstReg_ is virtual.
+ if (dstReg_ != Dst)
+ return false;
+ // Registers match, do the subregisters line up?
+ return compose(subIdx_, SrcSub) == DstSub;
+ }
+}
+
// Because of the way .a files work, we must force the SimpleRC
// implementation to be pulled in if the RegisterCoalescer classes are
// pulled in. Otherwise we run the risk of RegisterCoalescer being
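CoalescerPair canonicalizes a copy instruction into a (srcReg_, dstReg_, subIdx_) triple that later queries are checked against. A minimal sketch of the intended call pattern (the constructor arguments are assumed to mirror the tii_/tri_ references used above; eraseRedundantCopy is a hypothetical helper):

    CoalescerPair CP(tii, tri);        // TargetInstrInfo, TargetRegisterInfo
    if (CP.setRegisters(CopyMI)) {     // canonicalize src/dst/subreg index
      // A later copy between the same registers is now recognizable
      // without re-deriving the pair:
      if (CP.isCoalescable(OtherCopyMI))
        eraseRedundantCopy(OtherCopyMI);   // hypothetical helper
    }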
diff --git a/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp b/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
index 690e59f..43b3fb6 100644
--- a/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
+++ b/contrib/llvm/lib/CodeGen/RegisterScavenging.cpp
@@ -141,6 +141,10 @@ void RegScavenger::forward() {
// Find out which registers are early clobbered, killed, defined, and marked
// def-dead in this instruction.
+ // FIXME: The scavenger is not predication aware. If the instruction is
+ // predicated, conservatively assume "kill" markers do not actually kill the
+ // register. Similarly, ignore "dead" markers.
+ bool isPred = TII->isPredicated(MI);
BitVector EarlyClobberRegs(NumPhysRegs);
BitVector KillRegs(NumPhysRegs);
BitVector DefRegs(NumPhysRegs);
@@ -155,11 +159,11 @@ void RegScavenger::forward() {
if (MO.isUse()) {
// Two-address operands implicitly kill.
- if (MO.isKill() || MI->isRegTiedToDefOperand(i))
+ if (!isPred && (MO.isKill() || MI->isRegTiedToDefOperand(i)))
addRegWithSubRegs(KillRegs, Reg);
} else {
assert(MO.isDef());
- if (MO.isDead())
+ if (!isPred && MO.isDead())
addRegWithSubRegs(DeadRegs, Reg);
else
addRegWithSubRegs(DefRegs, Reg);
@@ -238,8 +242,18 @@ unsigned RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const {
return 0;
}
+/// getRegsAvailable - Set the bits in Mask corresponding to all registers in
+/// the class RC that are currently available.
+void RegScavenger::getRegsAvailable(const TargetRegisterClass *RC,
+ BitVector &Mask) {
+ for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
+ I != E; ++I)
+ if (!isAliasUsed(*I))
+ Mask.set(*I);
+}
+
/// findSurvivorReg - Return the candidate register that is unused for the
-/// longest after MBBI. UseMI is set to the instruction where the search
+/// longest after StartMI. UseMI is set to the instruction where the search
/// stopped.
///
/// No more than InstrLimit instructions are inspected.
@@ -258,6 +272,10 @@ unsigned RegScavenger::findSurvivorReg(MachineBasicBlock::iterator StartMI,
bool inVirtLiveRange = false;
for (++MI; InstrLimit > 0 && MI != ME; ++MI, --InstrLimit) {
+ if (MI->isDebugValue()) {
+ ++InstrLimit; // Don't count debug instructions
+ continue;
+ }
bool isVirtKillInsn = false;
bool isVirtDefInsn = false;
// Remove any candidates touched by instruction.
@@ -321,13 +339,16 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,
Candidates.reset(MO.getReg());
}
+ // Try to find a register that's unused if there is one, as then we won't
+ // have to spill.
+ if ((Candidates & RegsAvailable).any())
+ Candidates &= RegsAvailable;
+
// Find the register whose use is furthest away.
MachineBasicBlock::iterator UseMI;
unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);
- // If we found an unused register there is no reason to spill it. We have
- // probably found a callee-saved register that has been saved in the
- // prologue, but happens to be unused at this point.
+ // If we found an unused register there is no reason to spill it.
if (!isAliasUsed(SReg))
return SReg;
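Narrowing Candidates to the currently unused registers before calling findSurvivorReg means a free register is preferred, and a spill is considered only when every candidate is live. The two-step preference, sketched independently of LLVM's BitVector (std::bitset stands in; the names are illustrative):

    #include <bitset>

    // Prefer candidates that are currently available; fall back to the
    // full candidate set only when none of them is free.
    std::bitset<64> pickPool(const std::bitset<64> &candidates,
                             const std::bitset<64> &available) {
      if ((candidates & available).any())
        return candidates & available;   // no spill will be needed
      return candidates;                 // every candidate is in use
    }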
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
index da20c12..7d39dc4 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAG.cpp
@@ -380,26 +380,26 @@ void ScheduleDAG::VerifySchedule(bool isBottomUp) {
}
#endif
-/// InitDAGTopologicalSorting - create the initial topological
+/// InitDAGTopologicalSorting - create the initial topological
/// ordering from the DAG to be scheduled.
///
-/// The idea of the algorithm is taken from
+/// The idea of the algorithm is taken from
/// "Online algorithms for managing the topological order of
/// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
-/// This is the MNR algorithm, which was first introduced by
-/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
+/// This is the MNR algorithm, which was first introduced by
+/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
/// "Maintaining a topological order under edge insertions".
///
-/// Short description of the algorithm:
+/// Short description of the algorithm:
///
/// Topological ordering, ord, of a DAG maps each node to a topological
/// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
///
-/// This means that if there is a path from the node X to the node Z,
+/// This means that if there is a path from the node X to the node Z,
/// then ord(X) < ord(Z).
///
/// This property can be used to check for reachability of nodes:
-/// if Z is reachable from X, then an insertion of the edge Z->X would
+/// if Z is reachable from X, then an insertion of the edge Z->X would
/// create a cycle.
///
/// The algorithm first computes a topological ordering for the DAG by
@@ -431,7 +431,7 @@ void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
// Collect leaf nodes.
WorkList.push_back(SU);
}
- }
+ }
int Id = DAGSize;
while (!WorkList.empty()) {
@@ -456,7 +456,7 @@ void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
SUnit *SU = &SUnits[i];
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
- assert(Node2Index[SU->NodeNum] > Node2Index[I->getSUnit()->NodeNum] &&
+ assert(Node2Index[SU->NodeNum] > Node2Index[I->getSUnit()->NodeNum] &&
"Wrong topological sorting");
}
}
@@ -494,7 +494,7 @@ void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) {
void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
bool& HasLoop) {
std::vector<const SUnit*> WorkList;
- WorkList.reserve(SUnits.size());
+ WorkList.reserve(SUnits.size());
WorkList.push_back(SU);
do {
@@ -504,20 +504,20 @@ void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
for (int I = SU->Succs.size()-1; I >= 0; --I) {
int s = SU->Succs[I].getSUnit()->NodeNum;
if (Node2Index[s] == UpperBound) {
- HasLoop = true;
+ HasLoop = true;
return;
}
// Visit successors if not already and in affected region.
if (!Visited.test(s) && Node2Index[s] < UpperBound) {
WorkList.push_back(SU->Succs[I].getSUnit());
- }
- }
+ }
+ }
} while (!WorkList.empty());
}
-/// Shift - Renumber the nodes so that the topological ordering is
+/// Shift - Renumber the nodes so that the topological ordering is
/// preserved.
-void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
+void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
int UpperBound) {
std::vector<int> L;
int shift = 0;
@@ -568,7 +568,7 @@ bool ScheduleDAGTopologicalSort::IsReachable(const SUnit *SU,
// Is Ord(TargetSU) < Ord(SU) ?
if (LowerBound < UpperBound) {
Visited.reset();
- // There may be a path from TargetSU to SU. Check for it.
+ // There may be a path from TargetSU to SU. Check for it.
DFS(TargetSU, UpperBound, HasLoop);
}
return HasLoop;
@@ -580,8 +580,7 @@ void ScheduleDAGTopologicalSort::Allocate(int n, int index) {
Index2Node[index] = n;
}
-ScheduleDAGTopologicalSort::ScheduleDAGTopologicalSort(
- std::vector<SUnit> &sunits)
- : SUnits(sunits) {}
+ScheduleDAGTopologicalSort::
+ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits) : SUnits(sunits) {}
ScheduleHazardRecognizer::~ScheduleHazardRecognizer() {}
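The invariant maintained throughout - ord(X) < ord(Y) for every edge X->Y - is what makes the IsReachable query above sound: inserting Z->X can only create a cycle when X already reaches Z in the order. A standalone checker for the invariant (plain adjacency lists, illustrative only):

    #include <vector>

    // True iff every edge goes from a smaller topological index to a
    // larger one, i.e. `order` is a valid topological numbering.
    bool isTopologicalOrder(const std::vector<std::vector<int> > &succs,
                            const std::vector<int> &order) {
      for (size_t u = 0; u < succs.size(); ++u)
        for (size_t j = 0; j < succs[u].size(); ++j)
          if (order[u] >= order[succs[u][j]])
            return false;
      return true;
    }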
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp
index ee08e1d..0a2fb37 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAGEmit.cpp
@@ -50,11 +50,8 @@ void ScheduleDAG::EmitPhysRegCopy(SUnit *SU,
break;
}
}
- bool Success = TII->copyRegToReg(*BB, InsertPos, Reg, VRI->second,
- SU->CopyDstRC, SU->CopySrcRC,
- DebugLoc());
- (void)Success;
- assert(Success && "copyRegToReg failed!");
+ BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), Reg)
+ .addReg(VRI->second);
} else {
// Copy from physical register.
assert(I->getReg() && "Unknown physical register!");
@@ -62,11 +59,8 @@ void ScheduleDAG::EmitPhysRegCopy(SUnit *SU,
bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase)).second;
isNew = isNew; // Silence compiler warning.
assert(isNew && "Node emitted out of order - early");
- bool Success = TII->copyRegToReg(*BB, InsertPos, VRBase, I->getReg(),
- SU->CopyDstRC, SU->CopySrcRC,
- DebugLoc());
- (void)Success;
- assert(Success && "copyRegToReg failed!");
+ BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), VRBase)
+ .addReg(I->getReg());
}
break;
}
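Both hunks above retire TargetInstrInfo::copyRegToReg - which could fail and had to be asserted on - in favor of the target-independent COPY pseudo-instruction, which is lowered after selection. The emission pattern, as used in this change:

    // Emit `Reg = COPY SrcReg` at InsertPos; no register classes are
    // passed and the builder cannot fail the way copyRegToReg could.
    BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), Reg)
      .addReg(SrcReg);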
diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h
index ad82db2..d90659b 100644
--- a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h
+++ b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.h
@@ -69,8 +69,10 @@ namespace llvm {
const SmallSet<unsigned, 8> &LoopLiveIns) {
unsigned Count = 0;
for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I, ++Count) {
+ I != E; ++I) {
const MachineInstr *MI = I;
+ if (MI->isDebugValue())
+ continue;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isUse())
@@ -79,6 +81,7 @@ namespace llvm {
if (LoopLiveIns.count(MOReg))
Deps.insert(std::make_pair(MOReg, std::make_pair(&MO, Count)));
}
+ ++Count; // Not every iteration due to dbg_value above.
}
const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
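Count now advances only for real instructions, keeping the indices stored in Deps aligned with positions that ignore interleaved dbg_value markers. The counting discipline, reduced to its essentials (recordUsesAt is a hypothetical stand-in for the dependence bookkeeping):

    unsigned Count = 0;
    for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      if (I->isDebugValue())
        continue;               // debug info must not perturb positions
      recordUsesAt(*I, Count);  // hypothetical helper
      ++Count;                  // advance only for real instructions
    }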
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/CMakeLists.txt b/contrib/llvm/lib/CodeGen/SelectionDAG/CMakeLists.txt
index 0cfd5e1..799988a 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/CMakeLists.txt
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/CMakeLists.txt
@@ -1,5 +1,4 @@
add_llvm_library(LLVMSelectionDAG
- CallingConvLower.cpp
DAGCombiner.cpp
FastISel.cpp
FunctionLoweringInfo.cpp
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 6bddd78..e671752 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -211,6 +211,7 @@ namespace {
SDValue visitBUILD_VECTOR(SDNode *N);
SDValue visitCONCAT_VECTORS(SDNode *N);
SDValue visitVECTOR_SHUFFLE(SDNode *N);
+ SDValue visitMEMBARRIER(SDNode *N);
SDValue XformToShuffleWithZero(SDNode *N);
SDValue ReassociateOps(unsigned Opc, DebugLoc DL, SDValue LHS, SDValue RHS);
@@ -668,7 +669,7 @@ SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD : ISD::EXTLOAD)
: LD->getExtensionType();
Replace = true;
- return DAG.getExtLoad(ExtType, dl, PVT,
+ return DAG.getExtLoad(ExtType, PVT, dl,
LD->getChain(), LD->getBasePtr(),
LD->getSrcValue(), LD->getSrcValueOffset(),
MemVT, LD->isVolatile(),
@@ -890,7 +891,7 @@ bool DAGCombiner::PromoteLoad(SDValue Op) {
ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD)
? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT) ? ISD::ZEXTLOAD : ISD::EXTLOAD)
: LD->getExtensionType();
- SDValue NewLD = DAG.getExtLoad(ExtType, dl, PVT,
+ SDValue NewLD = DAG.getExtLoad(ExtType, PVT, dl,
LD->getChain(), LD->getBasePtr(),
LD->getSrcValue(), LD->getSrcValueOffset(),
MemVT, LD->isVolatile(),
@@ -1079,6 +1080,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::BUILD_VECTOR: return visitBUILD_VECTOR(N);
case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N);
case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N);
+ case ISD::MEMBARRIER: return visitMEMBARRIER(N);
}
return SDValue();
}
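Most of the mechanical churn in this file comes from SelectionDAG::getExtLoad moving the result type ahead of the debug location, i.e. from getExtLoad(ExtType, dl, VT, ...) to getExtLoad(ExtType, VT, dl, ...); the trailing operands are unchanged. A call under the new order, mirroring the surrounding hunks:

    SDValue ExtLoad =
        DAG.getExtLoad(ISD::ZEXTLOAD, VT, N0.getDebugLoc(),  // type before loc
                       LN0->getChain(), LN0->getBasePtr(),
                       LN0->getSrcValue(), LN0->getSrcValueOffset(),
                       MemVT, LN0->isVolatile(), LN0->isNonTemporal(),
                       LN0->getAlignment());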
@@ -1313,7 +1315,7 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
if (!LegalOperations && TLI.isOffsetFoldingLegal(GA) && N1C &&
GA->getOpcode() == ISD::GlobalAddress)
- return DAG.getGlobalAddress(GA->getGlobal(), VT,
+ return DAG.getGlobalAddress(GA->getGlobal(), N1C->getDebugLoc(), VT,
GA->getOffset() +
(uint64_t)N1C->getSExtValue());
// fold ((c1-A)+c2) -> (c1+c2)-A
@@ -1550,7 +1552,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) {
// fold (sub Sym, c) -> Sym-c
if (N1C && GA->getOpcode() == ISD::GlobalAddress)
- return DAG.getGlobalAddress(GA->getGlobal(), VT,
+ return DAG.getGlobalAddress(GA->getGlobal(), N1C->getDebugLoc(), VT,
GA->getOffset() -
(uint64_t)N1C->getSExtValue());
// fold (sub Sym+c1, Sym+c2) -> c1-c2
@@ -2028,7 +2030,7 @@ SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
// fold (OP (zext x), (zext y)) -> (zext (OP x, y))
// fold (OP (sext x), (sext y)) -> (sext (OP x, y))
// fold (OP (aext x), (aext y)) -> (aext (OP x, y))
- // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y))
+ // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free)
//
// do not sink logical op inside of a vector extend, since it may combine
// into a vsetcc.
@@ -2038,7 +2040,10 @@ SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) {
// Avoid infinite looping with PromoteIntBinOp.
(N0.getOpcode() == ISD::ANY_EXTEND &&
(!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) ||
- (N0.getOpcode() == ISD::TRUNCATE && TLI.isTypeLegal(Op0VT))) &&
+ (N0.getOpcode() == ISD::TRUNCATE &&
+ (!TLI.isZExtFree(VT, Op0VT) ||
+ !TLI.isTruncateFree(Op0VT, VT)) &&
+ TLI.isTypeLegal(Op0VT))) &&
!VT.isVector() &&
Op0VT == N1.getOperand(0).getValueType() &&
(!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) {
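The added isZExtFree/isTruncateFree test keeps the fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) from firing when the target performs the truncation for free anyway, since hoisting it would then gain nothing. For the bitwise opcodes this fold covers, the identity is plain integer arithmetic:

    #include <cstdint>

    // Truncating the result equals operating on the truncations:
    // (uint32_t)(a & b) == ((uint32_t)a & (uint32_t)b) for all a, b,
    // and likewise for | and ^.
    uint32_t narrowAnd(uint64_t a, uint64_t b) {
      return static_cast<uint32_t>(a & b);
    }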
@@ -2193,7 +2198,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
BitWidth - MemVT.getScalarType().getSizeInBits())) &&
((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
- SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N0.getDebugLoc(),
LN0->getChain(), LN0->getBasePtr(),
LN0->getSrcValue(),
LN0->getSrcValueOffset(), MemVT,
@@ -2216,7 +2221,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
BitWidth - MemVT.getScalarType().getSizeInBits())) &&
((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT))) {
- SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N0.getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N0.getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), MemVT,
@@ -2250,7 +2255,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
SDValue NewLoad =
- DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), LoadResultTy,
+ DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
LN0->getChain(), LN0->getBasePtr(),
LN0->getSrcValue(), LN0->getSrcValueOffset(),
ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
@@ -2286,7 +2291,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
SDValue Load =
- DAG.getExtLoad(ISD::ZEXTLOAD, LN0->getDebugLoc(), LoadResultTy,
+ DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
LN0->getChain(), NewPtr,
LN0->getSrcValue(), LN0->getSrcValueOffset(),
ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
@@ -2317,7 +2322,8 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
}
// fold (or x, undef) -> -1
- if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) {
+ if (!LegalOperations &&
+ (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)) {
EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
return DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
}
@@ -2425,6 +2431,11 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
if (SDNode *Rot = MatchRotate(N0, N1, N->getDebugLoc()))
return SDValue(Rot, 0);
+ // Simplify the operands using demanded-bits information.
+ if (!VT.isVector() &&
+ SimplifyDemandedBits(SDValue(N, 0)))
+ return SDValue(N, 0);
+
return SDValue();
}
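The new SimplifyDemandedBits call lets the combiner shrink the OR's operands when users of the node only read some of the result's bits; the legality rests on OR distributing over masking:

    #include <cstdint>

    // Only the demanded (masked) bits of each operand can reach the
    // demanded bits of the result: ((x | y) & m) == ((x & m) | (y & m)).
    uint32_t demandedOr(uint32_t x, uint32_t y, uint32_t m) {
      return (x & m) | (y & m);
    }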
@@ -3158,6 +3169,11 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
return NewSRL;
}
+ // Attempt to convert a srl of a load into a narrower zero-extending load.
+ SDValue NarrowLoad = ReduceLoadWidth(N);
+ if (NarrowLoad.getNode())
+ return NarrowLoad;
+
// Here is a common situation. We want to optimize:
//
// %a = ...
@@ -3487,7 +3503,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI);
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(),
@@ -3531,7 +3547,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
EVT MemVT = LN0->getMemoryVT();
if ((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, MemVT)) {
- SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), MemVT,
@@ -3557,24 +3573,24 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
// we know that the element size of the sext'd result matches the
// element size of the compare operands.
if (VT.getSizeInBits() == N0VT.getSizeInBits())
- return DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
- N0.getOperand(1),
- cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ return DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
+ N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
// If the desired elements are smaller or larger than the source
// elements we can use a matching integer vector type and then
// truncate/sign extend
else {
- EVT MatchingElementType =
- EVT::getIntegerVT(*DAG.getContext(),
- N0VT.getScalarType().getSizeInBits());
- EVT MatchingVectorType =
- EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
- N0VT.getVectorNumElements());
- SDValue VsetCC =
- DAG.getVSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
- N0.getOperand(1),
- cast<CondCodeSDNode>(N0.getOperand(2))->get());
- return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
+ EVT MatchingElementType =
+ EVT::getIntegerVT(*DAG.getContext(),
+ N0VT.getScalarType().getSizeInBits());
+ EVT MatchingVectorType =
+ EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
+ N0VT.getVectorNumElements());
+ SDValue VsetCC =
+ DAG.getVSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
+ N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
}
}
@@ -3635,10 +3651,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
// fold (zext (truncate x)) -> (and x, mask)
if (N0.getOpcode() == ISD::TRUNCATE &&
- (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT)) &&
- (!TLI.isTruncateFree(N0.getOperand(0).getValueType(),
- N0.getValueType()) ||
- !TLI.isZExtFree(N0.getValueType(), VT))) {
+ (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) {
SDValue Op = N0.getOperand(0);
if (Op.getValueType().bitsLT(VT)) {
Op = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, Op);
@@ -3679,7 +3692,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI);
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(),
@@ -3723,7 +3736,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
EVT MemVT = LN0->getMemoryVT();
if ((!LegalOperations && !LN0->isVolatile()) ||
TLI.isLoadExtLegal(ISD::ZEXTLOAD, MemVT)) {
- SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), MemVT,
@@ -3881,7 +3894,7 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI);
if (DoXform) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(),
@@ -3925,8 +3938,9 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
N0.hasOneUse()) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
EVT MemVT = LN0->getMemoryVT();
- SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), N->getDebugLoc(),
- VT, LN0->getChain(), LN0->getBasePtr(),
+ SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), VT,
+ N->getDebugLoc(),
+ LN0->getChain(), LN0->getBasePtr(),
LN0->getSrcValue(),
LN0->getSrcValueOffset(), MemVT,
LN0->isVolatile(), LN0->isNonTemporal(),
@@ -3950,24 +3964,24 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
// we know that the element size of the sext'd result matches the
// element size of the compare operands.
if (VT.getSizeInBits() == N0VT.getSizeInBits())
- return DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
- N0.getOperand(1),
- cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ return DAG.getVSetCC(N->getDebugLoc(), VT, N0.getOperand(0),
+ N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
// If the desired elements are smaller or larger than the source
// elements we can use a matching integer vector type and then
// truncate/sign extend
else {
- EVT MatchingElementType =
- EVT::getIntegerVT(*DAG.getContext(),
- N0VT.getScalarType().getSizeInBits());
- EVT MatchingVectorType =
- EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
- N0VT.getVectorNumElements());
- SDValue VsetCC =
- DAG.getVSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
- N0.getOperand(1),
- cast<CondCodeSDNode>(N0.getOperand(2))->get());
- return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
+ EVT MatchingElementType =
+ EVT::getIntegerVT(*DAG.getContext(),
+ N0VT.getScalarType().getSizeInBits());
+ EVT MatchingVectorType =
+ EVT::getVectorVT(*DAG.getContext(), MatchingElementType,
+ N0VT.getVectorNumElements());
+ SDValue VsetCC =
+ DAG.getVSetCC(N->getDebugLoc(), MatchingVectorType, N0.getOperand(0),
+ N0.getOperand(1),
+ cast<CondCodeSDNode>(N0.getOperand(2))->get());
+ return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT);
}
}
@@ -4024,6 +4038,7 @@ SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) {
/// extended, also fold the extension to form an extending load.
SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
unsigned Opc = N->getOpcode();
+
ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -4040,6 +4055,15 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
if (LegalOperations && !TLI.isLoadExtLegal(ISD::SEXTLOAD, ExtVT))
return SDValue();
+ } else if (Opc == ISD::SRL) {
+ // Another special case: SRL is basically zero-extending a narrower
+ // value.
+ ExtType = ISD::ZEXTLOAD;
+ N0 = SDValue(N, 0);
+ ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+ if (!N01) return SDValue();
+ ExtVT = EVT::getIntegerVT(*DAG.getContext(),
+ VT.getSizeInBits() - N01->getZExtValue());
}
unsigned EVTBits = ExtVT.getSizeInBits();
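Classifying SRL as a zero-extending load of the value's upper bits is what lets the code below replace a shift-of-load with a narrower load at an adjusted address (the PtrOff computation handles endianness). At the C level, assuming a little-endian target for concreteness:

    #include <cstdint>
    #include <cstring>

    // On little-endian, (load32(p) >> 16) equals a 16-bit load at p+2.
    uint32_t highHalf(const unsigned char *p) {
      uint16_t hi;
      std::memcpy(&hi, p + 2, sizeof hi);  // narrower load at offset 2
      return hi;                           // zero-extended by the return
    }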
@@ -4085,7 +4109,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
? DAG.getLoad(VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
LN0->isVolatile(), LN0->isNonTemporal(), NewAlign)
- : DAG.getExtLoad(ExtType, N0.getDebugLoc(), VT, LN0->getChain(), NewPtr,
+ : DAG.getExtLoad(ExtType, VT, N0.getDebugLoc(), LN0->getChain(), NewPtr,
LN0->getSrcValue(), LN0->getSrcValueOffset() + PtrOff,
ExtVT, LN0->isVolatile(), LN0->isNonTemporal(),
NewAlign);
@@ -4172,7 +4196,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), EVT,
@@ -4189,7 +4213,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::SEXTLOAD, EVT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(), EVT,
@@ -4243,8 +4267,17 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
// fold (truncate (load x)) -> (smaller load x)
// fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
- if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT))
- return ReduceLoadWidth(N);
+ if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
+ SDValue Reduced = ReduceLoadWidth(N);
+ if (Reduced.getNode())
+ return Reduced;
+ }
+
+ // Simplify the operands using demanded-bits information.
+ if (!VT.isVector() &&
+ SimplifyDemandedBits(SDValue(N, 0)))
+ return SDValue(N, 0);
+
return SDValue();
}
@@ -4943,7 +4976,7 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) ||
TLI.isLoadExtLegal(ISD::EXTLOAD, N0.getValueType()))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, N->getDebugLoc(), VT,
+ SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, N->getDebugLoc(),
LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
LN0->getSrcValueOffset(),
@@ -5527,8 +5560,8 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) {
if (unsigned Align = DAG.InferPtrAlignment(Ptr)) {
if (Align > LD->getAlignment())
- return DAG.getExtLoad(LD->getExtensionType(), N->getDebugLoc(),
- LD->getValueType(0),
+ return DAG.getExtLoad(LD->getExtensionType(), LD->getValueType(0),
+ N->getDebugLoc(),
Chain, Ptr, LD->getSrcValue(),
LD->getSrcValueOffset(), LD->getMemoryVT(),
LD->isVolatile(), LD->isNonTemporal(), Align);
@@ -5551,8 +5584,8 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
LD->isVolatile(), LD->isNonTemporal(),
LD->getAlignment());
} else {
- ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getDebugLoc(),
- LD->getValueType(0),
+ ReplLoad = DAG.getExtLoad(LD->getExtensionType(), LD->getValueType(0),
+ LD->getDebugLoc(),
BetterChain, Ptr, LD->getSrcValue(),
LD->getSrcValueOffset(),
LD->getMemoryVT(),
@@ -6077,7 +6110,6 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
// Check if the result type doesn't match the inserted element type. A
// SCALAR_TO_VECTOR may truncate the inserted element and the
// EXTRACT_VECTOR_ELT may widen the extracted vector.
- EVT EltVT = InVec.getValueType().getVectorElementType();
SDValue InOp = InVec.getOperand(0);
EVT NVT = N->getValueType(0);
if (InOp.getValueType() != NVT) {
@@ -6277,8 +6309,6 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
}
SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
- return SDValue();
-
EVT VT = N->getValueType(0);
unsigned NumElts = VT.getVectorNumElements();
@@ -6334,6 +6364,59 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
return SDValue();
}
+SDValue DAGCombiner::visitMEMBARRIER(SDNode* N) {
+ if (!TLI.getShouldFoldAtomicFences())
+ return SDValue();
+
+ SDValue atomic = N->getOperand(0);
+ switch (atomic.getOpcode()) {
+ case ISD::ATOMIC_CMP_SWAP:
+ case ISD::ATOMIC_SWAP:
+ case ISD::ATOMIC_LOAD_ADD:
+ case ISD::ATOMIC_LOAD_SUB:
+ case ISD::ATOMIC_LOAD_AND:
+ case ISD::ATOMIC_LOAD_OR:
+ case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_NAND:
+ case ISD::ATOMIC_LOAD_MIN:
+ case ISD::ATOMIC_LOAD_MAX:
+ case ISD::ATOMIC_LOAD_UMIN:
+ case ISD::ATOMIC_LOAD_UMAX:
+ break;
+ default:
+ return SDValue();
+ }
+
+ SDValue fence = atomic.getOperand(0);
+ if (fence.getOpcode() != ISD::MEMBARRIER)
+ return SDValue();
+
+ switch (atomic.getOpcode()) {
+ case ISD::ATOMIC_CMP_SWAP:
+ return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
+ fence.getOperand(0),
+ atomic.getOperand(1), atomic.getOperand(2),
+ atomic.getOperand(3)), atomic.getResNo());
+ case ISD::ATOMIC_SWAP:
+ case ISD::ATOMIC_LOAD_ADD:
+ case ISD::ATOMIC_LOAD_SUB:
+ case ISD::ATOMIC_LOAD_AND:
+ case ISD::ATOMIC_LOAD_OR:
+ case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_NAND:
+ case ISD::ATOMIC_LOAD_MIN:
+ case ISD::ATOMIC_LOAD_MAX:
+ case ISD::ATOMIC_LOAD_UMIN:
+ case ISD::ATOMIC_LOAD_UMAX:
+ return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
+ fence.getOperand(0),
+ atomic.getOperand(1), atomic.getOperand(2)),
+ atomic.getResNo());
+ default:
+ return SDValue();
+ }
+}
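getShouldFoldAtomicFences is a target hook: on targets whose atomic read-modify-write instructions already act as full barriers (x86's locked operations, for instance), the fence feeding the atomic is redundant, so visitMEMBARRIER splices it out by rewiring the atomic's chain to the fence's input chain. The source-level pattern this optimizes, expressed with C++11 atomics purely for illustration (the change itself predates <atomic>):

    #include <atomic>

    void bump(std::atomic<int> &counter) {
      // A backend where atomics imply full barriers can fold this fence
      // into the locked add that follows it.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      counter.fetch_add(1, std::memory_order_seq_cst);
    }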
+
/// XformToShuffleWithZero - Returns a vector_shuffle if it able to transform
/// an AND to a vector_shuffle with the destination vector and a zero vector.
/// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>
@@ -6565,8 +6648,8 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
LLD->getAlignment());
} else {
Load = DAG.getExtLoad(LLD->getExtensionType(),
- TheSelect->getDebugLoc(),
TheSelect->getValueType(0),
+ TheSelect->getDebugLoc(),
LLD->getChain(), Addr, 0, 0,
LLD->getMemoryVT(),
LLD->isVolatile(),
@@ -6807,38 +6890,34 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
}
}
- // Check to see if this is an integer abs. select_cc setl[te] X, 0, -X, X ->
+ // Check to see if this is an integer abs.
+ // select_cc setg[te] X, 0, X, -X ->
+ // select_cc setgt X, -1, X, -X ->
+ // select_cc setl[te] X, 0, -X, X ->
+ // select_cc setlt X, 1, -X, X ->
// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
- if (N1C && N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE) &&
- N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1) &&
- N2.getOperand(0) == N1 && N0.getValueType().isInteger()) {
+ if (N1C) {
+ ConstantSDNode *SubC = NULL;
+ if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) ||
+ (N1C->isAllOnesValue() && CC == ISD::SETGT)) &&
+ N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1))
+ SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0));
+ else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) ||
+ (N1C->isOne() && CC == ISD::SETLT)) &&
+ N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1))
+ SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0));
+
EVT XType = N0.getValueType();
- SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType, N0,
- DAG.getConstant(XType.getSizeInBits()-1,
- getShiftAmountTy()));
- SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(), XType,
- N0, Shift);
- AddToWorkList(Shift.getNode());
- AddToWorkList(Add.getNode());
- return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
- }
- // Check to see if this is an integer abs. select_cc setgt X, -1, X, -X ->
- // Y = sra (X, size(X)-1); xor (add (X, Y), Y)
- if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT &&
- N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1)) {
- if (ConstantSDNode *SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0))) {
- EVT XType = N0.getValueType();
- if (SubC->isNullValue() && XType.isInteger()) {
- SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType,
- N0,
- DAG.getConstant(XType.getSizeInBits()-1,
- getShiftAmountTy()));
- SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(),
- XType, N0, Shift);
- AddToWorkList(Shift.getNode());
- AddToWorkList(Add.getNode());
- return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
- }
+ if (SubC && SubC->isNullValue() && XType.isInteger()) {
+ SDValue Shift = DAG.getNode(ISD::SRA, N0.getDebugLoc(), XType,
+ N0,
+ DAG.getConstant(XType.getSizeInBits()-1,
+ getShiftAmountTy()));
+ SDValue Add = DAG.getNode(ISD::ADD, N0.getDebugLoc(),
+ XType, N0, Shift);
+ AddToWorkList(Shift.getNode());
+ AddToWorkList(Add.getNode());
+ return DAG.getNode(ISD::XOR, DL, XType, Add, Shift);
}
}
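All four select_cc spellings listed above now funnel into one branch-free absolute-value expansion: Y = sra(X, size(X)-1) yields 0 or -1, and xor(add(X, Y), Y) is then X or -X respectively. In plain C++:

    #include <cstdint>

    // Branch-free abs: y is 0 when x >= 0 and -1 when x < 0, so
    // (x + y) ^ y is x in the first case and ~(x - 1) == -x in the second.
    int32_t absViaShift(int32_t x) {
      int32_t y = x >> 31;   // arithmetic shift replicates the sign bit
      return (x + y) ^ y;    // note: overflows for INT32_MIN, as abs() does
    }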
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 95f4d07..decaa76 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -44,18 +44,38 @@
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
-#include "FunctionLoweringInfo.h"
using namespace llvm;
+/// startNewBlock - Set the current block to which generated machine
+/// instructions will be appended, and clear the local CSE map.
+///
+void FastISel::startNewBlock() {
+ LocalValueMap.clear();
+
+ // Start out as null, meaning no local-value instructions have
+ // been emitted.
+ LastLocalValue = 0;
+
+ // Advance the last local value past any EH_LABEL instructions.
+ MachineBasicBlock::iterator
+ I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
+ while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
+ LastLocalValue = I;
+ ++I;
+ }
+}
+
bool FastISel::hasTrivialKill(const Value *V) const {
// Don't consider constants or arguments to have trivial kills.
const Instruction *I = dyn_cast<Instruction>(V);
@@ -99,25 +119,31 @@ unsigned FastISel::getRegForValue(const Value *V) {
// cache values defined by Instructions across blocks, and other values
// only locally. This is because Instructions already have the SSA
// def-dominates-use requirement enforced.
- DenseMap<const Value *, unsigned>::iterator I = ValueMap.find(V);
- if (I != ValueMap.end())
- return I->second;
+ DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
+ if (I != FuncInfo.ValueMap.end()) {
+ unsigned Reg = I->second;
+ return Reg;
+ }
unsigned Reg = LocalValueMap[V];
if (Reg != 0)
return Reg;
// In bottom-up mode, just create the virtual register which will be used
// to hold the value. It will be materialized later.
- if (IsBottomUp) {
- Reg = createResultReg(TLI.getRegClassFor(VT));
- if (isa<Instruction>(V))
- ValueMap[V] = Reg;
- else
- LocalValueMap[V] = Reg;
- return Reg;
- }
+ if (isa<Instruction>(V) &&
+ (!isa<AllocaInst>(V) ||
+ !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
+ return FuncInfo.InitializeRegForValue(V);
- return materializeRegForValue(V, VT);
+ SavePoint SaveInsertPt = enterLocalValueArea();
+
+ // Materialize the value in a register. Emit any instructions in the
+ // local value area.
+ Reg = materializeRegForValue(V, VT);
+
+ leaveLocalValueArea(SaveInsertPt);
+
+ return Reg;
}
/// materializeRegForValue - Helper for getRegForValue. This function is
@@ -161,11 +187,15 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
}
}
} else if (const Operator *Op = dyn_cast<Operator>(V)) {
- if (!SelectOperator(Op, Op->getOpcode())) return 0;
- Reg = LocalValueMap[Op];
+ if (!SelectOperator(Op, Op->getOpcode()))
+ if (!isa<Instruction>(Op) ||
+ !TargetSelectInstruction(cast<Instruction>(Op)))
+ return 0;
+ Reg = lookUpRegForValue(Op);
} else if (isa<UndefValue>(V)) {
Reg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}
// If target-independent code couldn't handle the value, give target-specific
@@ -175,8 +205,10 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
// Don't cache constant materializations in the general ValueMap.
// To do so would require tracking what uses they dominate.
- if (Reg != 0)
+ if (Reg != 0) {
LocalValueMap[V] = Reg;
+ LastLocalValue = MRI.getVRegDef(Reg);
+ }
return Reg;
}
@@ -185,8 +217,9 @@ unsigned FastISel::lookUpRegForValue(const Value *V) {
// cache values defined by Instructions across blocks, and other values
// only locally. This is because Instructions already have the SSA
// def-dominates-use requirement enforced.
- if (ValueMap.count(V))
- return ValueMap[V];
+ DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
+ if (I != FuncInfo.ValueMap.end())
+ return I->second;
return LocalValueMap[V];
}
@@ -202,14 +235,17 @@ unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
return Reg;
}
- unsigned &AssignedReg = ValueMap[I];
+ unsigned &AssignedReg = FuncInfo.ValueMap[I];
if (AssignedReg == 0)
+ // Use the new register.
AssignedReg = Reg;
else if (Reg != AssignedReg) {
- const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
- TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
- Reg, RegClass, RegClass, DL);
+ // Arrange for uses of AssignedReg to be replaced by uses of Reg.
+ FuncInfo.RegFixups[AssignedReg] = Reg;
+
+ AssignedReg = Reg;
}
+
return AssignedReg;
}
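Rather than stitching a value's second definition to its first with a copy, UpdateValueMap now records in RegFixups that uses of the old vreg should be rewritten to the new one, deferring the operand rewriting to a later pass. The bookkeeping reduces to an indirection map (a sketch with hypothetical names; LLVM's actual container is a DenseMap):

    #include <map>

    std::map<unsigned, unsigned> RegFixups;

    // Follow fixup chains so that after a->b and b->c, a resolves to c.
    // Assumes the recorded fixups are acyclic.
    unsigned resolve(unsigned Reg) {
      std::map<unsigned, unsigned>::const_iterator I;
      while ((I = RegFixups.find(Reg)) != RegFixups.end())
        Reg = I->second;
      return Reg;
    }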
@@ -237,6 +273,37 @@ std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
+void FastISel::recomputeInsertPt() {
+ if (getLastLocalValue()) {
+ FuncInfo.InsertPt = getLastLocalValue();
+ ++FuncInfo.InsertPt;
+ } else
+ FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
+
+ // Now skip past any EH_LABELs, which must remain at the beginning.
+ while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
+ FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
+ ++FuncInfo.InsertPt;
+}
+
+FastISel::SavePoint FastISel::enterLocalValueArea() {
+ MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
+ DebugLoc OldDL = DL;
+ recomputeInsertPt();
+ DL = DebugLoc();
+ SavePoint SP = { OldInsertPt, OldDL };
+ return SP;
+}
+
+void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
+ if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
+ LastLocalValue = llvm::prior(FuncInfo.InsertPt);
+
+ // Restore the previous insert position.
+ FuncInfo.InsertPt = OldInsertPt.InsertPt;
+ DL = OldInsertPt.DL;
+}
+
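enterLocalValueArea and leaveLocalValueArea bracket the materialization of a value so its defining instructions land in the block's local-value area (just past any EH_LABELs) instead of at the current selection point. The intended call pattern, as used in getRegForValue above:

    // Materialize V among the local values, then restore the insertion
    // point so ordinary instruction selection resumes where it left off.
    SavePoint SaveInsertPt = enterLocalValueArea();
    unsigned Reg = materializeRegForValue(V, VT);
    leaveLocalValueArea(SaveInsertPt);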
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
@@ -345,7 +412,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
// If this is a constant subscript, handle it quickly.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
- if (CI->getZExtValue() == 0) continue;
+ if (CI->isZero()) continue;
uint64_t Offs =
TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
@@ -395,7 +462,7 @@ bool FastISel::SelectCall(const User *I) {
case Intrinsic::dbg_declare: {
const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
if (!DIVariable(DI->getVariable()).Verify() ||
- !MF.getMMI().hasDebugInfo())
+ !FuncInfo.MF->getMMI().hasDebugInfo())
return true;
const Value *Address = DI->getAddress();
@@ -409,11 +476,12 @@ bool FastISel::SelectCall(const User *I) {
// those are handled in SelectionDAGBuilder.
if (AI) {
DenseMap<const AllocaInst*, int>::iterator SI =
- StaticAllocaMap.find(AI);
- if (SI == StaticAllocaMap.end()) break; // VLAs.
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI == FuncInfo.StaticAllocaMap.end()) break; // VLAs.
int FI = SI->second;
if (!DI->getDebugLoc().isUnknown())
- MF.getMMI().setVariableDbgInfo(DI->getVariable(), FI, DI->getDebugLoc());
+ FuncInfo.MF->getMMI().setVariableDbgInfo(DI->getVariable(),
+ FI, DI->getDebugLoc());
} else
// Building the map above is target independent. Generating DBG_VALUE
// inline is target dependent; do this now.
@@ -428,23 +496,28 @@ bool FastISel::SelectCall(const User *I) {
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
- BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(0U).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
} else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addImm(CI->getZExtValue()).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
- BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addFPImm(CF).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
} else if (unsigned Reg = lookUpRegForValue(V)) {
- BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
} else {
// We can't yet handle anything else here because it would require
// generating code, thus altering codegen because of debug info.
// Insert an undef so we can see what we dropped.
- BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
- addMetadata(DI->getVariable());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(0U).addImm(DI->getOffset())
+ .addMetadata(DI->getVariable());
}
return true;
}
@@ -453,14 +526,13 @@ bool FastISel::SelectCall(const User *I) {
switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
default: break;
case TargetLowering::Expand: {
- assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
+ assert(FuncInfo.MBB->isLandingPad() &&
+ "Call to eh.exception not in landing pad!");
unsigned Reg = TLI.getExceptionAddressRegister();
const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
unsigned ResultReg = createResultReg(RC);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- Reg, RC, RC, DL);
- assert(InsertedCopy && "Can't copy address registers!");
- InsertedCopy = InsertedCopy;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(Reg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -472,25 +544,23 @@ bool FastISel::SelectCall(const User *I) {
switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
default: break;
case TargetLowering::Expand: {
- if (MBB->isLandingPad())
- AddCatchInfo(*cast<CallInst>(I), &MF.getMMI(), MBB);
+ if (FuncInfo.MBB->isLandingPad())
+ AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
else {
#ifndef NDEBUG
- CatchInfoLost.insert(cast<CallInst>(I));
+ FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
#endif
// FIXME: Mark exception selector register as live in. Hack for PR1508.
unsigned Reg = TLI.getExceptionSelectorRegister();
- if (Reg) MBB->addLiveIn(Reg);
+ if (Reg) FuncInfo.MBB->addLiveIn(Reg);
}
unsigned Reg = TLI.getExceptionSelectorRegister();
EVT SrcVT = TLI.getPointerTy();
const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
unsigned ResultReg = createResultReg(RC);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
- RC, RC, DL);
- assert(InsertedCopy && "Can't copy address registers!");
- InsertedCopy = InsertedCopy;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(Reg);
bool ResultRegIsKill = hasTrivialKill(I);
@@ -605,12 +675,12 @@ bool FastISel::SelectBitCast(const User *I) {
if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
- ResultReg = createResultReg(DstClass);
-
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- Op0, DstClass, SrcClass, DL);
- if (!InsertedCopy)
- ResultReg = 0;
+ // Don't attempt a cross-class copy. It will likely fail.
+ if (SrcClass == DstClass) {
+ ResultReg = createResultReg(DstClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(Op0);
+ }
}
// If the reg-reg copy failed, select a BIT_CONVERT opcode.
@@ -655,14 +725,15 @@ FastISel::SelectInstruction(const Instruction *I) {
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
-FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
- if (MBB->isLayoutSuccessor(MSucc)) {
+FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
+ if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
// The unconditional fall-through case, which needs no instructions.
} else {
// The unconditional branch case.
- TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
+ TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
+ SmallVector<MachineOperand, 0>(), DL);
}
- MBB->addSuccessor(MSucc);
+ FuncInfo.MBB->addSuccessor(MSucc);
}
/// SelectFNeg - Emit an FNeg operation.
@@ -762,8 +833,8 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
if (BI->isUnconditional()) {
const BasicBlock *LLVMSucc = BI->getSuccessor(0);
- MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
- FastEmitBranch(MSucc);
+ MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
+ FastEmitBranch(MSucc, BI->getDebugLoc());
return true;
}
@@ -778,7 +849,7 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
case Instruction::Alloca:
// FunctionLowering has the static-sized case covered.
- if (StaticAllocaMap.count(cast<AllocaInst>(I)))
+ if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
return true;
// Dynamic-sized alloca is not handled yet.
@@ -824,32 +895,16 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
}
}
-FastISel::FastISel(MachineFunction &mf,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
- DenseMap<const AllocaInst *, int> &am,
- std::vector<std::pair<MachineInstr*, unsigned> > &pn
-#ifndef NDEBUG
- , SmallSet<const Instruction *, 8> &cil
-#endif
- )
- : MBB(0),
- ValueMap(vm),
- MBBMap(bm),
- StaticAllocaMap(am),
- PHINodesToUpdate(pn),
-#ifndef NDEBUG
- CatchInfoLost(cil),
-#endif
- MF(mf),
- MRI(MF.getRegInfo()),
- MFI(*MF.getFrameInfo()),
- MCP(*MF.getConstantPool()),
- TM(MF.getTarget()),
+FastISel::FastISel(FunctionLoweringInfo &funcInfo)
+ : FuncInfo(funcInfo),
+ MRI(FuncInfo.MF->getRegInfo()),
+ MFI(*FuncInfo.MF->getFrameInfo()),
+ MCP(*FuncInfo.MF->getConstantPool()),
+ TM(FuncInfo.MF->getTarget()),
TD(*TM.getTargetData()),
TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()),
- IsBottomUp(false) {
+ TRI(*TM.getRegisterInfo()) {
}
FastISel::~FastISel() {}
@@ -978,7 +1033,7 @@ unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
- BuildMI(MBB, DL, II, ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
return ResultReg;
}
@@ -989,13 +1044,13 @@ unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg).addReg(Op0, Op0IsKill * RegState::Kill);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill);
else {
- BuildMI(MBB, DL, II).addReg(Op0, Op0IsKill * RegState::Kill);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
- if (!InsertedCopy)
- ResultReg = 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ .addReg(Op0, Op0IsKill * RegState::Kill);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
@@ -1009,17 +1064,15 @@ unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill);
else {
- BuildMI(MBB, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
- if (!InsertedCopy)
- ResultReg = 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1032,17 +1085,15 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm);
else {
- BuildMI(MBB, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
- if (!InsertedCopy)
- ResultReg = 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1055,17 +1106,15 @@ unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm);
else {
- BuildMI(MBB, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
- if (!InsertedCopy)
- ResultReg = 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1079,19 +1128,17 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm);
else {
- BuildMI(MBB, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
- if (!InsertedCopy)
- ResultReg = 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1103,13 +1150,11 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
else {
- BuildMI(MBB, DL, II).addImm(Imm);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
- if (!InsertedCopy)
- ResultReg = 0;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1117,24 +1162,12 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
unsigned Op0, bool Op0IsKill,
uint32_t Idx) {
- const TargetRegisterClass* RC = MRI.getRegClass(Op0);
-
unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
- const TargetInstrDesc &II = TII.get(TargetOpcode::EXTRACT_SUBREG);
-
- if (II.getNumDefs() >= 1)
- BuildMI(MBB, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addImm(Idx);
- else {
- BuildMI(MBB, DL, II)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addImm(Idx);
- bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- II.ImplicitDefs[0], RC, RC, DL);
- if (!InsertedCopy)
- ResultReg = 0;
- }
+ assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
+ "Cannot yet extract from physregs");
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(TargetOpcode::COPY), ResultReg)
+ .addReg(Op0, getKillRegState(Op0IsKill), Idx);
return ResultReg;
}
@@ -1154,14 +1187,14 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
const TerminatorInst *TI = LLVMBB->getTerminator();
SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
- unsigned OrigNumPHINodesToUpdate = PHINodesToUpdate.size();
+ unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
// Check successor nodes' PHI nodes that expect a constant to be available
// from this block.
for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
const BasicBlock *SuccBB = TI->getSuccessor(succ);
if (!isa<PHINode>(SuccBB->begin())) continue;
- MachineBasicBlock *SuccMBB = MBBMap[SuccBB];
+ MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
// If this terminator has multiple identical successors (common for
// switches), only handle each succ once.
@@ -1182,7 +1215,7 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
// by bailing out early, we may leave behind some dead instructions,
// since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
// own moves. Second, this check is necessary because FastISel doesn't
- // use CreateRegForValue to create registers, so it always creates
+ // use CreateRegs to create registers, so it always creates
// exactly one register for each non-void instruction.
EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
@@ -1190,7 +1223,7 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
if (VT == MVT::i1)
VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
else {
- PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
+ FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
return false;
}
}
@@ -1205,10 +1238,10 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
unsigned Reg = getRegForValue(PHIOp);
if (Reg == 0) {
- PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
+ FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
return false;
}
- PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
+ FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
DL = DebugLoc();
}
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 65c36c1..928e1ec 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -13,7 +13,7 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "function-lowering-info"
-#include "FunctionLoweringInfo.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
@@ -30,7 +30,6 @@
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
@@ -47,9 +46,11 @@ static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
if (isa<PHINode>(I)) return true;
const BasicBlock *BB = I->getParent();
for (Value::const_use_iterator UI = I->use_begin(), E = I->use_end();
- UI != E; ++UI)
- if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
+ UI != E; ++UI) {
+ const User *U = *UI;
+ if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
return true;
+ }
return false;
}
@@ -64,9 +65,11 @@ static bool isOnlyUsedInEntryBlock(const Argument *A, bool EnableFastISel) {
const BasicBlock *Entry = A->getParent()->begin();
for (Value::const_use_iterator UI = A->use_begin(), E = A->use_end();
- UI != E; ++UI)
- if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
+ UI != E; ++UI) {
+ const User *U = *UI;
+ if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
return false; // Use not in entry block.
+ }
return true;
}
@@ -74,12 +77,18 @@ FunctionLoweringInfo::FunctionLoweringInfo(const TargetLowering &tli)
: TLI(tli) {
}
-void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
- bool EnableFastISel) {
+void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
Fn = &fn;
MF = &mf;
RegInfo = &MF->getRegInfo();
+ // Check whether the function can return without sret-demotion.
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(Fn->getReturnType(),
+ Fn->getAttributes().getRetAttributes(), Outs, TLI);
+ CanLowerReturn = TLI.CanLowerReturn(Fn->getCallingConv(), Fn->isVarArg(),
+ Outs, Fn->getContext());
+
// Create a vreg for each argument register that is not dead and is used
// outside of the entry block for the function.
for (Function::const_arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
@@ -172,31 +181,33 @@ void FunctionLoweringInfo::clear() {
#endif
LiveOutRegInfo.clear();
ArgDbgValues.clear();
+ RegFixups.clear();
}
-unsigned FunctionLoweringInfo::MakeReg(EVT VT) {
+/// CreateReg - Allocate a single virtual register for the given type.
+unsigned FunctionLoweringInfo::CreateReg(EVT VT) {
return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
}
-/// CreateRegForValue - Allocate the appropriate number of virtual registers of
+/// CreateRegs - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
-unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
+unsigned FunctionLoweringInfo::CreateRegs(const Type *Ty) {
SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, V->getType(), ValueVTs);
+ ComputeValueVTs(TLI, Ty, ValueVTs);
unsigned FirstReg = 0;
for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
EVT ValueVT = ValueVTs[Value];
- EVT RegisterVT = TLI.getRegisterType(V->getContext(), ValueVT);
+ EVT RegisterVT = TLI.getRegisterType(Ty->getContext(), ValueVT);
- unsigned NumRegs = TLI.getNumRegisters(V->getContext(), ValueVT);
+ unsigned NumRegs = TLI.getNumRegisters(Ty->getContext(), ValueVT);
for (unsigned i = 0; i != NumRegs; ++i) {
- unsigned R = MakeReg(RegisterVT);
+ unsigned R = CreateReg(RegisterVT);
if (!FirstReg) FirstReg = R;
}
}
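CreateRegs above allocates one virtual register per legal machine register needed for each member value of the type. As a rough model (assuming plain integer promotion, not the full TLI logic), a value wider than the legal register width is split into ceil(width / regWidth) registers, so a {i64, i32} struct on a 32-bit target gets three consecutive vregs:

    #include <cstdio>

    // Simplified stand-in for TLI.getNumRegisters(): how many regBits-wide
    // registers an integer value of valueBits needs (assumption: integers only).
    static unsigned numRegs(unsigned valueBits, unsigned regBits) {
      return (valueBits + regBits - 1) / regBits;
    }

    int main() {
      std::printf("i64 on 32-bit: %u vregs\n", numRegs(64, 32)); // 2
      std::printf("i32 on 32-bit: %u vregs\n", numRegs(32, 32)); // 1
      // A {i64, i32} struct therefore gets 2 + 1 = 3 consecutive vregs.
      return 0;
    }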
@@ -208,7 +219,7 @@ unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
MachineBasicBlock *MBB) {
// Inform the MachineModuleInfo of the personality for this landing pad.
- const ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
+ const ConstantExpr *CE = cast<ConstantExpr>(I.getArgOperand(1));
assert(CE->getOpcode() == Instruction::BitCast &&
isa<Function>(CE->getOperand(0)) &&
"Personality should be a function");
@@ -217,18 +228,18 @@ void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
// Gather all the type infos for this landing pad and pass them along to
// MachineModuleInfo.
std::vector<const GlobalVariable *> TyInfo;
- unsigned N = I.getNumOperands();
+ unsigned N = I.getNumArgOperands();
- for (unsigned i = N - 1; i > 2; --i) {
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
+ for (unsigned i = N - 1; i > 1; --i) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(i))) {
unsigned FilterLength = CI->getZExtValue();
unsigned FirstCatch = i + FilterLength + !FilterLength;
- assert (FirstCatch <= N && "Invalid filter length");
+ assert(FirstCatch <= N && "Invalid filter length");
if (FirstCatch < N) {
TyInfo.reserve(N - FirstCatch);
for (unsigned j = FirstCatch; j < N; ++j)
- TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
+ TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
MMI->addCatchTypeInfo(MBB, TyInfo);
TyInfo.clear();
}
@@ -240,7 +251,7 @@ void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
// Filter.
TyInfo.reserve(FilterLength - 1);
for (unsigned j = i + 1; j < FirstCatch; ++j)
- TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
+ TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
MMI->addFilterTypeInfo(MBB, TyInfo);
TyInfo.clear();
}
@@ -249,10 +260,10 @@ void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
}
}
- if (N > 3) {
- TyInfo.reserve(N - 3);
- for (unsigned j = 3; j < N; ++j)
- TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
+ if (N > 2) {
+ TyInfo.reserve(N - 2);
+ for (unsigned j = 2; j < N; ++j)
+ TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
MMI->addCatchTypeInfo(MBB, TyInfo);
}
}
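The renumbering above (getOperand(2) becoming getArgOperand(1), loop bounds dropping by one) follows from the CallInst accessor change in this LLVM snapshot: getArgOperand() indexes only the call's arguments, while getOperand() also counted the callee. A toy model of the correspondence, assuming the callee occupies operand 0 as this diff's index shift implies:

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy CallInst: operand 0 is the callee, arguments follow (assumed layout).
    struct ToyCall {
      std::vector<std::string> Ops;
      const std::string &getOperand(unsigned i) const { return Ops[i]; }
      const std::string &getArgOperand(unsigned i) const { return Ops[i + 1]; }
      unsigned getNumArgOperands() const { return Ops.size() - 1; }
    };

    int main() {
      // eh.selector(exception, personality, typeinfos...)
      ToyCall Sel{{"llvm.eh.selector", "exn", "personality", "typeinfo0"}};
      assert(Sel.getArgOperand(1) == Sel.getOperand(2)); // the personality
      assert(Sel.getNumArgOperands() == 3);
      return 0;
    }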
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 16eb8a7..61c2a90 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -123,7 +123,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
EVT VT = Node->getValueType(ResNo);
const TargetRegisterClass *SrcRC = 0, *DstRC = 0;
- SrcRC = TRI->getPhysicalRegisterRegClass(SrcReg, VT);
+ SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);
// Figure out the register class to create for the destreg.
if (VRBase) {
@@ -142,11 +142,8 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
} else {
// Create the reg, emit the copy.
VRBase = MRI->createVirtualRegister(DstRC);
- bool Emitted = TII->copyRegToReg(*MBB, InsertPos, VRBase, SrcReg,
- DstRC, SrcRC, Node->getDebugLoc());
-
- assert(Emitted && "Unable to issue a copy instruction!\n");
- (void) Emitted;
+ BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
+ VRBase).addReg(SrcReg);
}
SDValue Op(Node, ResNo);
@@ -246,7 +243,7 @@ unsigned InstrEmitter::getVR(SDValue Op,
const TargetRegisterClass *RC = TLI->getRegClassFor(Op.getValueType());
VReg = MRI->createVirtualRegister(RC);
}
- BuildMI(MBB, Op.getDebugLoc(),
+ BuildMI(*MBB, InsertPos, Op.getDebugLoc(),
TII->get(TargetOpcode::IMPLICIT_DEF), VReg);
return VReg;
}
@@ -288,10 +285,8 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
"Don't have operand info for this instruction!");
if (DstRC && SrcRC != DstRC && !SrcRC->hasSuperClass(DstRC)) {
unsigned NewVReg = MRI->createVirtualRegister(DstRC);
- bool Emitted = TII->copyRegToReg(*MBB, InsertPos, NewVReg, VReg,
- DstRC, SrcRC, Op.getNode()->getDebugLoc());
- assert(Emitted && "Unable to issue a copy instruction!\n");
- (void) Emitted;
+ BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), NewVReg).addReg(VReg);
VReg = NewVReg;
}
}
@@ -428,12 +423,9 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
}
if (Opc == TargetOpcode::EXTRACT_SUBREG) {
+ // EXTRACT_SUBREG is lowered as %dst = COPY %src:sub
unsigned SubIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
- // Create the extract_subreg machine instruction.
- MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
- TII->get(TargetOpcode::EXTRACT_SUBREG));
-
// Figure out the register class to create for the destreg.
unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
const TargetRegisterClass *TRC = MRI->getRegClass(VReg);
@@ -450,11 +442,16 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
VRBase = MRI->createVirtualRegister(SRC);
}
- // Add def, source, and subreg index
- MI->addOperand(MachineOperand::CreateReg(VRBase, true));
+ // Create the extract_subreg machine instruction.
+ MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), VRBase);
+
+ // Add source, and subreg index
AddOperand(MI, Node->getOperand(0), 0, 0, VRBaseMap, /*IsDebug=*/false,
IsClone, IsCloned);
- MI->addOperand(MachineOperand::CreateImm(SubIdx));
+ assert(TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg()) &&
+ "Cannot yet extract from physregs");
+ MI->getOperand(1).setSubReg(SubIdx);
MBB->insert(InsertPos, MI);
} else if (Opc == TargetOpcode::INSERT_SUBREG ||
Opc == TargetOpcode::SUBREG_TO_REG) {
@@ -511,18 +508,13 @@ void
InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
DenseMap<SDValue, unsigned> &VRBaseMap) {
unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
- const TargetRegisterClass *SrcRC = MRI->getRegClass(VReg);
+ // Create the new VReg in the destination class and emit a copy.
unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
const TargetRegisterClass *DstRC = TRI->getRegClass(DstRCIdx);
-
- // Create the new VReg in the destination class and emit a copy.
unsigned NewVReg = MRI->createVirtualRegister(DstRC);
- bool Emitted = TII->copyRegToReg(*MBB, InsertPos, NewVReg, VReg,
- DstRC, SrcRC, Node->getDebugLoc());
- assert(Emitted &&
- "Unable to issue a copy instruction for a COPY_TO_REGCLASS node!\n");
- (void) Emitted;
+ BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
+ NewVReg).addReg(VReg);
SDValue Op(Node, 0);
bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
@@ -604,9 +596,10 @@ InstrEmitter::EmitDbgValue(SDDbgValue *SD,
} else if (SD->getKind() == SDDbgValue::CONST) {
const Value *V = SD->getConst();
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- // FIXME: SDDbgValues aren't updated with legalization, so it's possible
- // to have i128 values in them at this point. As a crude workaround, just
- // drop the debug info if this happens.
+ // FIXME: SDDbgValue constants aren't updated with legalization, so it's
+ // possible to have i128 constants in them at this point. Dwarf writer
+ // does not handle i128 constants at the moment so, as a crude workaround,
+ // just drop the debug info if this happens.
if (!CI->getValue().isSignedIntN(64))
MIB.addReg(0U);
else
@@ -676,6 +669,33 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
// Create the new machine instruction.
MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(), II);
+
+ // The MachineInstr constructor adds implicit-def operands. Scan through
+ // these to determine which are dead.
+ if (MI->getNumOperands() != 0 &&
+ Node->getValueType(Node->getNumValues()-1) == MVT::Flag) {
+ // First, collect all used registers.
+ SmallVector<unsigned, 8> UsedRegs;
+ for (SDNode *F = Node->getFlaggedUser(); F; F = F->getFlaggedUser())
+ if (F->getOpcode() == ISD::CopyFromReg)
+ UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
+ else {
+ // Collect declared implicit uses.
+ const TargetInstrDesc &TID = TII->get(F->getMachineOpcode());
+ UsedRegs.append(TID.getImplicitUses(),
+ TID.getImplicitUses() + TID.getNumImplicitUses());
+ // In addition to declared implicit uses, we must also check for
+ // direct RegisterSDNode operands.
+ for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
+ if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
+ unsigned Reg = R->getReg();
+ if (Reg != 0 && TargetRegisterInfo::isPhysicalRegister(Reg))
+ UsedRegs.push_back(Reg);
+ }
+ }
+ // Then mark unused registers as dead.
+ MI->setPhysRegsDeadExcept(UsedRegs, *TRI);
+ }
// Add result register values for things that are defined by this
// instruction.
@@ -696,16 +716,24 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned,
MI->setMemRefs(cast<MachineSDNode>(Node)->memoperands_begin(),
cast<MachineSDNode>(Node)->memoperands_end());
+ // Insert the instruction into position in the block. This needs to
+ // happen before any custom inserter hook is called so that the
+ // hook knows where in the block to insert the replacement code.
+ MBB->insert(InsertPos, MI);
+
if (II.usesCustomInsertionHook()) {
// Insert this instruction into the basic block using a target
// specific inserter which may return a new basic block.
- MBB = TLI->EmitInstrWithCustomInserter(MI, MBB);
- InsertPos = MBB->end();
+ bool AtEnd = InsertPos == MBB->end();
+ MachineBasicBlock *NewMBB = TLI->EmitInstrWithCustomInserter(MI, MBB);
+ if (NewMBB != MBB) {
+ if (AtEnd)
+ InsertPos = NewMBB->end();
+ MBB = NewMBB;
+ }
return;
}
- MBB->insert(InsertPos, MI);
-
// Additional results must be a physical register def.
if (HasPhysRegOuts) {
for (unsigned i = II.getNumDefs(); i < NumResults; ++i) {
@@ -761,24 +789,9 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
unsigned DestReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
if (SrcReg == DestReg) // Coalesced away the copy? Ignore.
break;
-
- const TargetRegisterClass *SrcTRC = 0, *DstTRC = 0;
- // Get the register classes of the src/dst.
- if (TargetRegisterInfo::isVirtualRegister(SrcReg))
- SrcTRC = MRI->getRegClass(SrcReg);
- else
- SrcTRC = TRI->getPhysicalRegisterRegClass(SrcReg,SrcVal.getValueType());
- if (TargetRegisterInfo::isVirtualRegister(DestReg))
- DstTRC = MRI->getRegClass(DestReg);
- else
- DstTRC = TRI->getPhysicalRegisterRegClass(DestReg,
- Node->getOperand(1).getValueType());
-
- bool Emitted = TII->copyRegToReg(*MBB, InsertPos, DestReg, SrcReg,
- DstTRC, SrcTRC, Node->getDebugLoc());
- assert(Emitted && "Unable to issue a copy instruction!\n");
- (void) Emitted;
+ BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
+ DestReg).addReg(SrcReg);
break;
}
case ISD::CopyFromReg: {
@@ -807,6 +820,12 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
const char *AsmStr = cast<ExternalSymbolSDNode>(AsmStrV)->getSymbol();
MI->addOperand(MachineOperand::CreateES(AsmStr));
+ // Add the isAlignStack bit.
+ int64_t isAlignStack =
+ cast<ConstantSDNode>(Node->getOperand(InlineAsm::Op_IsAlignStack))->
+ getZExtValue();
+ MI->addOperand(MachineOperand::CreateImm(isAlignStack));
+
// Add all of the operand registers to the instruction.
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
unsigned Flags =
@@ -821,14 +840,22 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
case InlineAsm::Kind_RegDef:
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
- MI->addOperand(MachineOperand::CreateReg(Reg, true));
+ // FIXME: Add dead flags for physical and virtual registers defined.
+ // For now, mark physical register defs as implicit to help fast
+ // regalloc. This makes inline asm look a lot like calls.
+ MI->addOperand(MachineOperand::CreateReg(Reg, true,
+ /*isImp=*/ TargetRegisterInfo::isPhysicalRegister(Reg)));
}
break;
case InlineAsm::Kind_RegDefEarlyClobber:
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
- MI->addOperand(MachineOperand::CreateReg(Reg, true, false, false,
- false, false, true));
+ MI->addOperand(MachineOperand::CreateReg(Reg, /*isDef=*/ true,
+ /*isImp=*/ TargetRegisterInfo::isPhysicalRegister(Reg),
+ /*isKill=*/ false,
+ /*isDead=*/ false,
+ /*isUndef=*/false,
+ /*isEarlyClobber=*/ true));
}
break;
case InlineAsm::Kind_RegUse: // Use of register.
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 62a37a5..7a47da4 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -31,6 +31,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
@@ -133,7 +134,7 @@ private:
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
- SDValue N1, SDValue N2,
+ SDValue N1, SDValue N2,
SmallVectorImpl<int> &Mask) const;
bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
@@ -143,6 +144,8 @@ private:
DebugLoc dl);
SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
+ std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
+ SDNode *Node, bool isSigned);
SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
RTLIB::Libcall Call_PPCF128);
@@ -172,6 +175,8 @@ private:
SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
SDValue ExpandVectorBuildThroughStack(SDNode* Node);
+ std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);
+
void ExpandNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
void PromoteNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
};
@@ -181,8 +186,8 @@ private:
/// performs the same shuffle in terms of order of result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
-SDValue
-SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
+SDValue
+SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
SDValue N1, SDValue N2,
SmallVectorImpl<int> &Mask) const {
unsigned NumMaskElts = VT.getVectorNumElements();
@@ -193,12 +198,12 @@ SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
if (NumEltsGrowth == 1)
return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]);
-
+
SmallVector<int, 8> NewMask;
for (unsigned i = 0; i != NumMaskElts; ++i) {
int Idx = Mask[i];
for (unsigned j = 0; j != NumEltsGrowth; ++j) {
- if (Idx < 0)
+ if (Idx < 0)
NewMask.push_back(-1);
else
NewMask.push_back(Idx * NumEltsGrowth + j);
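The loop above widens a shuffle mask when each original vector element is re-typed as NumEltsGrowth narrower elements: every lane index expands into a run of consecutive narrow-lane indices, and sentinel -1 lanes stay -1. A standalone sketch reproducing the comment's own example, <0,1,0,1> on v4i32 becoming <0,1,2,3,0,1,2,3> on v8i16:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> Mask = {0, 1, 0, 1}; // v4i32 shuffle mask
      unsigned NumEltsGrowth = 2;           // i32 split into 2 x i16

      std::vector<int> NewMask;
      for (int Idx : Mask)
        for (unsigned j = 0; j != NumEltsGrowth; ++j)
          NewMask.push_back(Idx < 0 ? -1 : int(Idx * NumEltsGrowth + j));

      for (int M : NewMask)
        std::printf("%d ", M);              // prints: 0 1 2 3 0 1 2 3
      std::printf("\n");
      return 0;
    }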
@@ -320,7 +325,8 @@ bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
bool OperandsLeadToDest = false;
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
OperandsLeadToDest |= // If an operand leads to Dest, so do we.
- LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest, NodesLeadingTo);
+ LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest,
+ NodesLeadingTo);
if (OperandsLeadToDest) {
NodesLeadingTo.insert(N);
@@ -357,7 +363,7 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
EVT SVT = VT;
while (SVT != MVT::f32) {
SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
- if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) &&
+ if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) &&
// Only do this if the target has a native EXTLOAD instruction from
// smaller type.
TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
@@ -372,8 +378,8 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
if (Extend)
- return DAG.getExtLoad(ISD::EXTLOAD, dl,
- OrigVT, DAG.getEntryNode(),
+ return DAG.getExtLoad(ISD::EXTLOAD, OrigVT, dl,
+ DAG.getEntryNode(),
CPIdx, PseudoSourceValue::getConstantPool(),
0, VT, false, false, Alignment);
return DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
@@ -450,7 +456,7 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
8 * (StoredBytes - Offset));
// Load from the stack slot.
- SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
+ SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, RegVT, dl, Store, StackPtr,
NULL, 0, MemVT, false, false, 0);
Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
@@ -552,7 +558,7 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
// The last copy may be partial. Do an extending load.
EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
8 * (LoadedBytes - Offset));
- SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
+ SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, RegVT, dl, Chain, Ptr,
LD->getSrcValue(), SVOffset + Offset,
MemVT, LD->isVolatile(),
LD->isNonTemporal(),
@@ -568,7 +574,7 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
Stores.size());
// Finally, perform the original load only redirected to the stack slot.
- Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
+ Load = DAG.getExtLoad(LD->getExtensionType(), VT, dl, TF, StackBase,
NULL, 0, LoadedVT, false, false, 0);
// Callers expect a MERGE_VALUES node.
@@ -597,23 +603,23 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
// Load the value in two parts
SDValue Lo, Hi;
if (TLI.isLittleEndian()) {
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getSrcValue(),
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, VT, dl, Chain, Ptr, LD->getSrcValue(),
SVOffset, NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(), Alignment);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, TLI.getPointerTy()));
- Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getSrcValue(),
+ Hi = DAG.getExtLoad(HiExtType, VT, dl, Chain, Ptr, LD->getSrcValue(),
SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
- LD->isNonTemporal(), MinAlign(Alignment, IncrementSize));
+ LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
} else {
- Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getSrcValue(),
+ Hi = DAG.getExtLoad(HiExtType, VT, dl, Chain, Ptr, LD->getSrcValue(),
SVOffset, NewLoadedVT, LD->isVolatile(),
LD->isNonTemporal(), Alignment);
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getConstant(IncrementSize, TLI.getPointerTy()));
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getSrcValue(),
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, VT, dl, Chain, Ptr, LD->getSrcValue(),
SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
- LD->isNonTemporal(), MinAlign(Alignment, IncrementSize));
+ LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
}
// aggregate the two parts
@@ -773,7 +779,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
"Unexpected illegal type!");
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
- assert((isTypeLegal(Node->getOperand(i).getValueType()) ||
+ assert((isTypeLegal(Node->getOperand(i).getValueType()) ||
Node->getOperand(i).getOpcode() == ISD::TargetConstant) &&
"Unexpected illegal type!");
@@ -853,6 +859,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::MERGE_VALUES:
case ISD::EH_RETURN:
case ISD::FRAME_TO_ARGS_OFFSET:
+ case ISD::EH_SJLJ_SETJMP:
+ case ISD::EH_SJLJ_LONGJMP:
// These operations lie about being legal: when they claim to be legal,
// they should actually be expanded.
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
@@ -925,8 +933,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
break;
}
- Result = DAG.UpdateNodeOperands(Result.getValue(0), Ops.data(),
- Ops.size());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), Ops.data(),
+ Ops.size()), 0);
switch (Action) {
case TargetLowering::Legal:
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
@@ -1000,11 +1008,11 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
NodesLeadingTo);
}
- // Now that we legalized all of the inputs (which may have inserted
- // libcalls) create the new CALLSEQ_START node.
+ // Now that we have legalized all of the inputs (which may have inserted
+ // libcalls), create the new CALLSEQ_START node.
Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain.
- // Merge in the last call, to ensure that this call start after the last
+ // Merge in the last call to ensure that this call starts after the last
// call ended.
if (LastCALLSEQ_END.getOpcode() != ISD::EntryToken) {
Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
@@ -1016,7 +1024,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
if (Tmp1 != Node->getOperand(0)) {
SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
Ops[0] = Tmp1;
- Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), &Ops[0],
+ Ops.size()), Result.getResNo());
}
// Remember that the CALLSEQ_START is legalized.
@@ -1058,7 +1067,9 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
if (Tmp1 != Node->getOperand(0)) {
SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
Ops[0] = Tmp1;
- Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ &Ops[0], Ops.size()),
+ Result.getResNo());
}
} else {
Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1));
@@ -1067,7 +1078,9 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
Ops[0] = Tmp1;
Ops.back() = Tmp2;
- Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ &Ops[0], Ops.size()),
+ Result.getResNo());
}
}
assert(IsLegalizingCall && "Call sequence imbalance between start/end?");
@@ -1087,7 +1100,9 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType == ISD::NON_EXTLOAD) {
EVT VT = Node->getValueType(0);
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp2, LD->getOffset()),
+ Result.getResNo());
Tmp3 = Result.getValue(0);
Tmp4 = Result.getValue(1);
@@ -1100,7 +1115,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){
- Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
+ Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
DAG, TLI);
Tmp3 = Result.getOperand(0);
Tmp4 = Result.getOperand(1);
@@ -1166,7 +1181,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
ISD::LoadExtType NewExtType =
ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
- Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
+ Result = DAG.getExtLoad(NewExtType, Node->getValueType(0), dl,
Tmp1, Tmp2, LD->getSrcValue(), SVOffset,
NVT, isVolatile, isNonTemporal, Alignment);
@@ -1202,8 +1217,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
if (TLI.isLittleEndian()) {
// EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
// Load the bottom RoundWidth bits.
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl,
- Node->getValueType(0), Tmp1, Tmp2,
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, Node->getValueType(0), dl,
+ Tmp1, Tmp2,
LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
isNonTemporal, Alignment);
@@ -1211,13 +1226,13 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
IncrementSize = RoundWidth / 8;
Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
DAG.getIntPtrConstant(IncrementSize));
- Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
+ Hi = DAG.getExtLoad(ExtType, Node->getValueType(0), dl, Tmp1, Tmp2,
LD->getSrcValue(), SVOffset + IncrementSize,
ExtraVT, isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
- // Build a factor node to remember that this load is independent of the
- // other one.
+ // Build a factor node to remember that this load is independent of
+ // the other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
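The little-endian branch above implements the transformation in its comment: an odd-width EXTLOAD:i24 becomes a ZEXTLOAD:i16 of the low half plus an EXTLOAD:i8 two bytes further on, shifted into place and OR'd together. The same reassembly in plain C++ (a sketch; assumes a little-endian host):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      unsigned char Mem[3] = {0x56, 0x34, 0x12}; // the i24 value 0x123456, LE

      uint16_t Lo;
      std::memcpy(&Lo, Mem, 2);                  // ZEXTLOAD:i16 of the low half
      uint32_t Hi = Mem[2];                      // EXTLOAD:i8 at offset +2

      uint32_t Val = Lo | (Hi << 16);            // shl + or, as in the comment
      std::printf("0x%x\n", Val);                // prints 0x123456
      return 0;
    }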
@@ -1231,7 +1246,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Big endian - avoid unaligned loads.
// EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
// Load the top RoundWidth bits.
- Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
+ Hi = DAG.getExtLoad(ExtType, Node->getValueType(0), dl, Tmp1, Tmp2,
LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
isNonTemporal, Alignment);
@@ -1239,14 +1254,14 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
IncrementSize = RoundWidth / 8;
Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
DAG.getIntPtrConstant(IncrementSize));
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl,
- Node->getValueType(0), Tmp1, Tmp2,
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
+ Node->getValueType(0), dl, Tmp1, Tmp2,
LD->getSrcValue(), SVOffset + IncrementSize,
ExtraVT, isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
- // Build a factor node to remember that this load is independent of the
- // other one.
+ // Build a factor node to remember that this load is independent of
+ // the other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
@@ -1267,7 +1282,9 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
isCustom = true;
// FALLTHROUGH
case TargetLowering::Legal:
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp2, LD->getOffset()),
+ Result.getResNo());
Tmp1 = Result.getValue(0);
Tmp2 = Result.getValue(1);
@@ -1281,10 +1298,12 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned load and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
- const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
+ const Type *Ty =
+ LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ unsigned ABIAlignment =
+ TLI.getTargetData()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){
- Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
+ Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
DAG, TLI);
Tmp1 = Result.getOperand(0);
Tmp2 = Result.getOperand(1);
@@ -1310,10 +1329,11 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Tmp2 = LegalizeOp(Load.getValue(1));
break;
}
- assert(ExtType != ISD::EXTLOAD &&"EXTLOAD should always be supported!");
+ assert(ExtType != ISD::EXTLOAD &&
+ "EXTLOAD should always be supported!");
// Turn the unsupported load into an EXTLOAD followed by an explicit
// zero/sign extend inreg.
- Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
+ Result = DAG.getExtLoad(ISD::EXTLOAD, Node->getValueType(0), dl,
Tmp1, Tmp2, LD->getSrcValue(),
LD->getSrcValueOffset(), SrcVT,
LD->isVolatile(), LD->isNonTemporal(),
@@ -1355,8 +1375,10 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
{
Tmp3 = LegalizeOp(ST->getValue());
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
- ST->getOffset());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp3, Tmp2,
+ ST->getOffset()),
+ Result.getResNo());
EVT VT = Tmp3.getValueType();
switch (TLI.getOperationAction(ISD::STORE, VT)) {
@@ -1366,7 +1388,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
+ unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
DAG, TLI);
@@ -1459,8 +1481,10 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
} else {
if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() ||
Tmp2 != ST->getBasePtr())
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
- ST->getOffset());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp3, Tmp2,
+ ST->getOffset()),
+ Result.getResNo());
switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
default: assert(0 && "This action is not supported yet!");
@@ -1469,7 +1493,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
+ unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
DAG, TLI);
@@ -1531,7 +1555,7 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, NULL, 0,
false, false, 0);
else
- return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr,
+ return DAG.getExtLoad(ISD::EXTLOAD, Op.getValueType(), dl, Ch, StackPtr,
NULL, 0, Vec.getValueType().getVectorElementType(),
false, false, 0);
}
@@ -1568,7 +1592,7 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
Node->getOperand(i), Idx, SV, Offset,
EltVT, false, false, 0));
} else
- Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl,
+ Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl,
Node->getOperand(i), Idx, SV, Offset,
false, false, 0));
}
@@ -1763,7 +1787,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
DestAlign);
assert(SlotSize < DestSize && "Unknown extension!");
- return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, SV, 0, SlotVT,
+ return DAG.getExtLoad(ISD::EXTLOAD, DestVT, dl, Store, FIPtr, SV, 0, SlotVT,
false, false, DestAlign);
}
@@ -1926,6 +1950,44 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
return CallInfo.first;
}
+// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to
+// ExpandLibCall except that the first operand is the in-chain.
+std::pair<SDValue, SDValue>
+SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
+ SDNode *Node,
+ bool isSigned) {
+ assert(!IsLegalizingCall && "Cannot overlap legalization of calls!");
+ SDValue InChain = Node->getOperand(0);
+
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
+ EVT ArgVT = Node->getOperand(i).getValueType();
+ const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Entry.Node = Node->getOperand(i);
+ Entry.Ty = ArgTy;
+ Entry.isSExt = isSigned;
+ Entry.isZExt = !isSigned;
+ Args.push_back(Entry);
+ }
+ SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
+ TLI.getPointerTy());
+
+ // Splice the libcall in wherever FindInputOutputChains tells us to.
+ const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
+ std::pair<SDValue, SDValue> CallInfo =
+ TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
+ 0, TLI.getLibcallCallingConv(LC), false,
+ /*isReturnValueUsed=*/true,
+ Callee, Args, DAG, Node->getDebugLoc());
+
+ // Legalize the call sequence, starting with the chain. This will advance
+ // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
+ // was added by LowerCallTo (guaranteeing proper serialization of calls).
+ LegalizeOp(CallInfo.second);
+ return CallInfo;
+}
+
SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
RTLIB::Libcall Call_F32,
RTLIB::Libcall Call_F64,
@@ -2048,7 +2110,8 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84);
SDValue LoFlt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, LoOr);
SDValue HiFlt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, HiOr);
- SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, TwoP84PlusTwoP52);
+ SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt,
+ TwoP84PlusTwoP52);
return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub);
}
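The FSUB above is the tail of the classic exponent-bias trick for unsigned int-to-float conversion: OR the integer into the mantissa of a double whose exponent pins a known power of two, then subtract that power of two so only the integer value remains. The 32-bit half of the trick, runnable standalone (assuming IEEE-754 doubles):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      uint32_t X = 123456789u;

      // 0x4330000000000000 is the double 2^52; OR X into its low mantissa bits.
      uint64_t Bits = 0x4330000000000000ULL | X;
      double D;
      std::memcpy(&D, &Bits, sizeof D);    // BIT_CONVERT i64 -> f64

      D -= 4503599627370496.0;             // subtract 2^52: only X remains
      std::printf("%.0f\n", D);            // prints 123456789
      return 0;
    }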
@@ -2058,11 +2121,11 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
if (Op0.getValueType() == MVT::i64 && DestVT == MVT::f32) {
EVT SHVT = TLI.getShiftAmountTy();
- SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
+ SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64));
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And,
DAG.getConstant(UINT64_C(0x800), MVT::i64));
- SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
+ SDValue And2 = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
DAG.getConstant(UINT64_C(0x7ff), MVT::i64));
SDValue Ne = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64),
And2, DAG.getConstant(UINT64_C(0), MVT::i64), ISD::SETNE);
@@ -2122,7 +2185,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
false, false, Alignment);
else {
FudgeInReg =
- LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT,
+ LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, DestVT, dl,
DAG.getEntryNode(), CPIdx,
PseudoSourceValue::getConstantPool(), 0,
MVT::f32, false, false, Alignment));
@@ -2332,6 +2395,92 @@ SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
}
}
+std::pair<SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) {
+ unsigned Opc = Node->getOpcode();
+ MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
+ RTLIB::Libcall LC;
+
+ switch (Opc) {
+ default:
+ llvm_unreachable("Unhandled atomic intrinsic Expand!");
+ break;
+ case ISD::ATOMIC_SWAP:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break;
+ }
+ break;
+ case ISD::ATOMIC_CMP_SWAP:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_ADD:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_ADD_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_SUB:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_SUB_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_AND:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_AND_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_OR:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_OR_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_OR_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_XOR:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_XOR_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break;
+ }
+ break;
+ case ISD::ATOMIC_LOAD_NAND:
+ switch (VT.SimpleTy) {
+ default: llvm_unreachable("Unexpected value type for atomic!");
+ case MVT::i8: LC = RTLIB::SYNC_FETCH_AND_NAND_1; break;
+ case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break;
+ case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break;
+ case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break;
+ }
+ break;
+ }
+
+ return ExpandChainLibCall(LC, Node, false);
+}
+
void SelectionDAGLegalize::ExpandNode(SDNode *Node,
SmallVectorImpl<SDValue> &Results) {
DebugLoc dl = Node->getDebugLoc();
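ExpandAtomic above is a pure table: it maps an atomic opcode plus its memory width to the matching __sync_* runtime entry point (RTLIB::SYNC_FETCH_AND_ADD_4 is __sync_fetch_and_add_4, and so on), then hands everything to ExpandChainLibCall. A simplified model of the naming scheme (assumption: the usual libgcc naming, widths in bytes):

    #include <cstdio>
    #include <string>

    // Model of the RTLIB::SYNC_* naming: __sync_<op>_<width-in-bytes>.
    static std::string syncLibcallName(const std::string &Op, unsigned Bytes) {
      return "__sync_" + Op + "_" + std::to_string(Bytes);
    }

    int main() {
      // ISD::ATOMIC_LOAD_ADD on MVT::i32 -> RTLIB::SYNC_FETCH_AND_ADD_4
      std::printf("%s\n", syncLibcallName("fetch_and_add", 4).c_str());
      // ISD::ATOMIC_CMP_SWAP on MVT::i64 -> RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8
      std::printf("%s\n", syncLibcallName("val_compare_and_swap", 8).c_str());
      return 0;
    }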
@@ -2357,10 +2506,48 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
case ISD::EH_RETURN:
case ISD::EH_LABEL:
case ISD::PREFETCH:
- case ISD::MEMBARRIER:
case ISD::VAEND:
+ case ISD::EH_SJLJ_LONGJMP:
+ Results.push_back(Node->getOperand(0));
+ break;
+ case ISD::EH_SJLJ_SETJMP:
+ Results.push_back(DAG.getConstant(0, MVT::i32));
Results.push_back(Node->getOperand(0));
break;
+ case ISD::MEMBARRIER: {
+ // If the target didn't lower this, lower it to a '__sync_synchronize()' call.
+ TargetLowering::ArgListTy Args;
+ std::pair<SDValue, SDValue> CallResult =
+ TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
+ false, false, false, false, 0, CallingConv::C, false,
+ /*isReturnValueUsed=*/true,
+ DAG.getExternalSymbol("__sync_synchronize",
+ TLI.getPointerTy()),
+ Args, DAG, dl);
+ Results.push_back(CallResult.second);
+ break;
+ }
+ // By default, atomic intrinsics are marked Legal and lowered. Targets
+ // which don't support them directly, however, may want libcalls, in which
+ // case they mark them Expand, and we get here.
+ // FIXME: ATOMIC_LOAD_MIN/MAX/UMIN/UMAX are not handled by ExpandAtomic yet.
+ case ISD::ATOMIC_SWAP:
+ case ISD::ATOMIC_LOAD_ADD:
+ case ISD::ATOMIC_LOAD_SUB:
+ case ISD::ATOMIC_LOAD_AND:
+ case ISD::ATOMIC_LOAD_OR:
+ case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_NAND:
+ case ISD::ATOMIC_LOAD_MIN:
+ case ISD::ATOMIC_LOAD_MAX:
+ case ISD::ATOMIC_LOAD_UMIN:
+ case ISD::ATOMIC_LOAD_UMAX:
+ case ISD::ATOMIC_CMP_SWAP: {
+ std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node);
+ Results.push_back(Tmp.first);
+ Results.push_back(Tmp.second);
+ break;
+ }
case ISD::DYNAMIC_STACKALLOC:
ExpandDYNAMIC_STACKALLOC(Node, Results);
break;
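The MEMBARRIER fallback above emits an ordinary call to __sync_synchronize, the full-barrier entry point that GCC-compatible compilers also expose as a builtin, so targets without a native fence instruction still get correct ordering from the runtime. For reference, the builtin is directly callable (assuming a GCC/Clang toolchain):

    #include <cstdio>

    int main() {
      __sync_synchronize(); // full memory barrier; what MEMBARRIER lowers to
      std::puts("fence issued");
      return 0;
    }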
@@ -2465,15 +2652,31 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
EVT VT = Node->getValueType(0);
Tmp1 = Node->getOperand(0);
Tmp2 = Node->getOperand(1);
- SDValue VAList = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, V, 0,
- false, false, 0);
+ unsigned Align = Node->getConstantOperandVal(3);
+
+ SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, V, 0,
+ false, false, 0);
+ SDValue VAList = VAListLoad;
+
+ if (Align > TLI.getMinStackArgumentAlignment()) {
+ assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
+
+ VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
+ DAG.getConstant(Align - 1,
+ TLI.getPointerTy()));
+
+ VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList,
+ DAG.getConstant(-Align,
+ TLI.getPointerTy()));
+ }
+
// Increment the pointer, VAList, to the next vaarg
Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
DAG.getConstant(TLI.getTargetData()->
getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
TLI.getPointerTy()));
// Store the incremented VAList to the legalized pointer
- Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Tmp2, V, 0,
+ Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2, V, 0,
false, false, 0);
// Load the actual argument out of the pointer VAList
Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0,
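The new VAARG expansion above over-aligns the va_list pointer when the requested alignment exceeds the minimum stack argument alignment: add Align-1, then mask with -Align (equivalently ~(Align-1)), which rounds up to the next multiple of the power-of-two Align exactly as the assert demands. The same arithmetic standalone:

    #include <cstdint>
    #include <cstdio>

    // Round P up to the next multiple of Align (Align must be a power of two).
    static uintptr_t alignUp(uintptr_t P, uintptr_t Align) {
      return (P + Align - 1) & ~(Align - 1);
    }

    int main() {
      std::printf("0x%llx\n", (unsigned long long)alignUp(0x1004, 16)); // 0x1010
      std::printf("0x%llx\n", (unsigned long long)alignUp(0x1010, 16)); // 0x1010
      return 0;
    }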
@@ -2496,7 +2699,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
case ISD::EXTRACT_VECTOR_ELT:
if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
// This must be an access of the only element. Return it.
- Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, Node->getValueType(0),
+ Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, Node->getValueType(0),
Node->getOperand(0));
else
Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
@@ -2948,13 +3151,13 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
const TargetData &TD = *TLI.getTargetData();
unsigned EntrySize =
DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
-
+
Index = DAG.getNode(ISD::MUL, dl, PTy,
Index, DAG.getConstant(EntrySize, PTy));
SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
- SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
+ SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, PTy, dl, Chain, Addr,
PseudoSourceValue::getJumpTable(), 0, MemVT,
false, false, 0);
Addr = LD;
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index e3eb949..650ee5a 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -453,8 +453,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
SDValue NewL;
if (L->getExtensionType() == ISD::NON_EXTLOAD) {
- NewL = DAG.getLoad(L->getAddressingMode(), dl, L->getExtensionType(),
- NVT, L->getChain(), L->getBasePtr(), L->getOffset(),
+ NewL = DAG.getLoad(L->getAddressingMode(), L->getExtensionType(),
+ NVT, dl, L->getChain(), L->getBasePtr(), L->getOffset(),
L->getSrcValue(), L->getSrcValueOffset(), NVT,
L->isVolatile(), L->isNonTemporal(), L->getAlignment());
// Legalized the chain result - switch anything that used the old chain to
@@ -464,8 +464,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) {
}
// Do a non-extending load followed by FP_EXTEND.
- NewL = DAG.getLoad(L->getAddressingMode(), dl, ISD::NON_EXTLOAD,
- L->getMemoryVT(), L->getChain(),
+ NewL = DAG.getLoad(L->getAddressingMode(), ISD::NON_EXTLOAD,
+ L->getMemoryVT(), dl, L->getChain(),
L->getBasePtr(), L->getOffset(),
L->getSrcValue(), L->getSrcValueOffset(),
L->getMemoryVT(), L->isVolatile(),
@@ -504,7 +504,8 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_VAARG(SDNode *N) {
DebugLoc dl = N->getDebugLoc();
SDValue NewVAARG;
- NewVAARG = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2));
+ NewVAARG = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2),
+ N->getConstantOperandVal(3));
// Legalized the chain result - switch anything that used the old chain to
// use the new one.
@@ -698,9 +699,10 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_BR_CC(SDNode *N) {
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
DAG.getCondCode(CCCode), NewLHS, NewRHS,
- N->getOperand(4));
+ N->getOperand(4)),
+ 0);
}
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_SINT(SDNode *N) {
@@ -739,9 +741,10 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_SELECT_CC(SDNode *N) {
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
N->getOperand(2), N->getOperand(3),
- DAG.getCondCode(CCCode));
+ DAG.getCondCode(CCCode)),
+ 0);
}
SDValue DAGTypeLegalizer::SoftenFloatOp_SETCC(SDNode *N) {
@@ -757,8 +760,9 @@ SDValue DAGTypeLegalizer::SoftenFloatOp_SETCC(SDNode *N) {
}
// Otherwise, update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
- DAG.getCondCode(CCCode));
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
+ DAG.getCondCode(CCCode)),
+ 0);
}
SDValue DAGTypeLegalizer::SoftenFloatOp_STORE(SDNode *N, unsigned OpNo) {
@@ -1106,7 +1110,7 @@ void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDValue &Lo,
assert(NVT.isByteSized() && "Expanded type not byte sized!");
assert(LD->getMemoryVT().bitsLE(NVT) && "Float type not round?");
- Hi = DAG.getExtLoad(LD->getExtensionType(), dl, NVT, Chain, Ptr,
+ Hi = DAG.getExtLoad(LD->getExtensionType(), NVT, dl, Chain, Ptr,
LD->getSrcValue(), LD->getSrcValueOffset(),
LD->getMemoryVT(), LD->isVolatile(),
LD->isNonTemporal(), LD->getAlignment());
@@ -1294,9 +1298,9 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_BR_CC(SDNode *N) {
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
DAG.getCondCode(CCCode), NewLHS, NewRHS,
- N->getOperand(4));
+ N->getOperand(4)), 0);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_FP_ROUND(SDNode *N) {
@@ -1375,9 +1379,9 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_SELECT_CC(SDNode *N) {
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
N->getOperand(2), N->getOperand(3),
- DAG.getCondCode(CCCode));
+ DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_SETCC(SDNode *N) {
@@ -1393,8 +1397,8 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_SETCC(SDNode *N) {
}
// Otherwise, update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
- DAG.getCondCode(CCCode));
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
+ DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) {
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 8b382bc..b94ea9a 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -369,7 +369,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_LOAD(LoadSDNode *N) {
ISD::LoadExtType ExtType =
ISD::isNON_EXTLoad(N) ? ISD::EXTLOAD : N->getExtensionType();
DebugLoc dl = N->getDebugLoc();
- SDValue Res = DAG.getExtLoad(ExtType, dl, NVT, N->getChain(), N->getBasePtr(),
+ SDValue Res = DAG.getExtLoad(ExtType, NVT, dl, N->getChain(), N->getBasePtr(),
N->getSrcValue(), N->getSrcValueOffset(),
N->getMemoryVT(), N->isVolatile(),
N->isNonTemporal(), N->getAlignment());
@@ -572,7 +572,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) {
SmallVector<SDValue, 8> Parts(NumRegs);
for (unsigned i = 0; i < NumRegs; ++i) {
- Parts[i] = DAG.getVAArg(RegVT, dl, Chain, Ptr, N->getOperand(2));
+ Parts[i] = DAG.getVAArg(RegVT, dl, Chain, Ptr, N->getOperand(2),
+ N->getConstantOperandVal(3));
Chain = Parts[i].getValue(1);
}
@@ -725,8 +726,9 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo) {
// The chain (Op#0), CC (#1) and basic block destination (Op#4) are always
// legal types.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
- N->getOperand(1), LHS, RHS, N->getOperand(4));
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
+ N->getOperand(1), LHS, RHS, N->getOperand(4)),
+ 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) {
@@ -737,8 +739,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) {
SDValue Cond = PromoteTargetBoolean(N->getOperand(1), SVT);
// The chain (Op#0) and basic block destination (Op#2) are always legal types.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), Cond,
- N->getOperand(2));
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Cond,
+ N->getOperand(2)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_PAIR(SDNode *N) {
@@ -773,7 +775,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) {
for (unsigned i = 0; i < NumElts; ++i)
NewOps.push_back(GetPromotedInteger(N->getOperand(i)));
- return DAG.UpdateNodeOperands(SDValue(N, 0), &NewOps[0], NumElts);
+ return SDValue(DAG.UpdateNodeOperands(N, &NewOps[0], NumElts), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_CONVERT_RNDSAT(SDNode *N) {
@@ -798,17 +800,18 @@ SDValue DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N,
assert(N->getOperand(1).getValueType().getSizeInBits() >=
N->getValueType(0).getVectorElementType().getSizeInBits() &&
"Type of inserted value narrower than vector element type!");
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
GetPromotedInteger(N->getOperand(1)),
- N->getOperand(2));
+ N->getOperand(2)),
+ 0);
}
assert(OpNo == 2 && "Different operand and result vector types?");
// Promote the index.
SDValue Idx = ZExtPromotedInteger(N->getOperand(2));
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
- N->getOperand(1), Idx);
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
+ N->getOperand(1), Idx), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_MEMBARRIER(SDNode *N) {
@@ -819,15 +822,14 @@ SDValue DAGTypeLegalizer::PromoteIntOp_MEMBARRIER(SDNode *N) {
SDValue Flag = GetPromotedInteger(N->getOperand(i));
NewOps[i] = DAG.getZeroExtendInReg(Flag, dl, MVT::i1);
}
- return DAG.UpdateNodeOperands(SDValue (N, 0), NewOps,
- array_lengthof(NewOps));
+ return SDValue(DAG.UpdateNodeOperands(N, NewOps, array_lengthof(NewOps)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SCALAR_TO_VECTOR(SDNode *N) {
// Integer SCALAR_TO_VECTOR operands are implicitly truncated, so just promote
// the operand in place.
- return DAG.UpdateNodeOperands(SDValue(N, 0),
- GetPromotedInteger(N->getOperand(0)));
+ return SDValue(DAG.UpdateNodeOperands(N,
+ GetPromotedInteger(N->getOperand(0))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) {
@@ -837,8 +839,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) {
EVT SVT = TLI.getSetCCResultType(N->getOperand(1).getValueType());
SDValue Cond = PromoteTargetBoolean(N->getOperand(0), SVT);
- return DAG.UpdateNodeOperands(SDValue(N, 0), Cond,
- N->getOperand(1), N->getOperand(2));
+ return SDValue(DAG.UpdateNodeOperands(N, Cond,
+ N->getOperand(1), N->getOperand(2)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo) {
@@ -849,8 +851,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo) {
PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(4))->get());
// The CC (#4) and the possible return values (#2 and #3) have legal types.
- return DAG.UpdateNodeOperands(SDValue(N, 0), LHS, RHS, N->getOperand(2),
- N->getOperand(3), N->getOperand(4));
+ return SDValue(DAG.UpdateNodeOperands(N, LHS, RHS, N->getOperand(2),
+ N->getOperand(3), N->getOperand(4)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SETCC(SDNode *N, unsigned OpNo) {
@@ -861,12 +863,12 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SETCC(SDNode *N, unsigned OpNo) {
PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(2))->get());
// The CC (#2) is always legal.
- return DAG.UpdateNodeOperands(SDValue(N, 0), LHS, RHS, N->getOperand(2));
+ return SDValue(DAG.UpdateNodeOperands(N, LHS, RHS, N->getOperand(2)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_Shift(SDNode *N) {
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
- ZExtPromotedInteger(N->getOperand(1)));
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
+ ZExtPromotedInteger(N->getOperand(1))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SIGN_EXTEND(SDNode *N) {
@@ -878,8 +880,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_SIGN_EXTEND(SDNode *N) {
}
SDValue DAGTypeLegalizer::PromoteIntOp_SINT_TO_FP(SDNode *N) {
- return DAG.UpdateNodeOperands(SDValue(N, 0),
- SExtPromotedInteger(N->getOperand(0)));
+ return SDValue(DAG.UpdateNodeOperands(N,
+ SExtPromotedInteger(N->getOperand(0))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo){
@@ -905,8 +907,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_TRUNCATE(SDNode *N) {
}
SDValue DAGTypeLegalizer::PromoteIntOp_UINT_TO_FP(SDNode *N) {
- return DAG.UpdateNodeOperands(SDValue(N, 0),
- ZExtPromotedInteger(N->getOperand(0)));
+ return SDValue(DAG.UpdateNodeOperands(N,
+ ZExtPromotedInteger(N->getOperand(0))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_ZERO_EXTEND(SDNode *N) {
@@ -990,6 +992,11 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::SHL:
case ISD::SRA:
case ISD::SRL: ExpandIntRes_Shift(N, Lo, Hi); break;
+
+ case ISD::SADDO:
+ case ISD::SSUBO: ExpandIntRes_SADDSUBO(N, Lo, Hi); break;
+ case ISD::UADDO:
+ case ISD::USUBO: ExpandIntRes_UADDSUBO(N, Lo, Hi); break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
@@ -1526,7 +1533,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
if (N->getMemoryVT().bitsLE(NVT)) {
EVT MemVT = N->getMemoryVT();
- Lo = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr, N->getSrcValue(), SVOffset,
+ Lo = DAG.getExtLoad(ExtType, NVT, dl, Ch, Ptr, N->getSrcValue(), SVOffset,
MemVT, isVolatile, isNonTemporal, Alignment);
// Remember the chain.
@@ -1559,7 +1566,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
unsigned IncrementSize = NVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
- Hi = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr, N->getSrcValue(),
+ Hi = DAG.getExtLoad(ExtType, NVT, dl, Ch, Ptr, N->getSrcValue(),
SVOffset+IncrementSize, NEVT,
isVolatile, isNonTemporal,
MinAlign(Alignment, IncrementSize));
@@ -1577,7 +1584,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
unsigned ExcessBits = (EBytes - IncrementSize)*8;
// Load both the high bits and maybe some of the low bits.
- Hi = DAG.getExtLoad(ExtType, dl, NVT, Ch, Ptr, N->getSrcValue(), SVOffset,
+ Hi = DAG.getExtLoad(ExtType, NVT, dl, Ch, Ptr, N->getSrcValue(), SVOffset,
EVT::getIntegerVT(*DAG.getContext(),
MemVT.getSizeInBits() - ExcessBits),
isVolatile, isNonTemporal, Alignment);
@@ -1586,7 +1593,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
// Load the rest of the low bits.
- Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, NVT, Ch, Ptr, N->getSrcValue(),
+ Lo = DAG.getExtLoad(ISD::ZEXTLOAD, NVT, dl, Ch, Ptr, N->getSrcValue(),
SVOffset+IncrementSize,
EVT::getIntegerVT(*DAG.getContext(), ExcessBits),
isVolatile, isNonTemporal,
@@ -1716,6 +1723,48 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N,
SplitInteger(MakeLibCall(LC, VT, Ops, 2, true/*irrelevant*/, dl), Lo, Hi);
}
+void DAGTypeLegalizer::ExpandIntRes_SADDSUBO(SDNode *Node,
+ SDValue &Lo, SDValue &Hi) {
+ SDValue LHS = Node->getOperand(0);
+ SDValue RHS = Node->getOperand(1);
+ DebugLoc dl = Node->getDebugLoc();
+
+ // Expand the result by simply replacing it with the equivalent
+ // non-overflow-checking operation.
+ SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
+ ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
+ LHS, RHS);
+ SplitInteger(Sum, Lo, Hi);
+
+ // Compute the overflow.
+ //
+ // LHSSign -> LHS >= 0
+ // RHSSign -> RHS >= 0
+ // SumSign -> Sum >= 0
+ //
+ // Add:
+ // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
+ // Sub:
+ // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
+ //
+ EVT OType = Node->getValueType(1);
+ SDValue Zero = DAG.getConstant(0, LHS.getValueType());
+
+ SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
+ SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
+ SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
+ Node->getOpcode() == ISD::SADDO ?
+ ISD::SETEQ : ISD::SETNE);
+
+ SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
+ SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);
+
+ SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
+
+ // Use the calculated overflow everywhere.
+ ReplaceValueWith(SDValue(Node, 1), Cmp);
+}
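
A minimal standalone sketch of the sign rule in the comment above, with plain C++ integers standing in for SDValues (the arithmetic wraps via unsigned casts so the demo itself has no undefined behavior):

#include <cstdint>
#include <cstdio>

// Sketch only, not LLVM code: the sign-comparison rule from the comment.
static bool saddOverflows(int32_t LHS, int32_t RHS) {
  int32_t Sum = (int32_t)((uint32_t)LHS + (uint32_t)RHS); // wrapping add
  bool LHSSign = LHS >= 0, RHSSign = RHS >= 0, SumSign = Sum >= 0;
  // Add: overflow iff the operands agree in sign but the sum does not.
  return (LHSSign == RHSSign) && (LHSSign != SumSign);
}

static bool ssubOverflows(int32_t LHS, int32_t RHS) {
  int32_t Diff = (int32_t)((uint32_t)LHS - (uint32_t)RHS); // wrapping sub
  bool LHSSign = LHS >= 0, RHSSign = RHS >= 0, DiffSign = Diff >= 0;
  // Sub: overflow iff the operands differ in sign and the result's sign
  // differs from the LHS's.
  return (LHSSign != RHSSign) && (LHSSign != DiffSign);
}

int main() {
  std::printf("%d %d\n", saddOverflows(INT32_MAX, 1),   // 1: overflows
                         ssubOverflows(INT32_MIN, 1));  // 1: overflows
}
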
+
void DAGTypeLegalizer::ExpandIntRes_SDIV(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
@@ -1912,6 +1961,29 @@ void DAGTypeLegalizer::ExpandIntRes_TRUNCATE(SDNode *N,
Hi = DAG.getNode(ISD::TRUNCATE, dl, NVT, Hi);
}
+void DAGTypeLegalizer::ExpandIntRes_UADDSUBO(SDNode *N,
+ SDValue &Lo, SDValue &Hi) {
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ DebugLoc dl = N->getDebugLoc();
+
+ // Expand the result by simply replacing it with the equivalent
+ // non-overflow-checking operation.
+ SDValue Sum = DAG.getNode(N->getOpcode() == ISD::UADDO ?
+ ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
+ LHS, RHS);
+ SplitInteger(Sum, Lo, Hi);
+
+ // Calculate the overflow: addition overflows iff a + b < a, and subtraction
+ // overflows iff a - b > a.
+ SDValue Ofl = DAG.getSetCC(dl, N->getValueType(1), Sum, LHS,
+ N->getOpcode() == ISD::UADDO ?
+ ISD::SETULT : ISD::SETUGT);
+
+ // Use the calculated overflow everywhere.
+ ReplaceValueWith(SDValue(N, 1), Ofl);
+}
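
The same idea for the unsigned case, as a sketch (plain unsigned integers, not SDValues): the single comparison below is exactly what the expanded code emits.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t A = 0xFFFFFFF0u, B = 0x20u;
  uint32_t Sum = A + B;  // wraps to 0x10
  uint32_t Diff = A - B; // no borrow here
  std::printf("uadd overflow: %d\n", Sum < A);  // 1: a + b < a
  std::printf("usub overflow: %d\n", Diff > A); // 0: a - b <= a
}
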
+
void DAGTypeLegalizer::ExpandIntRes_UDIV(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
@@ -2154,9 +2226,9 @@ SDValue DAGTypeLegalizer::ExpandIntOp_BR_CC(SDNode *N) {
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
DAG.getCondCode(CCCode), NewLHS, NewRHS,
- N->getOperand(4));
+ N->getOperand(4)), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_SELECT_CC(SDNode *N) {
@@ -2172,9 +2244,9 @@ SDValue DAGTypeLegalizer::ExpandIntOp_SELECT_CC(SDNode *N) {
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
N->getOperand(2), N->getOperand(3),
- DAG.getCondCode(CCCode));
+ DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_SETCC(SDNode *N) {
@@ -2190,8 +2262,8 @@ SDValue DAGTypeLegalizer::ExpandIntOp_SETCC(SDNode *N) {
}
// Otherwise, update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
- DAG.getCondCode(CCCode));
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
+ DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_Shift(SDNode *N) {
@@ -2200,7 +2272,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_Shift(SDNode *N) {
// upper half of the shift amount is zero. Just use the lower half.
SDValue Lo, Hi;
GetExpandedInteger(N->getOperand(1), Lo, Hi);
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), Lo);
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Lo), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_RETURNADDR(SDNode *N) {
@@ -2209,7 +2281,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_RETURNADDR(SDNode *N) {
// constant to valid type.
SDValue Lo, Hi;
GetExpandedInteger(N->getOperand(0), Lo, Hi);
- return DAG.UpdateNodeOperands(SDValue(N, 0), Lo);
+ return SDValue(DAG.UpdateNodeOperands(N, Lo), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_SINT_TO_FP(SDNode *N) {
@@ -2384,7 +2456,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
// Load the value out, extending it from f32 to the destination float type.
// FIXME: Avoid the extend by constructing the right constant pool?
- SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, DstVT, DAG.getEntryNode(),
+ SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, DstVT, dl, DAG.getEntryNode(),
FudgePtr, NULL, 0, MVT::f32,
false, false, Alignment);
return DAG.getNode(ISD::FADD, dl, DstVT, SignedConv, Fudge);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index 17f131b..6e56c98 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -485,15 +485,14 @@ SDNode *DAGTypeLegalizer::AnalyzeNewNode(SDNode *N) {
NewOps.push_back(Op);
} else if (Op != OrigOp) {
// This is the first operand to change - add all operands so far.
- NewOps.insert(NewOps.end(), N->op_begin(), N->op_begin() + i);
+ NewOps.append(N->op_begin(), N->op_begin() + i);
NewOps.push_back(Op);
}
}
// Some operands changed - update the node.
if (!NewOps.empty()) {
- SDNode *M = DAG.UpdateNodeOperands(SDValue(N, 0), &NewOps[0],
- NewOps.size()).getNode();
+ SDNode *M = DAG.UpdateNodeOperands(N, &NewOps[0], NewOps.size());
if (M != N) {
// The node morphed into a different node. Normally for this to happen
// the original node would have to be marked NewNode. However this can
@@ -684,40 +683,45 @@ void DAGTypeLegalizer::ReplaceValueWith(SDValue From, SDValue To) {
// can potentially cause recursive merging.
SmallSetVector<SDNode*, 16> NodesToAnalyze;
NodeUpdateListener NUL(*this, NodesToAnalyze);
- DAG.ReplaceAllUsesOfValueWith(From, To, &NUL);
-
- // The old node may still be present in a map like ExpandedIntegers or
- // PromotedIntegers. Inform maps about the replacement.
- ReplacedValues[From] = To;
-
- // Process the list of nodes that need to be reanalyzed.
- while (!NodesToAnalyze.empty()) {
- SDNode *N = NodesToAnalyze.back();
- NodesToAnalyze.pop_back();
- if (N->getNodeId() != DAGTypeLegalizer::NewNode)
- // The node was analyzed while reanalyzing an earlier node - it is safe to
- // skip. Note that this is not a morphing node - otherwise it would still
- // be marked NewNode.
- continue;
+ do {
+ DAG.ReplaceAllUsesOfValueWith(From, To, &NUL);
+
+ // The old node may still be present in a map like ExpandedIntegers or
+ // PromotedIntegers. Inform maps about the replacement.
+ ReplacedValues[From] = To;
+
+ // Process the list of nodes that need to be reanalyzed.
+ while (!NodesToAnalyze.empty()) {
+ SDNode *N = NodesToAnalyze.back();
+ NodesToAnalyze.pop_back();
+ if (N->getNodeId() != DAGTypeLegalizer::NewNode)
+ // The node was analyzed while reanalyzing an earlier node - it is safe
+ // to skip. Note that this is not a morphing node - otherwise it would
+ // still be marked NewNode.
+ continue;
- // Analyze the node's operands and recalculate the node ID.
- SDNode *M = AnalyzeNewNode(N);
- if (M != N) {
- // The node morphed into a different node. Make everyone use the new node
- // instead.
- assert(M->getNodeId() != NewNode && "Analysis resulted in NewNode!");
- assert(N->getNumValues() == M->getNumValues() &&
- "Node morphing changed the number of results!");
- for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
- SDValue OldVal(N, i);
- SDValue NewVal(M, i);
- if (M->getNodeId() == Processed)
- RemapValue(NewVal);
- DAG.ReplaceAllUsesOfValueWith(OldVal, NewVal, &NUL);
+ // Analyze the node's operands and recalculate the node ID.
+ SDNode *M = AnalyzeNewNode(N);
+ if (M != N) {
+ // The node morphed into a different node. Make everyone use the new
+ // node instead.
+ assert(M->getNodeId() != NewNode && "Analysis resulted in NewNode!");
+ assert(N->getNumValues() == M->getNumValues() &&
+ "Node morphing changed the number of results!");
+ for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
+ SDValue OldVal(N, i);
+ SDValue NewVal(M, i);
+ if (M->getNodeId() == Processed)
+ RemapValue(NewVal);
+ DAG.ReplaceAllUsesOfValueWith(OldVal, NewVal, &NUL);
+ }
+ // The original node continues to exist in the DAG, marked NewNode.
}
- // The original node continues to exist in the DAG, marked NewNode.
}
- }
+ // When recursively updating nodes with new nodes, it is possible to have
+ // new uses of From due to CSE. If this happens, replace the new uses of
+ // From with To.
+ } while (!From.use_empty());
}
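
A toy model of the fixed-point loop introduced above (sketch only: a set of integers stands in for the use list, and one artificial event plays the role of CSE handing From a new use mid-replacement):

#include <cstdio>
#include <set>

int main() {
  std::set<int> FromUses = {1, 2, 3}, ToUses;
  bool CSEFired = false;
  do {
    // ReplaceAllUsesOfValueWith: move every current use of From to To.
    ToUses.insert(FromUses.begin(), FromUses.end());
    FromUses.clear();
    // Reanalysis may merge nodes and hand From a brand-new use once.
    if (!CSEFired) { FromUses.insert(4); CSEFired = true; }
  } while (!FromUses.empty()); // iterate until From really is dead
  std::printf("To has %zu uses, From has %zu\n",
              ToUses.size(), FromUses.size()); // To has 4, From has 0
}
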
void DAGTypeLegalizer::SetPromotedInteger(SDValue Op, SDValue Result) {
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index c665963..bd86694 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -345,6 +345,9 @@ private:
void ExpandIntRes_UREM (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_Shift (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_SADDSUBO (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_UADDSUBO (SDNode *N, SDValue &Lo, SDValue &Hi);
+
void ExpandShiftByConstant(SDNode *N, unsigned Amt,
SDValue &Lo, SDValue &Hi);
bool ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi);
@@ -620,6 +623,7 @@ private:
SDValue WidenVecRes_Binary(SDNode *N);
SDValue WidenVecRes_Convert(SDNode *N);
+ SDValue WidenVecRes_POWI(SDNode *N);
SDValue WidenVecRes_Shift(SDNode *N);
SDValue WidenVecRes_Unary(SDNode *N);
SDValue WidenVecRes_InregOp(SDNode *N);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 88e1e62..9c2b1d9 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -238,13 +238,15 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
}
void DAGTypeLegalizer::ExpandRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
- EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ EVT OVT = N->getValueType(0);
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), OVT);
SDValue Chain = N->getOperand(0);
SDValue Ptr = N->getOperand(1);
DebugLoc dl = N->getDebugLoc();
+ const unsigned Align = N->getConstantOperandVal(3);
- Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2));
- Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, N->getOperand(2));
+ Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2), Align);
+ Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, N->getOperand(2), 0);
// Handle endianness of the load.
if (TLI.isBigEndian())
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 0e2bd02..621c087 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -116,7 +116,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
Ops.push_back(LegalizeOp(Node->getOperand(i)));
SDValue Result =
- DAG.UpdateNodeOperands(Op.getValue(0), Ops.data(), Ops.size());
+ SDValue(DAG.UpdateNodeOperands(Op.getNode(), Ops.data(), Ops.size()), 0);
bool HasVectorValue = false;
for (SDNode::value_iterator J = Node->value_begin(), E = Node->value_end();
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 7efeea1..93aeff5 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -165,9 +165,10 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) {
SDValue DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) {
assert(N->isUnindexed() && "Indexed vector load?");
- SDValue Result = DAG.getLoad(ISD::UNINDEXED, N->getDebugLoc(),
+ SDValue Result = DAG.getLoad(ISD::UNINDEXED,
N->getExtensionType(),
N->getValueType(0).getVectorElementType(),
+ N->getDebugLoc(),
N->getChain(), N->getBasePtr(),
DAG.getUNDEF(N->getBasePtr().getValueType()),
N->getSrcValue(), N->getSrcValueOffset(),
@@ -448,6 +449,11 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
+ case ISD::FEXP:
+ case ISD::FEXP2:
+ case ISD::FLOG:
+ case ISD::FLOG2:
+ case ISD::FLOG10:
SplitVecRes_UnaryOp(N, Lo, Hi);
break;
@@ -755,14 +761,14 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
EVT LoMemVT, HiMemVT;
GetSplitDestVTs(MemoryVT, LoMemVT, HiMemVT);
- Lo = DAG.getLoad(ISD::UNINDEXED, dl, ExtType, LoVT, Ch, Ptr, Offset,
+ Lo = DAG.getLoad(ISD::UNINDEXED, ExtType, LoVT, dl, Ch, Ptr, Offset,
SV, SVOffset, LoMemVT, isVolatile, isNonTemporal, Alignment);
unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
DAG.getIntPtrConstant(IncrementSize));
SVOffset += IncrementSize;
- Hi = DAG.getLoad(ISD::UNINDEXED, dl, ExtType, HiVT, Ch, Ptr, Offset,
+ Hi = DAG.getLoad(ISD::UNINDEXED, ExtType, HiVT, dl, Ch, Ptr, Offset,
SV, SVOffset, HiMemVT, isVolatile, isNonTemporal, Alignment);
// Build a factor node to remember that this load is independent of the
@@ -1082,10 +1088,11 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
uint64_t LoElts = Lo.getValueType().getVectorNumElements();
if (IdxVal < LoElts)
- return DAG.UpdateNodeOperands(SDValue(N, 0), Lo, Idx);
- return DAG.UpdateNodeOperands(SDValue(N, 0), Hi,
+ return SDValue(DAG.UpdateNodeOperands(N, Lo, Idx), 0);
+ return SDValue(DAG.UpdateNodeOperands(N, Hi,
DAG.getConstant(IdxVal - LoElts,
- Idx.getValueType()));
+ Idx.getValueType())),
+ 0);
}
// Store the vector to the stack.
@@ -1099,7 +1106,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
// Load back the required element.
StackPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
- return DAG.getExtLoad(ISD::EXTLOAD, dl, N->getValueType(0), Store, StackPtr,
+ return DAG.getExtLoad(ISD::EXTLOAD, N->getValueType(0), dl, Store, StackPtr,
SV, 0, EltVT, false, false, 0);
}
@@ -1199,7 +1206,6 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FDIV:
case ISD::FMUL:
case ISD::FPOW:
- case ISD::FPOWI:
case ISD::FREM:
case ISD::FSUB:
case ISD::MUL:
@@ -1215,6 +1221,10 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
Res = WidenVecRes_Binary(N);
break;
+ case ISD::FPOWI:
+ Res = WidenVecRes_POWI(N);
+ break;
+
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
@@ -1241,6 +1251,11 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FNEG:
case ISD::FSIN:
case ISD::FSQRT:
+ case ISD::FEXP:
+ case ISD::FEXP2:
+ case ISD::FLOG:
+ case ISD::FLOG2:
+ case ISD::FLOG10:
Res = WidenVecRes_Unary(N);
break;
}
@@ -1258,7 +1273,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
EVT WidenEltVT = WidenVT.getVectorElementType();
EVT VT = WidenVT;
unsigned NumElts = VT.getVectorNumElements();
- while (!TLI.isTypeLegal(VT) && NumElts != 1) {
+ while (!TLI.isTypeSynthesizable(VT) && NumElts != 1) {
NumElts = NumElts / 2;
VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
}
@@ -1273,13 +1288,20 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements());
} else {
// Since the operation can trap, apply operation on the original vector.
+ EVT MaxVT = VT;
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
unsigned CurNumElts = N->getValueType(0).getVectorNumElements();
SmallVector<SDValue, 16> ConcatOps(CurNumElts);
unsigned ConcatEnd = 0; // Current ConcatOps index.
- unsigned Idx = 0; // Current Idx into input vectors.
+ int Idx = 0; // Current Idx into input vectors.
+
+ // NumElts := greatest synthesizable vector size (at most WidenVT)
+ // while (orig. vector has unhandled elements) {
+ // take munches of size NumElts from the beginning and add to ConcatOps
+ // NumElts := next smaller supported vector size or 1
+ // }
while (CurNumElts != 0) {
while (CurNumElts >= NumElts) {
SDValue EOp1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, InOp1,
@@ -1290,26 +1312,21 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
Idx += NumElts;
CurNumElts -= NumElts;
}
- EVT PrevVecVT = VT;
do {
NumElts = NumElts / 2;
VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
- } while (!TLI.isTypeLegal(VT) && NumElts != 1);
+ } while (!TLI.isTypeSynthesizable(VT) && NumElts != 1);
if (NumElts == 1) {
- // Since we are using concat vector, build a vector from the scalar ops.
- SDValue VecOp = DAG.getUNDEF(PrevVecVT);
for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
InOp1, DAG.getIntPtrConstant(Idx));
SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
InOp2, DAG.getIntPtrConstant(Idx));
- VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, PrevVecVT, VecOp,
- DAG.getNode(Opcode, dl, WidenEltVT, EOp1, EOp2),
- DAG.getIntPtrConstant(i));
+ ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
+ EOp1, EOp2);
}
CurNumElts = 0;
- ConcatOps[ConcatEnd++] = VecOp;
}
}
@@ -1320,23 +1337,65 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
return ConcatOps[0];
}
- // Rebuild vector to one with the widen type
- Idx = ConcatEnd - 1;
- while (Idx != 0) {
+ // while (Some element of ConcatOps is not of type MaxVT) {
+ // From the end of ConcatOps, collect elements of the same type and put
+ // them into an op of the next larger supported type
+ // }
+ while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
+ Idx = ConcatEnd - 1;
VT = ConcatOps[Idx--].getValueType();
- while (Idx != 0 && ConcatOps[Idx].getValueType() == VT)
- --Idx;
- if (Idx != 0) {
- VT = ConcatOps[Idx].getValueType();
- ConcatOps[Idx+1] = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
- &ConcatOps[Idx+1], ConcatEnd - Idx - 1);
+ while (Idx >= 0 && ConcatOps[Idx].getValueType() == VT)
+ Idx--;
+
+ int NextSize = VT.isVector() ? VT.getVectorNumElements() : 1;
+ EVT NextVT;
+ do {
+ NextSize *= 2;
+ NextVT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NextSize);
+ } while (!TLI.isTypeSynthesizable(NextVT));
+
+ if (!VT.isVector()) {
+ // Scalar type, create an INSERT_VECTOR_ELT of type NextVT
+ SDValue VecOp = DAG.getUNDEF(NextVT);
+ unsigned NumToInsert = ConcatEnd - Idx - 1;
+ for (unsigned i = 0, OpIdx = Idx+1; i < NumToInsert; i++, OpIdx++) {
+ VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NextVT, VecOp,
+ ConcatOps[OpIdx], DAG.getIntPtrConstant(i));
+ }
+ ConcatOps[Idx+1] = VecOp;
ConcatEnd = Idx + 2;
+ }
+ else {
+ // Vector type, create a CONCAT_VECTORS of type NextVT
+ SDValue undefVec = DAG.getUNDEF(VT);
+ unsigned OpsToConcat = NextSize/VT.getVectorNumElements();
+ SmallVector<SDValue, 16> SubConcatOps(OpsToConcat);
+ unsigned RealVals = ConcatEnd - Idx - 1;
+ unsigned SubConcatEnd = 0;
+ unsigned SubConcatIdx = Idx + 1;
+ while (SubConcatEnd < RealVals)
+ SubConcatOps[SubConcatEnd++] = ConcatOps[++Idx];
+ while (SubConcatEnd < OpsToConcat)
+ SubConcatOps[SubConcatEnd++] = undefVec;
+ ConcatOps[SubConcatIdx] = DAG.getNode(ISD::CONCAT_VECTORS, dl,
+ NextVT, &SubConcatOps[0],
+ OpsToConcat);
+ ConcatEnd = SubConcatIdx + 1;
}
}
+
+ // Check to see if we have a single operation with the widen type.
+ if (ConcatEnd == 1) {
+ VT = ConcatOps[0].getValueType();
+ if (VT == WidenVT)
+ return ConcatOps[0];
+ }
- unsigned NumOps = WidenVT.getVectorNumElements()/VT.getVectorNumElements();
+ // add undefs of size MaxVT until ConcatOps grows to length of WidenVT
+ unsigned NumOps =
+ WidenVT.getVectorNumElements()/MaxVT.getVectorNumElements();
if (NumOps != ConcatEnd ) {
- SDValue UndefVal = DAG.getUNDEF(VT);
+ SDValue UndefVal = DAG.getUNDEF(MaxVT);
for (unsigned j = ConcatEnd; j < NumOps; ++j)
ConcatOps[j] = UndefVal;
}
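
The two pseudocode comments above describe a peel-and-rebuild strategy. Here is a standalone sketch of the peeling half, assuming for illustration that only power-of-two chunk sizes up to 4 are synthesizable:

#include <cstdio>
#include <vector>

int main() {
  unsigned CurNumElts = 7; // elements of the original vector left to handle
  unsigned NumElts = 4;    // greatest synthesizable width, per assumption
  std::vector<unsigned> ConcatOps; // chunk sizes stand in for subvector ops
  while (CurNumElts != 0) {
    while (CurNumElts >= NumElts) { // take munches of size NumElts
      ConcatOps.push_back(NumElts);
      CurNumElts -= NumElts;
    }
    NumElts /= 2; // next smaller supported size, down to scalars
  }
  for (unsigned C : ConcatOps)
    std::printf("%u ", C); // prints: 4 2 1
  std::printf("\n");
}
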
@@ -1366,7 +1425,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
return DAG.getNode(Opcode, dl, WidenVT, InOp);
}
- if (TLI.isTypeLegal(InWidenVT)) {
+ if (TLI.isTypeSynthesizable(InWidenVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
@@ -1410,6 +1469,13 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
return DAG.getNode(ISD::BUILD_VECTOR, dl, WidenVT, &Ops[0], WidenNumElts);
}
+SDValue DAGTypeLegalizer::WidenVecRes_POWI(SDNode *N) {
+ EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ SDValue InOp = GetWidenedVector(N->getOperand(0));
+ SDValue ShOp = N->getOperand(1);
+ return DAG.getNode(N->getOpcode(), N->getDebugLoc(), WidenVT, InOp, ShOp);
+}
+
SDValue DAGTypeLegalizer::WidenVecRes_Shift(SDNode *N) {
EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
SDValue InOp = GetWidenedVector(N->getOperand(0));
@@ -1501,7 +1567,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
NewInVT = EVT::getVectorVT(*DAG.getContext(), InVT, NewNumElts);
}
- if (TLI.isTypeLegal(NewInVT)) {
+ if (TLI.isTypeSynthesizable(NewInVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
@@ -1642,7 +1708,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_CONVERT_RNDSAT(SDNode *N) {
SatOp, CvtCode);
}
- if (TLI.isTypeLegal(InWidenVT)) {
+ if (TLI.isTypeSynthesizable(InWidenVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
@@ -1968,7 +2034,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_BIT_CONVERT(SDNode *N) {
if (InWidenSize % Size == 0 && !VT.isVector()) {
unsigned NewNumElts = InWidenSize / Size;
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), VT, NewNumElts);
- if (TLI.isTypeLegal(NewVT)) {
+ if (TLI.isTypeSynthesizable(NewVT)) {
SDValue BitOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, InOp);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp,
DAG.getIntPtrConstant(0));
@@ -2066,7 +2132,7 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
unsigned MemVTWidth = MemVT.getSizeInBits();
if (MemVT.getSizeInBits() <= WidenEltWidth)
break;
- if (TLI.isTypeLegal(MemVT) && (WidenWidth % MemVTWidth) == 0 &&
+ if (TLI.isTypeSynthesizable(MemVT) && (WidenWidth % MemVTWidth) == 0 &&
(MemVTWidth <= Width ||
(Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
RetVT = MemVT;
@@ -2080,7 +2146,7 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
VT >= (unsigned)MVT::FIRST_VECTOR_VALUETYPE; --VT) {
EVT MemVT = (MVT::SimpleValueType) VT;
unsigned MemVTWidth = MemVT.getSizeInBits();
- if (TLI.isTypeLegal(MemVT) && WidenEltVT == MemVT.getVectorElementType() &&
+ if (TLI.isTypeSynthesizable(MemVT) && WidenEltVT == MemVT.getVectorElementType() &&
(WidenWidth % MemVTWidth) == 0 &&
(MemVTWidth <= Width ||
(Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
@@ -2286,14 +2352,14 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVector<SDValue, 16>& LdChain,
unsigned WidenNumElts = WidenVT.getVectorNumElements();
SmallVector<SDValue, 16> Ops(WidenNumElts);
unsigned Increment = LdEltVT.getSizeInBits() / 8;
- Ops[0] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, BasePtr, SV, SVOffset,
+ Ops[0] = DAG.getExtLoad(ExtType, EltVT, dl, Chain, BasePtr, SV, SVOffset,
LdEltVT, isVolatile, isNonTemporal, Align);
LdChain.push_back(Ops[0].getValue(1));
unsigned i = 0, Offset = Increment;
for (i=1; i < NumElts; ++i, Offset += Increment) {
SDValue NewBasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
BasePtr, DAG.getIntPtrConstant(Offset));
- Ops[i] = DAG.getExtLoad(ExtType, dl, EltVT, Chain, NewBasePtr, SV,
+ Ops[i] = DAG.getExtLoad(ExtType, EltVT, dl, Chain, NewBasePtr, SV,
SVOffset + Offset, LdEltVT, isVolatile,
isNonTemporal, Align);
LdChain.push_back(Ops[i].getValue(1));
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index ad8630a..3b86c32 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -535,7 +535,7 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
SUnit *LRDef = LiveRegDefs[Reg];
EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
const TargetRegisterClass *RC =
- TRI->getPhysicalRegisterRegClass(Reg, VT);
+ TRI->getMinimalPhysRegClass(Reg, VT);
const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
// If cross copy register class is null, then it must be possible copy
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index 820ba66..3ef521c 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -320,7 +320,7 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
CapturePred(&*I);
- if (I->isAssignedRegDep() && SU->getHeight() == LiveRegCycles[I->getReg()]) {
+ if (I->isAssignedRegDep() && SU->getHeight() == LiveRegCycles[I->getReg()]){
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
"Physical register dependency violated?");
@@ -795,7 +795,7 @@ void ScheduleDAGRRList::ListScheduleBottomUp() {
SUnit *LRDef = LiveRegDefs[Reg];
EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
const TargetRegisterClass *RC =
- TRI->getPhysicalRegisterRegClass(Reg, VT);
+ TRI->getMinimalPhysRegClass(Reg, VT);
const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
// If cross copy register class is null, then it must be possible copy
@@ -1116,7 +1116,7 @@ namespace {
SUnit *pop() {
if (empty()) return NULL;
std::vector<SUnit *>::iterator Best = Queue.begin();
- for (std::vector<SUnit *>::iterator I = next(Queue.begin()),
+ for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
E = Queue.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
@@ -1275,6 +1275,17 @@ bool hybrid_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const{
return left->getHeight() > right->getHeight();
} else if (RStall)
return false;
+
+ // If either node is scheduling for latency, sort them by height and latency
+ // first.
+ if (left->SchedulingPref == Sched::Latency ||
+ right->SchedulingPref == Sched::Latency) {
+ if (left->getHeight() != right->getHeight())
+ return left->getHeight() > right->getHeight();
+ if (left->Latency != right->Latency)
+ return left->Latency > right->Latency;
+ }
+
return BURRSort(left, right, SPQ);
}
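
A minimal sketch of the comparator tweak, using a toy SUnit with only the fields the new code consults (assumption: returning false falls through to the default BURRSort order):

#include <cstdio>

struct SU { unsigned Height, Latency; bool WantsLatency; };

// If either node is scheduling for latency, taller subtrees win first,
// then longer-latency nodes; otherwise defer to the usual ordering.
static bool leftFirst(const SU &L, const SU &R) {
  if (L.WantsLatency || R.WantsLatency) {
    if (L.Height != R.Height) return L.Height > R.Height;
    if (L.Latency != R.Latency) return L.Latency > R.Latency;
  }
  return false; // BURRSort(left, right, SPQ) decides in the real code
}

int main() {
  SU A{5, 2, true}, B{3, 4, false};
  std::printf("%s\n", leftFirst(A, B) ? "A first" : "fallback"); // A first
}
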
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index 3185c88..06cf053 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -59,7 +59,11 @@ SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
SUnits.back().OrigNode = &SUnits.back();
SUnit *SU = &SUnits.back();
const TargetLowering &TLI = DAG->getTargetLoweringInfo();
- SU->SchedulingPref = TLI.getSchedulingPreference(N);
+ if (N->isMachineOpcode() &&
+ N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF)
+ SU->SchedulingPref = Sched::None;
+ else
+ SU->SchedulingPref = TLI.getSchedulingPreference(N);
return SU;
}
@@ -97,7 +101,7 @@ static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
PhysReg = Reg;
const TargetRegisterClass *RC =
- TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
+ TRI->getMinimalPhysRegClass(Reg, Def->getValueType(ResNo));
Cost = RC->getCopyCost();
}
}
@@ -106,17 +110,42 @@ static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
SelectionDAG *DAG) {
SmallVector<EVT, 4> VTs;
- for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
- VTs.push_back(N->getValueType(i));
+ SDNode *FlagDestNode = Flag.getNode();
+
+ // Don't add a flag from a node to itself.
+ if (FlagDestNode == N) return;
+
+ // Don't add a flag to something which already has a flag.
+ if (N->getValueType(N->getNumValues() - 1) == MVT::Flag) return;
+
+ for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
+ VTs.push_back(N->getValueType(I));
+
if (AddFlag)
VTs.push_back(MVT::Flag);
+
SmallVector<SDValue, 4> Ops;
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- Ops.push_back(N->getOperand(i));
- if (Flag.getNode())
+ for (unsigned I = 0, E = N->getNumOperands(); I != E; ++I)
+ Ops.push_back(N->getOperand(I));
+
+ if (FlagDestNode)
Ops.push_back(Flag);
+
SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
+ MachineSDNode::mmo_iterator Begin = 0, End = 0;
+ MachineSDNode *MN = dyn_cast<MachineSDNode>(N);
+
+ // Store memory references.
+ if (MN) {
+ Begin = MN->memoperands_begin();
+ End = MN->memoperands_end();
+ }
+
DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());
+
+ // Reset the memory references
+ if (MN)
+ MN->setMemRefs(Begin, End);
}
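
Why AddFlags now saves the memory operands: morphing rebuilds the node and would drop them. A sketch with a toy node type (not MachineSDNode):

#include <cstdio>
#include <vector>

struct ToyNode { std::vector<int> MemRefs; };

static void morphNodeTo(ToyNode &N) { N.MemRefs.clear(); } // loses them

int main() {
  ToyNode N{{10, 20}};
  std::vector<int> Saved(N.MemRefs.begin(), N.MemRefs.end()); // Begin/End
  morphNodeTo(N);    // MorphNodeTo in the real code
  N.MemRefs = Saved; // setMemRefs(Begin, End)
  std::printf("%zu memrefs kept\n", N.MemRefs.size()); // 2
}
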
/// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
@@ -124,98 +153,98 @@ static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
/// offsets are not far apart (target specific), it adds MVT::Flag inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
-void ScheduleDAGSDNodes::ClusterNeighboringLoads() {
+void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
+ SDNode *Chain = 0;
+ unsigned NumOps = Node->getNumOperands();
+ if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
+ Chain = Node->getOperand(NumOps-1).getNode();
+ if (!Chain)
+ return;
+
+ // Look for other loads of the same chain. Find loads that are loading from
+ // the same base pointer and different offsets.
SmallPtrSet<SDNode*, 16> Visited;
SmallVector<int64_t, 4> Offsets;
DenseMap<long long, SDNode*> O2SMap; // Map from offset to SDNode.
- for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
- E = DAG->allnodes_end(); NI != E; ++NI) {
- SDNode *Node = &*NI;
- if (!Node || !Node->isMachineOpcode())
+ bool Cluster = false;
+ SDNode *Base = Node;
+ for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
+ I != E; ++I) {
+ SDNode *User = *I;
+ if (User == Node || !Visited.insert(User))
continue;
-
- unsigned Opc = Node->getMachineOpcode();
- const TargetInstrDesc &TID = TII->get(Opc);
- if (!TID.mayLoad())
+ int64_t Offset1, Offset2;
+ if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
+ Offset1 == Offset2)
+ // FIXME: Should be OK if the addresses are identical. But earlier
+ // optimizations really should have eliminated one of the loads.
continue;
+ if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
+ Offsets.push_back(Offset1);
+ O2SMap.insert(std::make_pair(Offset2, User));
+ Offsets.push_back(Offset2);
+ if (Offset2 < Offset1)
+ Base = User;
+ Cluster = true;
+ }
- SDNode *Chain = 0;
- unsigned NumOps = Node->getNumOperands();
- if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
- Chain = Node->getOperand(NumOps-1).getNode();
- if (!Chain)
- continue;
+ if (!Cluster)
+ return;
- // Look for other loads of the same chain. Find loads that are loading from
- // the same base pointer and different offsets.
- Visited.clear();
- Offsets.clear();
- O2SMap.clear();
- bool Cluster = false;
- SDNode *Base = Node;
- int64_t BaseOffset;
- for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
- I != E; ++I) {
- SDNode *User = *I;
- if (User == Node || !Visited.insert(User))
- continue;
- int64_t Offset1, Offset2;
- if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
- Offset1 == Offset2)
- // FIXME: Should be ok if they addresses are identical. But earlier
- // optimizations really should have eliminated one of the loads.
- continue;
- if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
- Offsets.push_back(Offset1);
- O2SMap.insert(std::make_pair(Offset2, User));
- Offsets.push_back(Offset2);
- if (Offset2 < Offset1) {
- Base = User;
- BaseOffset = Offset2;
- } else {
- BaseOffset = Offset1;
- }
- Cluster = true;
- }
+ // Sort them in increasing order.
+ std::sort(Offsets.begin(), Offsets.end());
+
+ // Check if the loads are close enough.
+ SmallVector<SDNode*, 4> Loads;
+ unsigned NumLoads = 0;
+ int64_t BaseOff = Offsets[0];
+ SDNode *BaseLoad = O2SMap[BaseOff];
+ Loads.push_back(BaseLoad);
+ for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
+ int64_t Offset = Offsets[i];
+ SDNode *Load = O2SMap[Offset];
+ if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,NumLoads))
+ break; // Stop right here. Ignore loads that are further away.
+ Loads.push_back(Load);
+ ++NumLoads;
+ }
- if (!Cluster)
- continue;
+ if (NumLoads == 0)
+ return;
- // Sort them in increasing order.
- std::sort(Offsets.begin(), Offsets.end());
-
- // Check if the loads are close enough.
- SmallVector<SDNode*, 4> Loads;
- unsigned NumLoads = 0;
- int64_t BaseOff = Offsets[0];
- SDNode *BaseLoad = O2SMap[BaseOff];
- Loads.push_back(BaseLoad);
- for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
- int64_t Offset = Offsets[i];
- SDNode *Load = O2SMap[Offset];
- if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
- NumLoads))
- break; // Stop right here. Ignore loads that are further away.
- Loads.push_back(Load);
- ++NumLoads;
- }
+ // Cluster loads by adding MVT::Flag outputs and inputs. This also
+ // ensures they are scheduled in order of increasing addresses.
+ SDNode *Lead = Loads[0];
+ AddFlags(Lead, SDValue(0, 0), true, DAG);
+
+ SDValue InFlag = SDValue(Lead, Lead->getNumValues() - 1);
+ for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
+ bool OutFlag = I < E - 1;
+ SDNode *Load = Loads[I];
+
+ AddFlags(Load, InFlag, OutFlag, DAG);
+
+ if (OutFlag)
+ InFlag = SDValue(Load, Load->getNumValues() - 1);
+
+ ++LoadsClustered;
+ }
+}
- if (NumLoads == 0)
+/// ClusterNodes - Cluster certain nodes which should be scheduled together.
+///
+void ScheduleDAGSDNodes::ClusterNodes() {
+ for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
+ E = DAG->allnodes_end(); NI != E; ++NI) {
+ SDNode *Node = &*NI;
+ if (!Node || !Node->isMachineOpcode())
continue;
- // Cluster loads by adding MVT::Flag outputs and inputs. This also
- // ensure they are scheduled in order of increasing addresses.
- SDNode *Lead = Loads[0];
- AddFlags(Lead, SDValue(0,0), true, DAG);
- SDValue InFlag = SDValue(Lead, Lead->getNumValues()-1);
- for (unsigned i = 1, e = Loads.size(); i != e; ++i) {
- bool OutFlag = i < e-1;
- SDNode *Load = Loads[i];
- AddFlags(Load, InFlag, OutFlag, DAG);
- if (OutFlag)
- InFlag = SDValue(Load, Load->getNumValues()-1);
- ++LoadsClustered;
- }
+ unsigned Opc = Node->getMachineOpcode();
+ const TargetInstrDesc &TID = TII->get(Opc);
+ if (TID.mayLoad())
+ // Cluster loads from "near" addresses into combined SUnits.
+ ClusterNeighboringLoads(Node);
}
}
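
A sketch of the per-node clustering decision above, with a fixed 64-byte window standing in for the target hook shouldScheduleLoadsNear (that hook, not this constant, decides in the real code):

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // Offsets of loads found off one base pointer, in discovery order.
  std::vector<long long> Offsets = {32, 0, 8, 512};
  std::sort(Offsets.begin(), Offsets.end()); // increasing order
  unsigned NumClustered = 1;                 // the base load itself
  for (size_t I = 1; I < Offsets.size(); ++I) {
    if (Offsets[I] - Offsets[0] > 64)
      break; // stop right here; ignore loads that are further away
    ++NumClustered;
  }
  std::printf("clustered %u of %zu loads\n",
              NumClustered, Offsets.size()); // 3 of 4
}
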
@@ -364,8 +393,10 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
if (Cost >= 0)
PhysReg = 0;
- const SDep& dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
- OpSU->Latency, PhysReg);
+ // If this is a ctrl dep, latency is 1.
+ unsigned OpLatency = isChain ? 1 : OpSU->Latency;
+ const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
+ OpLatency, PhysReg);
if (!isChain && !UnitLatencies) {
ComputeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
@@ -382,8 +413,8 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
/// excludes nodes that aren't interesting to scheduling, and represents
/// flagged together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
- // Cluster loads from "near" addresses into combined SUnits.
- ClusterNeighboringLoads();
+ // Cluster certain nodes which should be scheduled together.
+ ClusterNodes();
// Populate the SUnits array.
BuildSchedUnits();
// Compute all the scheduling dependencies between nodes.
@@ -427,15 +458,18 @@ void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
return;
unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
- if (Def->isMachineOpcode() && Use->isMachineOpcode()) {
+ if (Def->isMachineOpcode()) {
const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
if (DefIdx >= II.getNumDefs())
return;
int DefCycle = InstrItins.getOperandCycle(II.getSchedClass(), DefIdx);
if (DefCycle < 0)
return;
- const unsigned UseClass = TII->get(Use->getMachineOpcode()).getSchedClass();
- int UseCycle = InstrItins.getOperandCycle(UseClass, OpIdx);
+ int UseCycle = 1;
+ if (Use->isMachineOpcode()) {
+ const unsigned UseClass = TII->get(Use->getMachineOpcode()).getSchedClass();
+ UseCycle = InstrItins.getOperandCycle(UseClass, OpIdx);
+ }
if (UseCycle >= 0) {
int Latency = DefCycle - UseCycle + 1;
if (Latency >= 0)
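
A worked instance of the latency adjustment above, with toy cycle numbers (the real values come from the instruction itineraries):

#include <cstdio>

int main() {
  int DefCycle = 3; // stage in which the def produces its result
  int UseCycle = 1; // the new default when the user is not a machine op
  int Latency = DefCycle - UseCycle + 1;
  if (Latency >= 0)
    std::printf("edge latency = %d\n", Latency); // 3
}
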
@@ -473,7 +507,7 @@ namespace {
}
// ProcessSourceNode - Process nodes with source order numbers. These are added
-// to a vector which EmitSchedule use to determine how to insert dbg_value
+// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
InstrEmitter &Emitter,
@@ -485,13 +519,13 @@ static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
return;
MachineBasicBlock *BB = Emitter.getBlock();
- if (BB->empty() || BB->back().isPHI()) {
+ if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI()) {
// Did not insert any instruction.
Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
return;
}
- Orders.push_back(std::make_pair(Order, &BB->back()));
+ Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
if (!N->getHasDebugValue())
return;
// Opportunistically insert immediate dbg_value uses, i.e. those with source
@@ -530,7 +564,7 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
for (; PDI != PDE; ++PDI) {
MachineInstr *DbgMI= Emitter.EmitDbgValue(*PDI, VRBaseMap);
if (DbgMI)
- BB->insert(BB->end(), DbgMI);
+ BB->insert(InsertPos, DbgMI);
}
}
@@ -574,9 +608,7 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
// Insert all the dbg_values which have not already been inserted in source
// order sequence.
if (HasDbg) {
- MachineBasicBlock::iterator BBBegin = BB->empty() ? BB->end() : BB->begin();
- while (BBBegin != BB->end() && BBBegin->isPHI())
- ++BBBegin;
+ MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();
// Sort the source order instructions and use the order to insert debug
// values.
@@ -586,14 +618,12 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
// Now emit the rest according to source order.
unsigned LastOrder = 0;
- MachineInstr *LastMI = 0;
for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
unsigned Order = Orders[i].first;
MachineInstr *MI = Orders[i].second;
// Insert all SDDbgValue's whose order(s) are before "Order".
if (!MI)
continue;
- MachineBasicBlock *MIBB = MI->getParent();
#ifndef NDEBUG
unsigned LastDIOrder = 0;
#endif
@@ -612,13 +642,14 @@ MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
// Insert to start of the BB (after PHIs).
BB->insert(BBBegin, DbgMI);
else {
+ // Insert at the instruction, which may be in a different
+ // block, if the block was split by a custom inserter.
MachineBasicBlock::iterator Pos = MI;
- MIBB->insert(llvm::next(Pos), DbgMI);
+ MI->getParent()->insert(llvm::next(Pos), DbgMI);
}
}
}
LastOrder = Order;
- LastMI = MI;
}
// Add trailing DbgValue's before the terminator. FIXME: May want to add
// some of them before one or more conditional branches?
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
index e8714ba..842fc8c 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
@@ -108,7 +108,10 @@ namespace llvm {
private:
/// ClusterNeighboringLoads - Cluster loads from "near" addresses into
/// combined SUnits.
- void ClusterNeighboringLoads();
+ void ClusterNeighboringLoads(SDNode *Node);
+ /// ClusterNodes - Cluster certain nodes which should be scheduled together.
+ ///
+ void ClusterNodes();
/// BuildSchedUnits, AddSchedEdges - Helper functions for BuildSchedGraph.
void BuildSchedUnits();
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 38bf68b..e83a034 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -790,9 +790,8 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
}
// EntryNode could meaningfully have debug info if we can find it...
-SelectionDAG::SelectionDAG(const TargetMachine &tm, FunctionLoweringInfo &fli)
+SelectionDAG::SelectionDAG(const TargetMachine &tm)
: TM(tm), TLI(*tm.getTargetLowering()), TSI(*tm.getSelectionDAGInfo()),
- FLI(fli),
EntryNode(ISD::EntryToken, DebugLoc(), getVTList(MVT::Other)),
Root(getEntryNode()), Ordering(0) {
AllNodes.push_back(&EntryNode);
@@ -808,7 +807,6 @@ void SelectionDAG::init(MachineFunction &mf) {
SelectionDAG::~SelectionDAG() {
allnodes_clear();
delete Ordering;
- DbgInfo->clear();
delete DbgInfo;
}
@@ -835,11 +833,8 @@ void SelectionDAG::clear() {
EntryNode.UseList = 0;
AllNodes.push_back(&EntryNode);
Root = getEntryNode();
- delete Ordering;
- Ordering = new SDNodeOrdering();
+ Ordering->clear();
DbgInfo->clear();
- delete DbgInfo;
- DbgInfo = new SDDbgInfo();
}
SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
@@ -980,7 +975,7 @@ SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
}
}
-SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV,
+SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, DebugLoc DL,
EVT VT, int64_t Offset,
bool isTargetGA,
unsigned char TargetFlags) {
@@ -1015,7 +1010,7 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV,
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0);
- SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, GV, VT,
+ SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL, GV, VT,
Offset, TargetFlags);
CSEMap.InsertNode(N, IP);
AllNodes.push_back(N);
@@ -2291,7 +2286,6 @@ bool SelectionDAG::isVerifiedDebugInfoDesc(SDValue Op) const {
SDValue SelectionDAG::getShuffleScalarElt(const ShuffleVectorSDNode *N,
unsigned i) {
EVT VT = N->getValueType(0);
- DebugLoc dl = N->getDebugLoc();
if (N->getMaskElt(i) < 0)
return getUNDEF(VT.getVectorElementType());
unsigned Index = N->getMaskElt(i);
@@ -2475,9 +2469,18 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
VT.getVectorNumElements() ==
Operand.getValueType().getVectorNumElements()) &&
"Vector element count mismatch!");
- if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND)
+
+ if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
+ OpOpcode == ISD::ANY_EXTEND)
// (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
+
+ // (ext (trunc x)) -> x
+ if (OpOpcode == ISD::TRUNCATE) {
+ SDValue OpOp = Operand.getNode()->getOperand(0);
+ if (OpOp.getValueType() == VT)
+ return OpOp;
+ }
break;
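
A scalar demo of why the new (ext (trunc x)) -> x fold is sound, assuming this arm handles ANY_EXTEND (the hunk's context): any_extend leaves the high bits unspecified, so when x already has the destination type, reusing x is one legal choice of those bits, and the low bits match by construction.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t X = 0x12345678u;
  uint16_t Trunc = (uint16_t)X; // trunc i32 -> i16
  uint32_t AnyExt = X;          // the fold: take x itself as the extension
  std::printf("low bits agree: %d\n", (uint16_t)AnyExt == Trunc); // 1
}
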
case ISD::TRUNCATE:
assert(VT.isInteger() && Operand.getValueType().isInteger() &&
@@ -2622,7 +2625,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
if (N1.getOpcode() == ISD::BUILD_VECTOR &&
N2.getOpcode() == ISD::BUILD_VECTOR) {
SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), N1.getNode()->op_end());
- Elts.insert(Elts.end(), N2.getNode()->op_begin(), N2.getNode()->op_end());
+ Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
}
break;
@@ -3011,7 +3014,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
SDValue N1, SDValue N2, SDValue N3) {
// Perform various simplifications.
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
- ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
switch (Opcode) {
case ISD::CONCAT_VECTORS:
// A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
@@ -3020,8 +3022,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
N2.getOpcode() == ISD::BUILD_VECTOR &&
N3.getOpcode() == ISD::BUILD_VECTOR) {
SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), N1.getNode()->op_end());
- Elts.insert(Elts.end(), N2.getNode()->op_begin(), N2.getNode()->op_end());
- Elts.insert(Elts.end(), N3.getNode()->op_begin(), N3.getNode()->op_end());
+ Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
+ Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
}
break;
@@ -3041,14 +3043,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
if (N2 == N3) return N2; // select C, X, X -> X
break;
- case ISD::BRCOND:
- if (N2C) {
- if (N2C->getZExtValue()) // Unconditional branch
- return getNode(ISD::BR, DL, MVT::Other, N1, N3);
- else
- return N1; // Never-taken branch
- }
- break;
case ISD::VECTOR_SHUFFLE:
llvm_unreachable("should use getVectorShuffle constructor!");
break;
@@ -3267,6 +3261,15 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
if (VT.bitsGT(LVT))
VT = LVT;
}
+
+ // If we're optimizing for size, and there is a limit, bump the maximum number
+ // of operations inserted down to 4. This is a wild guess that approximates
+ // the size of a call to memcpy or memset (3 arguments + call).
+ if (Limit != ~0U) {
+ const Function *F = DAG.getMachineFunction().getFunction();
+ if (F->hasFnAttr(Attribute::OptimizeForSize))
+ Limit = 4;
+ }
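
The back-of-envelope behind the constant 4, with toy numbers rather than a real cost model: a library call costs roughly three argument moves plus the call itself, so inlining more than about four stores is a size regression.

#include <cstdio>

int main() {
  unsigned Limit = 16;    // e.g. the target's MaxStoresPerMemcpy
  bool OptForSize = true; // function carries OptimizeForSize
  if (Limit != ~0u && OptForSize)
    Limit = 4;            // roughly "3 arguments + call"
  std::printf("inline at most %u stores\n", Limit); // 4
}
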
unsigned NumMemOps = 0;
while (Size != 0) {
@@ -3321,9 +3324,8 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
std::string Str;
bool CopyFromStr = isMemSrcFromString(Src, Str);
bool isZeroStr = CopyFromStr && Str.empty();
- uint64_t Limit = -1ULL;
- if (!AlwaysInline)
- Limit = TLI.getMaxStoresPerMemcpy();
+ unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy();
+
if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
(DstAlignCanChange ? 0 : Align),
(isZeroStr ? 0 : SrcAlign),
@@ -3368,7 +3370,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
// FIXME does the case above also need this?
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
assert(NVT.bitsGE(VT));
- Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
+ Value = DAG.getExtLoad(ISD::EXTLOAD, NVT, dl, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
SrcSV, SrcSVOff + SrcOff, VT, isVol, false,
MinAlign(SrcAlign, SrcOff));
@@ -3401,9 +3403,6 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
// below a certain threshold.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
std::vector<EVT> MemOps;
- uint64_t Limit = -1ULL;
- if (!AlwaysInline)
- Limit = TLI.getMaxStoresPerMemmove();
bool DstAlignCanChange = false;
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
@@ -3412,6 +3411,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
unsigned SrcAlign = DAG.InferPtrAlignment(Src);
if (Align > SrcAlign)
SrcAlign = Align;
+ unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove();
if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
(DstAlignCanChange ? 0 : Align),
@@ -3895,8 +3895,8 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
}
SDValue
-SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
- ISD::LoadExtType ExtType, EVT VT, SDValue Chain,
+SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ EVT VT, DebugLoc dl, SDValue Chain,
SDValue Ptr, SDValue Offset,
const Value *SV, int SVOffset, EVT MemVT,
bool isVolatile, bool isNonTemporal,
@@ -3919,12 +3919,12 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
MachineMemOperand *MMO =
MF.getMachineMemOperand(SV, Flags, SVOffset,
MemVT.getStoreSize(), Alignment);
- return getLoad(AM, dl, ExtType, VT, Chain, Ptr, Offset, MemVT, MMO);
+ return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}
SDValue
-SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
- ISD::LoadExtType ExtType, EVT VT, SDValue Chain,
+SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ EVT VT, DebugLoc dl, SDValue Chain,
SDValue Ptr, SDValue Offset, EVT MemVT,
MachineMemOperand *MMO) {
if (VT == MemVT) {
@@ -3974,18 +3974,18 @@ SDValue SelectionDAG::getLoad(EVT VT, DebugLoc dl,
bool isVolatile, bool isNonTemporal,
unsigned Alignment) {
SDValue Undef = getUNDEF(Ptr.getValueType());
- return getLoad(ISD::UNINDEXED, dl, ISD::NON_EXTLOAD, VT, Chain, Ptr, Undef,
+ return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
SV, SVOffset, VT, isVolatile, isNonTemporal, Alignment);
}
-SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
+SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr,
const Value *SV,
int SVOffset, EVT MemVT,
bool isVolatile, bool isNonTemporal,
unsigned Alignment) {
SDValue Undef = getUNDEF(Ptr.getValueType());
- return getLoad(ISD::UNINDEXED, dl, ExtType, VT, Chain, Ptr, Undef,
+ return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
SV, SVOffset, MemVT, isVolatile, isNonTemporal, Alignment);
}
@@ -3995,7 +3995,7 @@ SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
"Load is already a indexed load!");
- return getLoad(AM, dl, LD->getExtensionType(), OrigLoad.getValueType(),
+ return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
LD->getChain(), Base, Offset, LD->getSrcValue(),
LD->getSrcValueOffset(), LD->getMemoryVT(),
LD->isVolatile(), LD->isNonTemporal(), LD->getAlignment());
@@ -4141,9 +4141,10 @@ SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
SDValue SelectionDAG::getVAArg(EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr,
- SDValue SV) {
- SDValue Ops[] = { Chain, Ptr, SV };
- return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 3);
+ SDValue SV,
+ unsigned Align) {
+ SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
+ return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
}
SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
@@ -4425,17 +4426,16 @@ SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
-SDValue SelectionDAG::UpdateNodeOperands(SDValue InN, SDValue Op) {
- SDNode *N = InN.getNode();
+SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
// Check to see if there is no change.
- if (Op == N->getOperand(0)) return InN;
+ if (Op == N->getOperand(0)) return N;
// See if the modified node already exists.
void *InsertPos = 0;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
- return SDValue(Existing, InN.getResNo());
+ return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
@@ -4447,22 +4447,20 @@ SDValue SelectionDAG::UpdateNodeOperands(SDValue InN, SDValue Op) {
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
- return InN;
+ return N;
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue InN, SDValue Op1, SDValue Op2) {
- SDNode *N = InN.getNode();
+SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
// Check to see if there is no change.
if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
- return InN; // No operands changed, just return the input node.
+ return N; // No operands changed, just return the input node.
// See if the modified node already exists.
void *InsertPos = 0;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
- return SDValue(Existing, InN.getResNo());
+ return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
@@ -4477,32 +4475,31 @@ UpdateNodeOperands(SDValue InN, SDValue Op1, SDValue Op2) {
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
- return InN;
+ return N;
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2, SDValue Op3) {
+SDNode *SelectionDAG::
+UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
SDValue Ops[] = { Op1, Op2, Op3 };
return UpdateNodeOperands(N, Ops, 3);
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+SDNode *SelectionDAG::
+UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4) {
SDValue Ops[] = { Op1, Op2, Op3, Op4 };
return UpdateNodeOperands(N, Ops, 4);
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+SDNode *SelectionDAG::
+UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4, SDValue Op5) {
SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
return UpdateNodeOperands(N, Ops, 5);
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue InN, const SDValue *Ops, unsigned NumOps) {
- SDNode *N = InN.getNode();
+SDNode *SelectionDAG::
+UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
assert(N->getNumOperands() == NumOps &&
"Update with wrong number of operands");
@@ -4516,12 +4513,12 @@ UpdateNodeOperands(SDValue InN, const SDValue *Ops, unsigned NumOps) {
}
// No operands changed, just return the input node.
- if (!AnyChange) return InN;
+ if (!AnyChange) return N;
// See if the modified node already exists.
void *InsertPos = 0;
if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
- return SDValue(Existing, InN.getResNo());
+ return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
@@ -4535,7 +4532,7 @@ UpdateNodeOperands(SDValue InN, const SDValue *Ops, unsigned NumOps) {
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
- return InN;
+ return N;
}
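// [Editorial sketch, not part of the patch] Since every overload above now
// takes and returns SDNode*, callers that previously round-tripped through
// SDValue rebuild the value from the node and its old result number:
//
//   // before:
//   Res = DAG.UpdateNodeOperands(SDValue(N, 0), NewOp0, NewOp1);
//   // after (hypothetical caller):
//   Res = SDValue(DAG.UpdateNodeOperands(N, NewOp0, NewOp1), 0);
//
// The result number was never used for anything except reconstructing the
// returned SDValue, which is why the SDValue plumbing could be dropped.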
/// DropOperands - Release the operands and set this node to have
@@ -5378,9 +5375,10 @@ HandleSDNode::~HandleSDNode() {
DropOperands();
}
-GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, const GlobalValue *GA,
+GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, DebugLoc DL,
+ const GlobalValue *GA,
EVT VT, int64_t o, unsigned char TF)
- : SDNode(Opc, DebugLoc(), getSDVTList(VT)), Offset(o), TargetFlags(TF) {
+ : SDNode(Opc, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
TheGlobal = GA;
}
@@ -5669,13 +5667,16 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::FSQRT: return "fsqrt";
case ISD::FSIN: return "fsin";
case ISD::FCOS: return "fcos";
- case ISD::FPOWI: return "fpowi";
- case ISD::FPOW: return "fpow";
case ISD::FTRUNC: return "ftrunc";
case ISD::FFLOOR: return "ffloor";
case ISD::FCEIL: return "fceil";
case ISD::FRINT: return "frint";
case ISD::FNEARBYINT: return "fnearbyint";
+ case ISD::FEXP: return "fexp";
+ case ISD::FEXP2: return "fexp2";
+ case ISD::FLOG: return "flog";
+ case ISD::FLOG2: return "flog2";
+ case ISD::FLOG10: return "flog10";
// Binary operators
case ISD::ADD: return "add";
@@ -5706,7 +5707,9 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::FREM: return "frem";
case ISD::FCOPYSIGN: return "fcopysign";
case ISD::FGETSIGN: return "fgetsign";
+ case ISD::FPOW: return "fpow";
+ case ISD::FPOWI: return "fpowi";
case ISD::SETCC: return "setcc";
case ISD::VSETCC: return "vsetcc";
case ISD::SELECT: return "select";
@@ -6260,23 +6263,6 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
FrameOffset);
- if (MFI.isFixedObjectIndex(FrameIdx)) {
- int64_t ObjectOffset = MFI.getObjectOffset(FrameIdx) + FrameOffset;
-
- // The alignment of the frame index can be determined from its offset from
- // the incoming frame position. If the frame object is at offset 32 and
- // the stack is guaranteed to be 16-byte aligned, then we know that the
- // object is 16-byte aligned.
- unsigned StackAlign = getTarget().getFrameInfo()->getStackAlignment();
- unsigned Align = MinAlign(ObjectOffset, StackAlign);
-
- // Finally, the frame object itself may have a known alignment. Factor
- // the alignment + offset into a new alignment. For example, if we know
- // the FI is 8 byte aligned, but the pointer is 4 off, we really have a
- // 4-byte alignment of the resultant pointer. Likewise align 4 + 4-byte
- // offset = 4-byte alignment, align 4 + 1-byte offset = align 1, etc.
- return std::max(Align, FIInfoAlign);
- }
return FIInfoAlign;
}
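// [Editorial worked example] The deleted fixed-object path relied on
// MinAlign(A, B), which returns the largest power of two dividing both
// arguments (the lowest set bit of A | B):
//
//   MinAlign(32, 16) == 16   // offset 32 from a 16-byte aligned SP
//   MinAlign(36, 16) == 4    // offset 36 => only 4-byte alignment provable
//
// After this change InferPtrAlignment no longer reasons about incoming-stack
// offsets and returns only the frame object's recorded alignment.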
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index fbe601f..458e865 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -14,7 +14,6 @@
#define DEBUG_TYPE "isel"
#include "SDNodeDbgValue.h"
#include "SelectionDAGBuilder.h"
-#include "FunctionLoweringInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
@@ -32,6 +31,7 @@
#include "llvm/Module.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -70,113 +70,6 @@ LimitFPPrecision("limit-float-precision",
cl::location(LimitFloatPrecision),
cl::init(0));
-namespace {
- /// RegsForValue - This struct represents the registers (physical or virtual)
- /// that a particular set of values is assigned, and the type information
- /// about the value. The most common situation is to represent one value at a
- /// time, but struct or array values are handled element-wise as multiple
- /// values. The splitting of aggregates is performed recursively, so that we
- /// never have aggregate-typed registers. The values at this point do not
- /// necessarily have legal types, so each value may require one or more
- /// registers of some legal type.
- ///
- struct RegsForValue {
- /// TLI - The TargetLowering object.
- ///
- const TargetLowering *TLI;
-
- /// ValueVTs - The value types of the values, which may not be legal, and
- /// may need be promoted or synthesized from one or more registers.
- ///
- SmallVector<EVT, 4> ValueVTs;
-
- /// RegVTs - The value types of the registers. This is the same size as
- /// ValueVTs and it records, for each value, what the type of the assigned
- /// register or registers are. (Individual values are never synthesized
- /// from more than one type of register.)
- ///
- /// With virtual registers, the contents of RegVTs is redundant with TLI's
- /// getRegisterType member function, however when with physical registers
- /// it is necessary to have a separate record of the types.
- ///
- SmallVector<EVT, 4> RegVTs;
-
- /// Regs - This list holds the registers assigned to the values.
- /// Each legal or promoted value requires one register, and each
- /// expanded value requires multiple registers.
- ///
- SmallVector<unsigned, 4> Regs;
-
- RegsForValue() : TLI(0) {}
-
- RegsForValue(const TargetLowering &tli,
- const SmallVector<unsigned, 4> &regs,
- EVT regvt, EVT valuevt)
- : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
- RegsForValue(const TargetLowering &tli,
- const SmallVector<unsigned, 4> &regs,
- const SmallVector<EVT, 4> &regvts,
- const SmallVector<EVT, 4> &valuevts)
- : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
- RegsForValue(LLVMContext &Context, const TargetLowering &tli,
- unsigned Reg, const Type *Ty) : TLI(&tli) {
- ComputeValueVTs(tli, Ty, ValueVTs);
-
- for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
- EVT ValueVT = ValueVTs[Value];
- unsigned NumRegs = TLI->getNumRegisters(Context, ValueVT);
- EVT RegisterVT = TLI->getRegisterType(Context, ValueVT);
- for (unsigned i = 0; i != NumRegs; ++i)
- Regs.push_back(Reg + i);
- RegVTs.push_back(RegisterVT);
- Reg += NumRegs;
- }
- }
-
- /// areValueTypesLegal - Return true if types of all the values are legal.
- bool areValueTypesLegal() {
- for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
- EVT RegisterVT = RegVTs[Value];
- if (!TLI->isTypeLegal(RegisterVT))
- return false;
- }
- return true;
- }
-
-
- /// append - Add the specified values to this one.
- void append(const RegsForValue &RHS) {
- TLI = RHS.TLI;
- ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
- RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
- Regs.append(RHS.Regs.begin(), RHS.Regs.end());
- }
-
-
- /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
- /// this value and returns the result as a ValueVTs value. This uses
- /// Chain/Flag as the input and updates them for the output Chain/Flag.
- /// If the Flag pointer is NULL, no flag is used.
- SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
- SDValue &Chain, SDValue *Flag) const;
-
- /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
- /// specified value into the registers specified by this object. This uses
- /// Chain/Flag as the input and updates them for the output Chain/Flag.
- /// If the Flag pointer is NULL, no flag is used.
- void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
- SDValue &Chain, SDValue *Flag) const;
-
- /// AddInlineAsmOperands - Add this value to the specified inlineasm node
- /// operand list. This adds the code marker, matching input operand index
- /// (if applicable), and includes the number of values added into it.
- void AddInlineAsmOperands(unsigned Kind,
- bool HasMatching, unsigned MatchingIdx,
- SelectionDAG &DAG,
- std::vector<SDValue> &Ops) const;
- };
-}
-
/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT, then AssertOp can be used to specify whether the extra
@@ -528,6 +421,268 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl,
}
}
+namespace {
+ /// RegsForValue - This struct represents the registers (physical or virtual)
+ /// that a particular set of values is assigned, and the type information
+ /// about the value. The most common situation is to represent one value at a
+ /// time, but struct or array values are handled element-wise as multiple
+ /// values. The splitting of aggregates is performed recursively, so that we
+ /// never have aggregate-typed registers. The values at this point do not
+ /// necessarily have legal types, so each value may require one or more
+ /// registers of some legal type.
+ ///
+ struct RegsForValue {
+ /// ValueVTs - The value types of the values, which may not be legal, and
+ /// may need to be promoted or synthesized from one or more registers.
+ ///
+ SmallVector<EVT, 4> ValueVTs;
+
+ /// RegVTs - The value types of the registers. This is the same size as
+ /// ValueVTs and it records, for each value, what the type of the assigned
+ /// register or registers are. (Individual values are never synthesized
+ /// from more than one type of register.)
+ ///
+ /// With virtual registers, the contents of RegVTs are redundant with TLI's
+ /// getRegisterType member function; with physical registers, however,
+ /// it is necessary to have a separate record of the types.
+ ///
+ SmallVector<EVT, 4> RegVTs;
+
+ /// Regs - This list holds the registers assigned to the values.
+ /// Each legal or promoted value requires one register, and each
+ /// expanded value requires multiple registers.
+ ///
+ SmallVector<unsigned, 4> Regs;
+
+ RegsForValue() {}
+
+ RegsForValue(const SmallVector<unsigned, 4> &regs,
+ EVT regvt, EVT valuevt)
+ : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
+
+ RegsForValue(const SmallVector<unsigned, 4> &regs,
+ const SmallVector<EVT, 4> &regvts,
+ const SmallVector<EVT, 4> &valuevts)
+ : ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
+
+ RegsForValue(LLVMContext &Context, const TargetLowering &tli,
+ unsigned Reg, const Type *Ty) {
+ ComputeValueVTs(tli, Ty, ValueVTs);
+
+ for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
+ EVT ValueVT = ValueVTs[Value];
+ unsigned NumRegs = tli.getNumRegisters(Context, ValueVT);
+ EVT RegisterVT = tli.getRegisterType(Context, ValueVT);
+ for (unsigned i = 0; i != NumRegs; ++i)
+ Regs.push_back(Reg + i);
+ RegVTs.push_back(RegisterVT);
+ Reg += NumRegs;
+ }
+ }
+
+ /// areValueTypesLegal - Return true if types of all the values are legal.
+ bool areValueTypesLegal(const TargetLowering &TLI) {
+ for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
+ EVT RegisterVT = RegVTs[Value];
+ if (!TLI.isTypeLegal(RegisterVT))
+ return false;
+ }
+ return true;
+ }
+
+ /// append - Add the specified values to this one.
+ void append(const RegsForValue &RHS) {
+ ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
+ RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
+ Regs.append(RHS.Regs.begin(), RHS.Regs.end());
+ }
+
+ /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
+ /// this value and returns the result as a ValueVTs value. This uses
+ /// Chain/Flag as the input and updates them for the output Chain/Flag.
+ /// If the Flag pointer is NULL, no flag is used.
+ SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
+ DebugLoc dl,
+ SDValue &Chain, SDValue *Flag) const;
+
+ /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
+ /// specified value into the registers specified by this object. This uses
+ /// Chain/Flag as the input and updates them for the output Chain/Flag.
+ /// If the Flag pointer is NULL, no flag is used.
+ void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
+ SDValue &Chain, SDValue *Flag) const;
+
+ /// AddInlineAsmOperands - Add this value to the specified inlineasm node
+ /// operand list. This adds the code marker, matching input operand index
+ /// (if applicable), and includes the number of values added into it.
+ void AddInlineAsmOperands(unsigned Kind,
+ bool HasMatching, unsigned MatchingIdx,
+ SelectionDAG &DAG,
+ std::vector<SDValue> &Ops) const;
+ };
+}
+
+/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
+/// this value and returns the result as a ValueVT value. This uses
+/// Chain/Flag as the input and updates them for the output Chain/Flag.
+/// If the Flag pointer is NULL, no flag is used.
+SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
+ FunctionLoweringInfo &FuncInfo,
+ DebugLoc dl,
+ SDValue &Chain, SDValue *Flag) const {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+ // Assemble the legal parts into the final values.
+ SmallVector<SDValue, 4> Values(ValueVTs.size());
+ SmallVector<SDValue, 8> Parts;
+ for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
+ // Copy the legal parts from the registers.
+ EVT ValueVT = ValueVTs[Value];
+ unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
+ EVT RegisterVT = RegVTs[Value];
+
+ Parts.resize(NumRegs);
+ for (unsigned i = 0; i != NumRegs; ++i) {
+ SDValue P;
+ if (Flag == 0) {
+ P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
+ } else {
+ P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
+ *Flag = P.getValue(2);
+ }
+
+ Chain = P.getValue(1);
+
+ // If the source register was virtual and if we know something about it,
+ // add an assert node.
+ if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
+ RegisterVT.isInteger() && !RegisterVT.isVector()) {
+ unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
+ if (FuncInfo.LiveOutRegInfo.size() > SlotNo) {
+ const FunctionLoweringInfo::LiveOutInfo &LOI =
+ FuncInfo.LiveOutRegInfo[SlotNo];
+
+ unsigned RegSize = RegisterVT.getSizeInBits();
+ unsigned NumSignBits = LOI.NumSignBits;
+ unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
+
+ // FIXME: We capture more information than the dag can represent. For
+ // now, just use the tightest assertzext/assertsext possible.
+ bool isSExt = true;
+ EVT FromVT(MVT::Other);
+ if (NumSignBits == RegSize)
+ isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
+ else if (NumZeroBits >= RegSize-1)
+ isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
+ else if (NumSignBits > RegSize-8)
+ isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
+ else if (NumZeroBits >= RegSize-8)
+ isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
+ else if (NumSignBits > RegSize-16)
+ isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
+ else if (NumZeroBits >= RegSize-16)
+ isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
+ else if (NumSignBits > RegSize-32)
+ isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
+ else if (NumZeroBits >= RegSize-32)
+ isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
+
+ if (FromVT != MVT::Other)
+ P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
+ RegisterVT, P, DAG.getValueType(FromVT));
+ }
+ }
+
+ Parts[i] = P;
+ }
+
+ Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
+ NumRegs, RegisterVT, ValueVT);
+ Part += NumRegs;
+ Parts.clear();
+ }
+
+ return DAG.getNode(ISD::MERGE_VALUES, dl,
+ DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
+ &Values[0], ValueVTs.size());
+}
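// [Editorial worked example] The AssertSext/AssertZext ladder above picks the
// tightest assertion the live-out info supports; for RegSize == 32:
//
//   NumSignBits == 32                 -> AssertSext i1  (pure sign value)
//   NumZeroBits == 24  (>= 32 - 8)    -> AssertZext i8  (top 24 bits zero)
//
// assuming no earlier rung matched first. Later DAG combines use the
// assertion node to delete redundant sign/zero extensions of this copy.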
+
+/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
+/// specified value into the registers specified by this object. This uses
+/// Chain/Flag as the input and updates them for the output Chain/Flag.
+/// If the Flag pointer is NULL, no flag is used.
+void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
+ SDValue &Chain, SDValue *Flag) const {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+ // Get the list of the values' legal parts.
+ unsigned NumRegs = Regs.size();
+ SmallVector<SDValue, 8> Parts(NumRegs);
+ for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
+ EVT ValueVT = ValueVTs[Value];
+ unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
+ EVT RegisterVT = RegVTs[Value];
+
+ getCopyToParts(DAG, dl,
+ Val.getValue(Val.getResNo() + Value),
+ &Parts[Part], NumParts, RegisterVT);
+ Part += NumParts;
+ }
+
+ // Copy the parts into the registers.
+ SmallVector<SDValue, 8> Chains(NumRegs);
+ for (unsigned i = 0; i != NumRegs; ++i) {
+ SDValue Part;
+ if (Flag == 0) {
+ Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
+ } else {
+ Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
+ *Flag = Part.getValue(1);
+ }
+
+ Chains[i] = Part.getValue(0);
+ }
+
+ if (NumRegs == 1 || Flag)
+ // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
+ // flagged to it. That is, the CopyToReg nodes and the user are considered
+ // a single scheduling unit. If we create a TokenFactor and return it as
+ // chain, then the TokenFactor is both a predecessor (operand) of the
+ // user as well as a successor (the TF operands are flagged to the user).
+ // c1, f1 = CopyToReg
+ // c2, f2 = CopyToReg
+ // c3 = TokenFactor c1, c2
+ // ...
+ // = op c3, ..., f2
+ Chain = Chains[NumRegs-1];
+ else
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
+}
+
+/// AddInlineAsmOperands - Add this value to the specified inlineasm node
+/// operand list. This adds the code marker and includes the number of
+/// values added into it.
+void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
+ unsigned MatchingIdx,
+ SelectionDAG &DAG,
+ std::vector<SDValue> &Ops) const {
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+ unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
+ if (HasMatching)
+ Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
+ SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
+ Ops.push_back(Res);
+
+ for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
+ unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
+ EVT RegisterVT = RegVTs[Value];
+ for (unsigned i = 0; i != NumRegs; ++i) {
+ assert(Reg < Regs.size() && "Mismatch in # registers expected");
+ Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
+ }
+ }
+}
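// [Editorial note, hedged] The flag word built above packs the operand class
// and the register count into a single immediate; in this era of the API the
// encoding is roughly
//
//   Flag = Code | (Regs.size() << 3);   // class in the low 3 bits
//
// with the matched-operand index folded into higher bits by
// getFlagWordForMatchingOp. One RegisterSDNode operand then follows per
// register, so the scheduler and emitter can walk the inline-asm operand list.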
void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
AA = &aa;
@@ -543,6 +698,7 @@ void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
/// consumed.
void SelectionDAGBuilder::clear() {
NodeMap.clear();
+ UnusedArgNodeMap.clear();
PendingLoads.clear();
PendingExports.clear();
CurDebugLoc = DebugLoc();
@@ -649,27 +805,63 @@ void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
}
}
+// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
+ // If we already have an SDValue for this value, use it. It's important
+ // to do this first, so that we don't create a CopyFromReg if we already
+ // have a regular SDValue.
SDValue &N = NodeMap[V];
if (N.getNode()) return N;
+ // If there's a virtual register allocated and initialized for this
+ // value, use it.
+ DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
+ if (It != FuncInfo.ValueMap.end()) {
+ unsigned InReg = It->second;
+ RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
+ SDValue Chain = DAG.getEntryNode();
+ return N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain,NULL);
+ }
+
+ // Otherwise create a new SDValue and remember it.
+ SDValue Val = getValueImpl(V);
+ NodeMap[V] = Val;
+ return Val;
+}
+
+/// getNonRegisterValue - Return an SDValue for the given Value, but
+/// don't look in FuncInfo.ValueMap for a virtual register.
+SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
+ // If we already have an SDValue for this value, use it.
+ SDValue &N = NodeMap[V];
+ if (N.getNode()) return N;
+
+ // Otherwise create a new SDValue and remember it.
+ SDValue Val = getValueImpl(V);
+ NodeMap[V] = Val;
+ return Val;
+}
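// [Editorial sketch] getValue() prefers a CopyFromReg when FuncInfo has
// already assigned the value a virtual register; getNonRegisterValue() exists
// for a caller that is *defining* that register and must not read it back,
// schematically (hypothetical caller):
//
//   unsigned Reg = FuncInfo.ValueMap[V];           // register being defined
//   SDValue Src = getNonRegisterValue(V);          // avoid CopyFromReg(Reg)
//   Chain = DAG.getCopyToReg(Chain, dl, Reg, Src);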
+
+/// getValueImpl - Helper function for getValue and getNonRegisterValue.
+/// Create an SDValue for the given value.
+SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
if (const Constant *C = dyn_cast<Constant>(V)) {
EVT VT = TLI.getValueType(V->getType(), true);
if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
- return N = DAG.getConstant(*CI, VT);
+ return DAG.getConstant(*CI, VT);
if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
- return N = DAG.getGlobalAddress(GV, VT);
+ return DAG.getGlobalAddress(GV, getCurDebugLoc(), VT);
if (isa<ConstantPointerNull>(C))
- return N = DAG.getConstant(0, TLI.getPointerTy());
+ return DAG.getConstant(0, TLI.getPointerTy());
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
- return N = DAG.getConstantFP(*CFP, VT);
+ return DAG.getConstantFP(*CFP, VT);
if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
- return N = DAG.getUNDEF(VT);
+ return DAG.getUNDEF(VT);
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
visit(CE->getOpcode(), *CE);
@@ -757,82 +949,25 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) {
return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
}
- unsigned InReg = FuncInfo.ValueMap[V];
- assert(InReg && "Value not in map!");
-
- RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
- SDValue Chain = DAG.getEntryNode();
- return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
-}
-
-/// Get the EVTs and ArgFlags collections that represent the legalized return
-/// type of the given function. This does not require a DAG or a return value,
-/// and is suitable for use before any DAGs for the function are constructed.
-static void getReturnInfo(const Type* ReturnType,
- Attributes attr, SmallVectorImpl<EVT> &OutVTs,
- SmallVectorImpl<ISD::ArgFlagsTy> &OutFlags,
- const TargetLowering &TLI,
- SmallVectorImpl<uint64_t> *Offsets = 0) {
- SmallVector<EVT, 4> ValueVTs;
- ComputeValueVTs(TLI, ReturnType, ValueVTs);
- unsigned NumValues = ValueVTs.size();
- if (NumValues == 0) return;
- unsigned Offset = 0;
-
- for (unsigned j = 0, f = NumValues; j != f; ++j) {
- EVT VT = ValueVTs[j];
- ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
-
- if (attr & Attribute::SExt)
- ExtendKind = ISD::SIGN_EXTEND;
- else if (attr & Attribute::ZExt)
- ExtendKind = ISD::ZERO_EXTEND;
-
- // FIXME: C calling convention requires the return type to be promoted to
- // at least 32-bit. But this is not necessary for non-C calling
- // conventions. The frontend should mark functions whose return values
- // require promoting with signext or zeroext attributes.
- if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
- EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
- if (VT.bitsLT(MinVT))
- VT = MinVT;
- }
-
- unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
- EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
- unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
- PartVT.getTypeForEVT(ReturnType->getContext()));
-
- // 'inreg' on function refers to return value
- ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
- if (attr & Attribute::InReg)
- Flags.setInReg();
-
- // Propagate extension type if any
- if (attr & Attribute::SExt)
- Flags.setSExt();
- else if (attr & Attribute::ZExt)
- Flags.setZExt();
-
- for (unsigned i = 0; i < NumParts; ++i) {
- OutVTs.push_back(PartVT);
- OutFlags.push_back(Flags);
- if (Offsets)
- {
- Offsets->push_back(Offset);
- Offset += PartSize;
- }
- }
+ // If this is an instruction which fast-isel has deferred, select it now.
+ if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
+ unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
+ RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
+ SDValue Chain = DAG.getEntryNode();
+ return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL);
}
+
+ llvm_unreachable("Can't get register for value!");
+ return SDValue();
}
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
SDValue Chain = getControlRoot();
SmallVector<ISD::OutputArg, 8> Outs;
- FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
+ SmallVector<SDValue, 8> OutVals;
- if (!FLI.CanLowerReturn) {
- unsigned DemoteReg = FLI.DemoteRegister;
+ if (!FuncInfo.CanLowerReturn) {
+ unsigned DemoteReg = FuncInfo.DemoteRegister;
const Function *F = I.getParent()->getParent();
// Emit a store of the return value through the virtual register.
@@ -908,8 +1043,11 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
else if (F->paramHasAttr(0, Attribute::ZExt))
Flags.setZExt();
- for (unsigned i = 0; i < NumParts; ++i)
- Outs.push_back(ISD::OutputArg(Flags, Parts[i], /*isfixed=*/true));
+ for (unsigned i = 0; i < NumParts; ++i) {
+ Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
+ /*isfixed=*/true));
+ OutVals.push_back(Parts[i]);
+ }
}
}
}
@@ -918,7 +1056,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
CallingConv::ID CallConv =
DAG.getMachineFunction().getFunction()->getCallingConv();
Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
- Outs, getCurDebugLoc(), DAG);
+ Outs, OutVals, getCurDebugLoc(), DAG);
// Verify that the target's LowerReturn behaved as expected.
assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
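// [Editorial sketch, not part of the patch] LowerReturn now receives the
// static description of each piece (ISD::OutputArg: flags + value type) and
// the SDValues themselves (OutVals) as parallel arrays; a target's
// implementation indexes them together, e.g. (paraphrased):
//
//   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
//     Chain = DAG.getCopyToReg(Chain, dl, RVLocs[i].getLocReg(),
//                              OutVals[i], Flag);
//     Flag = Chain.getValue(1);   // glue successive copies together
//   }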
@@ -1119,7 +1257,7 @@ SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
}
void SelectionDAGBuilder::visitBr(const BranchInst &I) {
- MachineBasicBlock *BrMBB = FuncInfo.MBBMap[I.getParent()];
+ MachineBasicBlock *BrMBB = FuncInfo.MBB;
// Update machine-CFG edges.
MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
@@ -1269,18 +1407,10 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
MVT::Other, getControlRoot(), Cond,
DAG.getBasicBlock(CB.TrueBB));
- // If the branch was constant folded, fix up the CFG.
- if (BrCond.getOpcode() == ISD::BR) {
- SwitchBB->removeSuccessor(CB.FalseBB);
- } else {
- // Otherwise, go ahead and insert the false branch.
- if (BrCond == getControlRoot())
- SwitchBB->removeSuccessor(CB.TrueBB);
-
- if (CB.FalseBB != NextBlock)
- BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
- DAG.getBasicBlock(CB.FalseBB));
- }
+ // Insert the false branch.
+ if (CB.FalseBB != NextBlock)
+ BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
+ DAG.getBasicBlock(CB.FalseBB));
DAG.setRoot(BrCond);
}
@@ -1319,7 +1449,7 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
// therefore require extension or truncating.
SwitchOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), TLI.getPointerTy());
- unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
+ unsigned JumpTableReg = FuncInfo.CreateReg(TLI.getPointerTy());
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
JumpTableReg, SwitchOp);
JT.Reg = JumpTableReg;
@@ -1370,7 +1500,7 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
SDValue ShiftOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(),
TLI.getPointerTy());
- B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
+ B.Reg = FuncInfo.CreateReg(TLI.getPointerTy());
SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
B.Reg, ShiftOp);
@@ -1402,29 +1532,41 @@ void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
unsigned Reg,
BitTestCase &B,
MachineBasicBlock *SwitchBB) {
- // Make desired shift
SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
TLI.getPointerTy());
- SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
- TLI.getPointerTy(),
- DAG.getConstant(1, TLI.getPointerTy()),
- ShiftOp);
-
- // Emit bit tests and jumps
- SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
- TLI.getPointerTy(), SwitchVal,
- DAG.getConstant(B.Mask, TLI.getPointerTy()));
- SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
- TLI.getSetCCResultType(AndOp.getValueType()),
- AndOp, DAG.getConstant(0, TLI.getPointerTy()),
- ISD::SETNE);
+ SDValue Cmp;
+ if (CountPopulation_64(B.Mask) == 1) {
+ // Testing for a single bit; just compare the shift count with what it
+ // would need to be to shift a 1 bit in that position.
+ Cmp = DAG.getSetCC(getCurDebugLoc(),
+ TLI.getSetCCResultType(ShiftOp.getValueType()),
+ ShiftOp,
+ DAG.getConstant(CountTrailingZeros_64(B.Mask),
+ TLI.getPointerTy()),
+ ISD::SETEQ);
+ } else {
+ // Make desired shift
+ SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
+ TLI.getPointerTy(),
+ DAG.getConstant(1, TLI.getPointerTy()),
+ ShiftOp);
+
+ // Emit bit tests and jumps
+ SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
+ TLI.getPointerTy(), SwitchVal,
+ DAG.getConstant(B.Mask, TLI.getPointerTy()));
+ Cmp = DAG.getSetCC(getCurDebugLoc(),
+ TLI.getSetCCResultType(AndOp.getValueType()),
+ AndOp, DAG.getConstant(0, TLI.getPointerTy()),
+ ISD::SETNE);
+ }
SwitchBB->addSuccessor(B.TargetBB);
SwitchBB->addSuccessor(NextMBB);
SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
MVT::Other, getControlRoot(),
- AndCmp, DAG.getBasicBlock(B.TargetBB));
+ Cmp, DAG.getBasicBlock(B.TargetBB));
// Set NextBlock to be the MBB immediately after the current one, if any.
// This is used to avoid emitting unnecessary branches to the next block.
@@ -1441,7 +1583,7 @@ void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
}
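// [Editorial worked example] For a one-bit mask the new path compares the
// shift amount directly instead of materializing the mask:
//
//   B.Mask == 0x20  (CountPopulation_64 == 1, CountTrailingZeros_64 == 5)
//     old:  brcond (((1 << x) & 0x20) != 0)
//     new:  brcond (x == 5)
//
// Masks with several bits set (e.g. 0x2C) still take the shift-and-AND path.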
void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
- MachineBasicBlock *InvokeMBB = FuncInfo.MBBMap[I.getParent()];
+ MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
// Retrieve successors.
MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
@@ -1969,7 +2111,7 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
}
void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
- MachineBasicBlock *SwitchMBB = FuncInfo.MBBMap[SI.getParent()];
+ MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
// Figure out which block is immediately after the current one.
MachineBasicBlock *NextBlock = 0;
@@ -2035,7 +2177,7 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
}
void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
- MachineBasicBlock *IndirectBrMBB = FuncInfo.MBBMap[I.getParent()];
+ MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
// Update machine-CFG edges with unique successors.
SmallVector<BasicBlock*, 32> succs;
@@ -2245,7 +2387,6 @@ void SelectionDAGBuilder::visitPtrToInt(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
- EVT SrcVT = N.getValueType();
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
}
@@ -2254,7 +2395,6 @@ void SelectionDAGBuilder::visitIntToPtr(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
- EVT SrcVT = N.getValueType();
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
}
@@ -2579,7 +2719,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
// If this is a constant subscript, handle it quickly.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
- if (CI->getZExtValue() == 0) continue;
+ if (CI->isZero()) continue;
uint64_t Offs =
TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
SDValue OffsVal;
@@ -2643,12 +2783,13 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
SDValue AllocSize = getValue(I.getArraySize());
- AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
- AllocSize,
- DAG.getConstant(TySize, AllocSize.getValueType()));
-
EVT IntPtr = TLI.getPointerTy();
- AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
+ if (AllocSize.getValueType() != IntPtr)
+ AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
+
+ AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), IntPtr,
+ AllocSize,
+ DAG.getConstant(TySize, IntPtr));
// Handle alignment. If the requested alignment is less than or equal to
// the stack alignment, ignore it. If the size is greater than or equal to
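// [Editorial note] The reordering above is a correctness fix: the element
// count is first widened or truncated to the pointer width and only then
// multiplied, so the multiply happens in IntPtr. Previously a narrow count
// was multiplied in its own type and could wrap before the extension:
//
//   old:  (i8)C * TySize, then zext to i64   // wraps once the product >= 256
//   new:  zext C to i64, then (i64)C * TySize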
@@ -2804,8 +2945,8 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
// Add all operands of the call to the operand list.
- for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
- SDValue Op = getValue(I.getOperand(i));
+ for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
+ SDValue Op = getValue(I.getArgOperand(i));
assert(TLI.isTypeLegal(Op.getValueType()) &&
"Intrinsic uses a non-legal type?");
Ops.push_back(Op);
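// [Editorial note] This and the following hunks migrate call lowering to the
// CallInst argument accessors. In the operand layout being retired the
// callee occupied a slot before the arguments, hence the uniform shift:
//
//   I.getOperand(N)     becomes  I.getArgOperand(N - 1)
//   I.getNumOperands()  equals   I.getNumArgOperands() + 1
//
// The accessors keep this code correct even when the underlying operand
// order of CallInst changes.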
@@ -2910,11 +3051,11 @@ SelectionDAGBuilder::implVisitBinaryAtomic(const CallInst& I,
SDValue Root = getRoot();
SDValue L =
DAG.getAtomic(Op, getCurDebugLoc(),
- getValue(I.getOperand(2)).getValueType().getSimpleVT(),
+ getValue(I.getArgOperand(1)).getValueType().getSimpleVT(),
Root,
- getValue(I.getOperand(1)),
- getValue(I.getOperand(2)),
- I.getOperand(1));
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)),
+ I.getArgOperand(0));
setValue(&I, L);
DAG.setRoot(L.getValue(1));
return 0;
@@ -2923,8 +3064,8 @@ SelectionDAGBuilder::implVisitBinaryAtomic(const CallInst& I,
// implVisitAluOverflow - Lower arithmetic overflow intrinsics.
const char *
SelectionDAGBuilder::implVisitAluOverflow(const CallInst &I, ISD::NodeType Op) {
- SDValue Op1 = getValue(I.getOperand(1));
- SDValue Op2 = getValue(I.getOperand(2));
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
setValue(&I, DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2));
@@ -2938,9 +3079,9 @@ SelectionDAGBuilder::visitExp(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
- if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+ if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
- SDValue Op = getValue(I.getOperand(1));
+ SDValue Op = getValue(I.getArgOperand(0));
// Put the exponent in the right bit position for later addition to the
// final result:
@@ -3050,8 +3191,8 @@ SelectionDAGBuilder::visitExp(const CallInst &I) {
} else {
// No special expansion.
result = DAG.getNode(ISD::FEXP, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1)));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)));
}
setValue(&I, result);
@@ -3064,9 +3205,9 @@ SelectionDAGBuilder::visitLog(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
- if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+ if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
- SDValue Op = getValue(I.getOperand(1));
+ SDValue Op = getValue(I.getArgOperand(0));
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
// Scale the exponent by log(2) [0.69314718f].
@@ -3160,8 +3301,8 @@ SelectionDAGBuilder::visitLog(const CallInst &I) {
} else {
// No special expansion.
result = DAG.getNode(ISD::FLOG, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1)));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)));
}
setValue(&I, result);
@@ -3174,9 +3315,9 @@ SelectionDAGBuilder::visitLog2(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
- if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+ if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
- SDValue Op = getValue(I.getOperand(1));
+ SDValue Op = getValue(I.getArgOperand(0));
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
// Get the exponent.
@@ -3269,8 +3410,8 @@ SelectionDAGBuilder::visitLog2(const CallInst &I) {
} else {
// No special expansion.
result = DAG.getNode(ISD::FLOG2, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1)));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)));
}
setValue(&I, result);
@@ -3283,9 +3424,9 @@ SelectionDAGBuilder::visitLog10(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
- if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+ if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
- SDValue Op = getValue(I.getOperand(1));
+ SDValue Op = getValue(I.getArgOperand(0));
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
// Scale the exponent by log10(2) [0.30102999f].
@@ -3371,8 +3512,8 @@ SelectionDAGBuilder::visitLog10(const CallInst &I) {
} else {
// No special expansion.
result = DAG.getNode(ISD::FLOG10, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1)));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)));
}
setValue(&I, result);
@@ -3385,9 +3526,9 @@ SelectionDAGBuilder::visitExp2(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
- if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+ if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
- SDValue Op = getValue(I.getOperand(1));
+ SDValue Op = getValue(I.getArgOperand(0));
SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
@@ -3485,8 +3626,8 @@ SelectionDAGBuilder::visitExp2(const CallInst &I) {
} else {
// No special expansion.
result = DAG.getNode(ISD::FEXP2, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1)));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)));
}
setValue(&I, result);
@@ -3497,12 +3638,12 @@ SelectionDAGBuilder::visitExp2(const CallInst &I) {
void
SelectionDAGBuilder::visitPow(const CallInst &I) {
SDValue result;
- const Value *Val = I.getOperand(1);
+ const Value *Val = I.getArgOperand(0);
DebugLoc dl = getCurDebugLoc();
bool IsExp10 = false;
if (getValue(Val).getValueType() == MVT::f32 &&
- getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
+ getValue(I.getArgOperand(1)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
@@ -3513,7 +3654,7 @@ SelectionDAGBuilder::visitPow(const CallInst &I) {
}
if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
- SDValue Op = getValue(I.getOperand(2));
+ SDValue Op = getValue(I.getArgOperand(1));
// Put the exponent in the right bit position for later addition to the
// final result:
@@ -3618,9 +3759,9 @@ SelectionDAGBuilder::visitPow(const CallInst &I) {
} else {
// No special expansion.
result = DAG.getNode(ISD::FPOW, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1)),
- getValue(I.getOperand(2)));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)));
}
setValue(&I, result);
@@ -3696,7 +3837,7 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const DbgValueInst &DI,
if (DV.isInlinedFnArgument(MF.getFunction()))
return false;
- MachineBasicBlock *MBB = FuncInfo.MBBMap[DI.getParent()];
+ MachineBasicBlock *MBB = FuncInfo.MBB;
if (MBB != &MF.front())
return false;
@@ -3750,11 +3891,11 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::vacopy: visitVACopy(I); return 0;
case Intrinsic::returnaddress:
setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::frameaddress:
setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::setjmp:
return "_setjmp"+!TLI.usesUnderscoreSetJmp();
@@ -3763,63 +3904,64 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::memcpy: {
// Assert for address < 256 since we support only user defined address
// spaces.
- assert(cast<PointerType>(I.getOperand(1)->getType())->getAddressSpace()
+ assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
< 256 &&
- cast<PointerType>(I.getOperand(2)->getType())->getAddressSpace()
+ cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
< 256 &&
"Unknown address space");
- SDValue Op1 = getValue(I.getOperand(1));
- SDValue Op2 = getValue(I.getOperand(2));
- SDValue Op3 = getValue(I.getOperand(3));
- unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
- bool isVol = cast<ConstantInt>(I.getOperand(5))->getZExtValue();
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ SDValue Op3 = getValue(I.getArgOperand(2));
+ unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+ bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol, false,
- I.getOperand(1), 0, I.getOperand(2), 0));
+ I.getArgOperand(0), 0, I.getArgOperand(1), 0));
return 0;
}
case Intrinsic::memset: {
// Assert for address < 256 since we support only user defined address
// spaces.
- assert(cast<PointerType>(I.getOperand(1)->getType())->getAddressSpace()
+ assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
< 256 &&
"Unknown address space");
- SDValue Op1 = getValue(I.getOperand(1));
- SDValue Op2 = getValue(I.getOperand(2));
- SDValue Op3 = getValue(I.getOperand(3));
- unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
- bool isVol = cast<ConstantInt>(I.getOperand(5))->getZExtValue();
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ SDValue Op3 = getValue(I.getArgOperand(2));
+ unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+ bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
- I.getOperand(1), 0));
+ I.getArgOperand(0), 0));
return 0;
}
case Intrinsic::memmove: {
// Assert for address < 256 since we support only user defined address
// spaces.
- assert(cast<PointerType>(I.getOperand(1)->getType())->getAddressSpace()
+ assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
< 256 &&
- cast<PointerType>(I.getOperand(2)->getType())->getAddressSpace()
+ cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
< 256 &&
"Unknown address space");
- SDValue Op1 = getValue(I.getOperand(1));
- SDValue Op2 = getValue(I.getOperand(2));
- SDValue Op3 = getValue(I.getOperand(3));
- unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
- bool isVol = cast<ConstantInt>(I.getOperand(5))->getZExtValue();
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ SDValue Op3 = getValue(I.getArgOperand(2));
+ unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+ bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
// If the source and destination are known to not be aliases, we can
// lower memmove as memcpy.
uint64_t Size = -1ULL;
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
Size = C->getZExtValue();
- if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
+ if (AA->alias(I.getArgOperand(0), Size, I.getArgOperand(1), Size) ==
AliasAnalysis::NoAlias) {
DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
- false, I.getOperand(1), 0, I.getOperand(2), 0));
+ false, I.getArgOperand(0), 0,
+ I.getArgOperand(1), 0));
return 0;
}
DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
- I.getOperand(1), 0, I.getOperand(2), 0));
+ I.getArgOperand(0), 0, I.getArgOperand(1), 0));
return 0;
}
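// [Editorial note] When alias analysis proves the two pointers disjoint for
// the whole length, memmove is lowered as memcpy, which targets may expand
// into straight-line loads and stores. If the length is not a constant,
// Size stays -1ULL and the AA query is made with an unknown-size extent, so
// only a conservative NoAlias answer (e.g. distinct allocas) triggers the
// memcpy path.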
case Intrinsic::dbg_declare: {
@@ -3908,7 +4050,10 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
} else {
bool createUndef = false;
// FIXME: Why not use getValue() directly?
- SDValue &N = NodeMap[V];
+ SDValue N = NodeMap[V];
+ if (!N.getNode() && isa<Argument>(V))
+ // Check unused arguments map.
+ N = UnusedArgNodeMap[V];
if (N.getNode()) {
if (!EmitFuncArgumentDbgValue(DI, V, Variable, Offset, N)) {
SDV = DAG.getDbgValue(Variable, N.getNode(),
@@ -3956,7 +4101,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::eh_exception: {
// Insert the EXCEPTIONADDR instruction.
- assert(FuncInfo.MBBMap[I.getParent()]->isLandingPad() &&
+ assert(FuncInfo.MBB->isLandingPad() &&
"Call to eh.exception not in landing pad!");
SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
SDValue Ops[1];
@@ -3968,7 +4113,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::eh_selector: {
- MachineBasicBlock *CallMBB = FuncInfo.MBBMap[I.getParent()];
+ MachineBasicBlock *CallMBB = FuncInfo.MBB;
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
if (CallMBB->isLandingPad())
AddCatchInfo(I, &MMI, CallMBB);
@@ -3978,13 +4123,13 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
#endif
// FIXME: Mark exception selector register as live in. Hack for PR1508.
unsigned Reg = TLI.getExceptionSelectorRegister();
- if (Reg) FuncInfo.MBBMap[I.getParent()]->addLiveIn(Reg);
+ if (Reg) FuncInfo.MBB->addLiveIn(Reg);
}
// Insert the EHSELECTION instruction.
SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
SDValue Ops[2];
- Ops[0] = getValue(I.getOperand(1));
+ Ops[0] = getValue(I.getArgOperand(0));
Ops[1] = getRoot();
SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
DAG.setRoot(Op.getValue(1));
@@ -3994,7 +4139,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::eh_typeid_for: {
// Find the type id for the given typeinfo.
- GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
+ GlobalVariable *GV = ExtractTypeInfo(I.getArgOperand(0));
unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV);
Res = DAG.getConstant(TypeID, MVT::i32);
setValue(&I, Res);
@@ -4007,15 +4152,14 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
MVT::Other,
getControlRoot(),
- getValue(I.getOperand(1)),
- getValue(I.getOperand(2))));
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1))));
return 0;
case Intrinsic::eh_unwind_init:
DAG.getMachineFunction().getMMI().setCallsUnwindInit(true);
return 0;
case Intrinsic::eh_dwarf_cfa: {
- EVT VT = getValue(I.getOperand(1)).getValueType();
- SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), dl,
+ SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), dl,
TLI.getPointerTy());
SDValue Offset = DAG.getNode(ISD::ADD, dl,
TLI.getPointerTy(),
@@ -4031,7 +4175,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::eh_sjlj_callsite: {
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
- ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1));
+ ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
@@ -4040,13 +4184,13 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::eh_sjlj_setjmp: {
setValue(&I, DAG.getNode(ISD::EH_SJLJ_SETJMP, dl, MVT::i32, getRoot(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0))));
return 0;
}
case Intrinsic::eh_sjlj_longjmp: {
DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, dl, MVT::Other,
getRoot(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0))));
return 0;
}
@@ -4072,34 +4216,34 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
}
EVT DestVT = TLI.getValueType(I.getType());
- const Value *Op1 = I.getOperand(1);
+ const Value *Op1 = I.getArgOperand(0);
Res = DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
DAG.getValueType(DestVT),
DAG.getValueType(getValue(Op1).getValueType()),
- getValue(I.getOperand(2)),
- getValue(I.getOperand(3)),
+ getValue(I.getArgOperand(1)),
+ getValue(I.getArgOperand(2)),
Code);
setValue(&I, Res);
return 0;
}
case Intrinsic::sqrt:
setValue(&I, DAG.getNode(ISD::FSQRT, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::powi:
- setValue(&I, ExpandPowI(dl, getValue(I.getOperand(1)),
- getValue(I.getOperand(2)), DAG));
+ setValue(&I, ExpandPowI(dl, getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)), DAG));
return 0;
case Intrinsic::sin:
setValue(&I, DAG.getNode(ISD::FSIN, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::cos:
setValue(&I, DAG.getNode(ISD::FCOS, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::log:
visitLog(I);
@@ -4121,14 +4265,14 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return 0;
case Intrinsic::convert_to_fp16:
setValue(&I, DAG.getNode(ISD::FP32_TO_FP16, dl,
- MVT::i16, getValue(I.getOperand(1))));
+ MVT::i16, getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::convert_from_fp16:
setValue(&I, DAG.getNode(ISD::FP16_TO_FP32, dl,
- MVT::f32, getValue(I.getOperand(1))));
+ MVT::f32, getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::pcmarker: {
- SDValue Tmp = getValue(I.getOperand(1));
+ SDValue Tmp = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
return 0;
}
@@ -4143,23 +4287,23 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::bswap:
setValue(&I, DAG.getNode(ISD::BSWAP, dl,
- getValue(I.getOperand(1)).getValueType(),
- getValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0)).getValueType(),
+ getValue(I.getArgOperand(0))));
return 0;
case Intrinsic::cttz: {
- SDValue Arg = getValue(I.getOperand(1));
+ SDValue Arg = getValue(I.getArgOperand(0));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(ISD::CTTZ, dl, Ty, Arg));
return 0;
}
case Intrinsic::ctlz: {
- SDValue Arg = getValue(I.getOperand(1));
+ SDValue Arg = getValue(I.getArgOperand(0));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(ISD::CTLZ, dl, Ty, Arg));
return 0;
}
case Intrinsic::ctpop: {
- SDValue Arg = getValue(I.getOperand(1));
+ SDValue Arg = getValue(I.getArgOperand(0));
EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(ISD::CTPOP, dl, Ty, Arg));
return 0;
@@ -4173,7 +4317,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return 0;
}
case Intrinsic::stackrestore: {
- Res = getValue(I.getOperand(1));
+ Res = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Res));
return 0;
}
@@ -4183,8 +4327,8 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
MachineFrameInfo *MFI = MF.getFrameInfo();
EVT PtrTy = TLI.getPointerTy();
- SDValue Src = getValue(I.getOperand(1)); // The guard's value.
- AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
+ SDValue Src = getValue(I.getArgOperand(0)); // The guard's value.
+ AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
int FI = FuncInfo.StaticAllocaMap[Slot];
MFI->setStackProtectorIndex(FI);
@@ -4201,14 +4345,14 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::objectsize: {
// If we don't know by now, we're never going to know.
- ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(2));
+ ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
assert(CI && "Non-constant type in __builtin_object_size?");
- SDValue Arg = getValue(I.getOperand(0));
+ SDValue Arg = getValue(I.getCalledValue());
EVT Ty = Arg.getValueType();
- if (CI->getZExtValue() == 0)
+ if (CI->isZero())
Res = DAG.getConstant(-1ULL, Ty);
else
Res = DAG.getConstant(0, Ty);
@@ -4221,14 +4365,14 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
return 0;
case Intrinsic::init_trampoline: {
- const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
+ const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
SDValue Ops[6];
Ops[0] = getRoot();
- Ops[1] = getValue(I.getOperand(1));
- Ops[2] = getValue(I.getOperand(2));
- Ops[3] = getValue(I.getOperand(3));
- Ops[4] = DAG.getSrcValue(I.getOperand(1));
+ Ops[1] = getValue(I.getArgOperand(0));
+ Ops[2] = getValue(I.getArgOperand(1));
+ Ops[3] = getValue(I.getArgOperand(2));
+ Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
Ops[5] = DAG.getSrcValue(F);
Res = DAG.getNode(ISD::TRAMPOLINE, dl,
@@ -4241,8 +4385,8 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::gcroot:
if (GFI) {
- const Value *Alloca = I.getOperand(1);
- const Constant *TypeMap = cast<Constant>(I.getOperand(2));
+ const Value *Alloca = I.getArgOperand(0);
+ const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
GFI->addStackRoot(FI->getIndex(), TypeMap);
@@ -4274,9 +4418,9 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::prefetch: {
SDValue Ops[4];
Ops[0] = getRoot();
- Ops[1] = getValue(I.getOperand(1));
- Ops[2] = getValue(I.getOperand(2));
- Ops[3] = getValue(I.getOperand(3));
+ Ops[1] = getValue(I.getArgOperand(0));
+ Ops[2] = getValue(I.getArgOperand(1));
+ Ops[3] = getValue(I.getArgOperand(2));
DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
return 0;
}
@@ -4285,7 +4429,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDValue Ops[6];
Ops[0] = getRoot();
for (int x = 1; x < 6; ++x)
- Ops[x] = getValue(I.getOperand(x));
+ Ops[x] = getValue(I.getArgOperand(x - 1));
DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
return 0;
@@ -4294,12 +4438,12 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDValue Root = getRoot();
SDValue L =
DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
- getValue(I.getOperand(2)).getValueType().getSimpleVT(),
+ getValue(I.getArgOperand(1)).getValueType().getSimpleVT(),
Root,
- getValue(I.getOperand(1)),
- getValue(I.getOperand(2)),
- getValue(I.getOperand(3)),
- I.getOperand(1));
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)),
+ getValue(I.getArgOperand(2)),
+ I.getArgOperand(0));
setValue(&I, L);
DAG.setRoot(L.getValue(1));
return 0;
@@ -4353,14 +4497,13 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
Args.reserve(CS.arg_size());
// Check whether the function can return without sret-demotion.
- SmallVector<EVT, 4> OutVTs;
- SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
+ SmallVector<ISD::OutputArg, 4> Outs;
SmallVector<uint64_t, 4> Offsets;
- getReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
- OutVTs, OutsFlags, TLI, &Offsets);
+ GetReturnInfo(RetTy, CS.getAttributes().getRetAttributes(),
+ Outs, TLI, &Offsets);
bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
- FTy->isVarArg(), OutVTs, OutsFlags, DAG);
+ FTy->isVarArg(), Outs, FTy->getContext());
SDValue DemoteStackSlot;
@@ -4453,7 +4596,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
ComputeValueVTs(TLI, PtrRetTy, PVTs);
assert(PVTs.size() == 1 && "Pointers should fit in one register");
EVT PtrVT = PVTs[0];
- unsigned NumValues = OutVTs.size();
+ unsigned NumValues = Outs.size();
SmallVector<SDValue, 4> Values(NumValues);
SmallVector<SDValue, 4> Chains(NumValues);
@@ -4461,7 +4604,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT,
DemoteStackSlot,
DAG.getConstant(Offsets[i], PtrVT));
- SDValue L = DAG.getLoad(OutVTs[i], getCurDebugLoc(), Result.second,
+ SDValue L = DAG.getLoad(Outs[i].VT, getCurDebugLoc(), Result.second,
Add, NULL, Offsets[i], false, false, 1);
Values[i] = L;
Chains[i] = L.getValue(1);
@@ -4580,16 +4723,16 @@ static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
/// lowered like a normal call.
bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
// Verify that the prototype makes sense. int memcmp(void*,void*,size_t)
- if (I.getNumOperands() != 4)
+ if (I.getNumArgOperands() != 3)
return false;
- const Value *LHS = I.getOperand(1), *RHS = I.getOperand(2);
+ const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() ||
- !I.getOperand(3)->getType()->isIntegerTy() ||
+ !I.getArgOperand(2)->getType()->isIntegerTy() ||
!I.getType()->isIntegerTy())
return false;
- const ConstantInt *Size = dyn_cast<ConstantInt>(I.getOperand(3));
+ const ConstantInt *Size = dyn_cast<ConstantInt>(I.getArgOperand(2));
// memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
// memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
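As the comments above indicate with their "!= 0" forms, this rewrite only preserves whether the result is zero, so it is sound only for memcmp calls used as zero/nonzero tests. At the C level, the size-4 case amounts to the following (illustrative sketch only; the real lowering must also ensure the target tolerates the unaligned integer loads):

    #include <stdint.h>
    #include <string.h>

    // Illustrative equivalent of the size-4 rewrite: a memcmp feeding a
    // zero/nonzero test reduces to one integer comparison. memcpy models
    // a possibly-unaligned 4-byte load.
    int memcmp4_nonzero(const void *LHS, const void *RHS) {
      uint32_t L, R;
      memcpy(&L, LHS, 4);
      memcpy(&R, RHS, 4);
      return L != R;
    }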
@@ -4656,11 +4799,16 @@ bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
void SelectionDAGBuilder::visitCall(const CallInst &I) {
+ // Handle inline assembly differently.
+ if (isa<InlineAsm>(I.getCalledValue())) {
+ visitInlineAsm(&I);
+ return;
+ }
+
const char *RenameFn = 0;
if (Function *F = I.getCalledFunction()) {
if (F->isDeclaration()) {
- const TargetIntrinsicInfo *II = TM.getIntrinsicInfo();
- if (II) {
+ if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
if (unsigned IID = II->getIntrinsicID(F)) {
RenameFn = visitIntrinsicCall(I, IID);
if (!RenameFn)
@@ -4679,51 +4827,51 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
if (!F->hasLocalLinkage() && F->hasName()) {
StringRef Name = F->getName();
if (Name == "copysign" || Name == "copysignf" || Name == "copysignl") {
- if (I.getNumOperands() == 3 && // Basic sanity checks.
- I.getOperand(1)->getType()->isFloatingPointTy() &&
- I.getType() == I.getOperand(1)->getType() &&
- I.getType() == I.getOperand(2)->getType()) {
- SDValue LHS = getValue(I.getOperand(1));
- SDValue RHS = getValue(I.getOperand(2));
+ if (I.getNumArgOperands() == 2 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType() &&
+ I.getType() == I.getArgOperand(1)->getType()) {
+ SDValue LHS = getValue(I.getArgOperand(0));
+ SDValue RHS = getValue(I.getArgOperand(1));
setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
LHS.getValueType(), LHS, RHS));
return;
}
} else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
- if (I.getNumOperands() == 2 && // Basic sanity checks.
- I.getOperand(1)->getType()->isFloatingPointTy() &&
- I.getType() == I.getOperand(1)->getType()) {
- SDValue Tmp = getValue(I.getOperand(1));
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType()) {
+ SDValue Tmp = getValue(I.getArgOperand(0));
setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
}
} else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
- if (I.getNumOperands() == 2 && // Basic sanity checks.
- I.getOperand(1)->getType()->isFloatingPointTy() &&
- I.getType() == I.getOperand(1)->getType() &&
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType() &&
I.onlyReadsMemory()) {
- SDValue Tmp = getValue(I.getOperand(1));
+ SDValue Tmp = getValue(I.getArgOperand(0));
setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
}
} else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
- if (I.getNumOperands() == 2 && // Basic sanity checks.
- I.getOperand(1)->getType()->isFloatingPointTy() &&
- I.getType() == I.getOperand(1)->getType() &&
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType() &&
I.onlyReadsMemory()) {
- SDValue Tmp = getValue(I.getOperand(1));
+ SDValue Tmp = getValue(I.getArgOperand(0));
setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
}
} else if (Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl") {
- if (I.getNumOperands() == 2 && // Basic sanity checks.
- I.getOperand(1)->getType()->isFloatingPointTy() &&
- I.getType() == I.getOperand(1)->getType() &&
+ if (I.getNumArgOperands() == 1 && // Basic sanity checks.
+ I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+ I.getType() == I.getArgOperand(0)->getType() &&
I.onlyReadsMemory()) {
- SDValue Tmp = getValue(I.getOperand(1));
+ SDValue Tmp = getValue(I.getArgOperand(0));
setValue(&I, DAG.getNode(ISD::FSQRT, getCurDebugLoc(),
Tmp.getValueType(), Tmp));
return;
@@ -4733,14 +4881,11 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
return;
}
}
- } else if (isa<InlineAsm>(I.getOperand(0))) {
- visitInlineAsm(&I);
- return;
}
-
+
SDValue Callee;
if (!RenameFn)
- Callee = getValue(I.getOperand(0));
+ Callee = getValue(I.getCalledValue());
else
Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
@@ -4749,210 +4894,8 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
LowerCallTo(&I, Callee, I.isTailCall());
}
-/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
-/// this value and returns the result as a ValueVT value. This uses
-/// Chain/Flag as the input and updates them for the output Chain/Flag.
-/// If the Flag pointer is NULL, no flag is used.
-SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
- SDValue &Chain, SDValue *Flag) const {
- // Assemble the legal parts into the final values.
- SmallVector<SDValue, 4> Values(ValueVTs.size());
- SmallVector<SDValue, 8> Parts;
- for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
- // Copy the legal parts from the registers.
- EVT ValueVT = ValueVTs[Value];
- unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
- EVT RegisterVT = RegVTs[Value];
-
- Parts.resize(NumRegs);
- for (unsigned i = 0; i != NumRegs; ++i) {
- SDValue P;
- if (Flag == 0) {
- P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
- } else {
- P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
- *Flag = P.getValue(2);
- }
-
- Chain = P.getValue(1);
-
- // If the source register was virtual and if we know something about it,
- // add an assert node.
- if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
- RegisterVT.isInteger() && !RegisterVT.isVector()) {
- unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
- FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
- if (FLI.LiveOutRegInfo.size() > SlotNo) {
- FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
-
- unsigned RegSize = RegisterVT.getSizeInBits();
- unsigned NumSignBits = LOI.NumSignBits;
- unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
-
- // FIXME: We capture more information than the dag can represent. For
- // now, just use the tightest assertzext/assertsext possible.
- bool isSExt = true;
- EVT FromVT(MVT::Other);
- if (NumSignBits == RegSize)
- isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
- else if (NumZeroBits >= RegSize-1)
- isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
- else if (NumSignBits > RegSize-8)
- isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
- else if (NumZeroBits >= RegSize-8)
- isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
- else if (NumSignBits > RegSize-16)
- isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
- else if (NumZeroBits >= RegSize-16)
- isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
- else if (NumSignBits > RegSize-32)
- isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
- else if (NumZeroBits >= RegSize-32)
- isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
-
- if (FromVT != MVT::Other)
- P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
- RegisterVT, P, DAG.getValueType(FromVT));
- }
- }
-
- Parts[i] = P;
- }
-
- Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
- NumRegs, RegisterVT, ValueVT);
- Part += NumRegs;
- Parts.clear();
- }
-
- return DAG.getNode(ISD::MERGE_VALUES, dl,
- DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
- &Values[0], ValueVTs.size());
-}
-
-/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
-/// specified value into the registers specified by this object. This uses
-/// Chain/Flag as the input and updates them for the output Chain/Flag.
-/// If the Flag pointer is NULL, no flag is used.
-void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
- SDValue &Chain, SDValue *Flag) const {
- // Get the list of the values's legal parts.
- unsigned NumRegs = Regs.size();
- SmallVector<SDValue, 8> Parts(NumRegs);
- for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
- EVT ValueVT = ValueVTs[Value];
- unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
- EVT RegisterVT = RegVTs[Value];
-
- getCopyToParts(DAG, dl,
- Val.getValue(Val.getResNo() + Value),
- &Parts[Part], NumParts, RegisterVT);
- Part += NumParts;
- }
-
- // Copy the parts into the registers.
- SmallVector<SDValue, 8> Chains(NumRegs);
- for (unsigned i = 0; i != NumRegs; ++i) {
- SDValue Part;
- if (Flag == 0) {
- Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
- } else {
- Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
- *Flag = Part.getValue(1);
- }
-
- Chains[i] = Part.getValue(0);
- }
-
- if (NumRegs == 1 || Flag)
- // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
- // flagged to it. That is the CopyToReg nodes and the user are considered
- // a single scheduling unit. If we create a TokenFactor and return it as
- // chain, then the TokenFactor is both a predecessor (operand) of the
- // user as well as a successor (the TF operands are flagged to the user).
- // c1, f1 = CopyToReg
- // c2, f2 = CopyToReg
- // c3 = TokenFactor c1, c2
- // ...
- // = op c3, ..., f2
- Chain = Chains[NumRegs-1];
- else
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
-}
-
-/// AddInlineAsmOperands - Add this value to the specified inlineasm node
-/// operand list. This adds the code marker and includes the number of
-/// values added into it.
-void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
- unsigned MatchingIdx,
- SelectionDAG &DAG,
- std::vector<SDValue> &Ops) const {
- unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
- if (HasMatching)
- Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
- SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
- Ops.push_back(Res);
-
- for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
- unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
- EVT RegisterVT = RegVTs[Value];
- for (unsigned i = 0; i != NumRegs; ++i) {
- assert(Reg < Regs.size() && "Mismatch in # registers expected");
- Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
- }
- }
-}
-
-/// isAllocatableRegister - If the specified register is safe to allocate,
-/// i.e. it isn't a stack pointer or some other special register, return the
-/// register class for the register. Otherwise, return null.
-static const TargetRegisterClass *
-isAllocatableRegister(unsigned Reg, MachineFunction &MF,
- const TargetLowering &TLI,
- const TargetRegisterInfo *TRI) {
- EVT FoundVT = MVT::Other;
- const TargetRegisterClass *FoundRC = 0;
- for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
- E = TRI->regclass_end(); RCI != E; ++RCI) {
- EVT ThisVT = MVT::Other;
-
- const TargetRegisterClass *RC = *RCI;
- // If none of the value types for this register class are valid, we
- // can't use it. For example, 64-bit reg classes on 32-bit targets.
- for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
- I != E; ++I) {
- if (TLI.isTypeLegal(*I)) {
- // If we have already found this register in a different register class,
- // choose the one with the largest VT specified. For example, on
- // PowerPC, we favor f64 register classes over f32.
- if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
- ThisVT = *I;
- break;
- }
- }
- }
-
- if (ThisVT == MVT::Other) continue;
-
- // NOTE: This isn't ideal. In particular, this might allocate the
- // frame pointer in functions that need it (due to them not being taken
- // out of allocation, because a variable sized allocation hasn't been seen
- // yet). This is a slight code pessimization, but should still work.
- for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
- E = RC->allocation_order_end(MF); I != E; ++I)
- if (*I == Reg) {
- // We found a matching register class. Keep looking at others in case
- // we find one with larger registers that this physreg is also in.
- FoundRC = RC;
- FoundVT = ThisVT;
- break;
- }
- }
- return FoundRC;
-}
-
-
namespace llvm {
+
/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
class LLVM_LIBRARY_VISIBILITY SDISelAsmOperandInfo :
@@ -5041,8 +4984,56 @@ private:
Regs.insert(*Aliases);
}
};
+
} // end llvm namespace.
+/// isAllocatableRegister - If the specified register is safe to allocate,
+/// i.e. it isn't a stack pointer or some other special register, return the
+/// register class for the register. Otherwise, return null.
+static const TargetRegisterClass *
+isAllocatableRegister(unsigned Reg, MachineFunction &MF,
+ const TargetLowering &TLI,
+ const TargetRegisterInfo *TRI) {
+ EVT FoundVT = MVT::Other;
+ const TargetRegisterClass *FoundRC = 0;
+ for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
+ E = TRI->regclass_end(); RCI != E; ++RCI) {
+ EVT ThisVT = MVT::Other;
+
+ const TargetRegisterClass *RC = *RCI;
+ // If none of the value types for this register class are valid, we
+ // can't use it. For example, 64-bit reg classes on 32-bit targets.
+ for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
+ I != E; ++I) {
+ if (TLI.isTypeLegal(*I)) {
+ // If we have already found this register in a different register class,
+ // choose the one with the largest VT specified. For example, on
+ // PowerPC, we favor f64 register classes over f32.
+ if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
+ ThisVT = *I;
+ break;
+ }
+ }
+ }
+
+ if (ThisVT == MVT::Other) continue;
+
+ // NOTE: This isn't ideal. In particular, this might allocate the
+ // frame pointer in functions that need it (they haven't yet been taken
+ // out of the allocation order, because no variable-sized allocation
+ // has been seen). This is a slight code pessimization, but should still work.
+ for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
+ E = RC->allocation_order_end(MF); I != E; ++I)
+ if (*I == Reg) {
+ // We found a matching register class. Keep looking at others in case
+ // we find one with larger registers that this physreg is also in.
+ FoundRC = RC;
+ FoundVT = ThisVT;
+ break;
+ }
+ }
+ return FoundRC;
+}
/// GetRegistersForValue - Assign registers (virtual or physical) for the
/// specified operand. We prefer to assign virtual registers, to allow the
@@ -5154,7 +5145,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
}
}
- OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
+ OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
return;
@@ -5172,7 +5163,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
for (; NumRegs; --NumRegs)
Regs.push_back(RegInfo.createVirtualRegister(RC));
- OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
+ OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
return;
}
@@ -5215,7 +5206,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
for (unsigned i = RegStart; i != RegEnd; ++i)
Regs.push_back(RegClassRegs[i]);
- OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
+ OpInfo.AssignedRegs = RegsForValue(Regs, *RC->vt_begin(),
OpInfo.ConstraintVT);
OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
return;
@@ -5332,7 +5323,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
}
// Compute the constraint code and ConstraintType to use.
- TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
+ TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
// If this is a memory input, and if the operand is not indirect, do what we
// need to provide an address for the memory input.
@@ -5406,6 +5397,10 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
+ // Remember the AlignStack bit as operand 3.
+ AsmNodeOperands.push_back(DAG.getTargetConstant(IA->isAlignStack() ? 1 : 0,
+ MVT::i1));
+
// Loop over all of the inputs, copying the operand values into the
// appropriate registers and processing the output regs.
RegsForValue RetValRegs;
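With the AlignStack bit added, an ISD::INLINEASM node carries four fixed operands ahead of the per-operand flag words. The layout below restates, as a sketch, the slots this patch relies on (the same InlineAsm::Op_* names appear in the SelectInlineAsmMemoryOperands hunk near the end of this file's diff):

    // Fixed operand layout of an ISD::INLINEASM node after this patch,
    // matching the order the operands are pushed above.
    enum AsmNodeOperandLayout {
      Op_InputChain   = 0,  // incoming chain
      Op_AsmString    = 1,  // the asm template string
      Op_MDNode       = 2,  // !srcloc metadata, for diagnostics
      Op_IsAlignStack = 3,  // i1 constant: does the asm need the stack?
      Op_FirstOperand = 4   // first per-operand flag word
    };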
@@ -5497,7 +5492,6 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
}
RegsForValue MatchedRegs;
- MatchedRegs.TLI = &TLI;
MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
EVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
MatchedRegs.RegVTs.push_back(RegVT);
@@ -5529,13 +5523,15 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
break;
}
- if (OpInfo.ConstraintType == TargetLowering::C_Other) {
- assert(!OpInfo.isIndirect &&
- "Don't know how to handle indirect other inputs yet!");
+ // Treat indirect 'X' constraint as memory.
+ if (OpInfo.ConstraintType == TargetLowering::C_Other &&
+ OpInfo.isIndirect)
+ OpInfo.ConstraintType = TargetLowering::C_Memory;
+ if (OpInfo.ConstraintType == TargetLowering::C_Other) {
std::vector<SDValue> Ops;
TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
- hasMemory, Ops, DAG);
+ Ops, DAG);
if (Ops.empty())
report_fatal_error("Invalid operand for inline asm constraint '" +
Twine(OpInfo.ConstraintCode) + "'!");
@@ -5570,7 +5566,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// Copy the input into the appropriate registers.
if (OpInfo.AssignedRegs.Regs.empty() ||
- !OpInfo.AssignedRegs.areValueTypesLegal())
+ !OpInfo.AssignedRegs.areValueTypesLegal(TLI))
report_fatal_error("Couldn't allocate input reg for constraint '" +
Twine(OpInfo.ConstraintCode) + "'!");
@@ -5595,7 +5591,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
}
// Finish up input operands. Set the input chain and add the flag last.
- AsmNodeOperands[0] = Chain;
+ AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
@@ -5606,7 +5602,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// If this asm returns a register value, copy the result from that register
// and set it as the value of the call.
if (!RetValRegs.Regs.empty()) {
- SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
+ SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(),
Chain, &Flag);
// FIXME: Why don't we do this for inline asms with MRVs?
@@ -5646,7 +5642,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
const Value *Ptr = IndirectStoresToEmit[i].second;
- SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
+ SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(),
Chain, &Flag);
StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
}
@@ -5672,14 +5668,16 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
MVT::Other, getRoot(),
- getValue(I.getOperand(1)),
- DAG.getSrcValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0)),
+ DAG.getSrcValue(I.getArgOperand(0))));
}
void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
+ const TargetData &TD = *TLI.getTargetData();
SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
getRoot(), getValue(I.getOperand(0)),
- DAG.getSrcValue(I.getOperand(0)));
+ DAG.getSrcValue(I.getOperand(0)),
+ TD.getABITypeAlignment(I.getType()));
setValue(&I, V);
DAG.setRoot(V.getValue(1));
}
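visitVAArg now forwards the ABI alignment of the result type on the VAARG node, so a target that advances a va_list cursor has enough information to realign it for over-aligned types. The usual adjustment, as a standalone sketch rather than any particular target's code:

    #include <stdint.h>

    // Round a va_list cursor up to the ABI alignment of the type being
    // fetched -- the quantity visitVAArg now passes along. Align must be
    // a power of two.
    uintptr_t alignVACursor(uintptr_t Cursor, uintptr_t Align) {
      return (Cursor + Align - 1) & ~(Align - 1);
    }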
@@ -5687,17 +5685,17 @@ void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
MVT::Other, getRoot(),
- getValue(I.getOperand(1)),
- DAG.getSrcValue(I.getOperand(1))));
+ getValue(I.getArgOperand(0)),
+ DAG.getSrcValue(I.getArgOperand(0))));
}
void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
MVT::Other, getRoot(),
- getValue(I.getOperand(1)),
- getValue(I.getOperand(2)),
- DAG.getSrcValue(I.getOperand(1)),
- DAG.getSrcValue(I.getOperand(2))));
+ getValue(I.getArgOperand(0)),
+ getValue(I.getArgOperand(1)),
+ DAG.getSrcValue(I.getArgOperand(0)),
+ DAG.getSrcValue(I.getArgOperand(1))));
}
/// TargetLowering::LowerCallTo - This is the default LowerCallTo
@@ -5715,6 +5713,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
DebugLoc dl) const {
// Handle all of the outgoing arguments.
SmallVector<ISD::OutputArg, 32> Outs;
+ SmallVector<SDValue, 32> OutVals;
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
@@ -5768,13 +5767,15 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
for (unsigned j = 0; j != NumParts; ++j) {
// If it isn't the first piece, the alignment must be 1.
- ISD::OutputArg MyFlags(Flags, Parts[j], i < NumFixedArgs);
+ ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(),
+ i < NumFixedArgs);
if (NumParts > 1 && j == 0)
MyFlags.Flags.setSplit();
else if (j != 0)
MyFlags.Flags.setOrigAlign(1);
Outs.push_back(MyFlags);
+ OutVals.push_back(Parts[j]);
}
}
}
@@ -5803,7 +5804,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
SmallVector<SDValue, 4> InVals;
Chain = LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
- Outs, Ins, dl, DAG, InVals);
+ Outs, OutVals, Ins, dl, DAG, InVals);
// Verify that the target's LowerCall behaved as expected.
assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
@@ -5876,7 +5877,7 @@ SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
void
SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
- SDValue Op = getValue(V);
+ SDValue Op = getNonRegisterValue(V);
assert((Op.getOpcode() != ISD::CopyFromReg ||
cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
"Copy from a reg to the same reg!");
@@ -5894,21 +5895,16 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
// If this is the entry block, emit arguments.
const Function &F = *LLVMBB->getParent();
SelectionDAG &DAG = SDB->DAG;
- SDValue OldRoot = DAG.getRoot();
DebugLoc dl = SDB->getCurDebugLoc();
const TargetData *TD = TLI.getTargetData();
SmallVector<ISD::InputArg, 16> Ins;
// Check whether the function can return without sret-demotion.
- SmallVector<EVT, 4> OutVTs;
- SmallVector<ISD::ArgFlagsTy, 4> OutsFlags;
- getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
- OutVTs, OutsFlags, TLI);
- FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
-
- FLI.CanLowerReturn = TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(),
- OutVTs, OutsFlags, DAG);
- if (!FLI.CanLowerReturn) {
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
+ Outs, TLI);
+
+ if (!FuncInfo->CanLowerReturn) {
// Put in an sret pointer parameter before all the other parameters.
SmallVector<EVT, 1> ValueVTs;
ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
@@ -6002,7 +5998,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
// Set up the argument values.
unsigned i = 0;
Idx = 1;
- if (!FLI.CanLowerReturn) {
+ if (!FuncInfo->CanLowerReturn) {
// Create a virtual register for the sret pointer, and put in a copy
// from the sret argument into it.
SmallVector<EVT, 1> ValueVTs;
@@ -6016,7 +6012,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
MachineFunction& MF = SDB->DAG.getMachineFunction();
MachineRegisterInfo& RegInfo = MF.getRegInfo();
unsigned SRetReg = RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT));
- FLI.DemoteRegister = SRetReg;
+ FuncInfo->DemoteRegister = SRetReg;
NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurDebugLoc(),
SRetReg, ArgValue);
DAG.setRoot(NewRoot);
@@ -6032,6 +6028,12 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, I->getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
+
+ // If this argument is unused, remember its value; it is used to generate
+ // debugging information.
+ if (I->use_empty() && NumValues)
+ SDB->setUnusedArgValue(I, InVals[i]);
+
for (unsigned Value = 0; Value != NumValues; ++Value) {
EVT VT = ValueVTs[Value];
EVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
@@ -6112,17 +6114,20 @@ SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
unsigned &RegOut = ConstantsOut[C];
if (RegOut == 0) {
- RegOut = FuncInfo.CreateRegForValue(C);
+ RegOut = FuncInfo.CreateRegs(C->getType());
CopyValueToVirtualRegister(C, RegOut);
}
Reg = RegOut;
} else {
- Reg = FuncInfo.ValueMap[PHIOp];
- if (Reg == 0) {
+ DenseMap<const Value *, unsigned>::iterator I =
+ FuncInfo.ValueMap.find(PHIOp);
+ if (I != FuncInfo.ValueMap.end())
+ Reg = I->second;
+ else {
assert(isa<AllocaInst>(PHIOp) &&
FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
"Didn't codegen value into a register!??");
- Reg = FuncInfo.CreateRegForValue(PHIOp);
+ Reg = FuncInfo.CreateRegs(PHIOp->getType());
CopyValueToVirtualRegister(PHIOp, Reg);
}
}
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 3fcd4b9..46733d6 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -88,6 +88,10 @@ class SelectionDAGBuilder {
DebugLoc CurDebugLoc;
DenseMap<const Value*, SDValue> NodeMap;
+
+ /// UnusedArgNodeMap - Maps argument values for unused arguments. This is used
+ /// to preserve debug information for incoming arguments.
+ DenseMap<const Value*, SDValue> UnusedArgNodeMap;
public:
/// PendingLoads - Loads are not emitted to the program immediately. We bunch
@@ -342,6 +346,8 @@ public:
void visit(unsigned Opcode, const User &I);
SDValue getValue(const Value *V);
+ SDValue getNonRegisterValue(const Value *V);
+ SDValue getValueImpl(const Value *V);
void setValue(const Value *V, SDValue NewN) {
SDValue &N = NodeMap[V];
@@ -349,6 +355,12 @@ public:
N = NewN;
}
+ void setUnusedArgValue(const Value *V, SDValue NewN) {
+ SDValue &N = UnusedArgNodeMap[V];
+ assert(N.getNode() == 0 && "Already set a value for this node!");
+ N = NewN;
+ }
+
void GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
std::set<unsigned> &OutputRegs,
std::set<unsigned> &InputRegs);
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 65b8d4f..08ba548 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -14,7 +14,7 @@
#define DEBUG_TYPE "isel"
#include "ScheduleDAGSDNodes.h"
#include "SelectionDAGBuilder.h"
-#include "FunctionLoweringInfo.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/DebugInfo.h"
@@ -171,7 +171,7 @@ TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
SelectionDAGISel::SelectionDAGISel(const TargetMachine &tm, CodeGenOpt::Level OL) :
MachineFunctionPass(&ID), TM(tm), TLI(*tm.getTargetLowering()),
FuncInfo(new FunctionLoweringInfo(TLI)),
- CurDAG(new SelectionDAG(tm, *FuncInfo)),
+ CurDAG(new SelectionDAG(tm)),
SDB(new SelectionDAGBuilder(*CurDAG, *FuncInfo, OL)),
GFI(),
OptLevel(OL),
@@ -244,7 +244,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
CurDAG->init(*MF);
- FuncInfo->set(Fn, *MF, EnableFastISel);
+ FuncInfo->set(Fn, *MF);
SDB->init(GFI, *AA);
SelectAllBasicBlocks(Fn);
@@ -300,7 +300,11 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
for (MachineBasicBlock::const_iterator
II = MBB->begin(), IE = MBB->end(); II != IE; ++II) {
const TargetInstrDesc &TID = TM.getInstrInfo()->get(II->getOpcode());
- if (II->isInlineAsm() || (TID.isCall() && !TID.isReturn())) {
+
+ // Operand 1 of an inline asm instruction indicates whether the asm
+ // needs the stack or not.
+ if ((II->isInlineAsm() && II->getOperand(1).getImm()) ||
+ (TID.isCall() && !TID.isReturn())) {
MFI->setHasCalls(true);
goto done;
}
@@ -312,6 +316,26 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// Determine if there is a call to setjmp in the machine function.
MF->setCallsSetJmp(FunctionCallsSetJmp(&Fn));
+ // Replace forward-declared registers with the registers containing
+ // the desired value.
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ for (DenseMap<unsigned, unsigned>::iterator
+ I = FuncInfo->RegFixups.begin(), E = FuncInfo->RegFixups.end();
+ I != E; ++I) {
+ unsigned From = I->first;
+ unsigned To = I->second;
+ // If To is also scheduled to be replaced, find what its ultimate
+ // replacement is.
+ for (;;) {
+ DenseMap<unsigned, unsigned>::iterator J =
+ FuncInfo->RegFixups.find(To);
+ if (J == E) break;
+ To = J->second;
+ }
+ // Replace it.
+ MRI.replaceRegWith(From, To);
+ }
+
// Release function-specific state. SDB and CurDAG are already cleared
// at this point.
FuncInfo->clear();
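The new fixup loop resolves chains of deferred replacements before rewriting: if vreg A is scheduled to become B and B to become C, A must end up as C, or replaceRegWith would leave a stale reference. The chasing logic in isolation, assuming plain register numbers and an acyclic map as the code above does:

    #include <map>

    // Follow RegFixups-style chains: with A -> B and B -> C recorded,
    // resolveFixup(A) returns C. Assumes the map is acyclic.
    unsigned resolveFixup(const std::map<unsigned, unsigned> &Fixups,
                          unsigned Reg) {
      std::map<unsigned, unsigned>::const_iterator I;
      while ((I = Fixups.find(Reg)) != Fixups.end())
        Reg = I->second;
      return Reg;
    }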
@@ -319,10 +343,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
return true;
}
-MachineBasicBlock *
-SelectionDAGISel::SelectBasicBlock(MachineBasicBlock *BB,
- const BasicBlock *LLVMBB,
- BasicBlock::const_iterator Begin,
+void
+SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
BasicBlock::const_iterator End,
bool &HadTailCall) {
// Lower all of the non-terminator instructions. If a call is emitted
@@ -337,7 +359,7 @@ SelectionDAGISel::SelectBasicBlock(MachineBasicBlock *BB,
SDB->clear();
// Final step, emit the lowered DAG as machine code.
- return CodeGenAndEmitDAG(BB);
+ CodeGenAndEmitDAG();
}
namespace {
@@ -372,102 +394,6 @@ public:
};
}
-/// TrivialTruncElim - Eliminate some trivial nops that can result from
-/// ShrinkDemandedOps: (trunc (ext n)) -> n.
-static bool TrivialTruncElim(SDValue Op,
- TargetLowering::TargetLoweringOpt &TLO) {
- SDValue N0 = Op.getOperand(0);
- EVT VT = Op.getValueType();
- if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
- N0.getOpcode() == ISD::SIGN_EXTEND ||
- N0.getOpcode() == ISD::ANY_EXTEND) &&
- N0.getOperand(0).getValueType() == VT) {
- return TLO.CombineTo(Op, N0.getOperand(0));
- }
- return false;
-}
-
-/// ShrinkDemandedOps - A late transformation pass that shrink expressions
-/// using TargetLowering::TargetLoweringOpt::ShrinkDemandedOp. It converts
-/// x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
-void SelectionDAGISel::ShrinkDemandedOps() {
- SmallVector<SDNode*, 128> Worklist;
- SmallPtrSet<SDNode*, 128> InWorklist;
-
- // Add all the dag nodes to the worklist.
- Worklist.reserve(CurDAG->allnodes_size());
- for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
- E = CurDAG->allnodes_end(); I != E; ++I) {
- Worklist.push_back(I);
- InWorklist.insert(I);
- }
-
- TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true, true);
- while (!Worklist.empty()) {
- SDNode *N = Worklist.pop_back_val();
- InWorklist.erase(N);
-
- if (N->use_empty() && N != CurDAG->getRoot().getNode()) {
- // Deleting this node may make its operands dead, add them to the worklist
- // if they aren't already there.
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- if (InWorklist.insert(N->getOperand(i).getNode()))
- Worklist.push_back(N->getOperand(i).getNode());
-
- CurDAG->DeleteNode(N);
- continue;
- }
-
- // Run ShrinkDemandedOp on scalar binary operations.
- if (N->getNumValues() != 1 ||
- !N->getValueType(0).isSimple() || !N->getValueType(0).isInteger())
- continue;
-
- unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
- APInt Demanded = APInt::getAllOnesValue(BitWidth);
- APInt KnownZero, KnownOne;
- if (!TLI.SimplifyDemandedBits(SDValue(N, 0), Demanded,
- KnownZero, KnownOne, TLO) &&
- (N->getOpcode() != ISD::TRUNCATE ||
- !TrivialTruncElim(SDValue(N, 0), TLO)))
- continue;
-
- // Revisit the node.
- assert(!InWorklist.count(N) && "Already in worklist");
- Worklist.push_back(N);
- InWorklist.insert(N);
-
- // Replace the old value with the new one.
- DEBUG(errs() << "\nShrinkDemandedOps replacing ";
- TLO.Old.getNode()->dump(CurDAG);
- errs() << "\nWith: ";
- TLO.New.getNode()->dump(CurDAG);
- errs() << '\n');
-
- if (InWorklist.insert(TLO.New.getNode()))
- Worklist.push_back(TLO.New.getNode());
-
- SDOPsWorkListRemover DeadNodes(Worklist, InWorklist);
- CurDAG->ReplaceAllUsesOfValueWith(TLO.Old, TLO.New, &DeadNodes);
-
- if (!TLO.Old.getNode()->use_empty()) continue;
-
- for (unsigned i = 0, e = TLO.Old.getNode()->getNumOperands();
- i != e; ++i) {
- SDNode *OpNode = TLO.Old.getNode()->getOperand(i).getNode();
- if (OpNode->hasOneUse()) {
- // Add OpNode to the end of the list to revisit.
- DeadNodes.RemoveFromWorklist(OpNode);
- Worklist.push_back(OpNode);
- InWorklist.insert(OpNode);
- }
- }
-
- DeadNodes.RemoveFromWorklist(TLO.Old.getNode());
- CurDAG->DeleteNode(TLO.Old.getNode());
- }
-}
-
void SelectionDAGISel::ComputeLiveOutVRegInfo() {
SmallPtrSet<SDNode*, 128> VisitedNodes;
SmallVector<SDNode*, 128> Worklist;
@@ -522,7 +448,7 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() {
} while (!Worklist.empty());
}
-MachineBasicBlock *SelectionDAGISel::CodeGenAndEmitDAG(MachineBasicBlock *BB) {
+void SelectionDAGISel::CodeGenAndEmitDAG() {
std::string GroupName;
if (TimePassesIsEnabled)
GroupName = "Instruction Selection and Scheduling";
@@ -531,23 +457,19 @@ MachineBasicBlock *SelectionDAGISel::CodeGenAndEmitDAG(MachineBasicBlock *BB) {
ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs ||
ViewSUnitDAGs)
BlockName = MF->getFunction()->getNameStr() + ":" +
- BB->getBasicBlock()->getNameStr();
+ FuncInfo->MBB->getBasicBlock()->getNameStr();
- DEBUG(dbgs() << "Initial selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Initial selection DAG:\n"; CurDAG->dump());
if (ViewDAGCombine1) CurDAG->viewGraph("dag-combine1 input for " + BlockName);
// Run the DAG combiner in pre-legalize mode.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Combining 1", GroupName);
- CurDAG->Combine(Unrestricted, *AA, OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Combining 1", GroupName, TimePassesIsEnabled);
CurDAG->Combine(Unrestricted, *AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized lowered selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Optimized lowered selection DAG:\n"; CurDAG->dump());
// Second step, hack on the DAG until it only uses operations and types that
// the target supports.
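Each timing site in CodeGenAndEmitDAG is rewritten from an if/else that duplicated the timed call into a single call inside a scope whose NamedRegionTimer now takes TimePassesIsEnabled. All the pattern needs is a RAII timer that degrades to a no-op when disabled; a minimal sketch with a hypothetical ScopedTimer standing in for the real class:

    extern void startTimer(const char *Name);  // assumed timing primitives
    extern void stopTimer();

    // RAII timer that does nothing when Enabled is false, so call sites
    // can always use one scoped block instead of an if/else pair.
    class ScopedTimer {
      const char *Name;
      bool Active;
    public:
      ScopedTimer(const char *N, bool Enabled) : Name(N), Active(Enabled) {
        if (Active) startTimer(Name);
      }
      ~ScopedTimer() {
        if (Active) stopTimer();
      }
    };

    void combineOnce(bool TimePassesIsEnabled) {
      ScopedTimer T("DAG Combining 1", TimePassesIsEnabled);
      // ... the timed work appears exactly once here ...
    }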
@@ -555,44 +477,36 @@ MachineBasicBlock *SelectionDAGISel::CodeGenAndEmitDAG(MachineBasicBlock *BB) {
BlockName);
bool Changed;
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Type Legalization", GroupName);
- Changed = CurDAG->LegalizeTypes();
- } else {
+ {
+ NamedRegionTimer T("Type Legalization", GroupName, TimePassesIsEnabled);
Changed = CurDAG->LegalizeTypes();
}
- DEBUG(dbgs() << "Type-legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Type-legalized selection DAG:\n"; CurDAG->dump());
if (Changed) {
if (ViewDAGCombineLT)
CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
// Run the DAG combiner in post-type-legalize mode.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Combining after legalize types", GroupName);
- CurDAG->Combine(NoIllegalTypes, *AA, OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Combining after legalize types", GroupName,
+ TimePassesIsEnabled);
CurDAG->Combine(NoIllegalTypes, *AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized type-legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Optimized type-legalized selection DAG:\n";
+ CurDAG->dump());
}
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Vector Legalization", GroupName);
- Changed = CurDAG->LegalizeVectors();
- } else {
+ {
+ NamedRegionTimer T("Vector Legalization", GroupName, TimePassesIsEnabled);
Changed = CurDAG->LegalizeVectors();
}
if (Changed) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Type Legalization 2", GroupName);
- CurDAG->LegalizeTypes();
- } else {
+ {
+ NamedRegionTimer T("Type Legalization 2", GroupName, TimePassesIsEnabled);
CurDAG->LegalizeTypes();
}
@@ -600,95 +514,79 @@ MachineBasicBlock *SelectionDAGISel::CodeGenAndEmitDAG(MachineBasicBlock *BB) {
CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
// Run the DAG combiner in post-type-legalize mode.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Combining after legalize vectors", GroupName);
- CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Combining after legalize vectors", GroupName,
+ TimePassesIsEnabled);
CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized vector-legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Optimized vector-legalized selection DAG:\n";
+ CurDAG->dump());
}
if (ViewLegalizeDAGs) CurDAG->viewGraph("legalize input for " + BlockName);
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Legalization", GroupName);
- CurDAG->Legalize(OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Legalization", GroupName, TimePassesIsEnabled);
CurDAG->Legalize(OptLevel);
}
- DEBUG(dbgs() << "Legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Legalized selection DAG:\n"; CurDAG->dump());
if (ViewDAGCombine2) CurDAG->viewGraph("dag-combine2 input for " + BlockName);
// Run the DAG combiner in post-legalize mode.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Combining 2", GroupName);
- CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Combining 2", GroupName, TimePassesIsEnabled);
CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Optimized legalized selection DAG:\n"; CurDAG->dump());
- if (OptLevel != CodeGenOpt::None) {
- ShrinkDemandedOps();
+ if (OptLevel != CodeGenOpt::None)
ComputeLiveOutVRegInfo();
- }
if (ViewISelDAGs) CurDAG->viewGraph("isel input for " + BlockName);
// Third, instruction select all of the operations to machine code, adding the
// code to the MachineBasicBlock.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Instruction Selection", GroupName);
- DoInstructionSelection();
- } else {
+ {
+ NamedRegionTimer T("Instruction Selection", GroupName, TimePassesIsEnabled);
DoInstructionSelection();
}
- DEBUG(dbgs() << "Selected selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Selected selection DAG:\n"; CurDAG->dump());
if (ViewSchedDAGs) CurDAG->viewGraph("scheduler input for " + BlockName);
// Schedule machine code.
ScheduleDAGSDNodes *Scheduler = CreateScheduler();
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Instruction Scheduling", GroupName);
- Scheduler->Run(CurDAG, BB, BB->end());
- } else {
- Scheduler->Run(CurDAG, BB, BB->end());
+ {
+ NamedRegionTimer T("Instruction Scheduling", GroupName,
+ TimePassesIsEnabled);
+ Scheduler->Run(CurDAG, FuncInfo->MBB, FuncInfo->InsertPt);
}
if (ViewSUnitDAGs) Scheduler->viewGraph();
// Emit machine code to BB. This can change 'BB' to the last block being
// inserted into.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Instruction Creation", GroupName);
- BB = Scheduler->EmitSchedule();
- } else {
- BB = Scheduler->EmitSchedule();
+ {
+ NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled);
+
+ FuncInfo->MBB = Scheduler->EmitSchedule();
+ FuncInfo->InsertPt = Scheduler->InsertPos;
}
// Free the scheduler state.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Instruction Scheduling Cleanup", GroupName);
- delete Scheduler;
- } else {
+ {
+ NamedRegionTimer T("Instruction Scheduling Cleanup", GroupName,
+ TimePassesIsEnabled);
delete Scheduler;
}
// Free the SelectionDAG state, now that we're finished with it.
CurDAG->clear();
-
- return BB;
}
void SelectionDAGISel::DoInstructionSelection() {
@@ -750,21 +648,22 @@ void SelectionDAGISel::DoInstructionSelection() {
/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
/// do other setup for EH landing-pad blocks.
-void SelectionDAGISel::PrepareEHLandingPad(MachineBasicBlock *BB) {
+void SelectionDAGISel::PrepareEHLandingPad() {
// Add a label to mark the beginning of the landing pad. Deletion of the
// landing pad can thus be detected via the MachineModuleInfo.
- MCSymbol *Label = MF->getMMI().addLandingPad(BB);
+ MCSymbol *Label = MF->getMMI().addLandingPad(FuncInfo->MBB);
const TargetInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
- BuildMI(BB, SDB->getCurDebugLoc(), II).addSym(Label);
+ BuildMI(*FuncInfo->MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
+ .addSym(Label);
// Mark exception register as live in.
unsigned Reg = TLI.getExceptionAddressRegister();
- if (Reg) BB->addLiveIn(Reg);
+ if (Reg) FuncInfo->MBB->addLiveIn(Reg);
// Mark exception selector register as live in.
Reg = TLI.getExceptionSelectorRegister();
- if (Reg) BB->addLiveIn(Reg);
+ if (Reg) FuncInfo->MBB->addLiveIn(Reg);
// FIXME: Hack around an exception handling flaw (PR1508): the personality
// function and list of typeids logically belong to the invoke (or, if you
@@ -777,7 +676,7 @@ void SelectionDAGISel::PrepareEHLandingPad(MachineBasicBlock *BB) {
// in exceptions not being caught because no typeids are associated with
// the invoke. This may not be the only way things can go wrong, but it
// is the only case we try to work around for the moment.
- const BasicBlock *LLVMBB = BB->getBasicBlock();
+ const BasicBlock *LLVMBB = FuncInfo->MBB->getBasicBlock();
const BranchInst *Br = dyn_cast<BranchInst>(LLVMBB->getTerminator());
if (Br && Br->isUnconditional()) { // Critical edge?
@@ -796,83 +695,100 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Initialize the Fast-ISel state, if needed.
FastISel *FastIS = 0;
if (EnableFastISel)
- FastIS = TLI.createFastISel(*MF, FuncInfo->ValueMap, FuncInfo->MBBMap,
- FuncInfo->StaticAllocaMap,
- FuncInfo->PHINodesToUpdate
-#ifndef NDEBUG
- , FuncInfo->CatchInfoLost
-#endif
- );
+ FastIS = TLI.createFastISel(*FuncInfo);
// Iterate over all basic blocks in the function.
for (Function::const_iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
const BasicBlock *LLVMBB = &*I;
- MachineBasicBlock *BB = FuncInfo->MBBMap[LLVMBB];
+ FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
+ FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
BasicBlock::const_iterator const Begin = LLVMBB->getFirstNonPHI();
BasicBlock::const_iterator const End = LLVMBB->end();
- BasicBlock::const_iterator BI = Begin;
+ BasicBlock::const_iterator BI = End;
+ FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI();
+
+ // Set up an EH landing-pad block.
+ if (FuncInfo->MBB->isLandingPad())
+ PrepareEHLandingPad();
+
// Lower any arguments needed in this block if this is the entry block.
if (LLVMBB == &Fn.getEntryBlock())
LowerArguments(LLVMBB);
- // Setup an EH landing-pad block.
- if (BB->isLandingPad())
- PrepareEHLandingPad(BB);
-
// Before doing SelectionDAG ISel, see if FastISel has been requested.
if (FastIS) {
+ FastIS->startNewBlock();
+
// Emit code for any incoming arguments. This must happen before
// beginning FastISel on the entry block.
if (LLVMBB == &Fn.getEntryBlock()) {
CurDAG->setRoot(SDB->getControlRoot());
SDB->clear();
- BB = CodeGenAndEmitDAG(BB);
+ CodeGenAndEmitDAG();
+
+ // If we inserted any instructions at the beginning, make a note of
+ // where they are, so we can be sure to emit subsequent instructions
+ // after them.
+ if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
+ FastIS->setLastLocalValue(llvm::prior(FuncInfo->InsertPt));
+ else
+ FastIS->setLastLocalValue(0);
}
- FastIS->startNewBlock(BB);
+
// Do FastISel on as many instructions as possible.
- for (; BI != End; ++BI) {
+ for (; BI != Begin; --BI) {
+ const Instruction *Inst = llvm::prior(BI);
+
+ // If we no longer require this instruction, skip it.
+ if (!Inst->mayWriteToMemory() &&
+ !isa<TerminatorInst>(Inst) &&
+ !isa<DbgInfoIntrinsic>(Inst) &&
+ !FuncInfo->isExportedInst(Inst))
+ continue;
+
+ // Bottom-up: reset the insert pos at the top, after any local-value
+ // instructions.
+ FastIS->recomputeInsertPt();
+
// Try to select the instruction with FastISel.
- if (FastIS->SelectInstruction(BI))
+ if (FastIS->SelectInstruction(Inst))
continue;
// Then handle certain instructions as single-LLVM-Instruction blocks.
- if (isa<CallInst>(BI)) {
+ if (isa<CallInst>(Inst)) {
++NumFastIselFailures;
if (EnableFastISelVerbose || EnableFastISelAbort) {
dbgs() << "FastISel missed call: ";
- BI->dump();
+ Inst->dump();
}
- if (!BI->getType()->isVoidTy() && !BI->use_empty()) {
- unsigned &R = FuncInfo->ValueMap[BI];
+ if (!Inst->getType()->isVoidTy() && !Inst->use_empty()) {
+ unsigned &R = FuncInfo->ValueMap[Inst];
if (!R)
- R = FuncInfo->CreateRegForValue(BI);
+ R = FuncInfo->CreateRegs(Inst->getType());
}
bool HadTailCall = false;
- BB = SelectBasicBlock(BB, LLVMBB, BI, llvm::next(BI), HadTailCall);
+ SelectBasicBlock(Inst, BI, HadTailCall);
// If the call was emitted as a tail call, we're done with the block.
if (HadTailCall) {
- BI = End;
+ --BI;
break;
}
- // If the instruction was codegen'd with multiple blocks,
- // inform the FastISel object where to resume inserting.
- FastIS->setCurrentBlock(BB);
continue;
}
// Otherwise, give up on FastISel for the rest of the block.
// For now, be a little lenient about non-branch terminators.
- if (!isa<TerminatorInst>(BI) || isa<BranchInst>(BI)) {
+ if (!isa<TerminatorInst>(Inst) || isa<BranchInst>(Inst)) {
++NumFastIselFailures;
if (EnableFastISelVerbose || EnableFastISelAbort) {
dbgs() << "FastISel miss: ";
- BI->dump();
+ Inst->dump();
}
if (EnableFastISelAbort)
// The "fast" selector couldn't handle something and bailed.
@@ -881,17 +797,17 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
}
break;
}
+
+ FastIS->recomputeInsertPt();
}
// Run SelectionDAG instruction selection on the remainder of the block
// not handled by FastISel. If FastISel is not run, this is the entire
// block.
- if (BI != End) {
- bool HadTailCall;
- BB = SelectBasicBlock(BB, LLVMBB, BI, End, HadTailCall);
- }
+ bool HadTailCall;
+ SelectBasicBlock(Begin, BI, HadTailCall);
- FinishBasicBlock(BB);
+ FinishBasicBlock();
FuncInfo->PHINodesToUpdate.clear();
}
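The walk over each block now runs backwards (BI starts at End and moves toward Begin), matching FastISel's new bottom-up operation: by the time an instruction is visited, all of its users in the block have already been selected, so anything without side effects that is not exported can simply be skipped as dead. A reduced model of the loop, under the simplifying assumption that instructions sit in a plain vector:

    #include <vector>

    struct InstModel {
      bool WritesMemory, IsTerminator, IsDbgIntrinsic, IsExported;
    };

    extern void select(const InstModel &I);  // stand-in for FastISel

    // Bottom-up walk, as in the rewritten loop above: skip instructions
    // nothing can still require; the real code uses ilist iterators and
    // llvm::prior(BI) instead of indices.
    void selectBottomUp(const std::vector<InstModel> &Block) {
      for (size_t BI = Block.size(); BI != 0; --BI) {
        const InstModel &I = Block[BI - 1];
        if (!I.WritesMemory && !I.IsTerminator &&
            !I.IsDbgIntrinsic && !I.IsExported)
          continue;
        select(I);
      }
    }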
@@ -899,11 +815,11 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
}
void
-SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) {
+SelectionDAGISel::FinishBasicBlock() {
DEBUG(dbgs() << "Total amount of phi nodes to update: "
- << FuncInfo->PHINodesToUpdate.size() << "\n");
- DEBUG(for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
+ << FuncInfo->PHINodesToUpdate.size() << "\n";
+ for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
dbgs() << "Node " << i << " : ("
<< FuncInfo->PHINodesToUpdate[i].first
<< ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
@@ -917,11 +833,11 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) {
MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
- if (!BB->isSuccessor(PHI->getParent()))
+ if (!FuncInfo->MBB->isSuccessor(PHI->getParent()))
continue;
PHI->addOperand(
MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
- PHI->addOperand(MachineOperand::CreateMBB(BB));
+ PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
}
return;
}
@@ -930,33 +846,35 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) {
// Lower header first, if it wasn't already lowered
if (!SDB->BitTestCases[i].Emitted) {
// Set the current basic block to the mbb we wish to insert the code into
- BB = SDB->BitTestCases[i].Parent;
+ FuncInfo->MBB = SDB->BitTestCases[i].Parent;
+ FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
- SDB->visitBitTestHeader(SDB->BitTestCases[i], BB);
+ SDB->visitBitTestHeader(SDB->BitTestCases[i], FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
- BB = CodeGenAndEmitDAG(BB);
+ CodeGenAndEmitDAG();
}
for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) {
// Set the current basic block to the mbb we wish to insert the code into
- BB = SDB->BitTestCases[i].Cases[j].ThisBB;
+ FuncInfo->MBB = SDB->BitTestCases[i].Cases[j].ThisBB;
+ FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
if (j+1 != ej)
SDB->visitBitTestCase(SDB->BitTestCases[i].Cases[j+1].ThisBB,
SDB->BitTestCases[i].Reg,
SDB->BitTestCases[i].Cases[j],
- BB);
+ FuncInfo->MBB);
else
SDB->visitBitTestCase(SDB->BitTestCases[i].Default,
SDB->BitTestCases[i].Reg,
SDB->BitTestCases[i].Cases[j],
- BB);
+ FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
- BB = CodeGenAndEmitDAG(BB);
+ CodeGenAndEmitDAG();
}
// Update PHI Nodes
@@ -1001,22 +919,24 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) {
// Lower header first, if it wasn't already lowered
if (!SDB->JTCases[i].first.Emitted) {
// Set the current basic block to the mbb we wish to insert the code into
- BB = SDB->JTCases[i].first.HeaderBB;
+ FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB;
+ FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first,
- BB);
+ FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
- BB = CodeGenAndEmitDAG(BB);
+ CodeGenAndEmitDAG();
}
// Set the current basic block to the mbb we wish to insert the code into
- BB = SDB->JTCases[i].second.MBB;
+ FuncInfo->MBB = SDB->JTCases[i].second.MBB;
+ FuncInfo->InsertPt = FuncInfo->MBB->end();
// Emit the code
SDB->visitJumpTable(SDB->JTCases[i].second);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
- BB = CodeGenAndEmitDAG(BB);
+ CodeGenAndEmitDAG();
// Update PHI Nodes
for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
@@ -1034,11 +954,11 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) {
(MachineOperand::CreateMBB(SDB->JTCases[i].first.HeaderBB));
}
// JT BB. Just iterate over successors here
- if (BB->isSuccessor(PHIBB)) {
+ if (FuncInfo->MBB->isSuccessor(PHIBB)) {
PHI->addOperand
(MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[pi].second,
false));
- PHI->addOperand(MachineOperand::CreateMBB(BB));
+ PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
}
}
}
@@ -1050,10 +970,10 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) {
MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first;
assert(PHI->isPHI() &&
"This is not a machine PHI node that we are updating!");
- if (BB->isSuccessor(PHI->getParent())) {
+ if (FuncInfo->MBB->isSuccessor(PHI->getParent())) {
PHI->addOperand(
MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false));
- PHI->addOperand(MachineOperand::CreateMBB(BB));
+ PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB));
}
}
@@ -1061,7 +981,8 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) {
// additional DAGs necessary.
for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) {
// Set the current basic block to the mbb we wish to insert the code into
- MachineBasicBlock *ThisBB = BB = SDB->SwitchCases[i].ThisBB;
+ MachineBasicBlock *ThisBB = FuncInfo->MBB = SDB->SwitchCases[i].ThisBB;
+ FuncInfo->InsertPt = FuncInfo->MBB->end();
// Determine the unique successors.
SmallVector<MachineBasicBlock *, 2> Succs;
@@ -1071,21 +992,24 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) {
// Emit the code. Note that this could result in ThisBB being split, so
// we need to check for updates.
- SDB->visitSwitchCase(SDB->SwitchCases[i], BB);
+ SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
- ThisBB = CodeGenAndEmitDAG(BB);
+ CodeGenAndEmitDAG();
+ ThisBB = FuncInfo->MBB;
// Handle any PHI nodes in successors of this chunk, as if we were coming
// from the original BB before switch expansion. Note that PHI nodes can
// occur multiple times in PHINodesToUpdate. We have to be very careful to
// handle them the right number of times.
for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
- BB = Succs[i];
- // BB may have been removed from the CFG if a branch was constant folded.
- if (ThisBB->isSuccessor(BB)) {
- for (MachineBasicBlock::iterator Phi = BB->begin();
- Phi != BB->end() && Phi->isPHI();
+ FuncInfo->MBB = Succs[i];
+ FuncInfo->InsertPt = FuncInfo->MBB->end();
+ // FuncInfo->MBB may have been removed from the CFG if a branch was
+ // constant folded.
+ if (ThisBB->isSuccessor(FuncInfo->MBB)) {
+ for (MachineBasicBlock::iterator Phi = FuncInfo->MBB->begin();
+ Phi != FuncInfo->MBB->end() && Phi->isPHI();
++Phi) {
// This value for this PHI node is recorded in PHINodesToUpdate.
for (unsigned pn = 0; ; ++pn) {
@@ -1205,6 +1129,7 @@ SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) {
Ops.push_back(InOps[InlineAsm::Op_InputChain]); // 0
Ops.push_back(InOps[InlineAsm::Op_AsmString]); // 1
Ops.push_back(InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
+ Ops.push_back(InOps[InlineAsm::Op_IsAlignStack]); // 3
unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
if (InOps[e-1].getValueType() == MVT::Flag)
@@ -1701,7 +1626,7 @@ MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
SDValue(Res, ResNumResults-1));
if ((EmitNodeInfo & OPFL_FlagOutput) != 0)
- --ResNumResults;
+ --ResNumResults;
// Move the chain reference if needed.
if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
index 3786bd1..6cae804 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
@@ -278,7 +278,7 @@ std::string ScheduleDAGSDNodes::getGraphNodeLabel(const SUnit *SU) const {
FlaggedNodes.push_back(N);
while (!FlaggedNodes.empty()) {
O << DOTGraphTraits<SelectionDAG*>
- ::getSimpleNodeLabel(FlaggedNodes.back(), DAG);
+ ::getSimpleNodeLabel(FlaggedNodes.back(), DAG);
FlaggedNodes.pop_back();
if (!FlaggedNodes.empty())
O << "\n ";
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 44a80d3..4f38669 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -20,6 +20,7 @@
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -261,6 +262,38 @@ static void InitLibcallNames(const char **Names) {
Names[RTLIB::MEMMOVE] = "memmove";
Names[RTLIB::MEMSET] = "memset";
Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
+ Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
+ Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
+ Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4";
+ Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8";
+ Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1";
+ Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2";
+ Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4";
+ Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8";
+ Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1";
+ Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2";
+ Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4";
+ Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8";
+ Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1";
+ Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2";
+ Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
+ Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
+ Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
+ Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
+ Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
+ Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
+ Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
+ Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
+ Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
+ Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
+ Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
+ Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
+ Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
+ Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
+ Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
+ Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
+ Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
+ Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
}
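The new RTLIB entries name the GCC-style __sync_* helpers a target can fall back to when it lacks native atomic instructions. A hedged sketch of the semantics such helpers must provide — shown with std::atomic purely for illustration; the real libcalls are free functions supplied by the runtime:

    #include <atomic>
    #include <cstdint>

    // __sync_fetch_and_add_4 semantics: atomically add, return the OLD value.
    uint32_t fetch_and_add_4(std::atomic<uint32_t> *p, uint32_t v) {
      return p->fetch_add(v);
    }

    // __sync_val_compare_and_swap_4 semantics: store desired iff *p == expected,
    // and return whatever *p held beforehand.
    uint32_t val_compare_and_swap_4(std::atomic<uint32_t> *p,
                                    uint32_t expected, uint32_t desired) {
      // On failure compare_exchange_strong writes the observed value into
      // 'expected'; on success 'expected' already equals the old value.
      p->compare_exchange_strong(expected, desired);
      return expected;
    }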
/// InitLibcallCallingConvs - Set default libcall CallingConvs.
@@ -546,9 +579,9 @@ TargetLowering::TargetLowering(const TargetMachine &tm,
SchedPreferenceInfo = Sched::Latency;
JumpBufSize = 0;
JumpBufAlignment = 0;
- IfCvtBlockSizeLimit = 2;
- IfCvtDupBlockSizeLimit = 0;
PrefLoopAlignment = 0;
+ MinStackArgumentAlignment = 1;
+ ShouldFoldAtomicFences = false;
InitLibcallNames(LibcallRoutineNames);
InitCmpLibcallCCs(CmpLibcallCCs);
@@ -578,9 +611,9 @@ bool TargetLowering::canOpTrap(unsigned Op, EVT VT) const {
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
- unsigned &NumIntermediates,
- EVT &RegisterVT,
- TargetLowering* TLI) {
+ unsigned &NumIntermediates,
+ EVT &RegisterVT,
+ TargetLowering *TLI) {
// Figure out the right, legal destination reg to copy into.
unsigned NumElts = VT.getVectorNumElements();
MVT EltTy = VT.getVectorElementType();
@@ -610,16 +643,12 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
EVT DestVT = TLI->getRegisterType(NewVT);
RegisterVT = DestVT;
- if (EVT(DestVT).bitsLT(NewVT)) {
- // Value is expanded, e.g. i64 -> i16.
+ if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits());
- } else {
- // Otherwise, promotion or legal types use the same number of registers as
- // the vector decimated to the appropriate level.
- return NumVectorRegs;
- }
- return 1;
+ // Otherwise, promotion or legal types use the same number of registers as
+ // the vector decimated to the appropriate level.
+ return NumVectorRegs;
}
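The restructured early return computes the register count arithmetically. A standalone worked instance, with bit widths assumed for illustration:

    // Mirror of the return above: if the legal register type is narrower than
    // the intermediate type, each intermediate value expands into several
    // registers (e.g. i64 -> i16).
    unsigned numRegs(unsigned NumVectorRegs, unsigned NewVTBits,
                     unsigned DestVTBits) {
      if (DestVTBits < NewVTBits)                        // value is expanded
        return NumVectorRegs * (NewVTBits / DestVTBits);
      return NumVectorRegs;                              // promoted or legal
    }
    // numRegs(2, 64, 16) == 8: two i64 intermediates, four i16 parts each.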
/// computeRegisterProperties - Once all of the register classes are added,
@@ -705,39 +734,39 @@ void TargetLowering::computeRegisterProperties() {
for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
MVT VT = (MVT::SimpleValueType)i;
- if (!isTypeLegal(VT)) {
- MVT IntermediateVT;
- EVT RegisterVT;
- unsigned NumIntermediates;
- NumRegistersForVT[i] =
- getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
- RegisterVT, this);
- RegisterTypeForVT[i] = RegisterVT;
-
- // Determine if there is a legal wider type.
- bool IsLegalWiderType = false;
- EVT EltVT = VT.getVectorElementType();
- unsigned NElts = VT.getVectorNumElements();
- for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
- EVT SVT = (MVT::SimpleValueType)nVT;
- if (isTypeSynthesizable(SVT) && SVT.getVectorElementType() == EltVT &&
- SVT.getVectorNumElements() > NElts && NElts != 1) {
- TransformToType[i] = SVT;
- ValueTypeActions.setTypeAction(VT, Promote);
- IsLegalWiderType = true;
- break;
- }
+ if (isTypeLegal(VT)) continue;
+
+ MVT IntermediateVT;
+ EVT RegisterVT;
+ unsigned NumIntermediates;
+ NumRegistersForVT[i] =
+ getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates,
+ RegisterVT, this);
+ RegisterTypeForVT[i] = RegisterVT;
+
+ // Determine if there is a legal wider type.
+ bool IsLegalWiderType = false;
+ EVT EltVT = VT.getVectorElementType();
+ unsigned NElts = VT.getVectorNumElements();
+ for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
+ EVT SVT = (MVT::SimpleValueType)nVT;
+ if (isTypeSynthesizable(SVT) && SVT.getVectorElementType() == EltVT &&
+ SVT.getVectorNumElements() > NElts && NElts != 1) {
+ TransformToType[i] = SVT;
+ ValueTypeActions.setTypeAction(VT, Promote);
+ IsLegalWiderType = true;
+ break;
}
- if (!IsLegalWiderType) {
- EVT NVT = VT.getPow2VectorType();
- if (NVT == VT) {
- // Type is already a power of 2. The default action is to split.
- TransformToType[i] = MVT::Other;
- ValueTypeActions.setTypeAction(VT, Expand);
- } else {
- TransformToType[i] = NVT;
- ValueTypeActions.setTypeAction(VT, Promote);
- }
+ }
+ if (!IsLegalWiderType) {
+ EVT NVT = VT.getPow2VectorType();
+ if (NVT == VT) {
+ // Type is already a power of 2. The default action is to split.
+ TransformToType[i] = MVT::Other;
+ ValueTypeActions.setTypeAction(VT, Expand);
+ } else {
+ TransformToType[i] = NVT;
+ ValueTypeActions.setTypeAction(VT, Promote);
}
}
}
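The fallback at the end of this loop widens a non-power-of-2 vector to its pow2 type (Promote) and splits a vector that is already pow2 (Expand). A standalone sketch of the element-count rounding this relies on:

    // Mirrors VT.getPow2VectorType()'s element-count rounding: if the count
    // is already a power of 2 the type is unchanged and the legalizer splits.
    unsigned pow2Elts(unsigned n) {
      unsigned p = 1;
      while (p < n) p <<= 1;
      return p;
    }
    // pow2Elts(3) == 4 -> promote a v3i32 to v4i32; pow2Elts(4) == 4 -> split.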
@@ -811,6 +840,65 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
return 1;
}
+/// Get the EVTs and ArgFlags collections that represent the legalized return
+/// type of the given function. This does not require a DAG or a return value,
+/// and is suitable for use before any DAGs for the function are constructed.
+/// TODO: Move this out of TargetLowering.cpp.
+void llvm::GetReturnInfo(const Type* ReturnType, Attributes attr,
+ SmallVectorImpl<ISD::OutputArg> &Outs,
+ const TargetLowering &TLI,
+ SmallVectorImpl<uint64_t> *Offsets) {
+ SmallVector<EVT, 4> ValueVTs;
+ ComputeValueVTs(TLI, ReturnType, ValueVTs);
+ unsigned NumValues = ValueVTs.size();
+ if (NumValues == 0) return;
+ unsigned Offset = 0;
+
+ for (unsigned j = 0, f = NumValues; j != f; ++j) {
+ EVT VT = ValueVTs[j];
+ ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
+
+ if (attr & Attribute::SExt)
+ ExtendKind = ISD::SIGN_EXTEND;
+ else if (attr & Attribute::ZExt)
+ ExtendKind = ISD::ZERO_EXTEND;
+
+ // FIXME: C calling convention requires the return type to be promoted to
+ // at least 32-bit. But this is not necessary for non-C calling
+ // conventions. The frontend should mark functions whose return values
+ // require promoting with signext or zeroext attributes.
+ if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
+ EVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
+ if (VT.bitsLT(MinVT))
+ VT = MinVT;
+ }
+
+ unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
+ EVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
+ unsigned PartSize = TLI.getTargetData()->getTypeAllocSize(
+ PartVT.getTypeForEVT(ReturnType->getContext()));
+
+ // 'inreg' on function refers to return value
+ ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
+ if (attr & Attribute::InReg)
+ Flags.setInReg();
+
+ // Propagate extension type if any
+ if (attr & Attribute::SExt)
+ Flags.setSExt();
+ else if (attr & Attribute::ZExt)
+ Flags.setZExt();
+
+ for (unsigned i = 0; i < NumParts; ++i) {
+ Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true));
+ if (Offsets) {
+ Offsets->push_back(Offset);
+ Offset += PartSize;
+ }
+ }
+ }
+}
+
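The FIXME above encodes one concrete rule: an integer return narrower than 32 bits is widened before being split into parts, but only when the frontend marked it signext/zeroext. A minimal standalone sketch, with the 32-bit minimum hardcoded for illustration:

    // Assumes TLI.getRegisterType(Ctx, MVT::i32) is 32 bits wide, as on most
    // targets; purely illustrative.
    unsigned promotedReturnBits(unsigned ValueBits, bool HasExtAttr) {
      const unsigned MinBits = 32;
      if (HasExtAttr && ValueBits < MinBits)
        return MinBits;           // e.g. an i8 signext return goes out as i32
      return ValueBits;
    }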
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
@@ -1042,7 +1130,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
return true;
// If the operation can be done in a smaller type, do so.
- if (TLO.ShrinkOps && TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
+ if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
// Output known-1 bits are only known if set in both the LHS & RHS.
@@ -1076,7 +1164,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if (TLO.ShrinkDemandedConstant(Op, NewMask))
return true;
// If the operation can be done in a smaller type, do so.
- if (TLO.ShrinkOps && TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
+ if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
// Output known-0 bits are only known if clear in both the LHS & RHS.
@@ -1101,7 +1189,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
if ((KnownZero2 & NewMask) == NewMask)
return TLO.CombineTo(Op, Op.getOperand(1));
// If the operation can be done in a smaller type, do so.
- if (TLO.ShrinkOps && TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
+ if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
// If all of the unknown bits are known to be zero on one side or the other
@@ -1498,13 +1586,17 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
break;
}
case ISD::AssertZext: {
- EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
- APInt InMask = APInt::getLowBitsSet(BitWidth,
- VT.getSizeInBits());
- if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask,
+ // Demand all the bits of the input that are demanded in the output.
+ // The low bits are obvious; the high bits are demanded because we're
+ // asserting that they're zero here.
+ if (SimplifyDemandedBits(Op.getOperand(0), NewMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+
+ EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
+ APInt InMask = APInt::getLowBitsSet(BitWidth,
+ VT.getSizeInBits());
KnownZero |= ~InMask & NewMask;
break;
}
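The reordered AssertZext case first recurses with the full demanded mask — the high bits matter precisely because they are being asserted zero — and only then folds the assertion into KnownZero. A standalone 32-bit sketch of that final mask update:

    #include <cstdint>

    // KnownZero |= ~InMask & NewMask, with InMask the low AssertedBits bits.
    uint32_t knownZeroAfterAssertZext(uint32_t KnownZeroIn, uint32_t NewMask,
                                      unsigned AssertedBits) {
      uint32_t InMask =
          AssertedBits >= 32 ? ~0u : ((1u << AssertedBits) - 1);
      return KnownZeroIn | (~InMask & NewMask);  // asserted-zero high bits
    }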
@@ -1544,7 +1636,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
KnownOne2, TLO, Depth+1))
return true;
// See if the operation should be performed at a smaller bit width.
- if (TLO.ShrinkOps && TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
+ if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
}
// FALL THROUGH
@@ -2346,7 +2438,6 @@ const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const{
/// vector. If it is invalid, don't add anything to Ops.
void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
char ConstraintLetter,
- bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
switch (ConstraintLetter) {
@@ -2384,7 +2475,8 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
if (ConstraintLetter != 'n') {
int64_t Offs = GA->getOffset();
if (C) Offs += C->getZExtValue();
- Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
+ Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
+ C->getDebugLoc(),
Op.getValueType(), Offs));
return;
}
@@ -2507,18 +2599,18 @@ static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
/// 'm' over 'r', for example.
///
static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
- bool hasMemory, const TargetLowering &TLI,
+ const TargetLowering &TLI,
SDValue Op, SelectionDAG *DAG) {
assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
unsigned BestIdx = 0;
TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
int BestGenerality = -1;
-
+
// Loop over the options, keeping track of the most general one.
for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
TargetLowering::ConstraintType CType =
TLI.getConstraintType(OpInfo.Codes[i]);
-
+
// If this is an 'other' constraint, see if the operand is valid for it.
// For example, on X86 we might have an 'rI' constraint. If the operand
// is an integer in the range [0..31] we want to use I (saving a load
@@ -2527,7 +2619,7 @@ static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
assert(OpInfo.Codes[i].size() == 1 &&
"Unhandled multi-letter 'other' constraint");
std::vector<SDValue> ResultOps;
- TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0], hasMemory,
+ TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0],
ResultOps, *DAG);
if (!ResultOps.empty()) {
BestType = CType;
@@ -2536,6 +2628,11 @@ static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
}
}
+ // Things with matching constraints can only be registers, per gcc
+ // documentation. This mainly affects "g" constraints.
+ if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
+ continue;
+
// This constraint letter is more general than the previous one, use it.
int Generality = getConstraintGenerality(CType);
if (Generality > BestGenerality) {
@@ -2554,7 +2651,6 @@ static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
/// OpInfo.ConstraintCode and OpInfo.ConstraintType.
void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
SDValue Op,
- bool hasMemory,
SelectionDAG *DAG) const {
assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
@@ -2563,7 +2659,7 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
OpInfo.ConstraintCode = OpInfo.Codes[0];
OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
} else {
- ChooseConstraint(OpInfo, hasMemory, *this, Op, DAG);
+ ChooseConstraint(OpInfo, *this, Op, DAG);
}
// 'X' matches anything.
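The matching-constraint rule added in ChooseConstraint above can be seen from the user side. A hedged gcc-style example where input operand 1 is tied to output operand 0, forcing both into a register even if a broader code like "g" were offered:

    // "0" ties the input to output operand 0's location; gcc requires that
    // shared location to be a register, so a memory pick is never valid here.
    int passthrough(int v) {
      int r;
      asm("" : "=r"(r) : "0"(v));
      return r;
    }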
diff --git a/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp b/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp
index 5240bef..6ab0cb0 100644
--- a/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp
+++ b/contrib/llvm/lib/CodeGen/ShadowStackGC.cpp
@@ -31,6 +31,7 @@
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;
@@ -158,7 +159,8 @@ namespace {
// Create a new invoke instruction.
Args.clear();
- Args.append(CI->op_begin() + 1, CI->op_end());
+ CallSite CS(CI);
+ Args.append(CS.arg_begin(), CS.arg_end());
InvokeInst *II = InvokeInst::Create(CI->getCalledValue(),
NewBB, CleanupBB,
@@ -194,7 +196,7 @@ Constant *ShadowStackGC::GetFrameMap(Function &F) {
unsigned NumMeta = 0;
SmallVector<Constant*,16> Metadata;
for (unsigned I = 0; I != Roots.size(); ++I) {
- Constant *C = cast<Constant>(Roots[I].first->getOperand(2));
+ Constant *C = cast<Constant>(Roots[I].first->getArgOperand(1));
if (!C->isNullValue())
NumMeta = I + 1;
Metadata.push_back(ConstantExpr::getBitCast(C, VoidPtr));
@@ -322,16 +324,16 @@ void ShadowStackGC::CollectRoots(Function &F) {
assert(Roots.empty() && "Not cleaned up?");
- SmallVector<std::pair<CallInst*,AllocaInst*>,16> MetaRoots;
+ SmallVector<std::pair<CallInst*, AllocaInst*>, 16> MetaRoots;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
if (Function *F = CI->getCalledFunction())
if (F->getIntrinsicID() == Intrinsic::gcroot) {
- std::pair<CallInst*,AllocaInst*> Pair = std::make_pair(
- CI, cast<AllocaInst>(CI->getOperand(1)->stripPointerCasts()));
- if (IsNullValue(CI->getOperand(2)))
+ std::pair<CallInst*, AllocaInst*> Pair = std::make_pair(
+ CI, cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
+ if (IsNullValue(CI->getArgOperand(1)))
Roots.push_back(Pair);
else
MetaRoots.push_back(Pair);
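The index shift in this hunk — getOperand(1)/(2) becoming getArgOperand(0)/(1) — suggests the old CallInst layout kept the callee at raw operand 0 with argument i at operand i+1, and that getArgOperand now hides the layout. A toy standalone sketch of the assumed mapping (FakeCallInst is made up, not an LLVM type):

    struct FakeCallInst {
      int Ops[3];   // {callee, arg0, arg1} -- layout inferred from the hunk
      int getOperand(unsigned i) const { return Ops[i]; }
      int getArgOperand(unsigned i) const { return getOperand(i + 1); }
    };
    // So the old getOperand(2) and the new getArgOperand(1) name the same slot.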
diff --git a/contrib/llvm/lib/CodeGen/SimpleHazardRecognizer.h b/contrib/llvm/lib/CodeGen/SimpleHazardRecognizer.h
deleted file mode 100644
index f69feaf..0000000
--- a/contrib/llvm/lib/CodeGen/SimpleHazardRecognizer.h
+++ /dev/null
@@ -1,89 +0,0 @@
-//=- llvm/CodeGen/SimpleHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the SimpleHazardRecognizer class, which
-// implements hazard-avoidance heuristics for scheduling, based on the
-// scheduling itineraries specified for the target.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_SIMPLEHAZARDRECOGNIZER_H
-#define LLVM_CODEGEN_SIMPLEHAZARDRECOGNIZER_H
-
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetInstrInfo.h"
-
-namespace llvm {
- /// SimpleHazardRecognizer - A *very* simple hazard recognizer. It uses
- /// a coarse classification and attempts to avoid that instructions of
- /// a given class aren't grouped too densely together.
- class SimpleHazardRecognizer : public ScheduleHazardRecognizer {
- /// Class - A simple classification for SUnits.
- enum Class {
- Other, Load, Store
- };
-
- /// Window - The Class values of the most recently issued
- /// instructions.
- Class Window[8];
-
- /// getClass - Classify the given SUnit.
- Class getClass(const SUnit *SU) {
- const MachineInstr *MI = SU->getInstr();
- const TargetInstrDesc &TID = MI->getDesc();
- if (TID.mayLoad())
- return Load;
- if (TID.mayStore())
- return Store;
- return Other;
- }
-
- /// Step - Rotate the existing entries in Window and insert the
- /// given class value in position as the most recent.
- void Step(Class C) {
- std::copy(Window+1, array_endof(Window), Window);
- Window[array_lengthof(Window)-1] = C;
- }
-
- public:
- SimpleHazardRecognizer() : Window() {
- Reset();
- }
-
- virtual HazardType getHazardType(SUnit *SU) {
- Class C = getClass(SU);
- if (C == Other)
- return NoHazard;
- unsigned Score = 0;
- for (unsigned i = 0; i != array_lengthof(Window); ++i)
- if (Window[i] == C)
- Score += i + 1;
- if (Score > array_lengthof(Window) * 2)
- return Hazard;
- return NoHazard;
- }
-
- virtual void Reset() {
- for (unsigned i = 0; i != array_lengthof(Window); ++i)
- Window[i] = Other;
- }
-
- virtual void EmitInstruction(SUnit *SU) {
- Step(getClass(SU));
- }
-
- virtual void AdvanceCycle() {
- Step(Other);
- }
- };
-}
-
-#endif
diff --git a/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp b/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp
index ed3c243..e69d3e4 100644
--- a/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp
+++ b/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.cpp
@@ -99,15 +99,23 @@ void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
///
/// This returns true if an interval was modified.
///
-bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
- LiveInterval &IntB,
+bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(const CoalescerPair &CP,
MachineInstr *CopyMI) {
+ // Bail if there is no dst interval - can happen when merging physical subreg
+ // operations.
+ if (!li_->hasInterval(CP.getDstReg()))
+ return false;
+
+ LiveInterval &IntA =
+ li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
+ LiveInterval &IntB =
+ li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
// BValNo is a value number in B that is defined by a copy from A. 'B3' in
// the example above.
LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
- assert(BLR != IntB.end() && "Live range not found!");
+ if (BLR == IntB.end()) return false;
VNInfo *BValNo = BLR->valno;
// Get the location that B is defined at. Two options: either this value has
@@ -119,7 +127,8 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
// AValNo is the value number in A that defines the copy, A3 in the example.
SlotIndex CopyUseIdx = CopyIdx.getUseIndex();
LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
- assert(ALR != IntA.end() && "Live range not found!");
+ // The live range might not exist after fun with physreg coalescing.
+ if (ALR == IntA.end()) return false;
VNInfo *AValNo = ALR->valno;
// If it's re-defined by an early clobber somewhere in the live range, then
// it's not safe to eliminate the copy. FIXME: This is a temporary workaround.
@@ -145,26 +154,21 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
// If AValNo is defined as a copy from IntB, we can potentially process this.
// Get the instruction that defines this value number.
- unsigned SrcReg = li_->getVNInfoSourceReg(AValNo);
- if (!SrcReg) return false; // Not defined by a copy.
-
- // If the value number is not defined by a copy instruction, ignore it.
-
- // If the source register comes from an interval other than IntB, we can't
- // handle this.
- if (SrcReg != IntB.reg) return false;
+ if (!CP.isCoalescable(AValNo->getCopy()))
+ return false;
// Get the LiveRange in IntB that this value number starts with.
LiveInterval::iterator ValLR =
IntB.FindLiveRangeContaining(AValNo->def.getPrevSlot());
- assert(ValLR != IntB.end() && "Live range not found!");
+ if (ValLR == IntB.end())
+ return false;
// Make sure that the end of the live range is inside the same block as
// CopyMI.
MachineInstr *ValLREndInst =
li_->getInstructionFromIndex(ValLR->end.getPrevSlot());
- if (!ValLREndInst ||
- ValLREndInst->getParent() != CopyMI->getParent()) return false;
+ if (!ValLREndInst || ValLREndInst->getParent() != CopyMI->getParent())
+ return false;
// Okay, we now know that ValLR ends in the same block that the CopyMI
// live-range starts. If there are no intervening live ranges between them in
@@ -207,6 +211,8 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
// physreg has sub-registers, update their live intervals as well.
if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
+ if (!li_->hasInterval(*SR))
+ continue;
LiveInterval &SRLI = li_->getInterval(*SR);
SRLI.addRange(LiveRange(FillerStart, FillerEnd,
SRLI.getNextValue(FillerStart, 0, true,
@@ -216,7 +222,6 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
// Okay, merge "B1" into the same value number as "B0".
if (BValNo != ValLR->valno) {
- IntB.addKills(ValLR->valno, BValNo->kills);
IntB.MergeValueNumberInto(BValNo, ValLR->valno);
}
DEBUG({
@@ -230,13 +235,12 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
int UIdx = ValLREndInst->findRegisterUseOperandIdx(IntB.reg, true);
if (UIdx != -1) {
ValLREndInst->getOperand(UIdx).setIsKill(false);
- ValLR->valno->removeKill(FillerStart);
}
// If the copy instruction was killing the destination register before the
// merge, find the last use and trim the live range. That will also add the
// isKill marker.
- if (ALR->valno->isKill(CopyIdx))
+ if (ALR->end == CopyIdx)
TrimLiveIntervalToLastUse(CopyUseIdx, CopyMI->getParent(), IntA, ALR);
++numExtends;
@@ -304,23 +308,31 @@ TransferImplicitOps(MachineInstr *MI, MachineInstr *NewMI) {
///
/// This returns true if an interval was modified.
///
-bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
- LiveInterval &IntB,
+bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP,
MachineInstr *CopyMI) {
- SlotIndex CopyIdx =
- li_->getInstructionIndex(CopyMI).getDefIndex();
-
// FIXME: For now, only eliminate the copy by commuting its def when the
// source register is a virtual register. We want to guard against cases
// where the copy is a back edge copy and commuting the def lengthen the
// live interval of the source register to the entire loop.
- if (TargetRegisterInfo::isPhysicalRegister(IntA.reg))
+ if (CP.isPhys() && CP.isFlipped())
+ return false;
+
+ // Bail if there is no dst interval.
+ if (!li_->hasInterval(CP.getDstReg()))
return false;
+ SlotIndex CopyIdx =
+ li_->getInstructionIndex(CopyMI).getDefIndex();
+
+ LiveInterval &IntA =
+ li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
+ LiveInterval &IntB =
+ li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
+
// BValNo is a value number in B that is defined by a copy from A. 'B3' in
// the example above.
LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
- assert(BLR != IntB.end() && "Live range not found!");
+ if (BLR == IntB.end()) return false;
VNInfo *BValNo = BLR->valno;
// Get the location that B is defined at. Two options: either this value has
@@ -342,6 +354,8 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
AValNo->isUnused() || AValNo->hasPHIKill())
return false;
MachineInstr *DefMI = li_->getInstructionFromIndex(AValNo->def);
+ if (!DefMI)
+ return false;
const TargetInstrDesc &TID = DefMI->getDesc();
if (!TID.isCommutable())
return false;
@@ -380,7 +394,8 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
// clobbers from the superreg.
if (BHasSubRegs)
for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR)
- if (HasOtherReachingDefs(IntA, li_->getInterval(*SR), AValNo, 0))
+ if (li_->hasInterval(*SR) &&
+ HasOtherReachingDefs(IntA, li_->getInterval(*SR), AValNo, 0))
return false;
// If some of the uses of IntA.reg is already coalesced away, return false.
@@ -413,7 +428,6 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
bool BHasPHIKill = BValNo->hasPHIKill();
SmallVector<VNInfo*, 4> BDeadValNos;
- VNInfo::KillSet BKills;
std::map<SlotIndex, SlotIndex> BExtend;
// If ALR and BLR overlaps and end of BLR extends beyond end of ALR, e.g.
@@ -424,8 +438,6 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
// C = A<kill>
// ...
// = B
- //
- // then do not add kills of A to the newly created B interval.
bool Extended = BLR->end > ALR->end && ALR->end != ALR->start;
if (Extended)
BExtend[ALR->end] = BLR->end;
@@ -448,34 +460,38 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
if (ULR == IntA.end() || ULR->valno != AValNo)
continue;
- UseMO.setReg(NewReg);
+ if (TargetRegisterInfo::isPhysicalRegister(NewReg))
+ UseMO.substPhysReg(NewReg, *tri_);
+ else
+ UseMO.setReg(NewReg);
if (UseMI == CopyMI)
continue;
if (UseMO.isKill()) {
if (Extended)
UseMO.setIsKill(false);
- else
- BKills.push_back(UseIdx.getDefIndex());
}
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (!tii_->isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
+ if (UseMI->isCopy()) {
+ if (UseMI->getOperand(0).getReg() != IntB.reg ||
+ UseMI->getOperand(0).getSubReg())
+ continue;
+ } else if (tii_->isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)){
+ if (DstReg != IntB.reg || DstSubIdx)
+ continue;
+ } else
continue;
- if (DstReg == IntB.reg && DstSubIdx == 0) {
- // This copy will become a noop. If it's defining a new val#,
- // remove that val# as well. However this live range is being
- // extended to the end of the existing live range defined by the copy.
- SlotIndex DefIdx = UseIdx.getDefIndex();
- const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx);
- BHasPHIKill |= DLR->valno->hasPHIKill();
- assert(DLR->valno->def == DefIdx);
- BDeadValNos.push_back(DLR->valno);
- BExtend[DLR->start] = DLR->end;
- JoinedCopies.insert(UseMI);
- // If this is a kill but it's going to be removed, the last use
- // of the same val# is the new kill.
- if (UseMO.isKill())
- BKills.pop_back();
- }
+ // This copy will become a noop. If it's defining a new val#,
+ // remove that val# as well. However this live range is being
+ // extended to the end of the existing live range defined by the copy.
+ SlotIndex DefIdx = UseIdx.getDefIndex();
+ const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx);
+ if (!DLR)
+ continue;
+ BHasPHIKill |= DLR->valno->hasPHIKill();
+ assert(DLR->valno->def == DefIdx);
+ BDeadValNos.push_back(DLR->valno);
+ BExtend[DLR->start] = DLR->end;
+ JoinedCopies.insert(UseMI);
}
// We need to insert a new liverange: [ALR.start, LastUse). It may be we can
@@ -490,24 +506,21 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
VNInfo *DeadVNI = BDeadValNos[i];
if (BHasSubRegs) {
for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
+ if (!li_->hasInterval(*SR))
+ continue;
LiveInterval &SRLI = li_->getInterval(*SR);
- const LiveRange *SRLR = SRLI.getLiveRangeContaining(DeadVNI->def);
- SRLI.removeValNo(SRLR->valno);
+ if (const LiveRange *SRLR = SRLI.getLiveRangeContaining(DeadVNI->def))
+ SRLI.removeValNo(SRLR->valno);
}
}
IntB.removeValNo(BDeadValNos[i]);
}
// Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
- // is updated. Kills are also updated.
+ // is updated.
VNInfo *ValNo = BValNo;
ValNo->def = AValNo->def;
ValNo->setCopy(0);
- for (unsigned j = 0, ee = ValNo->kills.size(); j != ee; ++j) {
- if (ValNo->kills[j] != BLR->end)
- BKills.push_back(ValNo->kills[j]);
- }
- ValNo->kills.clear();
for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
AI != AE; ++AI) {
if (AI->valno != AValNo) continue;
@@ -517,18 +530,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
if (EI != BExtend.end())
End = EI->second;
IntB.addRange(LiveRange(AI->start, End, ValNo));
-
- // If the IntB live range is assigned to a physical register, and if that
- // physreg has sub-registers, update their live intervals as well.
- if (BHasSubRegs) {
- for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
- LiveInterval &SRLI = li_->getInterval(*SR);
- SRLI.MergeInClobberRange(*li_, AI->start, End,
- li_->getVNInfoAllocator());
- }
- }
}
- IntB.addKills(ValNo, BKills);
ValNo->setHasPHIKill(BHasPHIKill);
DEBUG({
@@ -621,7 +623,11 @@ SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(SlotIndex CopyIdx,
// of last use.
LastUse->setIsKill();
removeRange(li, LastUseIdx.getDefIndex(), LR->end, li_, tri_);
- LR->valno->addKill(LastUseIdx.getDefIndex());
+ if (LastUseMI->isCopy()) {
+ MachineOperand &DefMO = LastUseMI->getOperand(0);
+ if (DefMO.getReg() == li.reg && !DefMO.getSubReg())
+ DefMO.setIsDead();
+ }
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
if (tii_->isMoveInstr(*LastUseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
DstReg == li.reg && DstSubIdx == 0) {
@@ -663,6 +669,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
ValNo->isUnused() || ValNo->hasPHIKill())
return false;
MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def);
+ assert(DefMI && "Defining instruction disappeared");
const TargetInstrDesc &TID = DefMI->getDesc();
if (!TID.isAsCheapAsAMove())
return false;
@@ -701,33 +708,20 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
return false;
}
- SlotIndex DefIdx = CopyIdx.getDefIndex();
- const LiveRange *DLR= li_->getInterval(DstReg).getLiveRangeContaining(DefIdx);
- DLR->valno->setCopy(0);
- // Don't forget to update sub-register intervals.
- if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
- for (const unsigned* SR = tri_->getSubRegisters(DstReg); *SR; ++SR) {
- if (!li_->hasInterval(*SR))
- continue;
- const LiveRange *DLR =
- li_->getInterval(*SR).getLiveRangeContaining(DefIdx);
- if (DLR && DLR->valno->getCopy() == CopyMI)
- DLR->valno->setCopy(0);
- }
- }
+ RemoveCopyFlag(DstReg, CopyMI);
// If copy kills the source register, find the last use and propagate
// kill.
bool checkForDeadDef = false;
MachineBasicBlock *MBB = CopyMI->getParent();
- if (SrcLR->valno->isKill(DefIdx))
+ if (SrcLR->end == CopyIdx.getDefIndex())
if (!TrimLiveIntervalToLastUse(CopyIdx, MBB, SrcInt, SrcLR)) {
checkForDeadDef = true;
}
MachineBasicBlock::iterator MII =
llvm::next(MachineBasicBlock::iterator(CopyMI));
- tii_->reMaterialize(*MBB, MII, DstReg, DstSubIdx, DefMI, tri_);
+ tii_->reMaterialize(*MBB, MII, DstReg, DstSubIdx, DefMI, *tri_);
MachineInstr *NewMI = prior(MII);
if (checkForDeadDef) {
@@ -747,24 +741,8 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
MachineOperand &MO = CopyMI->getOperand(i);
if (MO.isReg() && MO.isImplicit())
NewMI->addOperand(MO);
- if (MO.isDef() && li_->hasInterval(MO.getReg())) {
- unsigned Reg = MO.getReg();
- const LiveRange *DLR =
- li_->getInterval(Reg).getLiveRangeContaining(DefIdx);
- if (DLR && DLR->valno->getCopy() == CopyMI)
- DLR->valno->setCopy(0);
- // Handle subregs as well
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- for (const unsigned* SR = tri_->getSubRegisters(Reg); *SR; ++SR) {
- if (!li_->hasInterval(*SR))
- continue;
- const LiveRange *DLR =
- li_->getInterval(*SR).getLiveRangeContaining(DefIdx);
- if (DLR && DLR->valno->getCopy() == CopyMI)
- DLR->valno->setCopy(0);
- }
- }
- }
+ if (MO.isDef())
+ RemoveCopyFlag(MO.getReg(), CopyMI);
}
TransferImplicitOps(CopyMI, NewMI);
@@ -783,84 +761,72 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
/// being updated is not zero, make sure to set it to the correct physical
/// subregister.
void
-SimpleRegisterCoalescing::UpdateRegDefsUses(unsigned SrcReg, unsigned DstReg,
- unsigned SubIdx) {
- bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
- if (DstIsPhys && SubIdx) {
- // Figure out the real physical register we are updating with.
- DstReg = tri_->getSubReg(DstReg, SubIdx);
- SubIdx = 0;
- }
-
- // Copy the register use-list before traversing it. We may be adding operands
- // and invalidating pointers.
- SmallVector<std::pair<MachineInstr*, unsigned>, 32> reglist;
- for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg),
- E = mri_->reg_end(); I != E; ++I)
- reglist.push_back(std::make_pair(&*I, I.getOperandNo()));
-
- for (unsigned N=0; N != reglist.size(); ++N) {
- MachineInstr *UseMI = reglist[N].first;
- MachineOperand &O = UseMI->getOperand(reglist[N].second);
- unsigned OldSubIdx = O.getSubReg();
+SimpleRegisterCoalescing::UpdateRegDefsUses(const CoalescerPair &CP) {
+ bool DstIsPhys = CP.isPhys();
+ unsigned SrcReg = CP.getSrcReg();
+ unsigned DstReg = CP.getDstReg();
+ unsigned SubIdx = CP.getSubIdx();
+
+ for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg);
+ MachineInstr *UseMI = I.skipInstruction();) {
+ // A PhysReg copy that won't be coalesced can perhaps be rematerialized
+ // instead.
if (DstIsPhys) {
- unsigned UseDstReg = DstReg;
- if (OldSubIdx)
- UseDstReg = tri_->getSubReg(DstReg, OldSubIdx);
-
unsigned CopySrcReg, CopyDstReg, CopySrcSubIdx, CopyDstSubIdx;
if (tii_->isMoveInstr(*UseMI, CopySrcReg, CopyDstReg,
CopySrcSubIdx, CopyDstSubIdx) &&
- CopySrcSubIdx == 0 &&
- CopyDstSubIdx == 0 &&
- CopySrcReg != CopyDstReg &&
- CopySrcReg == SrcReg && CopyDstReg != UseDstReg) {
- // If the use is a copy and it won't be coalesced away, and its source
- // is defined by a trivial computation, try to rematerialize it instead.
- if (!JoinedCopies.count(UseMI) &&
- ReMaterializeTrivialDef(li_->getInterval(SrcReg), CopyDstReg,
- CopyDstSubIdx, UseMI))
- continue;
- }
+ CopySrcSubIdx == 0 && CopyDstSubIdx == 0 &&
+ CopySrcReg != CopyDstReg && CopySrcReg == SrcReg &&
+ CopyDstReg != DstReg && !JoinedCopies.count(UseMI) &&
+ ReMaterializeTrivialDef(li_->getInterval(SrcReg), CopyDstReg, 0,
+ UseMI))
+ continue;
- O.setReg(UseDstReg);
- O.setSubReg(0);
- if (OldSubIdx) {
- // Def and kill of subregister of a virtual register actually defs and
- // kills the whole register. Add imp-defs and imp-kills as needed.
- if (O.isDef()) {
- if(O.isDead())
- UseMI->addRegisterDead(DstReg, tri_, true);
- else
- UseMI->addRegisterDefined(DstReg, tri_);
- } else if (!O.isUndef() &&
- (O.isKill() ||
- UseMI->isRegTiedToDefOperand(&O-&UseMI->getOperand(0))))
- UseMI->addRegisterKilled(DstReg, tri_, true);
- }
+ if (UseMI->isCopy() &&
+ !UseMI->getOperand(1).getSubReg() &&
+ !UseMI->getOperand(0).getSubReg() &&
+ UseMI->getOperand(1).getReg() == SrcReg &&
+ UseMI->getOperand(0).getReg() != SrcReg &&
+ UseMI->getOperand(0).getReg() != DstReg &&
+ !JoinedCopies.count(UseMI) &&
+ ReMaterializeTrivialDef(li_->getInterval(SrcReg),
+ UseMI->getOperand(0).getReg(), 0, UseMI))
+ continue;
+ }
- DEBUG({
- dbgs() << "\t\tupdated: ";
- if (!UseMI->isDebugValue())
- dbgs() << li_->getInstructionIndex(UseMI) << "\t";
- dbgs() << *UseMI;
- });
- continue;
+ SmallVector<unsigned,8> Ops;
+ bool Reads, Writes;
+ tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
+ bool Kills = false, Deads = false;
+
+ // Replace SrcReg with DstReg in all UseMI operands.
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ MachineOperand &MO = UseMI->getOperand(Ops[i]);
+ Kills |= MO.isKill();
+ Deads |= MO.isDead();
+
+ if (DstIsPhys)
+ MO.substPhysReg(DstReg, *tri_);
+ else
+ MO.substVirtReg(DstReg, SubIdx, *tri_);
}
- // Sub-register indexes goes from small to large. e.g.
- // RAX: 1 -> AL, 2 -> AX, 3 -> EAX
- // EAX: 1 -> AL, 2 -> AX
- // So RAX's sub-register 2 is AX, RAX's sub-regsiter 3 is EAX, whose
- // sub-register 2 is also AX.
- //
- // FIXME: Properly compose subreg indices for all targets.
- //
- if (SubIdx && OldSubIdx && SubIdx != OldSubIdx)
- ;
- else if (SubIdx)
- O.setSubReg(SubIdx);
- O.setReg(DstReg);
+ // This instruction is a copy that will be removed.
+ if (JoinedCopies.count(UseMI))
+ continue;
+
+ if (SubIdx) {
+ // If UseMI was a simple SrcReg def, make sure we didn't turn it into a
+ // read-modify-write of DstReg.
+ if (Deads)
+ UseMI->addRegisterDead(DstReg, tri_);
+ else if (!Reads && Writes)
+ UseMI->addRegisterDefined(DstReg, tri_);
+
+ // Kill flags apply to the whole physical register.
+ if (DstIsPhys && Kills)
+ UseMI->addRegisterKilled(DstReg, tri_);
+ }
DEBUG({
dbgs() << "\t\tupdated: ";
@@ -869,15 +835,15 @@ SimpleRegisterCoalescing::UpdateRegDefsUses(unsigned SrcReg, unsigned DstReg,
dbgs() << *UseMI;
});
+
// After updating the operand, check if the machine instruction has
// become a copy. If so, update its val# information.
- if (JoinedCopies.count(UseMI))
+ const TargetInstrDesc &TID = UseMI->getDesc();
+ if (DstIsPhys || TID.getNumDefs() != 1 || TID.getNumOperands() <= 2)
continue;
- const TargetInstrDesc &TID = UseMI->getDesc();
unsigned CopySrcReg, CopyDstReg, CopySrcSubIdx, CopyDstSubIdx;
- if (TID.getNumDefs() == 1 && TID.getNumOperands() > 2 &&
- tii_->isMoveInstr(*UseMI, CopySrcReg, CopyDstReg,
+ if (tii_->isMoveInstr(*UseMI, CopySrcReg, CopyDstReg,
CopySrcSubIdx, CopyDstSubIdx) &&
CopySrcReg != CopyDstReg &&
(TargetRegisterInfo::isVirtualRegister(CopyDstReg) ||
@@ -945,6 +911,27 @@ bool SimpleRegisterCoalescing::RemoveDeadDef(LiveInterval &li,
return removeIntervalIfEmpty(li, li_, tri_);
}
+void SimpleRegisterCoalescing::RemoveCopyFlag(unsigned DstReg,
+ const MachineInstr *CopyMI) {
+ SlotIndex DefIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
+ if (li_->hasInterval(DstReg)) {
+ LiveInterval &LI = li_->getInterval(DstReg);
+ if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
+ if (LR->valno->getCopy() == CopyMI)
+ LR->valno->setCopy(0);
+ }
+ if (!TargetRegisterInfo::isPhysicalRegister(DstReg))
+ return;
+ for (const unsigned* AS = tri_->getAliasSet(DstReg); *AS; ++AS) {
+ if (!li_->hasInterval(*AS))
+ continue;
+ LiveInterval &LI = li_->getInterval(*AS);
+ if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
+ if (LR->valno->getCopy() == CopyMI)
+ LR->valno->setCopy(0);
+ }
+}
+
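RemoveCopyFlag walks tri_->getAliasSet(DstReg), which follows the 0-terminated array idiom used throughout the register-info interfaces. A runnable standalone sketch with made-up register numbers:

    #include <cstdio>

    int main() {
      const unsigned AliasSet[] = {17, 42, 0};   // hypothetical alias regs
      for (const unsigned *AS = AliasSet; *AS; ++AS)
        std::printf("alias reg %u\n", *AS);      // stops at the 0 sentinel
      return 0;
    }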
/// PropagateDeadness - Propagate the dead marker to the instruction which
/// defines the val#.
static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
@@ -978,8 +965,8 @@ SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
// Live-in to the function but dead. Remove it from entry live-in set.
if (mf_->begin()->isLiveIn(li.reg))
mf_->begin()->removeLiveIn(li.reg);
- const LiveRange *LR = li.getLiveRangeContaining(CopyIdx);
- removeRange(li, LR->start, LR->end, li_, tri_);
+ if (const LiveRange *LR = li.getLiveRangeContaining(CopyIdx))
+ removeRange(li, LR->start, LR->end, li_, tri_);
return removeIntervalIfEmpty(li, li_, tri_);
}
@@ -1017,147 +1004,12 @@ SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
// val#, then propagate the dead marker.
PropagateDeadness(li, CopyMI, RemoveStart, li_, tri_);
++numDeadValNo;
-
- if (LR->valno->isKill(RemoveEnd))
- LR->valno->removeKill(RemoveEnd);
}
removeRange(li, RemoveStart, RemoveEnd, li_, tri_);
return removeIntervalIfEmpty(li, li_, tri_);
}
-/// CanCoalesceWithImpDef - Returns true if the specified copy instruction
-/// from an implicit def to another register can be coalesced away.
-bool SimpleRegisterCoalescing::CanCoalesceWithImpDef(MachineInstr *CopyMI,
- LiveInterval &li,
- LiveInterval &ImpLi) const{
- if (!CopyMI->killsRegister(ImpLi.reg))
- return false;
- // Make sure this is the only use.
- for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(ImpLi.reg),
- UE = mri_->use_end(); UI != UE;) {
- MachineInstr *UseMI = &*UI;
- ++UI;
- if (CopyMI == UseMI || JoinedCopies.count(UseMI))
- continue;
- return false;
- }
- return true;
-}
-
-
-/// isWinToJoinVRWithSrcPhysReg - Return true if it's worth while to join a
-/// a virtual destination register with physical source register.
-bool
-SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
- MachineBasicBlock *CopyMBB,
- LiveInterval &DstInt,
- LiveInterval &SrcInt) {
- // If the virtual register live interval is long but it has low use desity,
- // do not join them, instead mark the physical register as its allocation
- // preference.
- const TargetRegisterClass *RC = mri_->getRegClass(DstInt.reg);
- unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
- unsigned Length = li_->getApproximateInstructionCount(DstInt);
- if (Length > Threshold &&
- std::distance(mri_->use_nodbg_begin(DstInt.reg),
- mri_->use_nodbg_end()) * Threshold < Length)
- return false;
-
- // If the virtual register live interval extends into a loop, turn down
- // aggressiveness.
- SlotIndex CopyIdx =
- li_->getInstructionIndex(CopyMI).getDefIndex();
- const MachineLoop *L = loopInfo->getLoopFor(CopyMBB);
- if (!L) {
- // Let's see if the virtual register live interval extends into the loop.
- LiveInterval::iterator DLR = DstInt.FindLiveRangeContaining(CopyIdx);
- assert(DLR != DstInt.end() && "Live range not found!");
- DLR = DstInt.FindLiveRangeContaining(DLR->end.getNextSlot());
- if (DLR != DstInt.end()) {
- CopyMBB = li_->getMBBFromIndex(DLR->start);
- L = loopInfo->getLoopFor(CopyMBB);
- }
- }
-
- if (!L || Length <= Threshold)
- return true;
-
- SlotIndex UseIdx = CopyIdx.getUseIndex();
- LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
- MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
- if (loopInfo->getLoopFor(SMBB) != L) {
- if (!loopInfo->isLoopHeader(CopyMBB))
- return false;
- // If vr's live interval extends pass the loop header, do not join.
- for (MachineBasicBlock::succ_iterator SI = CopyMBB->succ_begin(),
- SE = CopyMBB->succ_end(); SI != SE; ++SI) {
- MachineBasicBlock *SuccMBB = *SI;
- if (SuccMBB == CopyMBB)
- continue;
- if (DstInt.overlaps(li_->getMBBStartIdx(SuccMBB),
- li_->getMBBEndIdx(SuccMBB)))
- return false;
- }
- }
- return true;
-}
-
-/// isWinToJoinVRWithDstPhysReg - Return true if it's worth while to join a
-/// copy from a virtual source register to a physical destination register.
-bool
-SimpleRegisterCoalescing::isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,
- MachineBasicBlock *CopyMBB,
- LiveInterval &DstInt,
- LiveInterval &SrcInt) {
- // If the virtual register live interval is long but it has low use density,
- // do not join them, instead mark the physical register as its allocation
- // preference.
- const TargetRegisterClass *RC = mri_->getRegClass(SrcInt.reg);
- unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
- unsigned Length = li_->getApproximateInstructionCount(SrcInt);
- if (Length > Threshold &&
- std::distance(mri_->use_nodbg_begin(SrcInt.reg),
- mri_->use_nodbg_end()) * Threshold < Length)
- return false;
-
- if (SrcInt.empty())
- // Must be implicit_def.
- return false;
-
- // If the virtual register live interval is defined or cross a loop, turn
- // down aggressiveness.
- SlotIndex CopyIdx =
- li_->getInstructionIndex(CopyMI).getDefIndex();
- SlotIndex UseIdx = CopyIdx.getUseIndex();
- LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
- assert(SLR != SrcInt.end() && "Live range not found!");
- SLR = SrcInt.FindLiveRangeContaining(SLR->start.getPrevSlot());
- if (SLR == SrcInt.end())
- return true;
- MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
- const MachineLoop *L = loopInfo->getLoopFor(SMBB);
-
- if (!L || Length <= Threshold)
- return true;
-
- if (loopInfo->getLoopFor(CopyMBB) != L) {
- if (SMBB != L->getLoopLatch())
- return false;
- // If vr's live interval is extended from before the loop latch, do not
- // join.
- for (MachineBasicBlock::pred_iterator PI = SMBB->pred_begin(),
- PE = SMBB->pred_end(); PI != PE; ++PI) {
- MachineBasicBlock *PredMBB = *PI;
- if (PredMBB == SMBB)
- continue;
- if (SrcInt.overlaps(li_->getMBBStartIdx(PredMBB),
- li_->getMBBEndIdx(PredMBB)))
- return false;
- }
- }
- return true;
-}
/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
/// two virtual registers from different register classes.
@@ -1203,157 +1055,6 @@ SimpleRegisterCoalescing::isWinToJoinCrossClass(unsigned SrcReg,
return true;
}
-/// HasIncompatibleSubRegDefUse - If we are trying to coalesce a virtual
-/// register with a physical register, check if any of the virtual register
-/// operand is a sub-register use or def. If so, make sure it won't result
-/// in an illegal extract_subreg or insert_subreg instruction. e.g.
-/// vr1024 = extract_subreg vr1025, 1
-/// ...
-/// vr1024 = mov8rr AH
-/// If vr1024 is coalesced with AH, the extract_subreg is now illegal since
-/// AH does not have a super-reg whose sub-register 1 is AH.
-bool
-SimpleRegisterCoalescing::HasIncompatibleSubRegDefUse(MachineInstr *CopyMI,
- unsigned VirtReg,
- unsigned PhysReg) {
- for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(VirtReg),
- E = mri_->reg_end(); I != E; ++I) {
- MachineOperand &O = I.getOperand();
- if (O.isDebug())
- continue;
- MachineInstr *MI = &*I;
- if (MI == CopyMI || JoinedCopies.count(MI))
- continue;
- unsigned SubIdx = O.getSubReg();
- if (SubIdx && !tri_->getSubReg(PhysReg, SubIdx))
- return true;
- if (MI->isExtractSubreg()) {
- SubIdx = MI->getOperand(2).getImm();
- if (O.isUse() && !tri_->getSubReg(PhysReg, SubIdx))
- return true;
- if (O.isDef()) {
- unsigned SrcReg = MI->getOperand(1).getReg();
- const TargetRegisterClass *RC =
- TargetRegisterInfo::isPhysicalRegister(SrcReg)
- ? tri_->getPhysicalRegisterRegClass(SrcReg)
- : mri_->getRegClass(SrcReg);
- if (!tri_->getMatchingSuperReg(PhysReg, SubIdx, RC))
- return true;
- }
- }
- if (MI->isInsertSubreg() || MI->isSubregToReg()) {
- SubIdx = MI->getOperand(3).getImm();
- if (VirtReg == MI->getOperand(0).getReg()) {
- if (!tri_->getSubReg(PhysReg, SubIdx))
- return true;
- } else {
- unsigned DstReg = MI->getOperand(0).getReg();
- const TargetRegisterClass *RC =
- TargetRegisterInfo::isPhysicalRegister(DstReg)
- ? tri_->getPhysicalRegisterRegClass(DstReg)
- : mri_->getRegClass(DstReg);
- if (!tri_->getMatchingSuperReg(PhysReg, SubIdx, RC))
- return true;
- }
- }
- }
- return false;
-}
-
-
-/// CanJoinExtractSubRegToPhysReg - Return true if it's possible to coalesce
-/// an extract_subreg where dst is a physical register, e.g.
-/// cl = EXTRACT_SUBREG reg1024, 1
-bool
-SimpleRegisterCoalescing::CanJoinExtractSubRegToPhysReg(unsigned DstReg,
- unsigned SrcReg, unsigned SubIdx,
- unsigned &RealDstReg) {
- const TargetRegisterClass *RC = mri_->getRegClass(SrcReg);
- RealDstReg = tri_->getMatchingSuperReg(DstReg, SubIdx, RC);
- if (!RealDstReg) {
- DEBUG(dbgs() << "\tIncompatible source regclass: "
- << "none of the super-registers of " << tri_->getName(DstReg)
- << " are in " << RC->getName() << ".\n");
- return false;
- }
-
- LiveInterval &RHS = li_->getInterval(SrcReg);
- // For this type of EXTRACT_SUBREG, conservatively
- // check if the live interval of the source register interfere with the
- // actual super physical register we are trying to coalesce with.
- if (li_->hasInterval(RealDstReg) &&
- RHS.overlaps(li_->getInterval(RealDstReg))) {
- DEBUG({
- dbgs() << "\t\tInterfere with register ";
- li_->getInterval(RealDstReg).print(dbgs(), tri_);
- });
- return false; // Not coalescable
- }
- for (const unsigned* SR = tri_->getSubRegisters(RealDstReg); *SR; ++SR)
- // Do not check DstReg or its sub-register. JoinIntervals() will take care
- // of that.
- if (*SR != DstReg &&
- !tri_->isSubRegister(DstReg, *SR) &&
- li_->hasInterval(*SR) && RHS.overlaps(li_->getInterval(*SR))) {
- DEBUG({
- dbgs() << "\t\tInterfere with sub-register ";
- li_->getInterval(*SR).print(dbgs(), tri_);
- });
- return false; // Not coalescable
- }
- return true;
-}
-
-/// CanJoinInsertSubRegToPhysReg - Return true if it's possible to coalesce
-/// an insert_subreg where src is a physical register, e.g.
-/// reg1024 = INSERT_SUBREG reg1024, c1, 0
-bool
-SimpleRegisterCoalescing::CanJoinInsertSubRegToPhysReg(unsigned DstReg,
- unsigned SrcReg, unsigned SubIdx,
- unsigned &RealSrcReg) {
- const TargetRegisterClass *RC = mri_->getRegClass(DstReg);
- RealSrcReg = tri_->getMatchingSuperReg(SrcReg, SubIdx, RC);
- if (!RealSrcReg) {
- DEBUG(dbgs() << "\tIncompatible destination regclass: "
- << "none of the super-registers of " << tri_->getName(SrcReg)
- << " are in " << RC->getName() << ".\n");
- return false;
- }
-
- LiveInterval &LHS = li_->getInterval(DstReg);
- if (li_->hasInterval(RealSrcReg) &&
- LHS.overlaps(li_->getInterval(RealSrcReg))) {
- DEBUG({
- dbgs() << "\t\tInterfere with register ";
- li_->getInterval(RealSrcReg).print(dbgs(), tri_);
- });
- return false; // Not coalescable
- }
- for (const unsigned* SR = tri_->getSubRegisters(RealSrcReg); *SR; ++SR)
- // Do not check SrcReg or its sub-register. JoinIntervals() will take care
- // of that.
- if (*SR != SrcReg &&
- !tri_->isSubRegister(SrcReg, *SR) &&
- li_->hasInterval(*SR) && LHS.overlaps(li_->getInterval(*SR))) {
- DEBUG({
- dbgs() << "\t\tInterfere with sub-register ";
- li_->getInterval(*SR).print(dbgs(), tri_);
- });
- return false; // Not coalescable
- }
- return true;
-}
-
-/// getRegAllocPreference - Return register allocation preference register.
-///
-static unsigned getRegAllocPreference(unsigned Reg, MachineFunction &MF,
- MachineRegisterInfo *MRI,
- const TargetRegisterInfo *TRI) {
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
- return 0;
- std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
- return TRI->ResolveRegAllocHint(Hint.first, Hint.second, MF);
-}
/// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
/// which are the src/dst of the copy instruction CopyMI. This returns true
@@ -1369,354 +1070,97 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
DEBUG(dbgs() << li_->getInstructionIndex(CopyMI) << '\t' << *CopyMI);
- unsigned SrcReg, DstReg, SrcSubIdx = 0, DstSubIdx = 0;
- bool isExtSubReg = CopyMI->isExtractSubreg();
- bool isInsSubReg = CopyMI->isInsertSubreg();
- bool isSubRegToReg = CopyMI->isSubregToReg();
- unsigned SubIdx = 0;
- if (isExtSubReg) {
- DstReg = CopyMI->getOperand(0).getReg();
- DstSubIdx = CopyMI->getOperand(0).getSubReg();
- SrcReg = CopyMI->getOperand(1).getReg();
- SrcSubIdx = CopyMI->getOperand(2).getImm();
- } else if (isInsSubReg || isSubRegToReg) {
- DstReg = CopyMI->getOperand(0).getReg();
- DstSubIdx = CopyMI->getOperand(3).getImm();
- SrcReg = CopyMI->getOperand(2).getReg();
- SrcSubIdx = CopyMI->getOperand(2).getSubReg();
- if (SrcSubIdx && SrcSubIdx != DstSubIdx) {
- // r1025 = INSERT_SUBREG r1025, r1024<2>, 2 Then r1024 has already been
- // coalesced to a larger register so the subreg indices cancel out.
- DEBUG(dbgs() << "\tSource of insert_subreg or subreg_to_reg is already "
- "coalesced to another register.\n");
- return false; // Not coalescable.
- }
- } else if (tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
- if (SrcSubIdx && DstSubIdx && SrcSubIdx != DstSubIdx) {
- // e.g. %reg16404:1<def> = MOV8rr %reg16412:2<kill>
- Again = true;
- return false; // Not coalescable.
- }
- } else {
- llvm_unreachable("Unrecognized copy instruction!");
+ CoalescerPair CP(*tii_, *tri_);
+ if (!CP.setRegisters(CopyMI)) {
+ DEBUG(dbgs() << "\tNot coalescable.\n");
+ return false;
}
// If they are already joined we continue.
- if (SrcReg == DstReg) {
+ if (CP.getSrcReg() == CP.getDstReg()) {
DEBUG(dbgs() << "\tCopy already coalesced.\n");
return false; // Not coalescable.
}
- bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
- bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
-
- // If they are both physical registers, we cannot join them.
- if (SrcIsPhys && DstIsPhys) {
- DEBUG(dbgs() << "\tCan not coalesce physregs.\n");
- return false; // Not coalescable.
- }
-
- // We only join virtual registers with allocatable physical registers.
- if (SrcIsPhys && !allocatableRegs_[SrcReg]) {
- DEBUG(dbgs() << "\tSrc reg is unallocatable physreg.\n");
- return false; // Not coalescable.
- }
- if (DstIsPhys && !allocatableRegs_[DstReg]) {
- DEBUG(dbgs() << "\tDst reg is unallocatable physreg.\n");
- return false; // Not coalescable.
- }
-
- // We cannot handle dual subreg indices and mismatched classes at the same
- // time.
- if (SrcSubIdx && DstSubIdx && differingRegisterClasses(SrcReg, DstReg)) {
- DEBUG(dbgs() << "\tCannot handle subreg indices and mismatched classes.\n");
- return false;
- }
+ DEBUG(dbgs() << "\tConsidering merging %reg" << CP.getSrcReg());
- // Check that a physical source register is compatible with dst regclass
- if (SrcIsPhys) {
- unsigned SrcSubReg = SrcSubIdx ?
- tri_->getSubReg(SrcReg, SrcSubIdx) : SrcReg;
- const TargetRegisterClass *DstRC = mri_->getRegClass(DstReg);
- const TargetRegisterClass *DstSubRC = DstRC;
- if (DstSubIdx)
- DstSubRC = DstRC->getSubRegisterRegClass(DstSubIdx);
- assert(DstSubRC && "Illegal subregister index");
- if (!DstSubRC->contains(SrcSubReg)) {
- DEBUG(dbgs() << "\tIncompatible destination regclass: "
- << "none of the super-registers of "
- << tri_->getName(SrcSubReg) << " are in "
- << DstSubRC->getName() << ".\n");
- return false; // Not coalescable.
- }
- }
-
- // Check that a physical dst register is compatible with source regclass
- if (DstIsPhys) {
- unsigned DstSubReg = DstSubIdx ?
- tri_->getSubReg(DstReg, DstSubIdx) : DstReg;
- const TargetRegisterClass *SrcRC = mri_->getRegClass(SrcReg);
- const TargetRegisterClass *SrcSubRC = SrcRC;
- if (SrcSubIdx)
- SrcSubRC = SrcRC->getSubRegisterRegClass(SrcSubIdx);
- assert(SrcSubRC && "Illegal subregister index");
- if (!SrcSubRC->contains(DstSubReg)) {
- DEBUG(dbgs() << "\tIncompatible source regclass: "
- << "none of the super-registers of "
- << tri_->getName(DstSubReg) << " are in "
- << SrcSubRC->getName() << ".\n");
- (void)DstSubReg;
- return false; // Not coalescable.
+ // Enforce policies.
+ if (CP.isPhys()) {
+ DEBUG(dbgs() <<" with physreg %" << tri_->getName(CP.getDstReg()) << "\n");
+ // Only coalesce to allocatable physreg.
+ if (!allocatableRegs_[CP.getDstReg()]) {
+ DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n");
+ return false; // Not coalescable.
}
- }
-
- // Should be non-null only when coalescing to a sub-register class.
- bool CrossRC = false;
- const TargetRegisterClass *SrcRC= SrcIsPhys ? 0 : mri_->getRegClass(SrcReg);
- const TargetRegisterClass *DstRC= DstIsPhys ? 0 : mri_->getRegClass(DstReg);
- const TargetRegisterClass *NewRC = NULL;
- unsigned RealDstReg = 0;
- unsigned RealSrcReg = 0;
- if (isExtSubReg || isInsSubReg || isSubRegToReg) {
- SubIdx = CopyMI->getOperand(isExtSubReg ? 2 : 3).getImm();
- if (SrcIsPhys && isExtSubReg) {
- // r1024 = EXTRACT_SUBREG EAX, 0 then r1024 is really going to be
- // coalesced with AX.
- unsigned DstSubIdx = CopyMI->getOperand(0).getSubReg();
- if (DstSubIdx) {
- // r1024<2> = EXTRACT_SUBREG EAX, 2. Then r1024 has already been
- // coalesced to a larger register so the subreg indices cancel out.
- if (DstSubIdx != SubIdx) {
- DEBUG(dbgs() << "\t Sub-register indices mismatch.\n");
- return false; // Not coalescable.
- }
- } else
- SrcReg = tri_->getSubReg(SrcReg, SubIdx);
- SubIdx = 0;
- } else if (DstIsPhys && (isInsSubReg || isSubRegToReg)) {
- // EAX = INSERT_SUBREG EAX, r1024, 0
- unsigned SrcSubIdx = CopyMI->getOperand(2).getSubReg();
- if (SrcSubIdx) {
- // EAX = INSERT_SUBREG EAX, r1024<2>, 2 Then r1024 has already been
- // coalesced to a larger register so the subreg indices cancel out.
- if (SrcSubIdx != SubIdx) {
- DEBUG(dbgs() << "\t Sub-register indices mismatch.\n");
- return false; // Not coalescable.
- }
- } else
- DstReg = tri_->getSubReg(DstReg, SubIdx);
- SubIdx = 0;
- } else if ((DstIsPhys && isExtSubReg) ||
- (SrcIsPhys && (isInsSubReg || isSubRegToReg))) {
- if (!isSubRegToReg && CopyMI->getOperand(1).getSubReg()) {
- DEBUG(dbgs() << "\tSrc of extract_subreg already coalesced with reg"
- << " of a super-class.\n");
- return false; // Not coalescable.
- }
-
- // FIXME: The following checks are somewhat conservative. Perhaps a better
- // way to implement this is to treat this as coalescing a vr with the
- // super physical register.
- if (isExtSubReg) {
- if (!CanJoinExtractSubRegToPhysReg(DstReg, SrcReg, SubIdx, RealDstReg))
- return false; // Not coalescable
- } else {
- if (!CanJoinInsertSubRegToPhysReg(DstReg, SrcReg, SubIdx, RealSrcReg))
- return false; // Not coalescable
- }
- SubIdx = 0;
- } else {
- unsigned OldSubIdx = isExtSubReg ? CopyMI->getOperand(0).getSubReg()
- : CopyMI->getOperand(2).getSubReg();
- if (OldSubIdx) {
- if (OldSubIdx == SubIdx && !differingRegisterClasses(SrcReg, DstReg))
- // r1024<2> = EXTRACT_SUBREG r1025, 2. Then r1024 has already been
- // coalesced to a larger register so the subreg indices cancel out.
- // Also check if the other larger register is of the same register
- // class as the would be resulting register.
- SubIdx = 0;
- else {
- DEBUG(dbgs() << "\t Sub-register indices mismatch.\n");
- return false; // Not coalescable.
- }
- }
- if (SubIdx) {
- if (!DstIsPhys && !SrcIsPhys) {
- if (isInsSubReg || isSubRegToReg) {
- NewRC = tri_->getMatchingSuperRegClass(DstRC, SrcRC, SubIdx);
- } else // extract_subreg {
- NewRC = tri_->getMatchingSuperRegClass(SrcRC, DstRC, SubIdx);
- }
- if (!NewRC) {
- DEBUG(dbgs() << "\t Conflicting sub-register indices.\n");
- return false; // Not coalescable
- }
+ } else {
+ DEBUG({
+ dbgs() << " with reg%" << CP.getDstReg();
+ if (CP.getSubIdx())
+ dbgs() << ":" << tri_->getSubRegIndexName(CP.getSubIdx());
+ dbgs() << " to " << CP.getNewRC()->getName() << "\n";
+ });
- if (!isWinToJoinCrossClass(SrcReg, DstReg, SrcRC, DstRC, NewRC)) {
- DEBUG(dbgs() << "\tAvoid coalescing to constrained register class: "
- << SrcRC->getName() << "/"
- << DstRC->getName() << " -> "
- << NewRC->getName() << ".\n");
- Again = true; // May be possible to coalesce later.
- return false;
- }
- }
- }
- } else if (differingRegisterClasses(SrcReg, DstReg)) {
- if (DisableCrossClassJoin)
- return false;
- CrossRC = true;
-
- // FIXME: What if the result of a EXTRACT_SUBREG is then coalesced
- // with another? If it's the resulting destination register, then
- // the subidx must be propagated to uses (but only those defined
- // by the EXTRACT_SUBREG). If it's being coalesced into another
- // register, it should be safe because register is assumed to have
- // the register class of the super-register.
-
- // Process moves where one of the registers have a sub-register index.
- MachineOperand *DstMO = CopyMI->findRegisterDefOperand(DstReg);
- MachineOperand *SrcMO = CopyMI->findRegisterUseOperand(SrcReg);
- SubIdx = DstMO->getSubReg();
- if (SubIdx) {
- if (SrcMO->getSubReg())
- // FIXME: can we handle this?
+ // Avoid constraining virtual register regclass too much.
+ if (CP.isCrossClass()) {
+ if (DisableCrossClassJoin) {
+ DEBUG(dbgs() << "\tCross-class joins disabled.\n");
return false;
- // This is not an insert_subreg but it looks like one.
- // e.g. %reg1024:4 = MOV32rr %EAX
- isInsSubReg = true;
- if (SrcIsPhys) {
- if (!CanJoinInsertSubRegToPhysReg(DstReg, SrcReg, SubIdx, RealSrcReg))
- return false; // Not coalescable
- SubIdx = 0;
- }
- } else {
- SubIdx = SrcMO->getSubReg();
- if (SubIdx) {
- // This is not a extract_subreg but it looks like one.
- // e.g. %cl = MOV16rr %reg1024:1
- isExtSubReg = true;
- if (DstIsPhys) {
- if (!CanJoinExtractSubRegToPhysReg(DstReg, SrcReg, SubIdx,RealDstReg))
- return false; // Not coalescable
- SubIdx = 0;
- }
- }
- }
-
- // Now determine the register class of the joined register.
- if (!SrcIsPhys && !DstIsPhys) {
- if (isExtSubReg) {
- NewRC =
- SubIdx ? tri_->getMatchingSuperRegClass(SrcRC, DstRC, SubIdx) : SrcRC;
- } else if (isInsSubReg) {
- NewRC =
- SubIdx ? tri_->getMatchingSuperRegClass(DstRC, SrcRC, SubIdx) : DstRC;
- } else {
- NewRC = getCommonSubClass(SrcRC, DstRC);
- }
-
- if (!NewRC) {
- DEBUG(dbgs() << "\tDisjoint regclasses: "
- << SrcRC->getName() << ", "
- << DstRC->getName() << ".\n");
- return false; // Not coalescable.
}
-
- // If we are joining two virtual registers and the resulting register
- // class is more restrictive (fewer register, smaller size). Check if it's
- // worth doing the merge.
- if (!isWinToJoinCrossClass(SrcReg, DstReg, SrcRC, DstRC, NewRC)) {
+ if (!isWinToJoinCrossClass(CP.getSrcReg(), CP.getDstReg(),
+ mri_->getRegClass(CP.getSrcReg()),
+ mri_->getRegClass(CP.getDstReg()),
+ CP.getNewRC())) {
DEBUG(dbgs() << "\tAvoid coalescing to constrained register class: "
- << SrcRC->getName() << "/"
- << DstRC->getName() << " -> "
- << NewRC->getName() << ".\n");
- // Allow the coalescer to try again in case either side gets coalesced to
- // a physical register that's compatible with the other side. e.g.
- // r1024 = MOV32to32_ r1025
- // But later r1024 is assigned EAX then r1025 may be coalesced with EAX.
+ << CP.getNewRC()->getName() << ".\n");
Again = true; // May be possible to coalesce later.
return false;
}
}
- }
-
- // Will it create illegal extract_subreg / insert_subreg?
- if (SrcIsPhys && HasIncompatibleSubRegDefUse(CopyMI, DstReg, SrcReg))
- return false;
- if (DstIsPhys && HasIncompatibleSubRegDefUse(CopyMI, SrcReg, DstReg))
- return false;
-
- LiveInterval &SrcInt = li_->getInterval(SrcReg);
- LiveInterval &DstInt = li_->getInterval(DstReg);
- assert(SrcInt.reg == SrcReg && DstInt.reg == DstReg &&
- "Register mapping is horribly broken!");
- DEBUG({
- dbgs() << "\t\tInspecting ";
- if (SrcRC) dbgs() << SrcRC->getName() << ": ";
- SrcInt.print(dbgs(), tri_);
- dbgs() << "\n\t\t and ";
- if (DstRC) dbgs() << DstRC->getName() << ": ";
- DstInt.print(dbgs(), tri_);
- dbgs() << "\n";
- });
+ // When possible, let DstReg be the larger interval.
+ if (!CP.getSubIdx() && li_->getInterval(CP.getSrcReg()).ranges.size() >
+ li_->getInterval(CP.getDstReg()).ranges.size())
+ CP.flip();
+ }
+
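When no sub-register index is involved, the flip above keeps the interval with more live ranges as the destination: LiveInterval::join merges the source's ranges into the destination, so merging small-into-large is the cheaper direction. A minimal sketch of the criterion, assuming direct access to the ranges vector:

    // Illustrative only: prefer the bigger interval as the join destination.
    static bool shouldFlip(const LiveInterval &Src, const LiveInterval &Dst) {
      return Src.ranges.size() > Dst.ranges.size();
    }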
+ // We need to be careful about coalescing a source physical register with a
+ // virtual register. Once the coalescing is done, it cannot be broken and
+ // these are not spillable! If the destination interval uses are far away,
+ // think twice about coalescing them!
+ // FIXME: Why are we skipping this test for partial copies?
+ // CodeGen/X86/phys_subreg_coalesce-3.ll needs it.
+ if (!CP.isPartial() && CP.isPhys()) {
+ LiveInterval &JoinVInt = li_->getInterval(CP.getSrcReg());
+
+ // Don't join with physregs that have a ridiculous number of live
+ // ranges. The data structure performance is really bad when that
+ // happens.
+ if (li_->hasInterval(CP.getDstReg()) &&
+ li_->getInterval(CP.getDstReg()).ranges.size() > 1000) {
+ mri_->setRegAllocationHint(CP.getSrcReg(), 0, CP.getDstReg());
+ ++numAborts;
+ DEBUG(dbgs()
+ << "\tPhysical register live interval too complicated, abort!\n");
+ return false;
+ }
- // Save a copy of the virtual register live interval. We'll manually
- // merge this into the "real" physical register live interval this is
- // coalesced with.
- OwningPtr<LiveInterval> SavedLI;
- if (RealDstReg)
- SavedLI.reset(li_->dupInterval(&SrcInt));
- else if (RealSrcReg)
- SavedLI.reset(li_->dupInterval(&DstInt));
-
- if (!isExtSubReg && !isInsSubReg && !isSubRegToReg) {
- // Check if it is necessary to propagate "isDead" property.
- MachineOperand *mopd = CopyMI->findRegisterDefOperand(DstReg, false);
- bool isDead = mopd->isDead();
-
- // We need to be careful about coalescing a source physical register with a
- // virtual register. Once the coalescing is done, it cannot be broken and
- // these are not spillable! If the destination interval uses are far away,
- // think twice about coalescing them!
- if (!isDead && (SrcIsPhys || DstIsPhys)) {
- // If the virtual register live interval is long but it has low use
- // density, do not join them, instead mark the physical register as its
- // allocation preference.
- LiveInterval &JoinVInt = SrcIsPhys ? DstInt : SrcInt;
- LiveInterval &JoinPInt = SrcIsPhys ? SrcInt : DstInt;
- unsigned JoinVReg = SrcIsPhys ? DstReg : SrcReg;
- unsigned JoinPReg = SrcIsPhys ? SrcReg : DstReg;
-
- // Don't join with physregs that have a ridiculous number of live
- // ranges. The data structure performance is really bad when that
- // happens.
- if (JoinPInt.ranges.size() > 1000) {
- mri_->setRegAllocationHint(JoinVInt.reg, 0, JoinPReg);
- ++numAborts;
- DEBUG(dbgs()
- << "\tPhysical register live interval too complicated, abort!\n");
- return false;
- }
+ const TargetRegisterClass *RC = mri_->getRegClass(CP.getSrcReg());
+ unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
+ unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
+ if (Length > Threshold &&
+ std::distance(mri_->use_nodbg_begin(CP.getSrcReg()),
+ mri_->use_nodbg_end()) * Threshold < Length) {
+ // Before giving up coalescing, if definition of source is defined by
+ // trivial computation, try rematerializing it.
+ if (!CP.isFlipped() &&
+ ReMaterializeTrivialDef(JoinVInt, CP.getDstReg(), 0, CopyMI))
+ return true;
- const TargetRegisterClass *RC = mri_->getRegClass(JoinVReg);
- unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
- unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
- if (Length > Threshold &&
- std::distance(mri_->use_nodbg_begin(JoinVReg),
- mri_->use_nodbg_end()) * Threshold < Length) {
- // Before giving up coalescing, if definition of source is defined by
- // trivial computation, try rematerializing it.
- if (ReMaterializeTrivialDef(SrcInt, DstReg, DstSubIdx, CopyMI))
- return true;
-
- mri_->setRegAllocationHint(JoinVInt.reg, 0, JoinPReg);
- ++numAborts;
- DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
- Again = true; // May be possible to coalesce later.
- return false;
- }
+ mri_->setRegAllocationHint(CP.getSrcReg(), 0, CP.getDstReg());
+ ++numAborts;
+ DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
+ Again = true; // May be possible to coalesce later.
+ return false;
}
}
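The abort test above is a use-density heuristic. With Threshold set to twice the number of allocatable registers in the class, a long virtual interval is joined to a physreg only if it averages at least one use per Threshold instructions: for a class with 8 allocatable registers, Threshold is 16, so an interval spanning about 100 instructions needs 7 or more non-debug uses to pass. A sketch of the predicate, under a hypothetical helper name:

    // Mirrors the negation of the abort condition above.
    static bool worthJoiningPhysReg(unsigned Length, unsigned Uses,
                                    unsigned Threshold) {
      return Length <= Threshold || Uses * Threshold >= Length;
    }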
@@ -1724,32 +1168,24 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
// Otherwise, if one of the intervals being joined is a physreg, this method
// always canonicalizes DstInt to be it. The output "SrcInt" will not have
// been modified, so we can use this information below to update aliases.
- bool Swapped = false;
- // If SrcInt is implicitly defined, it's safe to coalesce.
- if (SrcInt.empty()) {
- if (!CanCoalesceWithImpDef(CopyMI, DstInt, SrcInt)) {
- // Only coalesce an empty interval (defined by implicit_def) with
- // another interval which has a valno defined by the CopyMI and the CopyMI
- // is a kill of the implicit def.
- DEBUG(dbgs() << "\tNot profitable!\n");
- return false;
- }
- } else if (!JoinIntervals(DstInt, SrcInt, Swapped)) {
+ if (!JoinIntervals(CP)) {
// Coalescing failed.
// If definition of source is defined by trivial computation, try
// rematerializing it.
- if (!isExtSubReg && !isInsSubReg && !isSubRegToReg &&
- ReMaterializeTrivialDef(SrcInt, DstReg, DstSubIdx, CopyMI))
+ if (!CP.isFlipped() &&
+ ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()),
+ CP.getDstReg(), 0, CopyMI))
return true;
// If we can eliminate the copy without merging the live ranges, do so now.
- if (!isExtSubReg && !isInsSubReg && !isSubRegToReg &&
- (AdjustCopiesBackFrom(SrcInt, DstInt, CopyMI) ||
- RemoveCopyByCommutingDef(SrcInt, DstInt, CopyMI))) {
- JoinedCopies.insert(CopyMI);
- DEBUG(dbgs() << "\tTrivial!\n");
- return true;
+ if (!CP.isPartial()) {
+ if (AdjustCopiesBackFrom(CP, CopyMI) ||
+ RemoveCopyByCommutingDef(CP, CopyMI)) {
+ JoinedCopies.insert(CopyMI);
+ DEBUG(dbgs() << "\tTrivial!\n");
+ return true;
+ }
}
// Otherwise, we are unable to join the intervals.
@@ -1758,86 +1194,32 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
return false;
}
- LiveInterval *ResSrcInt = &SrcInt;
- LiveInterval *ResDstInt = &DstInt;
- if (Swapped) {
- std::swap(SrcReg, DstReg);
- std::swap(ResSrcInt, ResDstInt);
- }
- assert(TargetRegisterInfo::isVirtualRegister(SrcReg) &&
- "LiveInterval::join didn't work right!");
-
- // If we're about to merge live ranges into a physical register live interval,
- // we have to update any aliased register's live ranges to indicate that they
- // have clobbered values for this range.
- if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
- // If this is a extract_subreg where dst is a physical register, e.g.
- // cl = EXTRACT_SUBREG reg1024, 1
- // then create and update the actual physical register allocated to RHS.
- if (RealDstReg || RealSrcReg) {
- LiveInterval &RealInt =
- li_->getOrCreateInterval(RealDstReg ? RealDstReg : RealSrcReg);
- for (LiveInterval::const_vni_iterator I = SavedLI->vni_begin(),
- E = SavedLI->vni_end(); I != E; ++I) {
- const VNInfo *ValNo = *I;
- VNInfo *NewValNo = RealInt.getNextValue(ValNo->def, ValNo->getCopy(),
- false, // updated at *
- li_->getVNInfoAllocator());
- NewValNo->setFlags(ValNo->getFlags()); // * updated here.
- RealInt.addKills(NewValNo, ValNo->kills);
- RealInt.MergeValueInAsValue(*SavedLI, ValNo, NewValNo);
- }
- RealInt.weight += SavedLI->weight;
- DstReg = RealDstReg ? RealDstReg : RealSrcReg;
- }
-
- // Update the liveintervals of sub-registers.
- for (const unsigned *AS = tri_->getSubRegisters(DstReg); *AS; ++AS)
- li_->getOrCreateInterval(*AS).MergeInClobberRanges(*li_, *ResSrcInt,
- li_->getVNInfoAllocator());
- }
-
- // If this is a EXTRACT_SUBREG, make sure the result of coalescing is the
- // larger super-register.
- if ((isExtSubReg || isInsSubReg || isSubRegToReg) &&
- !SrcIsPhys && !DstIsPhys) {
- if ((isExtSubReg && !Swapped) ||
- ((isInsSubReg || isSubRegToReg) && Swapped)) {
- ResSrcInt->Copy(*ResDstInt, mri_, li_->getVNInfoAllocator());
- std::swap(SrcReg, DstReg);
- std::swap(ResSrcInt, ResDstInt);
- }
- }
-
// Coalescing to a virtual register that is of a sub-register class of the
// other. Make sure the resulting register is set to the right register class.
- if (CrossRC)
+ if (CP.isCrossClass()) {
++numCrossRCs;
-
- // This may happen even if it's cross-rc coalescing. e.g.
- // %reg1026<def> = SUBREG_TO_REG 0, %reg1037<kill>, 4
- // reg1026 -> GR64, reg1037 -> GR32_ABCD. The resulting register will have to
- // be allocate a register from GR64_ABCD.
- if (NewRC)
- mri_->setRegClass(DstReg, NewRC);
+ mri_->setRegClass(CP.getDstReg(), CP.getNewRC());
+ }
// Remember to delete the copy instruction.
JoinedCopies.insert(CopyMI);
- UpdateRegDefsUses(SrcReg, DstReg, SubIdx);
+ UpdateRegDefsUses(CP);
// If we have extended the live range of a physical register, make sure we
// update live-in lists as well.
- if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
- const LiveInterval &VRegInterval = li_->getInterval(SrcReg);
+ if (CP.isPhys()) {
SmallVector<MachineBasicBlock*, 16> BlockSeq;
- for (LiveInterval::const_iterator I = VRegInterval.begin(),
- E = VRegInterval.end(); I != E; ++I ) {
+ // JoinIntervals invalidates the VNInfos in SrcInt, but we only need the
+ // ranges for this, and they are preserved.
+ LiveInterval &SrcInt = li_->getInterval(CP.getSrcReg());
+ for (LiveInterval::const_iterator I = SrcInt.begin(), E = SrcInt.end();
+ I != E; ++I ) {
li_->findLiveInMBBs(I->start, I->end, BlockSeq);
for (unsigned idx = 0, size = BlockSeq.size(); idx != size; ++idx) {
MachineBasicBlock &block = *BlockSeq[idx];
- if (!block.isLiveIn(DstReg))
- block.addLiveIn(DstReg);
+ if (!block.isLiveIn(CP.getDstReg()))
+ block.addLiveIn(CP.getDstReg());
}
BlockSeq.clear();
}
@@ -1845,32 +1227,17 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
// SrcReg is guaranteed to be the register whose live interval is
// being merged.
- li_->removeInterval(SrcReg);
+ li_->removeInterval(CP.getSrcReg());
// Update regalloc hint.
- tri_->UpdateRegAllocHint(SrcReg, DstReg, *mf_);
-
- // Manually deleted the live interval copy.
- if (SavedLI) {
- SavedLI->clear();
- SavedLI.reset();
- }
-
- // If resulting interval has a preference that no longer fits because of subreg
- // coalescing, just clear the preference.
- unsigned Preference = getRegAllocPreference(ResDstInt->reg, *mf_, mri_, tri_);
- if (Preference && (isExtSubReg || isInsSubReg || isSubRegToReg) &&
- TargetRegisterInfo::isVirtualRegister(ResDstInt->reg)) {
- const TargetRegisterClass *RC = mri_->getRegClass(ResDstInt->reg);
- if (!RC->contains(Preference))
- mri_->setRegAllocationHint(ResDstInt->reg, 0, 0);
- }
+ tri_->UpdateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *mf_);
DEBUG({
- dbgs() << "\t\tJoined. Result = ";
- ResDstInt->print(dbgs(), tri_);
- dbgs() << "\n";
- });
+ LiveInterval &DstInt = li_->getInterval(CP.getDstReg());
+ dbgs() << "\tJoined. Result = ";
+ DstInt.print(dbgs(), tri_);
+ dbgs() << "\n";
+ });
++numJoins;
return true;
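The rewritten JoinCopy funnels all copy analysis through CoalescerPair. Reconstructed from the call sites in this patch rather than the authoritative declaration, the interface amounts to:

    class CoalescerPair {
    public:
      unsigned getSrcReg() const;   // interval that will be merged away
      unsigned getDstReg() const;   // surviving register; the physreg, if any
      unsigned getSubIdx() const;   // sub-register index, or 0
      const TargetRegisterClass *getNewRC() const; // class of the joined vreg
      bool isPhys() const;          // DstReg is a physical register
      bool isCrossClass() const;    // src/dst register classes differ
      bool isPartial() const;       // the copy reads or writes a sub-register
      bool isFlipped() const;       // flip() has swapped src and dst
      void flip();                  // exchange the roles of src and dst
      bool isCoalescable(const MachineInstr*) const; // MI is this same copy
    };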
@@ -1927,263 +1294,53 @@ static unsigned ComputeUltimateVN(VNInfo *VNI,
return ThisValNoAssignments[VN] = UltimateVN;
}
-static bool InVector(VNInfo *Val, const SmallVector<VNInfo*, 8> &V) {
- return std::find(V.begin(), V.end(), Val) != V.end();
-}
-
-static bool isValNoDefMove(const MachineInstr *MI, unsigned DR, unsigned SR,
- const TargetInstrInfo *TII,
- const TargetRegisterInfo *TRI) {
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (TII->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
- ;
- else if (MI->isExtractSubreg()) {
- DstReg = MI->getOperand(0).getReg();
- SrcReg = MI->getOperand(1).getReg();
- } else if (MI->isSubregToReg() ||
- MI->isInsertSubreg()) {
- DstReg = MI->getOperand(0).getReg();
- SrcReg = MI->getOperand(2).getReg();
- } else
- return false;
- return (SrcReg == SR || TRI->isSuperRegister(SR, SrcReg)) &&
- (DstReg == DR || TRI->isSuperRegister(DR, DstReg));
-}
-
-/// RangeIsDefinedByCopyFromReg - Return true if the specified live range of
-/// the specified live interval is defined by a copy from the specified
-/// register.
-bool SimpleRegisterCoalescing::RangeIsDefinedByCopyFromReg(LiveInterval &li,
- LiveRange *LR,
- unsigned Reg) {
- unsigned SrcReg = li_->getVNInfoSourceReg(LR->valno);
- if (SrcReg == Reg)
- return true;
- // FIXME: Do isPHIDef and isDefAccurate both need to be tested?
- if ((LR->valno->isPHIDef() || !LR->valno->isDefAccurate()) &&
- TargetRegisterInfo::isPhysicalRegister(li.reg) &&
- *tri_->getSuperRegisters(li.reg)) {
- // It's a sub-register live interval, we may not have precise information.
- // Re-compute it.
- MachineInstr *DefMI = li_->getInstructionFromIndex(LR->start);
- if (DefMI && isValNoDefMove(DefMI, li.reg, Reg, tii_, tri_)) {
- // Cache computed info.
- LR->valno->def = LR->start;
- LR->valno->setCopy(DefMI);
- return true;
- }
- }
- return false;
-}
-
-
-/// ValueLiveAt - Return true if the LiveRange pointed to by the given
-/// iterator, or any subsequent range with the same value number,
-/// is live at the given point.
-bool SimpleRegisterCoalescing::ValueLiveAt(LiveInterval::iterator LRItr,
- LiveInterval::iterator LREnd,
- SlotIndex defPoint) const {
- for (const VNInfo *valno = LRItr->valno;
- (LRItr != LREnd) && (LRItr->valno == valno); ++LRItr) {
- if (LRItr->contains(defPoint))
- return true;
- }
-
- return false;
-}
-
-
-/// SimpleJoin - Attempt to joint the specified interval into this one. The
-/// caller of this method must guarantee that the RHS only contains a single
-/// value number and that the RHS is not defined by a copy from this
-/// interval. This returns false if the intervals are not joinable, or it
-/// joins them and returns true.
-bool SimpleRegisterCoalescing::SimpleJoin(LiveInterval &LHS, LiveInterval &RHS){
- assert(RHS.containsOneValue());
-
- // Some number (potentially more than one) value numbers in the current
- // interval may be defined as copies from the RHS. Scan the overlapping
- // portions of the LHS and RHS, keeping track of this and looking for
- // overlapping live ranges that are NOT defined as copies. If these exist, we
- // cannot coalesce.
-
- LiveInterval::iterator LHSIt = LHS.begin(), LHSEnd = LHS.end();
- LiveInterval::iterator RHSIt = RHS.begin(), RHSEnd = RHS.end();
-
- if (LHSIt->start < RHSIt->start) {
- LHSIt = std::upper_bound(LHSIt, LHSEnd, RHSIt->start);
- if (LHSIt != LHS.begin()) --LHSIt;
- } else if (RHSIt->start < LHSIt->start) {
- RHSIt = std::upper_bound(RHSIt, RHSEnd, LHSIt->start);
- if (RHSIt != RHS.begin()) --RHSIt;
- }
-
- SmallVector<VNInfo*, 8> EliminatedLHSVals;
-
- while (1) {
- // Determine if these live intervals overlap.
- bool Overlaps = false;
- if (LHSIt->start <= RHSIt->start)
- Overlaps = LHSIt->end > RHSIt->start;
- else
- Overlaps = RHSIt->end > LHSIt->start;
-
- // If the live intervals overlap, there are two interesting cases: if the
- // LHS interval is defined by a copy from the RHS, it's ok and we record
- // that the LHS value # is the same as the RHS. If it's not, then we cannot
- // coalesce these live ranges and we bail out.
- if (Overlaps) {
- // If we haven't already recorded that this value # is safe, check it.
- if (!InVector(LHSIt->valno, EliminatedLHSVals)) {
- // If it's re-defined by an early clobber somewhere in the live range,
- // then conservatively abort coalescing.
- if (LHSIt->valno->hasRedefByEC())
- return false;
- // Copy from the RHS?
- if (!RangeIsDefinedByCopyFromReg(LHS, LHSIt, RHS.reg))
- return false; // Nope, bail out.
-
- if (ValueLiveAt(LHSIt, LHS.end(), RHSIt->valno->def))
- // Here is an interesting situation:
- // BB1:
- // vr1025 = copy vr1024
- // ..
- // BB2:
- // vr1024 = op
- // = vr1025
- // Even though vr1025 is copied from vr1024, it's not safe to
- // coalesce them since the live range of vr1025 intersects the
- // def of vr1024. This happens because vr1025 is assigned the
- // value of the previous iteration of vr1024.
+/// JoinIntervals - Attempt to join these two intervals. On failure, this
+/// returns false.
+bool SimpleRegisterCoalescing::JoinIntervals(CoalescerPair &CP) {
+ LiveInterval &RHS = li_->getInterval(CP.getSrcReg());
+ DEBUG({ dbgs() << "\t\tRHS = "; RHS.print(dbgs(), tri_); dbgs() << "\n"; });
+
+ // If a live interval is a physical register, check for interference with any
+ // aliases. The interference check implemented here is a bit more conservative
+ // than the full interference check below. We allow overlapping live ranges
+ // only when one is a copy of the other.
+ if (CP.isPhys()) {
+ for (const unsigned *AS = tri_->getAliasSet(CP.getDstReg()); *AS; ++AS){
+ if (!li_->hasInterval(*AS))
+ continue;
+ const LiveInterval &LHS = li_->getInterval(*AS);
+ LiveInterval::const_iterator LI = LHS.begin();
+ for (LiveInterval::const_iterator RI = RHS.begin(), RE = RHS.end();
+ RI != RE; ++RI) {
+ LI = std::lower_bound(LI, LHS.end(), RI->start);
+ // Does LHS have an overlapping live range starting before RI?
+ if ((LI != LHS.begin() && LI[-1].end > RI->start) &&
+ (RI->start != RI->valno->def ||
+ !CP.isCoalescable(li_->getInstructionFromIndex(RI->start)))) {
+ DEBUG({
+ dbgs() << "\t\tInterference from alias: ";
+ LHS.print(dbgs(), tri_);
+ dbgs() << "\n\t\tOverlap at " << RI->start << " and no copy.\n";
+ });
return false;
- EliminatedLHSVals.push_back(LHSIt->valno);
- }
-
- // We know this entire LHS live range is okay, so skip it now.
- if (++LHSIt == LHSEnd) break;
- continue;
- }
+ }
- if (LHSIt->end < RHSIt->end) {
- if (++LHSIt == LHSEnd) break;
- } else {
- // One interesting case to check here. It's possible that we have
- // something like "X3 = Y" which defines a new value number in the LHS,
- // and is the last use of this liverange of the RHS. In this case, we
- // want to notice this copy (so that it gets coalesced away) even though
- // the live ranges don't actually overlap.
- if (LHSIt->start == RHSIt->end) {
- if (InVector(LHSIt->valno, EliminatedLHSVals)) {
- // We already know that this value number is going to be merged in
- // if coalescing succeeds. Just skip the liverange.
- if (++LHSIt == LHSEnd) break;
- } else {
- // If it's re-defined by an early clobber somewhere in the live range,
- // then conservatively abort coalescing.
- if (LHSIt->valno->hasRedefByEC())
+ // Check that LHS ranges beginning in this range are copies.
+ for (; LI != LHS.end() && LI->start < RI->end; ++LI) {
+ if (LI->start != LI->valno->def ||
+ !CP.isCoalescable(li_->getInstructionFromIndex(LI->start))) {
+ DEBUG({
+ dbgs() << "\t\tInterference from alias: ";
+ LHS.print(dbgs(), tri_);
+ dbgs() << "\n\t\tDef at " << LI->start << " is not a copy.\n";
+ });
return false;
- // Otherwise, if this is a copy from the RHS, mark it as being merged
- // in.
- if (RangeIsDefinedByCopyFromReg(LHS, LHSIt, RHS.reg)) {
- if (ValueLiveAt(LHSIt, LHS.end(), RHSIt->valno->def))
- // Here is an interesting situation:
- // BB1:
- // vr1025 = copy vr1024
- // ..
- // BB2:
- // vr1024 = op
- // = vr1025
- // Even though vr1025 is copied from vr1024, it's not safe to
- // coalesced them since live range of vr1025 intersects the
- // def of vr1024. This happens because vr1025 is assigned the
- // value of the previous iteration of vr1024.
- return false;
- EliminatedLHSVals.push_back(LHSIt->valno);
-
- // We know this entire LHS live range is okay, so skip it now.
- if (++LHSIt == LHSEnd) break;
}
}
}
-
- if (++RHSIt == RHSEnd) break;
- }
- }
-
- // If we got here, we know that the coalescing will be successful and that
- // the value numbers in EliminatedLHSVals will all be merged together. Since
- // the most common case is that EliminatedLHSVals has a single number, we
- // optimize for it: if there is more than one value, we merge them all into
- // the lowest numbered one, then handle the interval as if we were merging
- // with one value number.
- VNInfo *LHSValNo = NULL;
- if (EliminatedLHSVals.size() > 1) {
- // Loop through all the equal value numbers merging them into the smallest
- // one.
- VNInfo *Smallest = EliminatedLHSVals[0];
- for (unsigned i = 1, e = EliminatedLHSVals.size(); i != e; ++i) {
- if (EliminatedLHSVals[i]->id < Smallest->id) {
- // Merge the current notion of the smallest into the smaller one.
- LHS.MergeValueNumberInto(Smallest, EliminatedLHSVals[i]);
- Smallest = EliminatedLHSVals[i];
- } else {
- // Merge into the smallest.
- LHS.MergeValueNumberInto(EliminatedLHSVals[i], Smallest);
- }
}
- LHSValNo = Smallest;
- } else if (EliminatedLHSVals.empty()) {
- if (TargetRegisterInfo::isPhysicalRegister(LHS.reg) &&
- *tri_->getSuperRegisters(LHS.reg))
- // Imprecise sub-register information. Can't handle it.
- return false;
- llvm_unreachable("No copies from the RHS?");
- } else {
- LHSValNo = EliminatedLHSVals[0];
- }
-
- // Okay, now that there is a single LHS value number that we're merging the
- // RHS into, update the value number info for the LHS to indicate that the
- // value number is defined where the RHS value number was.
- const VNInfo *VNI = RHS.getValNumInfo(0);
- LHSValNo->def = VNI->def;
- LHSValNo->setCopy(VNI->getCopy());
-
- // Okay, the final step is to loop over the RHS live intervals, adding them to
- // the LHS.
- if (VNI->hasPHIKill())
- LHSValNo->setHasPHIKill(true);
- LHS.addKills(LHSValNo, VNI->kills);
- LHS.MergeRangesInAsValue(RHS, LHSValNo);
-
- LHS.ComputeJoinedWeight(RHS);
-
- // Update regalloc hint if both are virtual registers.
- if (TargetRegisterInfo::isVirtualRegister(LHS.reg) &&
- TargetRegisterInfo::isVirtualRegister(RHS.reg)) {
- std::pair<unsigned, unsigned> RHSPref = mri_->getRegAllocationHint(RHS.reg);
- std::pair<unsigned, unsigned> LHSPref = mri_->getRegAllocationHint(LHS.reg);
- if (RHSPref != LHSPref)
- mri_->setRegAllocationHint(LHS.reg, RHSPref.first, RHSPref.second);
}
- // Update the liveintervals of sub-registers.
- if (TargetRegisterInfo::isPhysicalRegister(LHS.reg))
- for (const unsigned *AS = tri_->getSubRegisters(LHS.reg); *AS; ++AS)
- li_->getOrCreateInterval(*AS).MergeInClobberRanges(*li_, LHS,
- li_->getVNInfoAllocator());
-
- return true;
-}
-
-/// JoinIntervals - Attempt to join these two intervals. On failure, this
-/// returns false. Otherwise, if one of the intervals being joined is a
-/// physreg, this method always canonicalizes LHS to be it. The output
-/// "RHS" will not have been modified, so we can use this information
-/// below to update aliases.
-bool
-SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
- bool &Swapped) {
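The physreg branch above replaces the old SimpleJoin special case. Both of its inner loops apply the same tolerance test to an overlapping range: the overlap is acceptable only if the range begins exactly at its value number's definition and that definition is the copy being coalesced. In essence, under an assumed helper name:

    static bool overlapIsTheCopy(const LiveRange &LR, const CoalescerPair &CP,
                                 LiveIntervals *LIS) {
      // A range whose def is the coalesced copy itself is not interference.
      return LR.start == LR.valno->def &&
             CP.isCoalescable(LIS->getInstructionFromIndex(LR.start));
    }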
// Compute the final value assignment, assuming that the live ranges can be
// coalesced.
SmallVector<int, 16> LHSValNoAssignments;
@@ -2192,203 +1349,87 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
DenseMap<VNInfo*, VNInfo*> RHSValsDefinedFromLHS;
SmallVector<VNInfo*, 16> NewVNInfo;
- // If a live interval is a physical register, conservatively check if any
- // of its sub-registers is overlapping the live interval of the virtual
- // register. If so, do not coalesce.
- if (TargetRegisterInfo::isPhysicalRegister(LHS.reg) &&
- *tri_->getSubRegisters(LHS.reg)) {
- // If it's coalescing a virtual register to a physical register, estimate
- // its live interval length. This is the *cost* of scanning an entire live
- // interval. If the cost is low, we'll do an exhaustive check instead.
-
- // If this is something like this:
- // BB1:
- // v1024 = op
- // ...
- // BB2:
- // ...
- // RAX = v1024
- //
- // That is, the live interval of v1024 crosses a bb. Then we can't rely on
- // less conservative check. It's possible a sub-register is defined before
- // v1024 (or live in) and live out of BB1.
- if (RHS.containsOneValue() &&
- li_->intervalIsInOneMBB(RHS) &&
- li_->getApproximateInstructionCount(RHS) <= 10) {
- // Perform a more exhaustive check for some common cases.
- if (li_->conflictsWithSubPhysRegRef(RHS, LHS.reg, true, JoinedCopies))
- return false;
- } else {
- for (const unsigned* SR = tri_->getSubRegisters(LHS.reg); *SR; ++SR)
- if (li_->hasInterval(*SR) && RHS.overlaps(li_->getInterval(*SR))) {
- DEBUG({
- dbgs() << "\tInterfere with sub-register ";
- li_->getInterval(*SR).print(dbgs(), tri_);
- });
- return false;
- }
- }
- } else if (TargetRegisterInfo::isPhysicalRegister(RHS.reg) &&
- *tri_->getSubRegisters(RHS.reg)) {
- if (LHS.containsOneValue() &&
- li_->getApproximateInstructionCount(LHS) <= 10) {
- // Perform a more exhaustive check for some common cases.
- if (li_->conflictsWithSubPhysRegRef(LHS, RHS.reg, false, JoinedCopies))
- return false;
- } else {
- for (const unsigned* SR = tri_->getSubRegisters(RHS.reg); *SR; ++SR)
- if (li_->hasInterval(*SR) && LHS.overlaps(li_->getInterval(*SR))) {
- DEBUG({
- dbgs() << "\tInterfere with sub-register ";
- li_->getInterval(*SR).print(dbgs(), tri_);
- });
- return false;
- }
- }
- }
+ LiveInterval &LHS = li_->getOrCreateInterval(CP.getDstReg());
+ DEBUG({ dbgs() << "\t\tLHS = "; LHS.print(dbgs(), tri_); dbgs() << "\n"; });
- // Compute ultimate value numbers for the LHS and RHS values.
- if (RHS.containsOneValue()) {
- // Copies from a liveinterval with a single value are simple to handle and
- // very common, handle the special case here. This is important, because
- // often RHS is small and LHS is large (e.g. a physreg).
-
- // Find out if the RHS is defined as a copy from some value in the LHS.
- int RHSVal0DefinedFromLHS = -1;
- int RHSValID = -1;
- VNInfo *RHSValNoInfo = NULL;
- VNInfo *RHSValNoInfo0 = RHS.getValNumInfo(0);
- unsigned RHSSrcReg = li_->getVNInfoSourceReg(RHSValNoInfo0);
- if (RHSSrcReg == 0 || RHSSrcReg != LHS.reg) {
- // If RHS is not defined as a copy from the LHS, we can use simpler and
- // faster checks to see if the live ranges are coalescable. This joiner
- // can't swap the LHS/RHS intervals though.
- if (!TargetRegisterInfo::isPhysicalRegister(RHS.reg)) {
- return SimpleJoin(LHS, RHS);
- } else {
- RHSValNoInfo = RHSValNoInfo0;
- }
- } else {
- // It was defined as a copy from the LHS, find out what value # it is.
- RHSValNoInfo =
- LHS.getLiveRangeContaining(RHSValNoInfo0->def.getPrevSlot())->valno;
- RHSValID = RHSValNoInfo->id;
- RHSVal0DefinedFromLHS = RHSValID;
- }
+ // Loop over the value numbers of the LHS, seeing if any are defined from
+ // the RHS.
+ for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
+ i != e; ++i) {
+ VNInfo *VNI = *i;
+ if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy?
+ continue;
- LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
- RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
- NewVNInfo.resize(LHS.getNumValNums(), NULL);
-
- // Okay, *all* of the values in LHS that are defined as a copy from RHS
- // should now get updated.
- for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- unsigned VN = VNI->id;
- if (unsigned LHSSrcReg = li_->getVNInfoSourceReg(VNI)) {
- if (LHSSrcReg != RHS.reg) {
- // If this is not a copy from the RHS, its value number will be
- // unmodified by the coalescing.
- NewVNInfo[VN] = VNI;
- LHSValNoAssignments[VN] = VN;
- } else if (RHSValID == -1) {
- // Otherwise, it is a copy from the RHS, and we don't already have a
- // value# for it. Keep the current value number, but remember it.
- LHSValNoAssignments[VN] = RHSValID = VN;
- NewVNInfo[VN] = RHSValNoInfo;
- LHSValsDefinedFromRHS[VNI] = RHSValNoInfo0;
- } else {
- // Otherwise, use the specified value #.
- LHSValNoAssignments[VN] = RHSValID;
- if (VN == (unsigned)RHSValID) { // Else this val# is dead.
- NewVNInfo[VN] = RHSValNoInfo;
- LHSValsDefinedFromRHS[VNI] = RHSValNoInfo0;
- }
- }
- } else {
- NewVNInfo[VN] = VNI;
- LHSValNoAssignments[VN] = VN;
- }
- }
+ // Never join with a register that has EarlyClobber redefs.
+ if (VNI->hasRedefByEC())
+ return false;
- assert(RHSValID != -1 && "Didn't find value #?");
- RHSValNoAssignments[0] = RHSValID;
- if (RHSVal0DefinedFromLHS != -1) {
- // This path doesn't go through ComputeUltimateVN so just set
- // it to anything.
- RHSValsDefinedFromLHS[RHSValNoInfo0] = (VNInfo*)1;
- }
- } else {
- // Loop over the value numbers of the LHS, seeing if any are defined from
- // the RHS.
- for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy?
- continue;
+ // DstReg is known to be a register in the LHS interval. If the src is
+ // from the RHS interval, we can use its value #.
+ if (!CP.isCoalescable(VNI->getCopy()))
+ continue;
- // DstReg is known to be a register in the LHS interval. If the src is
- // from the RHS interval, we can use its value #.
- if (li_->getVNInfoSourceReg(VNI) != RHS.reg)
- continue;
+ // Figure out the value # from the RHS.
+ LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
+ // The copy could be to an aliased physreg.
+ if (!lr) continue;
+ LHSValsDefinedFromRHS[VNI] = lr->valno;
+ }
- // Figure out the value # from the RHS.
- LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
- assert(lr && "Cannot find live range");
- LHSValsDefinedFromRHS[VNI] = lr->valno;
- }
+ // Loop over the value numbers of the RHS, seeing if any are defined from
+ // the LHS.
+ for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
+ i != e; ++i) {
+ VNInfo *VNI = *i;
+ if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy?
+ continue;
- // Loop over the value numbers of the RHS, seeing if any are defined from
- // the LHS.
- for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy?
- continue;
+ // Never join with a register that has EarlyClobber redefs.
+ if (VNI->hasRedefByEC())
+ return false;
- // DstReg is known to be a register in the RHS interval. If the src is
- // from the LHS interval, we can use its value #.
- if (li_->getVNInfoSourceReg(VNI) != LHS.reg)
- continue;
+ // DstReg is known to be a register in the RHS interval. If the src is
+ // from the LHS interval, we can use its value #.
+ if (!CP.isCoalescable(VNI->getCopy()))
+ continue;
- // Figure out the value # from the LHS.
- LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
- assert(lr && "Cannot find live range");
- RHSValsDefinedFromLHS[VNI] = lr->valno;
- }
+ // Figure out the value # from the LHS.
+ LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
+ // The copy could be to an aliased physreg.
+ if (!lr) continue;
+ RHSValsDefinedFromLHS[VNI] = lr->valno;
+ }
- LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
- RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
- NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());
+ LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
+ RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
+ NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());
- for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- unsigned VN = VNI->id;
- if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
- continue;
- ComputeUltimateVN(VNI, NewVNInfo,
- LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
- LHSValNoAssignments, RHSValNoAssignments);
+ for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
+ i != e; ++i) {
+ VNInfo *VNI = *i;
+ unsigned VN = VNI->id;
+ if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
+ continue;
+ ComputeUltimateVN(VNI, NewVNInfo,
+ LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
+ LHSValNoAssignments, RHSValNoAssignments);
+ }
+ for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
+ i != e; ++i) {
+ VNInfo *VNI = *i;
+ unsigned VN = VNI->id;
+ if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
+ continue;
+ // If this value number isn't a copy from the LHS, it's a new number.
+ if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
+ NewVNInfo.push_back(VNI);
+ RHSValNoAssignments[VN] = NewVNInfo.size()-1;
+ continue;
}
- for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
- i != e; ++i) {
- VNInfo *VNI = *i;
- unsigned VN = VNI->id;
- if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
- continue;
- // If this value number isn't a copy from the LHS, it's a new number.
- if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
- NewVNInfo.push_back(VNI);
- RHSValNoAssignments[VN] = NewVNInfo.size()-1;
- continue;
- }
- ComputeUltimateVN(VNI, NewVNInfo,
- RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
- RHSValNoAssignments, LHSValNoAssignments);
- }
+ ComputeUltimateVN(VNI, NewVNInfo,
+ RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
+ RHSValNoAssignments, LHSValNoAssignments);
}
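A small worked example of the mapping built above: suppose LHS has val#s {0, 1}, RHS has the single val# {0}, and the earlier scan recorded RHS:0 in RHSValsDefinedFromLHS as a copy of LHS:1. The two ComputeUltimateVN passes then produce

    NewVNInfo           = { LHS:0, LHS:1 }
    LHSValNoAssignments = { 0, 1 }
    RHSValNoAssignments = { 1 }    // RHS:0 collapses onto LHS:1

so the joined interval carries two value numbers rather than three, and the copy's value number disappears.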
// Armed with the mappings of LHS/RHS values to ultimate values, walk the
@@ -2399,15 +1440,17 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
LiveInterval::const_iterator JE = RHS.end();
// Skip ahead until the first place of potential sharing.
- if (I->start < J->start) {
- I = std::upper_bound(I, IE, J->start);
- if (I != LHS.begin()) --I;
- } else if (J->start < I->start) {
- J = std::upper_bound(J, JE, I->start);
- if (J != RHS.begin()) --J;
+ if (I != IE && J != JE) {
+ if (I->start < J->start) {
+ I = std::upper_bound(I, IE, J->start);
+ if (I != LHS.begin()) --I;
+ } else if (J->start < I->start) {
+ J = std::upper_bound(J, JE, I->start);
+ if (J != RHS.begin()) --J;
+ }
}
- while (1) {
+ while (I != IE && J != JE) {
// Determine if these two live ranges overlap.
bool Overlaps;
if (I->start < J->start) {
@@ -2429,13 +1472,10 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
return false;
}
- if (I->end < J->end) {
+ if (I->end < J->end)
++I;
- if (I == IE) break;
- } else {
+ else
++J;
- if (J == JE) break;
- }
}
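The branchy Overlaps computation in this walk is the standard half-open interval test: two ranges [AS, AE) and [BS, BE) overlap exactly when each starts before the other ends.

    static bool rangesOverlap(SlotIndex AS, SlotIndex AE,
                              SlotIndex BS, SlotIndex BE) {
      return AS < BE && BS < AE;
    }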
// Update kill info. Some live ranges are extended due to copy coalescing.
@@ -2443,10 +1483,8 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
E = LHSValsDefinedFromRHS.end(); I != E; ++I) {
VNInfo *VNI = I->first;
unsigned LHSValID = LHSValNoAssignments[VNI->id];
- NewVNInfo[LHSValID]->removeKill(VNI->def);
if (VNI->hasPHIKill())
NewVNInfo[LHSValID]->setHasPHIKill(true);
- RHS.addKills(NewVNInfo[LHSValID], VNI->kills);
}
// Update kill info. Some live ranges are extended due to copy coalescing.
@@ -2454,25 +1492,19 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
E = RHSValsDefinedFromLHS.end(); I != E; ++I) {
VNInfo *VNI = I->first;
unsigned RHSValID = RHSValNoAssignments[VNI->id];
- NewVNInfo[RHSValID]->removeKill(VNI->def);
if (VNI->hasPHIKill())
NewVNInfo[RHSValID]->setHasPHIKill(true);
- LHS.addKills(NewVNInfo[RHSValID], VNI->kills);
}
+ if (LHSValNoAssignments.empty())
+ LHSValNoAssignments.push_back(-1);
+ if (RHSValNoAssignments.empty())
+ RHSValNoAssignments.push_back(-1);
+
// If we get here, we know that we can coalesce the live ranges. Ask the
// intervals to coalesce themselves now.
- if ((RHS.ranges.size() > LHS.ranges.size() &&
- TargetRegisterInfo::isVirtualRegister(LHS.reg)) ||
- TargetRegisterInfo::isPhysicalRegister(RHS.reg)) {
- RHS.join(LHS, &RHSValNoAssignments[0], &LHSValNoAssignments[0], NewVNInfo,
- mri_);
- Swapped = true;
- } else {
- LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
- mri_);
- Swapped = false;
- }
+ LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
+ mri_);
return true;
}
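The empty-vector guards added just before the join matter for correctness: join() receives raw pointers into the assignment arrays, and taking &v[0] of an empty std::vector is undefined behavior. A minimal illustration:

    std::vector<int> Assignments;
    // int *P = &Assignments[0];   // undefined behavior while empty
    Assignments.push_back(-1);     // the guard: guarantee one element
    int *P = &Assignments[0];      // now well-defined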
@@ -2513,15 +1545,10 @@ void SimpleRegisterCoalescing::CopyCoalesceInMBB(MachineBasicBlock *MBB,
// If this isn't a copy or an extract_subreg, we can't join intervals.
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
bool isInsUndef = false;
- if (Inst->isExtractSubreg()) {
+ if (Inst->isCopy()) {
DstReg = Inst->getOperand(0).getReg();
SrcReg = Inst->getOperand(1).getReg();
- } else if (Inst->isInsertSubreg()) {
- DstReg = Inst->getOperand(0).getReg();
- SrcReg = Inst->getOperand(2).getReg();
- if (Inst->getOperand(1).isUndef())
- isInsUndef = true;
- } else if (Inst->isInsertSubreg() || Inst->isSubregToReg()) {
+ } else if (Inst->isSubregToReg()) {
DstReg = Inst->getOperand(0).getReg();
SrcReg = Inst->getOperand(2).getReg();
} else if (!tii_->isMoveInstr(*Inst, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
@@ -2650,6 +1677,8 @@ SimpleRegisterCoalescing::lastRegisterUse(SlotIndex Start,
E = mri_->use_nodbg_end(); I != E; ++I) {
MachineOperand &Use = I.getOperand();
MachineInstr *UseMI = Use.getParent();
+ if (UseMI->isIdentityCopy())
+ continue;
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
if (tii_->isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
SrcReg == DstReg && SrcSubIdx == DstSubIdx)
@@ -2680,7 +1709,8 @@ SimpleRegisterCoalescing::lastRegisterUse(SlotIndex Start,
// Ignore identity copies.
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (!(tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
+ if (!MI->isIdentityCopy() &&
+ !(tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
SrcReg == DstReg && SrcSubIdx == DstSubIdx))
for (unsigned i = 0, NumOps = MI->getNumOperands(); i != NumOps; ++i) {
MachineOperand &Use = MI->getOperand(i);
@@ -2750,10 +1780,9 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
// Delete all coalesced copies.
bool DoDelete = true;
if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
- assert((MI->isExtractSubreg() || MI->isInsertSubreg() ||
- MI->isSubregToReg()) && "Unrecognized copy instruction");
- DstReg = MI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg))
+ assert(MI->isCopyLike() && "Unrecognized copy instruction");
+ SrcReg = MI->getOperand(MI->isSubregToReg() ? 2 : 1).getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(SrcReg))
// Do not delete extract_subreg, insert_subreg of physical
// registers unless the definition is dead. e.g.
// %D0<def> = INSERT_SUBREG %D0<undef>, %S0<kill>, 1
@@ -2762,7 +1791,7 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
DoDelete = false;
}
if (MI->allDefsAreDead()) {
- LiveInterval &li = li_->getInterval(DstReg);
+ LiveInterval &li = li_->getInterval(SrcReg);
if (!ShortenDeadCopySrcLiveRange(li, MI))
ShortenDeadCopyLiveRange(li, MI);
DoDelete = true;
@@ -2812,12 +1841,13 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
// If the move will be an identity move delete it
bool isMove= tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx);
- if (isMove && SrcReg == DstReg && SrcSubIdx == DstSubIdx) {
+ if (MI->isIdentityCopy() ||
+ (isMove && SrcReg == DstReg && SrcSubIdx == DstSubIdx)) {
if (li_->hasInterval(SrcReg)) {
LiveInterval &RegInt = li_->getInterval(SrcReg);
// If def of this move instruction is dead, remove its live range
- // from the dstination register's live interval.
- if (MI->registerDefIsDead(DstReg)) {
+ // from the destination register's live interval.
+ if (MI->allDefsAreDead()) {
if (!ShortenDeadCopySrcLiveRange(RegInt, MI))
ShortenDeadCopyLiveRange(RegInt, MI);
}
@@ -2832,17 +1862,13 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
// Check for now unnecessary kill flags.
if (li_->isNotInMIMap(MI)) continue;
- SlotIndex UseIdx = li_->getInstructionIndex(MI).getUseIndex();
+ SlotIndex DefIdx = li_->getInstructionIndex(MI).getDefIndex();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg() || !MO.isKill()) continue;
unsigned reg = MO.getReg();
if (!reg || !li_->hasInterval(reg)) continue;
- LiveInterval &LI = li_->getInterval(reg);
- const LiveRange *LR = LI.getLiveRangeContaining(UseIdx);
- if (!LR ||
- (!LR->valno->isKill(UseIdx.getDefIndex()) &&
- LR->valno->def != UseIdx.getDefIndex()))
+ if (!li_->getInterval(reg).killedAt(DefIdx))
MO.setIsKill(false);
}
}
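Several hunks above consult MachineInstr::isIdentityCopy() alongside the older isMoveInstr() pattern matching. The behavior relied on here is roughly the following sketch; the real predicate lives in MachineInstr and also covers the subreg-copy pseudos:

    static bool isIdentityCopy(const MachineInstr &MI) {
      return MI.isCopy() &&
             MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
             MI.getOperand(0).getSubReg() == MI.getOperand(1).getSubReg();
    }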
diff --git a/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.h b/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.h
index 1be04f3..e154da6 100644
--- a/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.h
+++ b/contrib/llvm/lib/CodeGen/SimpleRegisterCoalescing.h
@@ -105,21 +105,12 @@ namespace llvm {
/// possible to coalesce this interval, but it may be possible if other
/// things get coalesced, then it returns true by reference in 'Again'.
bool JoinCopy(CopyRec &TheCopy, bool &Again);
-
+
/// JoinIntervals - Attempt to join these two intervals. On failure, this
- /// returns false. Otherwise, if one of the intervals being joined is a
- /// physreg, this method always canonicalizes DestInt to be it. The output
- /// "SrcInt" will not have been modified, so we can use this information
- /// below to update aliases.
- bool JoinIntervals(LiveInterval &LHS, LiveInterval &RHS, bool &Swapped);
-
- /// SimpleJoin - Attempt to join the specified interval into this one. The
- /// caller of this method must guarantee that the RHS only contains a single
- /// value number and that the RHS is not defined by a copy from this
- /// interval. This returns false if the intervals are not joinable, or it
- /// joins them and returns true.
- bool SimpleJoin(LiveInterval &LHS, LiveInterval &RHS);
-
+ /// returns false. The output "SrcInt" will not have been modified, so we can
+ /// use this information below to update aliases.
+ bool JoinIntervals(CoalescerPair &CP);
+
/// Return true if the two specified registers belong to different register
/// classes. The registers may be either phys or virt regs.
bool differingRegisterClasses(unsigned RegA, unsigned RegB) const;
@@ -128,8 +119,7 @@ namespace llvm {
/// the source value number is defined by a copy from the destination reg
/// see if we can merge these two destination reg valno# into a single
/// value number, eliminating a copy.
- bool AdjustCopiesBackFrom(LiveInterval &IntA, LiveInterval &IntB,
- MachineInstr *CopyMI);
+ bool AdjustCopiesBackFrom(const CoalescerPair &CP, MachineInstr *CopyMI);
/// HasOtherReachingDefs - Return true if there are definitions of IntB
/// other than BValNo val# that can reach uses of AValno val# of IntA.
@@ -140,8 +130,7 @@ namespace llvm {
/// If the source value number is defined by a commutable instruction and
/// its other operand is coalesced to the copy dest register, see if we
/// can transform the copy into a noop by commuting the definition.
- bool RemoveCopyByCommutingDef(LiveInterval &IntA, LiveInterval &IntB,
- MachineInstr *CopyMI);
+ bool RemoveCopyByCommutingDef(const CoalescerPair &CP,MachineInstr *CopyMI);
/// TrimLiveIntervalToLastUse - If there is a last use in the same basic
/// block as the copy instruction, trim the live interval to the last use
@@ -155,28 +144,6 @@ namespace llvm {
bool ReMaterializeTrivialDef(LiveInterval &SrcInt, unsigned DstReg,
unsigned DstSubIdx, MachineInstr *CopyMI);
- /// CanCoalesceWithImpDef - Returns true if the specified copy instruction
- /// from an implicit def to another register can be coalesced away.
- bool CanCoalesceWithImpDef(MachineInstr *CopyMI,
- LiveInterval &li, LiveInterval &ImpLi) const;
-
- /// TurnCopiesFromValNoToImpDefs - The specified value# is defined by an
- /// implicit_def and it is being removed. Turn all copies from this value#
- /// into implicit_defs.
- void TurnCopiesFromValNoToImpDefs(LiveInterval &li, VNInfo *VNI);
-
- /// isWinToJoinVRWithSrcPhysReg - Return true if it's worth while to join a
- /// a virtual destination register with physical source register.
- bool isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
- MachineBasicBlock *CopyMBB,
- LiveInterval &DstInt, LiveInterval &SrcInt);
-
- /// isWinToJoinVRWithDstPhysReg - Return true if it's worth while to join a
- /// copy from a virtual source register to a physical destination register.
- bool isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,
- MachineBasicBlock *CopyMBB,
- LiveInterval &DstInt, LiveInterval &SrcInt);
-
/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
/// two virtual registers from different register classes.
bool isWinToJoinCrossClass(unsigned SrcReg,
@@ -185,43 +152,12 @@ namespace llvm {
const TargetRegisterClass *DstRC,
const TargetRegisterClass *NewRC);
- /// HasIncompatibleSubRegDefUse - If we are trying to coalesce a virtual
- /// register with a physical register, check if any of the virtual register
- /// operand is a sub-register use or def. If so, make sure it won't result
- /// in an illegal extract_subreg or insert_subreg instruction.
- bool HasIncompatibleSubRegDefUse(MachineInstr *CopyMI,
- unsigned VirtReg, unsigned PhysReg);
-
- /// CanJoinExtractSubRegToPhysReg - Return true if it's possible to coalesce
- /// an extract_subreg where dst is a physical register, e.g.
- /// cl = EXTRACT_SUBREG reg1024, 1
- bool CanJoinExtractSubRegToPhysReg(unsigned DstReg, unsigned SrcReg,
- unsigned SubIdx, unsigned &RealDstReg);
-
- /// CanJoinInsertSubRegToPhysReg - Return true if it's possible to coalesce
- /// an insert_subreg where src is a physical register, e.g.
- /// reg1024 = INSERT_SUBREG reg1024, c1, 0
- bool CanJoinInsertSubRegToPhysReg(unsigned DstReg, unsigned SrcReg,
- unsigned SubIdx, unsigned &RealDstReg);
-
- /// ValueLiveAt - Return true if the LiveRange pointed to by the given
- /// iterator, or any subsequent range with the same value number,
- /// is live at the given point.
- bool ValueLiveAt(LiveInterval::iterator LRItr, LiveInterval::iterator LREnd,
- SlotIndex defPoint) const;
-
- /// RangeIsDefinedByCopyFromReg - Return true if the specified live range of
- /// the specified live interval is defined by a copy from the specified
- /// register.
- bool RangeIsDefinedByCopyFromReg(LiveInterval &li, LiveRange *LR,
- unsigned Reg);
-
/// UpdateRegDefsUses - Replace all defs and uses of SrcReg with DstReg and
/// update the subregister number if it is not zero. If DstReg is a
/// physical register and the existing subregister number of the def / use
/// being updated is not zero, make sure to set it to the correct physical
/// subregister.
- void UpdateRegDefsUses(unsigned SrcReg, unsigned DstReg, unsigned SubIdx);
+ void UpdateRegDefsUses(const CoalescerPair &CP);
/// ShortenDeadCopyLiveRange - Shorten a live range defined by a dead copy.
/// Return true if live interval is removed.
@@ -238,6 +174,10 @@ namespace llvm {
/// it as well.
bool RemoveDeadDef(LiveInterval &li, MachineInstr *DefMI);
+ /// RemoveCopyFlag - If DstReg is no longer defined by CopyMI, clear the
+ /// VNInfo copy flag for DstReg and all aliases.
+ void RemoveCopyFlag(unsigned DstReg, const MachineInstr *CopyMI);
+
/// lastRegisterUse - Returns the last use of the specific register between
/// cycles Start and End or NULL if there are no uses.
MachineOperand *lastRegisterUse(SlotIndex Start, SlotIndex End,
diff --git a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index 059e8d6..e90869d 100644
--- a/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/contrib/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -46,6 +46,8 @@ namespace {
Constant *UnregisterFn;
Constant *BuiltinSetjmpFn;
Constant *FrameAddrFn;
+ Constant *StackAddrFn;
+ Constant *StackRestoreFn;
Constant *LSDAAddrFn;
Value *PersonalityFn;
Constant *SelectorFn;
@@ -69,7 +71,7 @@ namespace {
void insertCallSiteStore(Instruction *I, int Number, Value *CallSite);
void markInvokeCallSite(InvokeInst *II, int InvokeNo, Value *CallSite,
SwitchInst *CatchSwitch);
- void splitLiveRangesLiveAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes);
+ void splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes);
bool insertSjLjEHSupport(Function &F);
};
} // end anonymous namespace
@@ -107,6 +109,8 @@ bool SjLjEHPass::doInitialization(Module &M) {
PointerType::getUnqual(FunctionContextTy),
(Type *)0);
FrameAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::frameaddress);
+ StackAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::stacksave);
+ StackRestoreFn = Intrinsic::getDeclaration(&M, Intrinsic::stackrestore);
BuiltinSetjmpFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_setjmp);
LSDAAddrFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_sjlj_lsda);
SelectorFn = Intrinsic::getDeclaration(&M, Intrinsic::eh_selector);
@@ -175,8 +179,10 @@ static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
/// we spill into a stack location, guaranteeing that there is nothing live
/// across the unwind edge. This process also splits all critical edges
/// coming out of invoke's.
+/// FIXME: Move this function to a common utility file (Local.cpp?) so
+/// both SjLj and LowerInvoke can use it.
void SjLjEHPass::
-splitLiveRangesLiveAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
+splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
// First step, split all critical edges from invoke instructions.
for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
InvokeInst *II = Invokes[i];
@@ -198,16 +204,33 @@ splitLiveRangesLiveAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
++AfterAllocaInsertPt;
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI) {
- // This is always a no-op cast because we're casting AI to AI->getType() so
- // src and destination types are identical. BitCast is the only possibility.
- CastInst *NC = new BitCastInst(
- AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
- AI->replaceAllUsesWith(NC);
- // Normally its is forbidden to replace a CastInst's operand because it
- // could cause the opcode to reflect an illegal conversion. However, we're
- // replacing it here with the same value it was constructed with to simply
- // make NC its user.
- NC->setOperand(0, AI);
+ const Type *Ty = AI->getType();
+ // Aggregate types can't be cast, but are legal argument types, so we have
+ // to handle them differently. We use an extract/insert pair as a
+ // lightweight method to achieve the same goal.
+ if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
+ Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
+ Instruction *NI = InsertValueInst::Create(AI, EI, 0);
+ NI->insertAfter(EI);
+ AI->replaceAllUsesWith(NI);
+ // Set the operand of the instructions back to the AllocaInst.
+ EI->setOperand(0, AI);
+ NI->setOperand(0, AI);
+ } else {
+ // This is always a no-op cast because we're casting AI to AI->getType()
+ // so src and destination types are identical. BitCast is the only
+ // possibility.
+ CastInst *NC = new BitCastInst(
+ AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
+ AI->replaceAllUsesWith(NC);
+ // Set the operand of the cast instruction back to the AllocaInst.
+ // Normally it's forbidden to replace a CastInst's operand because it
+ // could cause the opcode to reflect an illegal conversion. However,
+ // we're replacing it here with the same value it was constructed with.
+ // We do this because the above replaceAllUsesWith() clobbered the
+ // operand, but we want this one to remain.
+ NC->setOperand(0, AI);
+ }
}
// Finally, scan the code looking for instructions with bad live ranges.
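As an illustration (value names invented), the extract/insert pair built above is a semantic no-op. For an aggregate argument %agg of type {i32, i32}, the inserted IR is roughly:

    %agg.tmp = extractvalue { i32, i32 } %agg, 0
    %agg.ni  = insertvalue { i32, i32 } %agg, i32 %agg.tmp, 0

Every prior use of %agg is then rewritten to %agg.ni, and both new instructions have their first operand pointed back at %agg, so %agg.ni becomes the argument's single dominating user, playing the same role the no-op BitCast plays for scalar arguments.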
@@ -266,6 +289,9 @@ splitLiveRangesLiveAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
}
// If we decided we need a spill, do it.
+ // FIXME: Spilling this way is overkill, as it forces all uses of
+ // the value to be reloaded from the stack slot, even those that aren't
+ // in the unwind blocks. We should be more selective.
if (NeedsSpill) {
++NumSpilled;
DemoteRegToStack(*Inst, true);
@@ -294,22 +320,34 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
// If we don't have any invokes or unwinds, there's nothing to do.
if (Unwinds.empty() && Invokes.empty()) return false;
- // Find the eh.selector.* and eh.exception calls. We'll use the first
- // eh.selector to determine the right personality function to use. For
- // SJLJ, we always use the same personality for the whole function,
- // not on a per-selector basis.
+ // Find the eh.selector.*, eh.exception and alloca calls.
+ //
+ // Remember any allocas that aren't in the entry block, as the
+ // saved SP in the jmpbuf will need to be updated for them.
+ //
+ // We'll use the first eh.selector to determine the right personality
+ // function to use. For SJLJ, we always use the same personality for the
+ // whole function, not on a per-selector basis.
// FIXME: That's a bit ugly. Better way?
SmallVector<CallInst*,16> EH_Selectors;
SmallVector<CallInst*,16> EH_Exceptions;
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
+ SmallVector<Instruction*,16> JmpbufUpdatePoints;
+ // Note: Skip the entry block since there's nothing there that interests
+ // us. eh.selector and eh.exception shouldn't ever be there, and we
+ // want to disregard any allocas that are there.
+ for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;) {
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
if (CallInst *CI = dyn_cast<CallInst>(I)) {
if (CI->getCalledFunction() == SelectorFn) {
- if (!PersonalityFn) PersonalityFn = CI->getOperand(2);
+ if (!PersonalityFn) PersonalityFn = CI->getArgOperand(1);
EH_Selectors.push_back(CI);
} else if (CI->getCalledFunction() == ExceptionFn) {
EH_Exceptions.push_back(CI);
+ } else if (CI->getCalledFunction() == StackRestoreFn) {
+ JmpbufUpdatePoints.push_back(CI);
}
+ } else if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
+ JmpbufUpdatePoints.push_back(AI);
}
}
}
@@ -329,7 +367,7 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge. This process also splits all critical edges
// coming out of invokes.
- splitLiveRangesLiveAcrossInvokes(Invokes);
+ splitLiveRangesAcrossInvokes(Invokes);
BasicBlock *EntryBB = F.begin();
// Create an alloca for the incoming jump buffer ptr and the new jump buffer
@@ -419,7 +457,7 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
// Populate the Function Context
// 1. LSDA address
// 2. Personality function address
- // 3. jmpbuf (save FP and call eh.sjlj.setjmp)
+ // 3. jmpbuf (save SP, FP and call eh.sjlj.setjmp)
// LSDA address
Idxs[0] = Zero;
@@ -440,31 +478,41 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
new StoreInst(PersonalityFn, PersonalityFieldPtr, true,
EntryBB->getTerminator());
- // Save the frame pointer.
+ // Save the frame pointer.
Idxs[1] = ConstantInt::get(Int32Ty, 5);
- Value *FieldPtr
+ Value *JBufPtr
= GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
"jbuf_gep",
EntryBB->getTerminator());
Idxs[1] = ConstantInt::get(Int32Ty, 0);
- Value *ElemPtr =
- GetElementPtrInst::Create(FieldPtr, Idxs, Idxs+2, "jbuf_fp_gep",
+ Value *FramePtr =
+ GetElementPtrInst::Create(JBufPtr, Idxs, Idxs+2, "jbuf_fp_gep",
EntryBB->getTerminator());
Value *Val = CallInst::Create(FrameAddrFn,
ConstantInt::get(Int32Ty, 0),
"fp",
EntryBB->getTerminator());
- new StoreInst(Val, ElemPtr, true, EntryBB->getTerminator());
- // Call the setjmp instrinsic. It fills in the rest of the jmpbuf
+ new StoreInst(Val, FramePtr, true, EntryBB->getTerminator());
+
+ // Save the stack pointer.
+ Idxs[1] = ConstantInt::get(Int32Ty, 2);
+ Value *StackPtr =
+ GetElementPtrInst::Create(JBufPtr, Idxs, Idxs+2, "jbuf_sp_gep",
+ EntryBB->getTerminator());
+
+ Val = CallInst::Create(StackAddrFn, "sp", EntryBB->getTerminator());
+ new StoreInst(Val, StackPtr, true, EntryBB->getTerminator());
+
+ // Call the setjmp intrinsic. It fills in the rest of the jmpbuf.
Value *SetjmpArg =
- CastInst::Create(Instruction::BitCast, FieldPtr,
+ CastInst::Create(Instruction::BitCast, JBufPtr,
Type::getInt8PtrTy(F.getContext()), "",
EntryBB->getTerminator());
Value *DispatchVal = CallInst::Create(BuiltinSetjmpFn, SetjmpArg,
"dispatch",
EntryBB->getTerminator());
- // check the return value of the setjmp. non-zero goes to dispatcher
+ // Check the return value of the setjmp. Non-zero goes to the dispatcher.
Value *IsNormal = new ICmpInst(EntryBB->getTerminator(),
ICmpInst::ICMP_EQ, DispatchVal, Zero,
"notunwind");
@@ -509,6 +557,16 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
Unwinds[i]->eraseFromParent();
}
+ // Following any allocas not in the entry block, update the saved SP
+ // in the jmpbuf to the new value.
+ for (unsigned i = 0, e = JmpbufUpdatePoints.size(); i != e; ++i) {
+ Instruction *AI = JmpbufUpdatePoints[i];
+ Instruction *StackAddr = CallInst::Create(StackAddrFn, "sp");
+ StackAddr->insertAfter(AI);
+ Instruction *StoreStackAddr = new StoreInst(StackAddr, StackPtr, true);
+ StoreStackAddr->insertAfter(StackAddr);
+ }
+
// Finally, for any returns from this function, if this function contains an
// invoke, add a call to unregister the function context.
for (unsigned i = 0, e = Returns.size(); i != e; ++i)
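Read in isolation, the SP-update step above amounts to the following sketch (same declarations this pass already has; AI is the alloca or stackrestore call being followed):

    static void updateJmpbufSP(Instruction *AI, Constant *StackAddrFn,
                               Value *StackPtr) {
      // Re-read the stack pointer right after the alloca...
      Instruction *SP = CallInst::Create(StackAddrFn, "sp");
      SP->insertAfter(AI);
      // ...and store it (volatile) into the jmpbuf's SP slot, so a longjmp
      // back into this frame restores a stack that covers the allocation.
      Instruction *Store = new StoreInst(SP, StackPtr, true);
      Store->insertAfter(SP);
    }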
diff --git a/contrib/llvm/lib/CodeGen/SlotIndexes.cpp b/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
index 6110ef5..7a227cf 100644
--- a/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
+++ b/contrib/llvm/lib/CodeGen/SlotIndexes.cpp
@@ -213,9 +213,11 @@ void SlotIndexes::dump() const {
// Print a SlotIndex to a raw_ostream.
void SlotIndex::print(raw_ostream &os) const {
- os << getIndex();
+ os << entry().getIndex();
if (isPHI())
os << "*";
+ else
+ os << "LudS"[getSlot()];
}
// Dump a SlotIndex to stderr.
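The single-character codes printed above decode the four sub-slots of an index; the mapping is exactly the string indexing used in print():

    static char slotChar(unsigned Slot) {
      // 0 = Load, 1 = use, 2 = def, 3 = Store.
      assert(Slot < 4 && "SlotIndex has only four sub-slots");
      return "LudS"[Slot];
    }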
diff --git a/contrib/llvm/lib/CodeGen/Spiller.cpp b/contrib/llvm/lib/CodeGen/Spiller.cpp
index a7b2efe..56bcb28 100644
--- a/contrib/llvm/lib/CodeGen/Spiller.cpp
+++ b/contrib/llvm/lib/CodeGen/Spiller.cpp
@@ -14,18 +14,20 @@
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <set>
using namespace llvm;
namespace {
- enum SpillerName { trivial, standard, splitting };
+ enum SpillerName { trivial, standard, splitting, inline_ };
}
static cl::opt<SpillerName>
@@ -35,6 +37,7 @@ spillerOpt("spiller",
cl::values(clEnumVal(trivial, "trivial spiller"),
clEnumVal(standard, "default spiller"),
clEnumVal(splitting, "splitting spiller"),
+ clEnumValN(inline_, "inline", "inline spiller"),
clEnumValEnd),
cl::init(standard));
@@ -53,8 +56,8 @@ protected:
const TargetInstrInfo *tii;
const TargetRegisterInfo *tri;
VirtRegMap *vrm;
-
- /// Construct a spiller base.
+
+ /// Construct a spiller base.
SpillerBase(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
: mf(mf), lis(lis), vrm(vrm)
{
@@ -67,7 +70,8 @@ protected:
/// Add spill ranges for every use/def of the live interval, inserting loads
/// immediately before each use, and stores after each def. No folding or
/// remat is attempted.
- std::vector<LiveInterval*> trivialSpillEverywhere(LiveInterval *li) {
+ void trivialSpillEverywhere(LiveInterval *li,
+ std::vector<LiveInterval*> &newIntervals) {
DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");
assert(li->weight != HUGE_VALF &&
@@ -78,8 +82,6 @@ protected:
DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");
- std::vector<LiveInterval*> added;
-
const TargetRegisterClass *trc = mri->getRegClass(li->reg);
unsigned ss = vrm->assignVirt2StackSlot(li->reg);
@@ -96,7 +98,7 @@ protected:
do {
++regItr;
} while (regItr != mri->reg_end() && (&*regItr == mi));
-
+
// Collect uses & defs for this instr.
SmallVector<unsigned, 2> indices;
bool hasUse = false;
@@ -116,7 +118,7 @@ protected:
vrm->assignVirt2StackSlot(newVReg, ss);
LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
newLI->weight = HUGE_VALF;
-
+
// Update the reg operands & kill flags.
for (unsigned i = 0; i < indices.size(); ++i) {
unsigned mopIdx = indices[i];
@@ -136,10 +138,10 @@ protected:
MachineInstr *loadInstr(prior(miItr));
SlotIndex loadIndex =
lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
+ vrm->addSpillSlotUse(ss, loadInstr);
SlotIndex endIndex = loadIndex.getNextIndex();
VNInfo *loadVNI =
newLI->getNextValue(loadIndex, 0, true, lis->getVNInfoAllocator());
- loadVNI->addKill(endIndex);
newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
}
@@ -150,17 +152,15 @@ protected:
MachineInstr *storeInstr(llvm::next(miItr));
SlotIndex storeIndex =
lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
+ vrm->addSpillSlotUse(ss, storeInstr);
SlotIndex beginIndex = storeIndex.getPrevIndex();
VNInfo *storeVNI =
newLI->getNextValue(beginIndex, 0, true, lis->getVNInfoAllocator());
- storeVNI->addKill(storeIndex);
newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
}
- added.push_back(newLI);
+ newIntervals.push_back(newLI);
}
-
- return added;
}
};
@@ -176,11 +176,12 @@ public:
TrivialSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
: SpillerBase(mf, lis, vrm) {}
- std::vector<LiveInterval*> spill(LiveInterval *li,
- SmallVectorImpl<LiveInterval*> &spillIs,
- SlotIndex*) {
+ void spill(LiveInterval *li,
+ std::vector<LiveInterval*> &newIntervals,
+ SmallVectorImpl<LiveInterval*> &,
+ SlotIndex*) {
// Ignore spillIs - we don't use it.
- return trivialSpillEverywhere(li);
+ trivialSpillEverywhere(li, newIntervals);
}
};
@@ -200,10 +201,13 @@ public:
: lis(lis), loopInfo(loopInfo), vrm(vrm) {}
/// Falls back on LiveIntervals::addIntervalsForSpills.
- std::vector<LiveInterval*> spill(LiveInterval *li,
- SmallVectorImpl<LiveInterval*> &spillIs,
- SlotIndex*) {
- return lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
+ void spill(LiveInterval *li,
+ std::vector<LiveInterval*> &newIntervals,
+ SmallVectorImpl<LiveInterval*> &spillIs,
+ SlotIndex*) {
+ std::vector<LiveInterval*> added =
+ lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
+ newIntervals.insert(newIntervals.end(), added.begin(), added.end());
}
};
@@ -214,7 +218,7 @@ namespace {
/// When a call to spill is placed this spiller will first try to break the
/// interval up into its component values (one new interval per value).
/// If this fails, or if a call is placed to spill a previously split interval
-/// then the spiller falls back on the standard spilling mechanism.
+/// then the spiller falls back on the standard spilling mechanism.
class SplittingSpiller : public StandardSpiller {
public:
SplittingSpiller(MachineFunction *mf, LiveIntervals *lis,
@@ -226,22 +230,21 @@ public:
tri = mf->getTarget().getRegisterInfo();
}
- std::vector<LiveInterval*> spill(LiveInterval *li,
- SmallVectorImpl<LiveInterval*> &spillIs,
- SlotIndex *earliestStart) {
-
- if (worthTryingToSplit(li)) {
- return tryVNISplit(li, earliestStart);
- }
- // else
- return StandardSpiller::spill(li, spillIs, earliestStart);
+ void spill(LiveInterval *li,
+ std::vector<LiveInterval*> &newIntervals,
+ SmallVectorImpl<LiveInterval*> &spillIs,
+ SlotIndex *earliestStart) {
+ if (worthTryingToSplit(li))
+ tryVNISplit(li, earliestStart);
+ else
+ StandardSpiller::spill(li, newIntervals, spillIs, earliestStart);
}
private:
MachineRegisterInfo *mri;
const TargetInstrInfo *tii;
- const TargetRegisterInfo *tri;
+ const TargetRegisterInfo *tri;
DenseSet<LiveInterval*> alreadySplit;
bool worthTryingToSplit(LiveInterval *li) const {
@@ -258,18 +261,18 @@ private:
SmallVector<VNInfo*, 4> vnis;
std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));
-
+
for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
VNInfo *vni = *vniItr;
-
- // Skip unused VNIs, or VNIs with no kills.
- if (vni->isUnused() || vni->kills.empty())
+
+ // Skip unused VNIs.
+ if (vni->isUnused())
continue;
DEBUG(dbgs() << " Extracted Val #" << vni->id << " as ");
LiveInterval *splitInterval = extractVNI(li, vni);
-
+
if (splitInterval != 0) {
DEBUG(dbgs() << *splitInterval << "\n");
added.push_back(splitInterval);
@@ -281,12 +284,12 @@ private:
} else {
DEBUG(dbgs() << "0\n");
}
- }
+ }
DEBUG(dbgs() << "Original LI: " << *li << "\n");
// If the original interval still contains some live ranges
- // add it to added and alreadySplit.
+ // add it to added and alreadySplit.
if (!li->empty()) {
added.push_back(li);
alreadySplit.insert(li);
@@ -302,16 +305,15 @@ private:
/// Extract the given value number from the interval.
LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
assert(vni->isDefAccurate() || vni->isPHIDef());
- assert(!vni->kills.empty());
- // Create a new vreg and live interval, copy VNI kills & ranges over.
+ // Create a new vreg and live interval, copy VNI ranges over.
const TargetRegisterClass *trc = mri->getRegClass(li->reg);
unsigned newVReg = mri->createVirtualRegister(trc);
vrm->grow();
LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());
- // Start by copying all live ranges in the VN to the new interval.
+ // Start by copying all live ranges in the VN to the new interval.
for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
rItr != rEnd; ++rItr) {
if (rItr->valno == vni) {
@@ -319,7 +321,7 @@ private:
}
}
- // Erase the old VNI & ranges.
+ // Erase the old VNI & ranges.
li->removeValNo(vni);
// Collect all current uses of the register belonging to the given VNI.
@@ -336,15 +338,13 @@ private:
// Insert a copy at the start of the MBB. The range preceding the
// copy will be attached to the original LiveInterval.
MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
- tii->copyRegToReg(*defMBB, defMBB->begin(), newVReg, li->reg, trc, trc,
- DebugLoc());
- MachineInstr *copyMI = defMBB->begin();
- copyMI->addRegisterKilled(li->reg, tri);
+ MachineInstr *copyMI = BuildMI(*defMBB, defMBB->begin(), DebugLoc(),
+ tii->get(TargetOpcode::COPY), newVReg)
+ .addReg(li->reg, RegState::Kill);
SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
VNInfo *phiDefVNI = li->getNextValue(lis->getMBBStartIdx(defMBB),
0, false, lis->getVNInfoAllocator());
phiDefVNI->setIsPHIDef(true);
- phiDefVNI->addKill(copyIdx.getDefIndex());
li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
LiveRange *oldPHIDefRange =
newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));
@@ -367,8 +367,8 @@ private:
newVNI->setIsPHIDef(false); // not a PHI def anymore.
newVNI->setIsDefAccurate(true);
} else {
- // non-PHI def. Rename the def. If it's two-addr that means renaming the use
- // and inserting a new copy too.
+ // non-PHI def. Rename the def. If it's two-addr that means renaming the
+ // use and inserting a new copy too.
MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
// We'll rename this now, so we can remove it from uses.
uses.erase(defInst);
@@ -384,38 +384,26 @@ private:
twoAddrUseIsUndef = true;
}
}
-
+
SlotIndex defIdx = lis->getInstructionIndex(defInst);
newVNI->def = defIdx.getDefIndex();
if (isTwoAddr && !twoAddrUseIsUndef) {
MachineBasicBlock *defMBB = defInst->getParent();
- tii->copyRegToReg(*defMBB, defInst, newVReg, li->reg, trc, trc,
- DebugLoc());
- MachineInstr *copyMI = prior(MachineBasicBlock::iterator(defInst));
+ MachineInstr *copyMI = BuildMI(*defMBB, defInst, DebugLoc(),
+ tii->get(TargetOpcode::COPY), newVReg)
+ .addReg(li->reg, RegState::Kill);
SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
- copyMI->addRegisterKilled(li->reg, tri);
LiveRange *origUseRange =
li->getLiveRangeContaining(newVNI->def.getUseIndex());
- VNInfo *origUseVNI = origUseRange->valno;
origUseRange->end = copyIdx.getDefIndex();
- bool updatedKills = false;
- for (unsigned k = 0; k < origUseVNI->kills.size(); ++k) {
- if (origUseVNI->kills[k] == defIdx.getDefIndex()) {
- origUseVNI->kills[k] = copyIdx.getDefIndex();
- updatedKills = true;
- break;
- }
- }
- assert(updatedKills && "Failed to update VNI kill list.");
VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
true, lis->getVNInfoAllocator());
- copyVNI->addKill(defIdx.getDefIndex());
LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
newLI->addRange(copyRange);
- }
+ }
}
-
+
for (std::set<MachineInstr*>::iterator
usesItr = uses.begin(), usesEnd = uses.end();
usesItr != usesEnd; ++usesItr) {
@@ -435,7 +423,7 @@ private:
// Check if this instr is two address.
unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);
-
+
// Rename uses (and defs for two-address instrs).
for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
MachineOperand &mo = useInst->getOperand(i);
@@ -451,10 +439,9 @@ private:
// reg.
MachineBasicBlock *useMBB = useInst->getParent();
MachineBasicBlock::iterator useItr(useInst);
- tii->copyRegToReg(*useMBB, llvm::next(useItr), li->reg, newVReg, trc, trc,
- DebugLoc());
- MachineInstr *copyMI = llvm::next(useItr);
- copyMI->addRegisterKilled(newVReg, tri);
+ MachineInstr *copyMI = BuildMI(*useMBB, llvm::next(useItr), DebugLoc(),
+ tii->get(TargetOpcode::COPY), newVReg)
+ .addReg(li->reg, RegState::Kill);
SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
// Change the old two-address defined range & vni to start at
@@ -470,56 +457,44 @@ private:
VNInfo *copyVNI =
newLI->getNextValue(useIdx.getDefIndex(), 0, true,
lis->getVNInfoAllocator());
- copyVNI->addKill(copyIdx.getDefIndex());
LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
newLI->addRange(copyRange);
}
}
-
- // Iterate over any PHI kills - we'll need to insert new copies for them.
- for (VNInfo::KillSet::iterator
- killItr = newVNI->kills.begin(), killEnd = newVNI->kills.end();
- killItr != killEnd; ++killItr) {
- SlotIndex killIdx(*killItr);
- if (killItr->isPHI()) {
- MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
- LiveRange *oldKillRange =
- newLI->getLiveRangeContaining(killIdx);
-
- assert(oldKillRange != 0 && "No kill range?");
-
- tii->copyRegToReg(*killMBB, killMBB->getFirstTerminator(),
- li->reg, newVReg, trc, trc,
- DebugLoc());
- MachineInstr *copyMI = prior(killMBB->getFirstTerminator());
- copyMI->addRegisterKilled(newVReg, tri);
- SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
- // Save the current end. We may need it to add a new range if the
- // current range runs of the end of the MBB.
- SlotIndex newKillRangeEnd = oldKillRange->end;
- oldKillRange->end = copyIdx.getDefIndex();
+ // Iterate over any PHI kills - we'll need to insert new copies for them.
+ for (LiveInterval::iterator LRI = newLI->begin(), LRE = newLI->end();
+ LRI != LRE; ++LRI) {
+ if (LRI->valno != newVNI || LRI->end.isPHI())
+ continue;
+ SlotIndex killIdx = LRI->end;
+ MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
+ MachineInstr *copyMI = BuildMI(*killMBB, killMBB->getFirstTerminator(),
+ DebugLoc(), tii->get(TargetOpcode::COPY),
+ li->reg)
+ .addReg(newVReg, RegState::Kill);
+ SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
- if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
- assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
- "PHI kill range doesn't reach kill-block end. Not sane.");
- newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
- newKillRangeEnd, newVNI));
- }
+ // Save the current end. We may need it to add a new range if the
+ // current range runs off the end of the MBB.
+ SlotIndex newKillRangeEnd = LRI->end;
+ LRI->end = copyIdx.getDefIndex();
- *killItr = oldKillRange->end;
- VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
- copyMI, true,
- lis->getVNInfoAllocator());
- newKillVNI->addKill(lis->getMBBTerminatorGap(killMBB));
- newKillVNI->setHasPHIKill(true);
- li->addRange(LiveRange(copyIdx.getDefIndex(),
- lis->getMBBEndIdx(killMBB),
- newKillVNI));
+ if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
+ assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
+ "PHI kill range doesn't reach kill-block end. Not sane.");
+ newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
+ newKillRangeEnd, newVNI));
}
+ VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
+ copyMI, true,
+ lis->getVNInfoAllocator());
+ newKillVNI->setHasPHIKill(true);
+ li->addRange(LiveRange(copyIdx.getDefIndex(),
+ lis->getMBBEndIdx(killMBB),
+ newKillVNI));
}
-
newVNI->setHasPHIKill(false);
return newLI;
@@ -530,6 +505,13 @@ private:
} // end anonymous namespace
+namespace llvm {
+Spiller *createInlineSpiller(MachineFunction*,
+ LiveIntervals*,
+ const MachineLoopInfo*,
+ VirtRegMap*);
+}
+
llvm::Spiller* llvm::createSpiller(MachineFunction *mf, LiveIntervals *lis,
const MachineLoopInfo *loopInfo,
VirtRegMap *vrm) {
@@ -538,5 +520,6 @@ llvm::Spiller* llvm::createSpiller(MachineFunction *mf, LiveIntervals *lis,
case trivial: return new TrivialSpiller(mf, lis, vrm);
case standard: return new StandardSpiller(lis, loopInfo, vrm);
case splitting: return new SplittingSpiller(mf, lis, loopInfo, vrm);
+ case inline_: return createInlineSpiller(mf, lis, loopInfo, vrm);
}
}
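A hypothetical caller of the reworked interface (enqueueForAllocation is an invented allocator hook): the caller now owns the result vector, so intervals produced by a split attempt and by a fallback spill accumulate in one place:

    std::vector<LiveInterval*> NewIntervals;
    SmallVector<LiveInterval*, 8> SpillIs;    // intervals already being spilled
    spiller->spill(LI, NewIntervals, SpillIs, 0);
    for (unsigned i = 0, e = NewIntervals.size(); i != e; ++i)
      enqueueForAllocation(NewIntervals[i]);  // assumed allocator hook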
diff --git a/contrib/llvm/lib/CodeGen/Spiller.h b/contrib/llvm/lib/CodeGen/Spiller.h
index dda52e8..450447b 100644
--- a/contrib/llvm/lib/CodeGen/Spiller.h
+++ b/contrib/llvm/lib/CodeGen/Spiller.h
@@ -33,11 +33,19 @@ namespace llvm {
public:
virtual ~Spiller() = 0;
- /// Spill the given live range. The method used will depend on the Spiller
- /// implementation selected.
- virtual std::vector<LiveInterval*> spill(LiveInterval *li,
- SmallVectorImpl<LiveInterval*> &spillIs,
- SlotIndex *earliestIndex = 0) = 0;
+ /// spill - Spill the given live interval. The method used will depend on
+ /// the Spiller implementation selected.
+ ///
+ /// @param li The live interval to be spilled.
+ /// @param spillIs A list of intervals that are about to be spilled,
+ /// and so cannot be used for remat etc.
+ /// @param newIntervals The newly created intervals will be appended here.
+ /// @param earliestIndex The earliest point for splitting. (OK, it's another
+ /// pointer to the allocator guts).
+ virtual void spill(LiveInterval *li,
+ std::vector<LiveInterval*> &newIntervals,
+ SmallVectorImpl<LiveInterval*> &spillIs,
+ SlotIndex *earliestIndex = 0) = 0;
};
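A minimal conforming implementation, purely for illustration (it forwards to a wrapped spiller, showing how the out-parameter threads through):

    class ForwardingSpiller : public Spiller {
      Spiller &Impl;
    public:
      ForwardingSpiller(Spiller &S) : Impl(S) {}
      void spill(LiveInterval *li,
                 std::vector<LiveInterval*> &newIntervals,
                 SmallVectorImpl<LiveInterval*> &spillIs,
                 SlotIndex *earliestIndex = 0) {
        // New intervals are appended by the wrapped spiller, not returned.
        Impl.spill(li, newIntervals, spillIs, earliestIndex);
      }
    };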
diff --git a/contrib/llvm/lib/CodeGen/StackProtector.cpp b/contrib/llvm/lib/CodeGen/StackProtector.cpp
index 8a6a727..ca5c28c 100644
--- a/contrib/llvm/lib/CodeGen/StackProtector.cpp
+++ b/contrib/llvm/lib/CodeGen/StackProtector.cpp
@@ -136,7 +136,7 @@ bool StackProtector::RequiresStackProtector() const {
bool StackProtector::InsertStackProtectors() {
BasicBlock *FailBB = 0; // The basic block to jump to if check fails.
AllocaInst *AI = 0; // Place on stack that stores the stack guard.
- Constant *StackGuardVar = 0; // The stack guard variable.
+ Value *StackGuardVar = 0; // The stack guard variable.
for (Function::iterator I = F->begin(), E = F->end(); I != E; ) {
BasicBlock *BB = I++;
@@ -153,9 +153,17 @@ bool StackProtector::InsertStackProtectors() {
// StackGuard = load __stack_chk_guard
// call void @llvm.stackprotect.create(StackGuard, StackGuardSlot)
//
- PointerType *PtrTy = PointerType::getUnqual(
- Type::getInt8Ty(RI->getContext()));
- StackGuardVar = M->getOrInsertGlobal("__stack_chk_guard", PtrTy);
+ const PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
+ unsigned AddressSpace, Offset;
+ if (TLI->getStackCookieLocation(AddressSpace, Offset)) {
+ Constant *OffsetVal =
+ ConstantInt::get(Type::getInt32Ty(RI->getContext()), Offset);
+
+ StackGuardVar = ConstantExpr::getIntToPtr(OffsetVal,
+ PointerType::get(PtrTy, AddressSpace));
+ } else {
+ StackGuardVar = M->getOrInsertGlobal("__stack_chk_guard", PtrTy);
+ }
BasicBlock &Entry = F->getEntryBlock();
Instruction *InsPt = &Entry.front();
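As a concrete (assumed) example of the new fast path: on x86-64 Linux the cookie lives at %fs:0x28, which the target reports as AddressSpace 257 and Offset 0x28, so StackGuardVar becomes the constant

    ; an i8** in address space 257 (the FS segment), with no global load:
    inttoptr (i32 40 to i8* addrspace(257)*)

Targets without a fixed cookie location keep the old __stack_chk_guard global.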
diff --git a/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp b/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
index 7f3b452..eff3c33 100644
--- a/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/contrib/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -19,6 +19,7 @@
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -508,8 +509,7 @@ bool StackSlotColoring::PropagateBackward(MachineBasicBlock::iterator MII,
// Abort if the use is actually a sub-register def. We don't have enough
// information to figure out if it is really legal.
- if (MO.getSubReg() || MII->isExtractSubreg() ||
- MII->isInsertSubreg() || MII->isSubregToReg())
+ if (MO.getSubReg() || MII->isSubregToReg())
return false;
const TargetRegisterClass *RC = TID.OpInfo[i].getRegClass(TRI);
@@ -571,7 +571,7 @@ bool StackSlotColoring::PropagateForward(MachineBasicBlock::iterator MII,
// Abort if the use is actually a sub-register use. We don't have enough
// information to figure out if it is really legal.
- if (MO.getSubReg() || MII->isExtractSubreg())
+ if (MO.getSubReg())
return false;
const TargetRegisterClass *RC = TID.OpInfo[i].getRegClass(TRI);
@@ -610,8 +610,8 @@ StackSlotColoring::UnfoldAndRewriteInstruction(MachineInstr *MI, int OldFI,
DEBUG(MI->dump());
++NumLoadElim;
} else {
- TII->copyRegToReg(*MBB, MI, DstReg, Reg, RC, RC,
- MI->getDebugLoc());
+ BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(TargetOpcode::COPY),
+ DstReg).addReg(Reg);
++NumRegRepl;
}
@@ -627,8 +627,8 @@ StackSlotColoring::UnfoldAndRewriteInstruction(MachineInstr *MI, int OldFI,
DEBUG(MI->dump());
++NumStoreElim;
} else {
- TII->copyRegToReg(*MBB, MI, Reg, SrcReg, RC, RC,
- MI->getDebugLoc());
+ BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(TargetOpcode::COPY), Reg)
+ .addReg(SrcReg);
++NumRegRepl;
}
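The recurring rewrite in this commit, shown once in isolation: rather than asking the target for a concrete move via copyRegToReg, passes now emit a target-independent COPY and leave instruction selection to a later lowering pass (MBB, MI, TII, DstReg and SrcReg stand for whatever the call site has in scope):

    BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(TargetOpcode::COPY), DstReg)
      .addReg(SrcReg);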
diff --git a/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp b/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
index 142398c..59315cf 100644
--- a/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
+++ b/contrib/llvm/lib/CodeGen/StrongPHIElimination.cpp
@@ -25,6 +25,7 @@
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
@@ -695,9 +696,8 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
// Insert copy from curr.second to a temporary at
// the Phi defining curr.second
MachineBasicBlock::iterator PI = MRI.getVRegDef(curr.second);
- TII->copyRegToReg(*PI->getParent(), PI, t,
- curr.second, RC, RC, DebugLoc());
-
+ BuildMI(*PI->getParent(), PI, DebugLoc(), TII->get(TargetOpcode::COPY),
+ t).addReg(curr.second);
DEBUG(dbgs() << "Inserted copy from " << curr.second << " to " << t
<< "\n");
@@ -712,8 +712,8 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
}
// Insert copy from map[curr.first] to curr.second
- TII->copyRegToReg(*MBB, MBB->getFirstTerminator(), curr.second,
- map[curr.first], RC, RC, DebugLoc());
+ BuildMI(*MBB, MBB->getFirstTerminator(), DebugLoc(),
+ TII->get(TargetOpcode::COPY), curr.second).addReg(map[curr.first]);
map[curr.first] = curr.second;
DEBUG(dbgs() << "Inserted copy from " << curr.first << " to "
<< curr.second << "\n");
@@ -761,8 +761,8 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
// Insert a copy from dest to a new temporary t at the end of b
unsigned t = MF->getRegInfo().createVirtualRegister(RC);
- TII->copyRegToReg(*MBB, MBB->getFirstTerminator(), t,
- curr.second, RC, RC, DebugLoc());
+ BuildMI(*MBB, MBB->getFirstTerminator(), DebugLoc(),
+ TII->get(TargetOpcode::COPY), t).addReg(curr.second);
map[curr.second] = t;
MachineBasicBlock::iterator TI = MBB->getFirstTerminator();
@@ -830,9 +830,6 @@ void StrongPHIElimination::InsertCopies(MachineDomTreeNode* MDTN,
LiveInterval& Int = LI.getInterval(I->getOperand(i).getReg());
VNInfo* FirstVN = *Int.vni_begin();
FirstVN->setHasPHIKill(false);
- if (I->getOperand(i).isKill())
- FirstVN->addKill(LI.getInstructionIndex(I).getUseIndex());
-
LiveRange LR (LI.getMBBStartIdx(I->getParent()),
LI.getInstructionIndex(I).getUseIndex().getNextSlot(),
FirstVN);
@@ -959,9 +956,8 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
} else {
// Insert a last-minute copy if a conflict was detected.
const TargetInstrInfo *TII = Fn.getTarget().getInstrInfo();
- const TargetRegisterClass *RC = Fn.getRegInfo().getRegClass(I->first);
- TII->copyRegToReg(*SI->second, SI->second->getFirstTerminator(),
- I->first, SI->first, RC, RC, DebugLoc());
+ BuildMI(*SI->second, SI->second->getFirstTerminator(), DebugLoc(),
+ TII->get(TargetOpcode::COPY), I->first).addReg(SI->first);
LI.renumber();
diff --git a/contrib/llvm/lib/CodeGen/TailDuplication.cpp b/contrib/llvm/lib/CodeGen/TailDuplication.cpp
index f2e2a76..075db80 100644
--- a/contrib/llvm/lib/CodeGen/TailDuplication.cpp
+++ b/contrib/llvm/lib/CodeGen/TailDuplication.cpp
@@ -17,6 +17,7 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/Target/TargetInstrInfo.h"
@@ -559,11 +560,9 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB, MachineFunction &MF,
}
MachineBasicBlock::iterator Loc = PredBB->getFirstTerminator();
for (unsigned i = 0, e = CopyInfos.size(); i != e; ++i) {
- const TargetRegisterClass *RC = MRI->getRegClass(CopyInfos[i].first);
- TII->copyRegToReg(*PredBB, Loc, CopyInfos[i].first,
- CopyInfos[i].second, RC,RC, DebugLoc());
- MachineInstr *CopyMI = prior(Loc);
- Copies.push_back(CopyMI);
+ Copies.push_back(BuildMI(*PredBB, Loc, DebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ CopyInfos[i].first).addReg(CopyInfos[i].second));
}
NumInstrDups += TailBB->size() - 1; // subtract one for removed branch
@@ -618,11 +617,10 @@ TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB, MachineFunction &MF,
}
MachineBasicBlock::iterator Loc = PrevBB->getFirstTerminator();
for (unsigned i = 0, e = CopyInfos.size(); i != e; ++i) {
- const TargetRegisterClass *RC = MRI->getRegClass(CopyInfos[i].first);
- TII->copyRegToReg(*PrevBB, Loc, CopyInfos[i].first,
- CopyInfos[i].second, RC, RC, DebugLoc());
- MachineInstr *CopyMI = prior(Loc);
- Copies.push_back(CopyMI);
+ Copies.push_back(BuildMI(*PrevBB, Loc, DebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ CopyInfos[i].first)
+ .addReg(CopyInfos[i].second));
}
} else {
// No PHIs to worry about, just splice the instructions over.
diff --git a/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp b/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
index 0ad6619..cdacb98 100644
--- a/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetInstrInfoImpl.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/SmallVector.h"
@@ -21,11 +22,34 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PostRAHazardRecognizer.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
+/// after it, replacing it with an unconditional branch to NewDest.
+void
+TargetInstrInfoImpl::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+ MachineBasicBlock *NewDest) const {
+ MachineBasicBlock *MBB = Tail->getParent();
+
+ // Remove all the old successors of MBB from the CFG.
+ while (!MBB->succ_empty())
+ MBB->removeSuccessor(MBB->succ_begin());
+
+ // Remove all the dead instructions from the end of MBB.
+ MBB->erase(Tail, MBB->end());
+
+ // If NewDest isn't immediately after MBB, insert a branch to it.
+ if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
+ InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(),
+ Tail->getDebugLoc());
+ MBB->addSuccessor(NewDest);
+}
+
// commuteInstruction - The default implementation of this method just exchanges
// the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
@@ -136,17 +160,9 @@ void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
unsigned DestReg,
unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const {
+ const TargetRegisterInfo &TRI) const {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
- MachineOperand &MO = MI->getOperand(0);
- if (TargetRegisterInfo::isVirtualRegister(DestReg)) {
- MO.setReg(DestReg);
- MO.setSubReg(SubIdx);
- } else if (SubIdx) {
- MO.setReg(TRI->getSubReg(DestReg, SubIdx));
- } else {
- MO.setReg(DestReg);
- }
+ MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
MBB.insert(I, MI);
}
@@ -175,6 +191,47 @@ TargetInstrInfoImpl::GetFunctionSizeInBytes(const MachineFunction &MF) const {
return FnSize;
}
+// If the COPY instruction in MI can be folded to a stack operation, return
+// the register class to use.
+static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
+ unsigned FoldIdx) {
+ assert(MI->isCopy() && "MI must be a COPY instruction");
+ if (MI->getNumOperands() != 2)
+ return 0;
+ assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");
+
+ const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
+ const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);
+
+ if (FoldOp.getSubReg() || LiveOp.getSubReg())
+ return 0;
+
+ unsigned FoldReg = FoldOp.getReg();
+ unsigned LiveReg = LiveOp.getReg();
+
+ assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
+ "Cannot fold physregs");
+
+ const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
+
+ if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
+ return RC->contains(LiveOp.getReg()) ? RC : 0;
+
+ const TargetRegisterClass *LiveRC = MRI.getRegClass(LiveReg);
+ if (RC == LiveRC || RC->hasSubClass(LiveRC))
+ return RC;
+
+ // FIXME: Allow folding when register classes are memory compatible.
+ return 0;
+}
+
+bool TargetInstrInfoImpl::
+canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const {
+ return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
+}
+
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
@@ -182,10 +239,9 @@ TargetInstrInfoImpl::GetFunctionSizeInBytes(const MachineFunction &MF) const {
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
+TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const {
+ int FI) const {
unsigned Flags = 0;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (MI->getOperand(Ops[i]).isDef())
@@ -193,34 +249,56 @@ TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
else
Flags |= MachineMemOperand::MOLoad;
+ MachineBasicBlock *MBB = MI->getParent();
+ assert(MBB && "foldMemoryOperand needs an inserted instruction");
+ MachineFunction &MF = *MBB->getParent();
+
// Ask the target to do the actual folding.
- MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
- if (!NewMI) return 0;
+ if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
+ // Add a memory operand, foldMemoryOperandImpl doesn't do that.
+ assert((!(Flags & MachineMemOperand::MOStore) ||
+ NewMI->getDesc().mayStore()) &&
+ "Folded a def to a non-store!");
+ assert((!(Flags & MachineMemOperand::MOLoad) ||
+ NewMI->getDesc().mayLoad()) &&
+ "Folded a use to a non-load!");
+ const MachineFrameInfo &MFI = *MF.getFrameInfo();
+ assert(MFI.getObjectOffset(FI) != -1);
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
+ Flags, /*Offset=*/0,
+ MFI.getObjectSize(FI),
+ MFI.getObjectAlignment(FI));
+ NewMI->addMemOperand(MF, MMO);
- assert((!(Flags & MachineMemOperand::MOStore) ||
- NewMI->getDesc().mayStore()) &&
- "Folded a def to a non-store!");
- assert((!(Flags & MachineMemOperand::MOLoad) ||
- NewMI->getDesc().mayLoad()) &&
- "Folded a use to a non-load!");
- const MachineFrameInfo &MFI = *MF.getFrameInfo();
- assert(MFI.getObjectOffset(FrameIndex) != -1);
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FrameIndex),
- Flags, /*Offset=*/0,
- MFI.getObjectSize(FrameIndex),
- MFI.getObjectAlignment(FrameIndex));
- NewMI->addMemOperand(MF, MMO);
+ // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
+ return MBB->insert(MI, NewMI);
+ }
- return NewMI;
+ // Straight COPY may fold as load/store.
+ if (!MI->isCopy() || Ops.size() != 1)
+ return 0;
+
+ const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
+ if (!RC)
+ return 0;
+
+ const MachineOperand &MO = MI->getOperand(1-Ops[0]);
+ MachineBasicBlock::iterator Pos = MI;
+ const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+
+ if (Flags == MachineMemOperand::MOStore)
+ storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
+ else
+ loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
+ return --Pos;
}
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
- MachineInstr* MI,
+TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
@@ -228,11 +306,15 @@ TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
+ MachineBasicBlock &MBB = *MI->getParent();
+ MachineFunction &MF = *MBB.getParent();
// Ask the target to do the actual folding.
MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
if (!NewMI) return 0;
+ NewMI = MBB.insert(MI, NewMI);
+
// Copy the memoperands from the load to the folded instruction.
NewMI->setMemRefs(LoadMI->memoperands_begin(),
LoadMI->memoperands_end());
@@ -240,11 +322,9 @@ TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
return NewMI;
}
-bool
-TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(const MachineInstr *
- MI,
- AliasAnalysis *
- AA) const {
+bool TargetInstrInfo::
+isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
+ AliasAnalysis *AA) const {
const MachineFunction &MF = *MI->getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
const TargetMachine &TM = MF.getTarget();
@@ -324,3 +404,31 @@ TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(const MachineInstr *
// Everything checked out.
return true;
}
+
+/// isSchedulingBoundary - Test if the given instruction should be
+/// considered a scheduling boundary. This primarily includes labels
+/// and terminators.
+bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const{
+ // Terminators and labels can't be scheduled around.
+ if (MI->getDesc().isTerminator() || MI->isLabel())
+ return true;
+
+ // Don't attempt to schedule around any instruction that defines
+ // a stack-oriented pointer, as it's unlikely to be profitable. This
+ // saves compile time, because it doesn't require every single
+ // stack slot reference to depend on the instruction that does the
+ // modification.
+ const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
+ if (MI->definesRegister(TLI.getStackPointerRegisterToSaveRestore()))
+ return true;
+
+ return false;
+}
+
+// Default implementation of CreateTargetPostRAHazardRecognizer.
+ScheduleHazardRecognizer *TargetInstrInfoImpl::
+CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {
+ return (ScheduleHazardRecognizer *)new PostRAHazardRecognizer(II);
+}
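A hypothetical caller sketch for the iterator-based foldMemoryOperand (MI and FI assumed in scope): the folded instruction is already inserted into the block, so the caller only removes the original:

    SmallVector<unsigned, 1> Ops(1, 0);       // fold operand 0 of MI
    if (MachineInstr *NewMI = TII->foldMemoryOperand(MI, Ops, FI)) {
      (void)NewMI;                            // already inserted before MI
      MI->eraseFromParent();
    }

For a plain COPY this can now succeed even when the target's foldMemoryOperandImpl declines, via the storeRegToStackSlot / loadRegFromStackSlot fallback above.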
diff --git a/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 71ad3fb..a80cfc4 100644
--- a/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/contrib/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -825,32 +825,32 @@ void TargetLoweringObjectFileCOFF::Initialize(MCContext &Ctx,
TargetLoweringObjectFile::Initialize(Ctx, TM);
TextSection =
getContext().getCOFFSection(".text",
- MCSectionCOFF::IMAGE_SCN_CNT_CODE |
- MCSectionCOFF::IMAGE_SCN_MEM_EXECUTE |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_CNT_CODE |
+ COFF::IMAGE_SCN_MEM_EXECUTE |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getText());
DataSection =
getContext().getCOFFSection(".data",
- MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- MCSectionCOFF::IMAGE_SCN_MEM_READ |
- MCSectionCOFF::IMAGE_SCN_MEM_WRITE,
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE,
SectionKind::getDataRel());
ReadOnlySection =
getContext().getCOFFSection(".rdata",
- MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getReadOnly());
StaticCtorSection =
getContext().getCOFFSection(".ctors",
- MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- MCSectionCOFF::IMAGE_SCN_MEM_READ |
- MCSectionCOFF::IMAGE_SCN_MEM_WRITE,
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE,
SectionKind::getDataRel());
StaticDtorSection =
getContext().getCOFFSection(".dtors",
- MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- MCSectionCOFF::IMAGE_SCN_MEM_READ |
- MCSectionCOFF::IMAGE_SCN_MEM_WRITE,
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE,
SectionKind::getDataRel());
// FIXME: We're emitting LSDA info into a readonly section on COFF, even
@@ -859,76 +859,76 @@ void TargetLoweringObjectFileCOFF::Initialize(MCContext &Ctx,
// adjusted or this should be a data section.
LSDASection =
getContext().getCOFFSection(".gcc_except_table",
- MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getReadOnly());
EHFrameSection =
getContext().getCOFFSection(".eh_frame",
- MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- MCSectionCOFF::IMAGE_SCN_MEM_READ |
- MCSectionCOFF::IMAGE_SCN_MEM_WRITE,
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE,
SectionKind::getDataRel());
// Debug info.
DwarfAbbrevSection =
getContext().getCOFFSection(".debug_abbrev",
- MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
DwarfInfoSection =
getContext().getCOFFSection(".debug_info",
- MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
DwarfLineSection =
getContext().getCOFFSection(".debug_line",
- MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
DwarfFrameSection =
getContext().getCOFFSection(".debug_frame",
- MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
DwarfPubNamesSection =
getContext().getCOFFSection(".debug_pubnames",
- MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
DwarfPubTypesSection =
getContext().getCOFFSection(".debug_pubtypes",
- MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
DwarfStrSection =
getContext().getCOFFSection(".debug_str",
- MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
DwarfLocSection =
getContext().getCOFFSection(".debug_loc",
- MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
DwarfARangesSection =
getContext().getCOFFSection(".debug_aranges",
- MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
DwarfRangesSection =
getContext().getCOFFSection(".debug_ranges",
- MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
DwarfMacroInfoSection =
getContext().getCOFFSection(".debug_macinfo",
- MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
- MCSectionCOFF::IMAGE_SCN_MEM_READ,
+ COFF::IMAGE_SCN_MEM_DISCARDABLE |
+ COFF::IMAGE_SCN_MEM_READ,
SectionKind::getMetadata());
DrectveSection =
getContext().getCOFFSection(".drectve",
- MCSectionCOFF::IMAGE_SCN_LNK_INFO,
+ COFF::IMAGE_SCN_LNK_INFO,
SectionKind::getMetadata());
}
@@ -936,27 +936,27 @@ static unsigned
getCOFFSectionFlags(SectionKind K) {
unsigned Flags = 0;
- if (!K.isMetadata())
+ if (K.isMetadata())
Flags |=
- MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE;
+ COFF::IMAGE_SCN_MEM_DISCARDABLE;
else if (K.isText())
Flags |=
- MCSectionCOFF::IMAGE_SCN_MEM_EXECUTE |
- MCSectionCOFF::IMAGE_SCN_CNT_CODE;
+ COFF::IMAGE_SCN_MEM_EXECUTE |
+ COFF::IMAGE_SCN_CNT_CODE;
else if (K.isBSS ())
Flags |=
- MCSectionCOFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA |
- MCSectionCOFF::IMAGE_SCN_MEM_READ |
- MCSectionCOFF::IMAGE_SCN_MEM_WRITE;
+ COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE;
else if (K.isReadOnly())
Flags |=
- MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- MCSectionCOFF::IMAGE_SCN_MEM_READ;
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ;
else if (K.isWriteable())
Flags |=
- MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
- MCSectionCOFF::IMAGE_SCN_MEM_READ |
- MCSectionCOFF::IMAGE_SCN_MEM_WRITE;
+ COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+ COFF::IMAGE_SCN_MEM_READ |
+ COFF::IMAGE_SCN_MEM_WRITE;
return Flags;
}
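For instance, a read-only section kind now maps to the shared COFF constants (the same bit pattern the old MCSectionCOFF-scoped enumerators produced):

    unsigned Flags = getCOFFSectionFlags(SectionKind::getReadOnly());
    assert(Flags == (COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
                     COFF::IMAGE_SCN_MEM_READ));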
@@ -995,10 +995,10 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
unsigned Characteristics = getCOFFSectionFlags(Kind);
- Characteristics |= MCSectionCOFF::IMAGE_SCN_LNK_COMDAT;
+ Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT;
return getContext().getCOFFSection(Name.str(), Characteristics,
- MCSectionCOFF::IMAGE_COMDAT_SELECT_EXACT_MATCH, Kind);
+ COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH, Kind);
}
if (Kind.isText())
diff --git a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index 3d10dc1..5649143 100644
--- a/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/contrib/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -33,6 +33,7 @@
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -381,7 +382,7 @@ static bool isCopyToReg(MachineInstr &MI, const TargetInstrInfo *TII,
DstReg = 0;
unsigned SrcSubIdx, DstSubIdx;
if (!TII->isMoveInstr(MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
- if (MI.isExtractSubreg()) {
+ if (MI.isCopy()) {
DstReg = MI.getOperand(0).getReg();
SrcReg = MI.getOperand(1).getReg();
} else if (MI.isInsertSubreg()) {
@@ -897,6 +898,108 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi,
}
}
}
+
+ // If this is an instruction with a load folded into it, try unfolding
+ // the load, e.g. avoid this:
+ // movq %rdx, %rcx
+ // addq (%rax), %rcx
+ // in favor of this:
+ // movq (%rax), %rcx
+ // addq %rdx, %rcx
+ // because it's preferable to schedule a load rather than a register copy.
+ if (TID.mayLoad() && !regBKilled) {
+ // Determine if a load can be unfolded.
+ unsigned LoadRegIndex;
+ unsigned NewOpc =
+ TII->getOpcodeAfterMemoryUnfold(mi->getOpcode(),
+ /*UnfoldLoad=*/true,
+ /*UnfoldStore=*/false,
+ &LoadRegIndex);
+ if (NewOpc != 0) {
+ const TargetInstrDesc &UnfoldTID = TII->get(NewOpc);
+ if (UnfoldTID.getNumDefs() == 1) {
+ MachineFunction &MF = *mbbi->getParent();
+
+ // Unfold the load.
+ DEBUG(dbgs() << "2addr: UNFOLDING: " << *mi);
+ const TargetRegisterClass *RC =
+ UnfoldTID.OpInfo[LoadRegIndex].getRegClass(TRI);
+ unsigned Reg = MRI->createVirtualRegister(RC);
+ SmallVector<MachineInstr *, 2> NewMIs;
+ if (!TII->unfoldMemoryOperand(MF, mi, Reg,
+ /*UnfoldLoad=*/true,/*UnfoldStore=*/false,
+ NewMIs)) {
+ DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
+ return false;
+ }
+ assert(NewMIs.size() == 2 &&
+ "Unfolded a load into multiple instructions!");
+ // The load was previously folded, so this is the only use.
+ NewMIs[1]->addRegisterKilled(Reg, TRI);
+
+ // Tentatively insert the instructions into the block so that they
+ // look "normal" to the transformation logic.
+ mbbi->insert(mi, NewMIs[0]);
+ mbbi->insert(mi, NewMIs[1]);
+
+ DEBUG(dbgs() << "2addr: NEW LOAD: " << *NewMIs[0]
+ << "2addr: NEW INST: " << *NewMIs[1]);
+
+ // Transform the instruction, now that it no longer has a load.
+ unsigned NewDstIdx = NewMIs[1]->findRegisterDefOperandIdx(regA);
+ unsigned NewSrcIdx = NewMIs[1]->findRegisterUseOperandIdx(regB);
+ MachineBasicBlock::iterator NewMI = NewMIs[1];
+ bool TransformSuccess =
+ TryInstructionTransform(NewMI, mi, mbbi,
+ NewSrcIdx, NewDstIdx, Dist);
+ if (TransformSuccess ||
+ NewMIs[1]->getOperand(NewSrcIdx).isKill()) {
+ // Success, or at least we made an improvement. Keep the unfolded
+ // instructions and discard the original.
+ if (LV) {
+ for (unsigned i = 0, e = mi->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = mi->getOperand(i);
+ if (MO.isReg() && MO.getReg() != 0 &&
+ TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
+ if (MO.isUse()) {
+ if (MO.isKill()) {
+ if (NewMIs[0]->killsRegister(MO.getReg()))
+ LV->replaceKillInstruction(MO.getReg(), mi, NewMIs[0]);
+ else {
+ assert(NewMIs[1]->killsRegister(MO.getReg()) &&
+ "Kill missing after load unfold!");
+ LV->replaceKillInstruction(MO.getReg(), mi, NewMIs[1]);
+ }
+ }
+ } else if (LV->removeVirtualRegisterDead(MO.getReg(), mi)) {
+ if (NewMIs[1]->registerDefIsDead(MO.getReg()))
+ LV->addVirtualRegisterDead(MO.getReg(), NewMIs[1]);
+ else {
+ assert(NewMIs[0]->registerDefIsDead(MO.getReg()) &&
+ "Dead flag missing after load unfold!");
+ LV->addVirtualRegisterDead(MO.getReg(), NewMIs[0]);
+ }
+ }
+ }
+ }
+ LV->addVirtualRegisterKilled(Reg, NewMIs[1]);
+ }
+ mi->eraseFromParent();
+ mi = NewMIs[1];
+ if (TransformSuccess)
+ return true;
+ } else {
+ // Transforming didn't eliminate the tie and didn't lead to an
+ // improvement. Clean up the unfolded instructions and keep the
+ // original.
+ DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
+ NewMIs[0]->eraseFromParent();
+ NewMIs[1]->eraseFromParent();
+ }
+ }
+ }
+ }
+
return false;
}
@@ -1047,14 +1150,12 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
isProfitableToReMat(regB, rc, mi, DefMI, mbbi, Dist)){
DEBUG(dbgs() << "2addr: REMATTING : " << *DefMI << "\n");
unsigned regASubIdx = mi->getOperand(DstIdx).getSubReg();
- TII->reMaterialize(*mbbi, mi, regA, regASubIdx, DefMI, TRI);
+ TII->reMaterialize(*mbbi, mi, regA, regASubIdx, DefMI, *TRI);
ReMatRegs.set(regB);
++NumReMats;
} else {
- bool Emitted = TII->copyRegToReg(*mbbi, mi, regA, regB, rc, rc,
- mi->getDebugLoc());
- (void)Emitted;
- assert(Emitted && "Unable to issue a copy instruction!\n");
+ BuildMI(*mbbi, mi, mi->getDebugLoc(), TII->get(TargetOpcode::COPY),
+ regA).addReg(regB);
}
MachineBasicBlock::iterator prevMI = prior(mi);
@@ -1104,12 +1205,30 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
}
}
}
-
+
+ // Schedule the source copy / remat inserted to form the two-address
+ // instruction. FIXME: Does it matter that the distance map may not be
+ // accurate after it's scheduled?
+ TII->scheduleTwoAddrSource(prior(mi), mi, *TRI);
+
MadeChange = true;
DEBUG(dbgs() << "\t\trewrite to:\t" << *mi);
}
+ // Rewrite INSERT_SUBREG as COPY now that we no longer need SSA form.
+ if (mi->isInsertSubreg()) {
+ // From %reg = INSERT_SUBREG %reg, %subreg, subidx
+ // To %reg:subidx = COPY %subreg
+ unsigned SubIdx = mi->getOperand(3).getImm();
+ mi->RemoveOperand(3);
+ assert(mi->getOperand(0).getSubReg() == 0 && "Unexpected subreg idx");
+ mi->getOperand(0).setSubReg(SubIdx);
+ mi->RemoveOperand(1);
+ mi->setDesc(TII->get(TargetOpcode::COPY));
+ DEBUG(dbgs() << "\t\tconvert to:\t" << *mi);
+ }
+
// Clear TiedOperands here instead of at the top of the loop
// since most instructions do not have tied operands.
TiedOperands.clear();
@@ -1136,14 +1255,13 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &MF) {
static void UpdateRegSequenceSrcs(unsigned SrcReg,
unsigned DstReg, unsigned SubIdx,
- MachineRegisterInfo *MRI) {
+ MachineRegisterInfo *MRI,
+ const TargetRegisterInfo &TRI) {
for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(SrcReg),
RE = MRI->reg_end(); RI != RE; ) {
MachineOperand &MO = RI.getOperand();
++RI;
- MO.setReg(DstReg);
- assert(MO.getSubReg() == 0);
- MO.setSubReg(SubIdx);
+ MO.substVirtReg(DstReg, SubIdx, TRI);
}
}
@@ -1165,55 +1283,102 @@ TwoAddressInstructionPass::CoalesceExtSubRegs(SmallVector<unsigned,4> &Srcs,
if (!Seen.insert(SrcReg))
continue;
- // If there are no other uses than extract_subreg which feed into
+ // Check that the instructions are all in the same basic block.
+ MachineInstr *SrcDefMI = MRI->getVRegDef(SrcReg);
+ MachineInstr *DstDefMI = MRI->getVRegDef(DstReg);
+ if (SrcDefMI->getParent() != DstDefMI->getParent())
+ continue;
+
+ // If there are no other uses than copies which feed into
// the reg_sequence, then we might be able to coalesce them.
bool CanCoalesce = true;
- SmallVector<unsigned, 4> SubIndices;
+ SmallVector<unsigned, 4> SrcSubIndices, DstSubIndices;
for (MachineRegisterInfo::use_nodbg_iterator
UI = MRI->use_nodbg_begin(SrcReg),
UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
MachineInstr *UseMI = &*UI;
- if (!UseMI->isExtractSubreg() ||
- UseMI->getOperand(0).getReg() != DstReg) {
+ if (!UseMI->isCopy() || UseMI->getOperand(0).getReg() != DstReg) {
CanCoalesce = false;
break;
}
- SubIndices.push_back(UseMI->getOperand(2).getImm());
+ SrcSubIndices.push_back(UseMI->getOperand(1).getSubReg());
+ DstSubIndices.push_back(UseMI->getOperand(0).getSubReg());
}
- if (!CanCoalesce || SubIndices.size() < 2)
+ if (!CanCoalesce || SrcSubIndices.size() < 2)
continue;
- std::sort(SubIndices.begin(), SubIndices.end());
- unsigned NewSubIdx = 0;
- if (TRI->canCombinedSubRegIndex(MRI->getRegClass(SrcReg), SubIndices,
- NewSubIdx)) {
- bool Proceed = true;
- if (NewSubIdx)
- for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(SrcReg),
- RE = MRI->reg_end(); RI != RE; ) {
- MachineOperand &MO = RI.getOperand();
- ++RI;
- // FIXME: If the sub-registers do not combine to the whole
- // super-register, i.e. NewSubIdx != 0, and any of the use has a
- // sub-register index, then abort the coalescing attempt.
- if (MO.getSubReg()) {
- Proceed = false;
- break;
- }
- MO.setReg(DstReg);
- MO.setSubReg(NewSubIdx);
- }
- if (Proceed)
- for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(SrcReg),
- RE = MRI->reg_end(); RI != RE; ) {
- MachineOperand &MO = RI.getOperand();
- ++RI;
- MO.setReg(DstReg);
- if (NewSubIdx)
- MO.setSubReg(NewSubIdx);
- }
+ // Check that the source subregisters can be combined.
+ std::sort(SrcSubIndices.begin(), SrcSubIndices.end());
+ unsigned NewSrcSubIdx = 0;
+ if (!TRI->canCombineSubRegIndices(MRI->getRegClass(SrcReg), SrcSubIndices,
+ NewSrcSubIdx))
+ continue;
+
+ // Check that the destination subregisters can also be combined.
+ std::sort(DstSubIndices.begin(), DstSubIndices.end());
+ unsigned NewDstSubIdx = 0;
+ if (!TRI->canCombineSubRegIndices(MRI->getRegClass(DstReg), DstSubIndices,
+ NewDstSubIdx))
+ continue;
+
+ // If neither source nor destination can be combined to the full register,
+ // just give up. This could be improved if it ever matters.
+ if (NewSrcSubIdx != 0 && NewDstSubIdx != 0)
+ continue;
+
+ // Now that we know that all the uses are subreg copies and that those
+ // subregs can somehow be combined, scan all the copies again to make
+ // sure the subregs are in the right order and can be composed.
+ MachineInstr *SomeMI = 0;
+ CanCoalesce = true;
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI->use_nodbg_begin(SrcReg),
+ UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
+ MachineInstr *UseMI = &*UI;
+ assert(UseMI->isCopy());
+ unsigned DstSubIdx = UseMI->getOperand(0).getSubReg();
+ unsigned SrcSubIdx = UseMI->getOperand(1).getSubReg();
+ assert(DstSubIdx != 0 && "missing subreg from RegSequence elimination");
+ if ((NewDstSubIdx == 0 &&
+ TRI->composeSubRegIndices(NewSrcSubIdx, DstSubIdx) != SrcSubIdx) ||
+ (NewSrcSubIdx == 0 &&
+ TRI->composeSubRegIndices(NewDstSubIdx, SrcSubIdx) != DstSubIdx)) {
+ CanCoalesce = false;
+ break;
+ }
+ // Keep track of one of the uses.
+ SomeMI = UseMI;
+ }
+ if (!CanCoalesce)
+ continue;
+
+ // Insert a copy to replace the original.
+ MachineBasicBlock::iterator InsertLoc = SomeMI;
+ MachineInstr *CopyMI = BuildMI(*SomeMI->getParent(), SomeMI,
+ SomeMI->getDebugLoc(),
+ TII->get(TargetOpcode::COPY))
+ .addReg(DstReg, RegState::Define, NewDstSubIdx)
+ .addReg(SrcReg, 0, NewSrcSubIdx);
+
+ // Remove all the old copy instructions.
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI->use_nodbg_begin(SrcReg),
+ UE = MRI->use_nodbg_end(); UI != UE; ) {
+ MachineInstr *UseMI = &*UI;
+ ++UI;
+ if (UseMI == CopyMI)
+ continue;
+ assert(UseMI->isCopy());
+ // Move any kills to the new copy instruction.
+ if (UseMI->getOperand(1).isKill()) {
+ CopyMI->getOperand(1).setIsKill();
+ if (LV)
+ // Update live variables
+ LV->replaceKillInstruction(SrcReg, UseMI, &*CopyMI);
}
+ UseMI->eraseFromParent();
+ }
}
}
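
The validation loop above only has to handle the case where one side combined to the full register (the earlier check gives up when both NewSrcSubIdx and NewDstSubIdx are nonzero). Under that assumption, the per-copy test reduces to a single composition equality per use, as in this condensed stand-in (compose() again substitutes for the real TargetRegisterInfo table):

#include <vector>

struct CopyUse { unsigned DstSubIdx, SrcSubIdx; };

static unsigned compose(unsigned A, unsigned B) {
  return A == 0 ? B : (B == 0 ? A : A * 16 + B);  // toy table
}

bool subIndicesLineUp(const std::vector<CopyUse> &Uses,
                      unsigned NewSrcSubIdx, unsigned NewDstSubIdx) {
  for (const CopyUse &U : Uses) {
    if ((NewDstSubIdx == 0 &&
         compose(NewSrcSubIdx, U.DstSubIdx) != U.SrcSubIdx) ||
        (NewSrcSubIdx == 0 &&
         compose(NewDstSubIdx, U.SrcSubIdx) != U.DstSubIdx))
      return false;
  }
  return true;
}

int main() {
  std::vector<CopyUse> Uses = {{1, 1}, {2, 2}};
  return subIndicesLineUp(Uses, 0, 0) ? 0 : 1;
}
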
@@ -1268,15 +1433,13 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
}
IsImpDef = false;
- // Remember EXTRACT_SUBREG sources. These might be candidate for
- // coalescing.
- if (DefMI->isExtractSubreg())
+ // Remember COPY sources. These might be candidates for coalescing.
+ if (DefMI->isCopy() && DefMI->getOperand(1).getSubReg())
RealSrcs.push_back(DefMI->getOperand(1).getReg());
- if (!Seen.insert(SrcReg) ||
- MI->getParent() != DefMI->getParent() ||
- !MI->getOperand(i).isKill() ||
- HasOtherRegSequenceUses(SrcReg, MI, MRI)) {
+ bool isKill = MI->getOperand(i).isKill();
+ if (!Seen.insert(SrcReg) || MI->getParent() != DefMI->getParent() ||
+ !isKill || HasOtherRegSequenceUses(SrcReg, MI, MRI)) {
// REG_SEQUENCE cannot have duplicated operands, add a copy.
// Also add a copy if the source is live-in the block. We don't want
// to end up with a partial-redef of a livein, e.g.
@@ -1292,30 +1455,23 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
// If the REG_SEQUENCE doesn't kill its source, keeping live variables
// correctly up to date becomes very difficult. Insert a copy.
//
- const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
- unsigned NewReg = MRI->createVirtualRegister(RC);
MachineBasicBlock::iterator InsertLoc = MI;
- bool Emitted =
- TII->copyRegToReg(*MI->getParent(), InsertLoc, NewReg, SrcReg, RC, RC,
- MI->getDebugLoc());
- (void)Emitted;
- assert(Emitted && "Unable to issue a copy instruction!\n");
- MI->getOperand(i).setReg(NewReg);
- if (MI->getOperand(i).isKill()) {
- MachineBasicBlock::iterator CopyMI = prior(InsertLoc);
- MachineOperand *KillMO = CopyMI->findRegisterUseOperand(SrcReg);
- KillMO->setIsKill();
- if (LV)
- // Update live variables
- LV->replaceKillInstruction(SrcReg, MI, &*CopyMI);
- }
+ MachineInstr *CopyMI = BuildMI(*MI->getParent(), InsertLoc,
+ MI->getDebugLoc(), TII->get(TargetOpcode::COPY))
+ .addReg(DstReg, RegState::Define, MI->getOperand(i+1).getImm())
+ .addReg(SrcReg, getKillRegState(isKill));
+ MI->getOperand(i).setReg(0);
+ if (LV && isKill)
+ LV->replaceKillInstruction(SrcReg, MI, CopyMI);
+ DEBUG(dbgs() << "Inserted: " << *CopyMI);
}
}
for (unsigned i = 1, e = MI->getNumOperands(); i < e; i += 2) {
unsigned SrcReg = MI->getOperand(i).getReg();
+ if (!SrcReg) continue;
unsigned SubIdx = MI->getOperand(i+1).getImm();
- UpdateRegSequenceSrcs(SrcReg, DstReg, SubIdx, MRI);
+ UpdateRegSequenceSrcs(SrcReg, DstReg, SubIdx, MRI, *TRI);
}
if (IsImpDef) {
@@ -1328,8 +1484,11 @@ bool TwoAddressInstructionPass::EliminateRegSequences() {
MI->eraseFromParent();
}
- // Try coalescing some EXTRACT_SUBREG instructions.
- CoalesceExtSubRegs(RealSrcs, DstReg);
+ // Try coalescing some EXTRACT_SUBREG instructions. This can create
+ // INSERT_SUBREG instructions that must have <undef> flags added by
+ // LiveIntervalAnalysis, so only run it when LiveVariables is available.
+ if (LV)
+ CoalesceExtSubRegs(RealSrcs, DstReg);
}
RegSequences.clear();
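
Net effect of the EliminateRegSequences changes above: sources that can be rewritten in place get their operands substituted, and every source that cannot (duplicate, non-killed, or cross-block) receives one subreg-def COPY. A trivial illustration of the emitted form, with printf standing in for BuildMI(..., TargetOpcode::COPY):

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  unsigned DstReg = 42;
  // REG_SEQUENCE operand list: (srcreg, subidx) pairs.
  std::vector<std::pair<unsigned, unsigned>> Pairs = {{7, 1}, {8, 2}};
  for (const auto &P : Pairs)
    std::printf("%%reg%u:sub%u = COPY %%reg%u\n", DstReg, P.second, P.first);
}
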
diff --git a/contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp b/contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp
index 871d836..57a1500 100644
--- a/contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp
+++ b/contrib/llvm/lib/CodeGen/VirtRegRewriter.cpp
@@ -667,8 +667,7 @@ static void ReMaterialize(MachineBasicBlock &MBB,
assert(TID.getNumDefs() == 1 &&
"Don't know how to remat instructions that define > 1 values!");
#endif
- TII->reMaterialize(MBB, MII, DestReg,
- ReMatDefMI->getOperand(0).getSubReg(), ReMatDefMI, TRI);
+ TII->reMaterialize(MBB, MII, DestReg, 0, ReMatDefMI, *TRI);
MachineInstr *NewMI = prior(MII);
for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = NewMI->getOperand(i);
@@ -769,7 +768,7 @@ void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
I != E; ++I) {
unsigned Reg = I->first;
- const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
+ const TargetRegisterClass* RC = TRI->getMinimalPhysRegClass(Reg);
// FIXME: A temporary workaround. We can't reuse available value if it's
// not safe to move the def of the virtual register's class. e.g.
// X86::RFP* register classes. Do not add it as a live-in.
@@ -1022,7 +1021,7 @@ static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
unsigned Kill = Kills[i];
if (!Defs[Kill] && !Uses[Kill] &&
- TRI->getPhysicalRegisterRegClass(Kill) == RC)
+ RC->contains(Kill))
return Kill;
}
for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
@@ -1410,25 +1409,25 @@ OptimizeByUnfold(MachineBasicBlock::iterator &MII,
if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
assert(NewMIs.size() == 1);
MachineInstr *NewMI = NewMIs.back();
+ MBB->insert(MII, NewMI);
NewMIs.clear();
int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
assert(Idx != -1);
SmallVector<unsigned, 1> Ops;
Ops.push_back(Idx);
- MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
+ MachineInstr *FoldedMI = TII->foldMemoryOperand(NewMI, Ops, SS);
+ NewMI->eraseFromParent();
if (FoldedMI) {
VRM->addSpillSlotUse(SS, FoldedMI);
if (!VRM->hasPhys(UnfoldVR))
VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
- MII = MBB->insert(MII, FoldedMI);
+ MII = FoldedMI;
InvalidateKills(MI, TRI, RegKills, KillOps);
VRM->RemoveMachineInstrFromMaps(&MI);
MBB->erase(&MI);
- MF.DeleteMachineInstr(NewMI);
return true;
}
- MF.DeleteMachineInstr(NewMI);
}
}
@@ -1480,7 +1479,6 @@ CommuteToFoldReload(MachineBasicBlock::iterator &MII,
if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
return false;
- MachineFunction &MF = *MBB->getParent();
MachineInstr &MI = *MII;
MachineBasicBlock::iterator DefMII = prior(MII);
MachineInstr *DefMI = DefMII;
@@ -1511,11 +1509,12 @@ CommuteToFoldReload(MachineBasicBlock::iterator &MII,
MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
if (!CommutedMI)
return false;
+ MBB->insert(MII, CommutedMI);
SmallVector<unsigned, 1> Ops;
Ops.push_back(NewDstIdx);
- MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
+ MachineInstr *FoldedMI = TII->foldMemoryOperand(CommutedMI, Ops, SS);
// Not needed since foldMemoryOperand returns new MI.
- MF.DeleteMachineInstr(CommutedMI);
+ CommutedMI->eraseFromParent();
if (!FoldedMI)
return false;
@@ -1528,7 +1527,7 @@ CommuteToFoldReload(MachineBasicBlock::iterator &MII,
MachineInstr *StoreMI = MII;
VRM->addSpillSlotUse(SS, StoreMI);
VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
- MII = MBB->insert(MII, FoldedMI); // Update MII to backtrack.
+ MII = FoldedMI; // Update MII to backtrack.
// Delete all 3 old instructions.
InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
@@ -1704,7 +1703,7 @@ bool LocalRewriter::InsertEmergencySpills(MachineInstr *MI) {
std::vector<unsigned> &EmSpills = VRM->getEmergencySpills(MI);
for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
unsigned PhysReg = EmSpills[i];
- const TargetRegisterClass *RC = TRI->getPhysicalRegisterRegClass(PhysReg);
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysReg);
assert(RC && "Unable to determine register class!");
int SS = VRM->getEmergencySpillSlot(RC);
if (UsedSS.count(SS))
@@ -1759,7 +1758,6 @@ bool LocalRewriter::InsertRestores(MachineInstr *MI,
bool DoReMat = VRM->isReMaterialized(VirtReg);
int SSorRMId = DoReMat
? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
- const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
if (InReg == Phys) {
// If the value is already available in the expected register, save
@@ -1793,20 +1791,16 @@ bool LocalRewriter::InsertRestores(MachineInstr *MI,
MachineBasicBlock::iterator InsertLoc =
ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
*MBB->getParent());
-
- TII->copyRegToReg(*MBB, InsertLoc, Phys, InReg, RC, RC,
- MI->getDebugLoc());
+ MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI->getDebugLoc(),
+ TII->get(TargetOpcode::COPY), Phys)
+ .addReg(InReg, RegState::Kill);
// This invalidates Phys.
Spills.ClobberPhysReg(Phys);
// Remember it's available.
Spills.addAvailable(SSorRMId, Phys);
- // Mark is killed.
- MachineInstr *CopyMI = prior(InsertLoc);
CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
- MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
- KillOpnd->setIsKill();
UpdateKills(*CopyMI, TRI, RegKills, KillOps);
DEBUG(dbgs() << '\t' << *CopyMI);
@@ -2013,7 +2007,7 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
// = EXTRACT_SUBREG fi#1
// fi#1 is available in EDI, but it cannot be reused because it's not in
// the right register file.
- if (PhysReg && !AvoidReload && (SubIdx || MI.isExtractSubreg())) {
+ if (PhysReg && !AvoidReload && SubIdx) {
const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
if (!RC->contains(PhysReg))
PhysReg = 0;
@@ -2034,6 +2028,18 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
Spills.canClobberPhysReg(PhysReg);
}
+ // If this is an asm, and PhysReg is used elsewhere as an earlyclobber
+ // operand, we can't also use it as an input. (Outputs always come
+ // before inputs, so we can stop looking at i.)
+ if (MI.isInlineAsm()) {
+ for (unsigned k=0; k<i; ++k) {
+ MachineOperand &MOk = MI.getOperand(k);
+ if (MOk.isReg() && MOk.getReg()==PhysReg && MOk.isEarlyClobber()) {
+ CanReuse = false;
+ break;
+ }
+ }
+ }
if (CanReuse) {
// If this stack slot value is already available, reuse it!
@@ -2104,6 +2110,8 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
// To avoid this problem, and to avoid doing a load right after a store,
// we emit a copy from PhysReg into the designated register for this
// operand.
+ //
+ // This case also applies to an earlyclobber'd PhysReg.
unsigned DesignatedReg = VRM->getPhys(VirtReg);
assert(DesignatedReg && "Must map virtreg to physreg!");
@@ -2136,7 +2144,6 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
continue;
}
- const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
MRI->setPhysRegUsed(DesignatedReg);
ReusedOperands.markClobbered(DesignatedReg);
@@ -2144,11 +2151,9 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
MachineBasicBlock::iterator InsertLoc =
ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
SSorRMId, TII, MF);
-
- TII->copyRegToReg(*MBB, InsertLoc, DesignatedReg, PhysReg, RC, RC,
- MI.getDebugLoc());
-
- MachineInstr *CopyMI = prior(InsertLoc);
+ MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI.getDebugLoc(),
+ TII->get(TargetOpcode::COPY),
+ DesignatedReg).addReg(PhysReg);
CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
UpdateKills(*CopyMI, TRI, RegKills, KillOps);
@@ -2269,27 +2274,16 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
DEBUG(dbgs() << "Promoted Load To Copy: " << MI);
if (DestReg != InReg) {
- const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
- TII->copyRegToReg(*MBB, &MI, DestReg, InReg, RC, RC,
- MI.getDebugLoc());
MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
- unsigned SubIdx = DefMO->getSubReg();
+ MachineInstr *CopyMI = BuildMI(*MBB, &MI, MI.getDebugLoc(),
+ TII->get(TargetOpcode::COPY))
+ .addReg(DestReg, RegState::Define, DefMO->getSubReg())
+ .addReg(InReg, RegState::Kill);
// Revisit the copy so we make sure to notice the effects of the
// operation on the destreg (either needing to RA it if it's
// virtual or needing to clobber any values if it's physical).
- NextMII = &MI;
- --NextMII; // backtrack to the copy.
+ NextMII = CopyMI;
NextMII->setAsmPrinterFlag(MachineInstr::ReloadReuse);
- // Propagate the sub-register index over.
- if (SubIdx) {
- DefMO = NextMII->findRegisterDefOperand(DestReg);
- DefMO->setSubReg(SubIdx);
- }
-
- // Mark is killed.
- MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
- KillOpnd->setIsKill();
-
BackTracked = true;
} else {
DEBUG(dbgs() << "Removing now-noop copy: " << MI);
@@ -2430,6 +2424,24 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
// Also check if it's copying from an "undef", if so, we can't
// eliminate this or else the undef marker is lost and it will
// confuse the scavenger. This is extremely rare.
+ if (MI.isIdentityCopy() && !MI.getOperand(1).isUndef() &&
+ MI.getNumOperands() == 2) {
+ ++NumDCE;
+ DEBUG(dbgs() << "Removing now-noop copy: " << MI);
+ SmallVector<unsigned, 2> KillRegs;
+ InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
+ if (MO.isDead() && !KillRegs.empty()) {
+ // Source register or an implicit super/sub-register use is killed.
+ assert(TRI->regsOverlap(KillRegs[0], MI.getOperand(0).getReg()));
+ // Last def is now dead.
+ TransferDeadness(MI.getOperand(1).getReg(), RegKills, KillOps);
+ }
+ VRM->RemoveMachineInstrFromMaps(&MI);
+ MBB->erase(&MI);
+ Erased = true;
+ Spills.disallowClobberPhysReg(VirtReg);
+ goto ProcessNextInst;
+ }
unsigned Src, Dst, SrcSR, DstSR;
if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) &&
Src == Dst && SrcSR == DstSR &&
@@ -2519,6 +2531,16 @@ LocalRewriter::RewriteMBB(LiveIntervals *LIs,
// Check to see if this is a noop copy. If so, eliminate the
// instruction before considering the dest reg to be changed.
+ if (MI.isIdentityCopy()) {
+ ++NumDCE;
+ DEBUG(dbgs() << "Removing now-noop copy: " << MI);
+ InvalidateKills(MI, TRI, RegKills, KillOps);
+ VRM->RemoveMachineInstrFromMaps(&MI);
+ MBB->erase(&MI);
+ Erased = true;
+ UpdateKills(*LastStore, TRI, RegKills, KillOps);
+ goto ProcessNextInst;
+ }
{
unsigned Src, Dst, SrcSR, DstSR;
if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) &&
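
Both dead-code-elimination hunks above hinge on the isIdentityCopy test: a COPY whose source and destination, including any subregister qualifiers, are the same register is a no-op and can be erased once its kill and dead flags have been accounted for. A minimal model of just the predicate, with a toy copy record instead of MachineInstr:

#include <cstdio>

struct Copy { unsigned DstReg, DstSub, SrcReg, SrcSub; };

bool isIdentityCopy(const Copy &C) {
  return C.DstReg == C.SrcReg && C.DstSub == C.SrcSub;
}

int main() {
  Copy Noop = {4, 0, 4, 0};
  Copy Real = {4, 0, 5, 0};
  std::printf("noop=%d real=%d\n", isIdentityCopy(Noop), isIdentityCopy(Real));
}
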
diff --git a/contrib/llvm/lib/CompilerDriver/Tool.cpp b/contrib/llvm/lib/CompilerDriver/Tool.cpp
index 5e558ca..c8488b2 100644
--- a/contrib/llvm/lib/CompilerDriver/Tool.cpp
+++ b/contrib/llvm/lib/CompilerDriver/Tool.cpp
@@ -85,7 +85,8 @@ StrVector Tool::SortArgs(ArgsVector& Args) const {
StrVector Out;
// HACK: this won't be needed when we migrate away from CommandLine.
- std::stable_sort(Args.begin(), Args.end(), &CompareFirst<unsigned, std::string>);
+ std::stable_sort(Args.begin(), Args.end(),
+ &CompareFirst<unsigned, std::string>);
for (ArgsVector::iterator B = Args.begin(), E = Args.end(); B != E; ++B) {
Out.push_back(B->second);
}
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 0748b54..59ebe6e 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -591,7 +591,7 @@ void Interpreter::popStackAndReturnValueToCaller(const Type *RetTy,
ECStack.pop_back();
if (ECStack.empty()) { // Finished main. Put result into exit code...
- if (RetTy && RetTy->isIntegerTy()) { // Nonvoid return type?
+ if (RetTy && !RetTy->isVoidTy()) { // Nonvoid return type?
ExitValue = Result; // Capture the exit value of the program
} else {
memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
diff --git a/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
index 26a53b5..57d1260 100644
--- a/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -266,7 +266,7 @@ GenericValue Interpreter::callExternalFunction(Function *F,
RawFn = (RawFunc)(intptr_t)
sys::DynamicLibrary::SearchForAddressOfSymbol(F->getName());
if (!RawFn)
- RawFn = (RawFunc)(intptr_t)getPointerToGlobalIfAvailable(F);
+ RawFn = (RawFunc)(intptr_t)getPointerToGlobalIfAvailable(F);
if (RawFn != 0)
RawFunctions->insert(std::make_pair(F, RawFn)); // Cache for later
} else {
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
index 546d2b2..67bd3ed 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.cpp
@@ -626,10 +626,7 @@ void JIT::runJITOnFunction(Function *F, MachineCodeInfo *MCI) {
void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) {
assert(!isAlreadyCodeGenerating && "Error: Recursive compilation detected!");
- // JIT the function
- isAlreadyCodeGenerating = true;
- jitstate->getPM(locked).run(*F);
- isAlreadyCodeGenerating = false;
+ jitTheFunction(F, locked);
// If the function referred to another function that had not yet been
// read from bitcode, and we are jitting non-lazily, emit it now.
@@ -640,10 +637,7 @@ void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) {
assert(!PF->hasAvailableExternallyLinkage() &&
"Externally-defined function should not be in pending list.");
- // JIT the function
- isAlreadyCodeGenerating = true;
- jitstate->getPM(locked).run(*PF);
- isAlreadyCodeGenerating = false;
+ jitTheFunction(PF, locked);
// Now that the function has been jitted, ask the JITEmitter to rewrite
// the stub with real address of the function.
@@ -651,6 +645,15 @@ void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) {
}
}
+void JIT::jitTheFunction(Function *F, const MutexGuard &locked) {
+ isAlreadyCodeGenerating = true;
+ jitstate->getPM(locked).run(*F);
+ isAlreadyCodeGenerating = false;
+
+ // Clear basic block addresses after this function is done.
+ getBasicBlockAddressMap(locked).clear();
+}
+
/// getPointerToFunction - This method is used to get the address of the
/// specified function, compiling it if neccesary.
///
@@ -687,6 +690,41 @@ void *JIT::getPointerToFunction(Function *F) {
return Addr;
}
+void JIT::addPointerToBasicBlock(const BasicBlock *BB, void *Addr) {
+ MutexGuard locked(lock);
+
+ BasicBlockAddressMapTy::iterator I =
+ getBasicBlockAddressMap(locked).find(BB);
+ if (I == getBasicBlockAddressMap(locked).end()) {
+ getBasicBlockAddressMap(locked)[BB] = Addr;
+ } else {
+ // Ignore repeats: some BBs may be split into several MBBs.
+ }
+}
+
+void JIT::clearPointerToBasicBlock(const BasicBlock *BB) {
+ MutexGuard locked(lock);
+ getBasicBlockAddressMap(locked).erase(BB);
+}
+
+void *JIT::getPointerToBasicBlock(BasicBlock *BB) {
+ // Make sure its function is compiled by the JIT.
+ (void)getPointerToFunction(BB->getParent());
+
+ // Resolve the basic block address.
+ MutexGuard locked(lock);
+
+ BasicBlockAddressMapTy::iterator I =
+ getBasicBlockAddressMap(locked).find(BB);
+ if (I != getBasicBlockAddressMap(locked).end()) {
+ return I->second;
+ } else {
+ assert(0 && "JIT does not have BB address for address-of-label, was"
+ " it eliminated by optimizer?");
+ return 0;
+ }
+}
+
/// getOrEmitGlobalVariable - Return the address of the specified global
/// variable, possibly emitting it to memory if needed. This is used by the
/// Emitter.
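
The three methods added to JIT.cpp above implement a straightforward locked map from address-taken basic blocks to their emitted code: record at emission time, drop on retry, assert on a lookup for a block the optimizer removed. A rough standalone analogue of that bookkeeping, with an opaque stand-in for BasicBlock and std::mutex in place of the JIT lock:

#include <cassert>
#include <map>
#include <mutex>

struct BasicBlock;  // opaque stand-in

class BBAddressMap {
  std::map<const BasicBlock *, void *> Map;
  std::mutex Lock;
public:
  void add(const BasicBlock *BB, void *Addr) {
    std::lock_guard<std::mutex> G(Lock);
    Map.insert({BB, Addr});  // repeats ignored: a BB may span several MBBs
  }
  void clear(const BasicBlock *BB) {
    std::lock_guard<std::mutex> G(Lock);
    Map.erase(BB);
  }
  void *get(const BasicBlock *BB) {
    std::lock_guard<std::mutex> G(Lock);
    auto I = Map.find(BB);
    assert(I != Map.end() && "no BB address: eliminated by the optimizer?");
    return I->second;
  }
};

int main() {
  BBAddressMap M;
  M.add(nullptr, (void *)0x1000);
  return M.get(nullptr) != nullptr ? 0 : 1;
}
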
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h
index edae719..1d1763e 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JIT.h
@@ -51,6 +51,10 @@ public:
class JIT : public ExecutionEngine {
+ /// types
+ typedef ValueMap<const BasicBlock *, void *>
+ BasicBlockAddressMapTy;
+ /// data
TargetMachine &TM; // The current target we are compiling to
TargetJITInfo &TJI; // The JITInfo for the target we are compiling to
JITCodeEmitter *JCE; // JCE object
@@ -67,6 +71,12 @@ class JIT : public ExecutionEngine {
JITState *jitstate;
+ /// BasicBlockAddressMap - A mapping between LLVM basic blocks and the
+ /// addresses of their emitted machine code; only filled for basic blocks
+ /// that have their address taken.
+ BasicBlockAddressMapTy BasicBlockAddressMap;
+
+
JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
JITMemoryManager *JMM, CodeGenOpt::Level OptLevel,
bool AllocateGVsWithCode);
@@ -90,9 +100,9 @@ public:
CodeGenOpt::Level OptLevel =
CodeGenOpt::Default,
bool GVsWithCode = true,
- CodeModel::Model CMM = CodeModel::Default) {
+ CodeModel::Model CMM = CodeModel::Default) {
return ExecutionEngine::createJIT(M, Err, JMM, OptLevel, GVsWithCode,
- CMM);
+ CMM);
}
virtual void addModule(Module *M);
@@ -127,10 +137,15 @@ public:
///
void *getPointerToFunction(Function *F);
- void *getPointerToBasicBlock(BasicBlock *BB) {
- assert(0 && "JIT does not support address-of-label yet!");
- return 0;
- }
+ /// addPointerToBasicBlock - Adds the address of the specified basic block.
+ void addPointerToBasicBlock(const BasicBlock *BB, void *Addr);
+
+ /// clearPointerToBasicBlock - Removes the address of the specified basic block.
+ void clearPointerToBasicBlock(const BasicBlock *BB);
+
+ /// getPointerToBasicBlock - This returns the address of the specified basic
+ /// block, assuming the function is compiled.
+ void *getPointerToBasicBlock(BasicBlock *BB);
/// getOrEmitGlobalVariable - Return the address of the specified global
/// variable, possibly emitting it to memory if needed. This is used by the
@@ -197,11 +212,18 @@ public:
const JITEvent_EmittedFunctionDetails &Details);
void NotifyFreeingMachineCode(void *OldPtr);
+ BasicBlockAddressMapTy &
+ getBasicBlockAddressMap(const MutexGuard &) {
+ return BasicBlockAddressMap;
+ }
+
+
private:
static JITCodeEmitter *createEmitter(JIT &J, JITMemoryManager *JMM,
TargetMachine &tm);
void runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked);
void updateFunctionStub(Function *F);
+ void jitTheFunction(Function *F, const MutexGuard &locked);
protected:
diff --git a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
index e3855b2..28d79da 100644
--- a/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/contrib/llvm/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -435,6 +435,9 @@ namespace {
if (MBBLocations.size() <= (unsigned)MBB->getNumber())
MBBLocations.resize((MBB->getNumber()+1)*2);
MBBLocations[MBB->getNumber()] = getCurrentPCValue();
+ if (MBB->hasAddressTaken())
+ TheJIT->addPointerToBasicBlock(MBB->getBasicBlock(),
+ (void*)getCurrentPCValue());
DEBUG(dbgs() << "JIT: Emitting BB" << MBB->getNumber() << " at ["
<< (void*) getCurrentPCValue() << "]\n");
}
@@ -442,7 +445,7 @@ namespace {
virtual uintptr_t getConstantPoolEntryAddress(unsigned Entry) const;
virtual uintptr_t getJumpTableEntryAddress(unsigned Entry) const;
- virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const {
+ virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const{
assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
MBBLocations[MBB->getNumber()] && "MBB not emitted!");
return MBBLocations[MBB->getNumber()];
@@ -1310,6 +1313,11 @@ void JITEmitter::retryWithMoreMemory(MachineFunction &F) {
deallocateMemForFunction(F.getFunction());
// Try again with at least twice as much free space.
SizeEstimate = (uintptr_t)(2 * (BufferEnd - BufferBegin));
+
+ for (MachineFunction::iterator MBB = F.begin(), E = F.end(); MBB != E; ++MBB){
+ if (MBB->hasAddressTaken())
+ TheJIT->clearPointerToBasicBlock(MBB->getBasicBlock());
+ }
}
/// deallocateMemForFunction - Deallocate all memory for the specified
diff --git a/contrib/llvm/lib/Linker/LinkItems.cpp b/contrib/llvm/lib/Linker/LinkItems.cpp
index 2c22550..1be2bec 100644
--- a/contrib/llvm/lib/Linker/LinkItems.cpp
+++ b/contrib/llvm/lib/Linker/LinkItems.cpp
@@ -160,27 +160,26 @@ bool Linker::LinkInFile(const sys::Path &File, bool &is_native) {
// Check for a file of name "-", which means "read standard input"
if (File.str() == "-") {
std::auto_ptr<Module> M;
- MemoryBuffer *Buffer = MemoryBuffer::getSTDIN();
- if (!Buffer->getBufferSize()) {
- delete Buffer;
- Error = "standard input is empty";
- } else {
- M.reset(ParseBitcodeFile(Buffer, Context, &Error));
- delete Buffer;
- if (M.get())
- if (!LinkInModule(M.get(), &Error))
- return false;
+ if (MemoryBuffer *Buffer = MemoryBuffer::getSTDIN(&Error)) {
+ if (!Buffer->getBufferSize()) {
+ delete Buffer;
+ Error = "standard input is empty";
+ } else {
+ M.reset(ParseBitcodeFile(Buffer, Context, &Error));
+ delete Buffer;
+ if (M.get())
+ if (!LinkInModule(M.get(), &Error))
+ return false;
+ }
}
return error("Cannot link stdin: " + Error);
}
- // Make sure we can at least read the file
- if (!File.canRead())
+ // Determine what variety of file it is.
+ std::string Magic;
+ if (!File.getMagicNumber(Magic, 64))
return error("Cannot find linker input '" + File.str() + "'");
- // If its an archive, try to link it in
- std::string Magic;
- File.getMagicNumber(Magic, 64);
switch (sys::IdentifyFileType(Magic.c_str(), 64)) {
default: llvm_unreachable("Bad file type identification");
case sys::Unknown_FileType:
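
The LinkItems.cpp change above replaces the separate canRead() probe with a single getMagicNumber() read whose failure doubles as the "cannot find input" error, then branches on the identified type. A simplified, self-contained version of that flow; the classifier below is a trivial stand-in for sys::IdentifyFileType, and the two-byte "BC" test is a simplification of the real bitcode magic:

#include <cstdio>
#include <cstring>
#include <fstream>

enum FileType { Unknown, Bitcode, Archive };

FileType identify(const char *Magic, size_t Len) {
  if (Len >= 2 && std::memcmp(Magic, "BC", 2) == 0) return Bitcode;
  if (Len >= 8 && std::memcmp(Magic, "!<arch>\n", 8) == 0) return Archive;
  return Unknown;
}

int main(int argc, char **argv) {
  if (argc < 2) return 1;
  std::ifstream In(argv[1], std::ios::binary);
  char Magic[64] = {};
  In.read(Magic, sizeof(Magic));
  if (!In.gcount()) {  // mirrors "Cannot find linker input" above
    std::fprintf(stderr, "cannot read '%s'\n", argv[1]);
    return 1;
  }
  std::printf("type: %d\n", identify(Magic, (size_t)In.gcount()));
}
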
diff --git a/contrib/llvm/lib/MC/CMakeLists.txt b/contrib/llvm/lib/MC/CMakeLists.txt
index 5e8a3b6..fc4f3c6 100644
--- a/contrib/llvm/lib/MC/CMakeLists.txt
+++ b/contrib/llvm/lib/MC/CMakeLists.txt
@@ -14,6 +14,7 @@ add_llvm_library(LLVMMC
MCLoggingStreamer.cpp
MCMachOStreamer.cpp
MCNullStreamer.cpp
+ MCObjectStreamer.cpp
MCObjectWriter.cpp
MCSection.cpp
MCSectionCOFF.cpp
@@ -23,5 +24,7 @@ add_llvm_library(LLVMMC
MCSymbol.cpp
MCValue.cpp
MachObjectWriter.cpp
+ WinCOFFStreamer.cpp
+ WinCOFFObjectWriter.cpp
TargetAsmBackend.cpp
)
diff --git a/contrib/llvm/lib/MC/MCAsmStreamer.cpp b/contrib/llvm/lib/MC/MCAsmStreamer.cpp
index 57b2bcc..e272b60 100644
--- a/contrib/llvm/lib/MC/MCAsmStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCAsmStreamer.cpp
@@ -275,19 +275,20 @@ void MCAsmStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
case MCSA_Global: // .globl/.global
OS << MAI.getGlobalDirective();
break;
- case MCSA_Hidden: OS << ".hidden "; break;
- case MCSA_IndirectSymbol: OS << ".indirect_symbol "; break;
- case MCSA_Internal: OS << ".internal "; break;
- case MCSA_LazyReference: OS << ".lazy_reference "; break;
- case MCSA_Local: OS << ".local "; break;
- case MCSA_NoDeadStrip: OS << ".no_dead_strip "; break;
- case MCSA_PrivateExtern: OS << ".private_extern "; break;
- case MCSA_Protected: OS << ".protected "; break;
- case MCSA_Reference: OS << ".reference "; break;
- case MCSA_Weak: OS << ".weak "; break;
- case MCSA_WeakDefinition: OS << ".weak_definition "; break;
+ case MCSA_Hidden: OS << "\t.hidden\t"; break;
+ case MCSA_IndirectSymbol: OS << "\t.indirect_symbol\t"; break;
+ case MCSA_Internal: OS << "\t.internal\t"; break;
+ case MCSA_LazyReference: OS << "\t.lazy_reference\t"; break;
+ case MCSA_Local: OS << "\t.local\t"; break;
+ case MCSA_NoDeadStrip: OS << "\t.no_dead_strip\t"; break;
+ case MCSA_PrivateExtern: OS << "\t.private_extern\t"; break;
+ case MCSA_Protected: OS << "\t.protected\t"; break;
+ case MCSA_Reference: OS << "\t.reference\t"; break;
+ case MCSA_Weak: OS << "\t.weak\t"; break;
+ case MCSA_WeakDefinition: OS << "\t.weak_definition\t"; break;
// .weak_reference
case MCSA_WeakReference: OS << MAI.getWeakRefDirective(); break;
+ case MCSA_WeakDefAutoPrivate: OS << "\t.weak_def_can_be_hidden\t"; break;
}
OS << *Symbol;
@@ -693,7 +694,6 @@ void MCAsmStreamer::EmitRawText(StringRef String) {
}
void MCAsmStreamer::Finish() {
- OS.flush();
}
MCStreamer *llvm::createAsmStreamer(MCContext &Context,
diff --git a/contrib/llvm/lib/MC/MCAssembler.cpp b/contrib/llvm/lib/MC/MCAssembler.cpp
index 5936656..7d84554 100644
--- a/contrib/llvm/lib/MC/MCAssembler.cpp
+++ b/contrib/llvm/lib/MC/MCAssembler.cpp
@@ -308,24 +308,23 @@ static bool isScatteredFixupFullyResolved(const MCAssembler &Asm,
return !B_Base && BaseSymbol == A_Base;
}
-bool MCAssembler::isSymbolLinkerVisible(const MCSymbolData *SD) const {
+bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
// Non-temporary labels should always be visible to the linker.
- if (!SD->getSymbol().isTemporary())
+ if (!Symbol.isTemporary())
return true;
// Absolute temporary labels are never visible.
- if (!SD->getFragment())
+ if (!Symbol.isInSection())
return false;
// Otherwise, check if the section requires symbols even for temporary labels.
- return getBackend().doesSectionRequireSymbols(
- SD->getFragment()->getParent()->getSection());
+ return getBackend().doesSectionRequireSymbols(Symbol.getSection());
}
const MCSymbolData *MCAssembler::getAtom(const MCAsmLayout &Layout,
const MCSymbolData *SD) const {
// Linker visible symbols define atoms.
- if (isSymbolLinkerVisible(SD))
+ if (isSymbolLinkerVisible(SD->getSymbol()))
return SD;
// Absolute and undefined symbols have no defining atom.
@@ -685,12 +684,8 @@ void MCAssembler::Finish() {
for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
// Create dummy fragments to eliminate any empty sections, this simplifies
// layout.
- if (it->getFragmentList().empty()) {
- unsigned ValueSize = 1;
- if (getBackend().isVirtualSection(it->getSection()))
- ValueSize = 1;
+ if (it->getFragmentList().empty())
new MCFillFragment(0, 1, 0, it);
- }
it->setOrdinal(SectionIndex++);
}
@@ -759,7 +754,6 @@ void MCAssembler::Finish() {
// Write the object file.
Writer->WriteObject(*this, Layout);
- OS.flush();
stats::ObjectBytes += OS.tell() - StartOffset;
}
diff --git a/contrib/llvm/lib/MC/MCContext.cpp b/contrib/llvm/lib/MC/MCContext.cpp
index 53ffc94..1137064 100644
--- a/contrib/llvm/lib/MC/MCContext.cpp
+++ b/contrib/llvm/lib/MC/MCContext.cpp
@@ -27,6 +27,10 @@ MCContext::MCContext(const MCAsmInfo &mai) : MAI(mai), NextUniqueID(0) {
MachOUniquingMap = 0;
ELFUniquingMap = 0;
COFFUniquingMap = 0;
+
+ SecureLogFile = getenv("AS_SECURE_LOG_FILE");
+ SecureLog = 0;
+ SecureLogUsed = false;
}
MCContext::~MCContext() {
@@ -37,6 +41,9 @@ MCContext::~MCContext() {
delete (MachOUniqueMapTy*)MachOUniquingMap;
delete (ELFUniqueMapTy*)ELFUniquingMap;
delete (COFFUniqueMapTy*)COFFUniquingMap;
+
+ // If the stream for the .secure_log_unique directive was created, free it.
+ delete (raw_ostream*)SecureLog;
}
//===----------------------------------------------------------------------===//
@@ -90,14 +97,14 @@ MCSymbol *MCContext::CreateDirectionalLocalSymbol(int64_t LocalLabelVal) {
return GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix()) +
Twine(LocalLabelVal) +
"\2" +
- Twine(NextInstance(LocalLabelVal)));
+ Twine(NextInstance(LocalLabelVal)));
}
MCSymbol *MCContext::GetDirectionalLocalSymbol(int64_t LocalLabelVal,
int bORf) {
return GetOrCreateSymbol(Twine(MAI.getPrivateGlobalPrefix()) +
Twine(LocalLabelVal) +
"\2" +
- Twine(GetInstance(LocalLabelVal) + bORf));
+ Twine(GetInstance(LocalLabelVal) + bORf));
}
MCSymbol *MCContext::LookupSymbol(StringRef Name) const {
diff --git a/contrib/llvm/lib/MC/MCExpr.cpp b/contrib/llvm/lib/MC/MCExpr.cpp
index c000dd7..343f334 100644
--- a/contrib/llvm/lib/MC/MCExpr.cpp
+++ b/contrib/llvm/lib/MC/MCExpr.cpp
@@ -40,7 +40,7 @@ void MCExpr::print(raw_ostream &OS) const {
const MCSymbol &Sym = SRE.getSymbol();
if (SRE.getKind() == MCSymbolRefExpr::VK_ARM_HI16 ||
- SRE.getKind() == MCSymbolRefExpr::VK_ARM_LO16)
+ SRE.getKind() == MCSymbolRefExpr::VK_ARM_LO16)
OS << MCSymbolRefExpr::getVariantKindName(SRE.getKind());
// Parenthesize names that start with $ so that they don't look like
@@ -51,8 +51,8 @@ void MCExpr::print(raw_ostream &OS) const {
OS << Sym;
if (SRE.getKind() != MCSymbolRefExpr::VK_None &&
- SRE.getKind() != MCSymbolRefExpr::VK_ARM_HI16 &&
- SRE.getKind() != MCSymbolRefExpr::VK_ARM_LO16)
+ SRE.getKind() != MCSymbolRefExpr::VK_ARM_HI16 &&
+ SRE.getKind() != MCSymbolRefExpr::VK_ARM_LO16)
OS << '@' << MCSymbolRefExpr::getVariantKindName(SRE.getKind());
return;
diff --git a/contrib/llvm/lib/MC/MCMachOStreamer.cpp b/contrib/llvm/lib/MC/MCMachOStreamer.cpp
index 27e4e98..44bc267 100644
--- a/contrib/llvm/lib/MC/MCMachOStreamer.cpp
+++ b/contrib/llvm/lib/MC/MCMachOStreamer.cpp
@@ -14,6 +14,7 @@
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCMachOSymbolFlags.h"
@@ -25,21 +26,13 @@ using namespace llvm;
namespace {
-class MCMachOStreamer : public MCStreamer {
-
-private:
- MCAssembler Assembler;
- MCSectionData *CurSectionData;
-
- /// Track the current atom for each section.
- DenseMap<const MCSectionData*, MCSymbolData*> CurrentAtomMap;
-
+class MCMachOStreamer : public MCObjectStreamer {
private:
MCFragment *getCurrentFragment() const {
- assert(CurSectionData && "No current section!");
+ assert(getCurrentSectionData() && "No current section!");
- if (!CurSectionData->empty())
- return &CurSectionData->getFragmentList().back();
+ if (!getCurrentSectionData()->empty())
+ return &getCurrentSectionData()->getFragmentList().back();
return 0;
}
@@ -49,28 +42,17 @@ private:
MCDataFragment *getOrCreateDataFragment() const {
MCDataFragment *F = dyn_cast_or_null<MCDataFragment>(getCurrentFragment());
if (!F)
- F = createDataFragment();
+ F = new MCDataFragment(getCurrentSectionData());
return F;
}
- /// Create a new data fragment in the current section.
- MCDataFragment *createDataFragment() const {
- MCDataFragment *DF = new MCDataFragment(CurSectionData);
- DF->setAtom(CurrentAtomMap.lookup(CurSectionData));
- return DF;
- }
-
void EmitInstToFragment(const MCInst &Inst);
void EmitInstToData(const MCInst &Inst);
public:
MCMachOStreamer(MCContext &Context, TargetAsmBackend &TAB,
- raw_ostream &_OS, MCCodeEmitter *_Emitter)
- : MCStreamer(Context), Assembler(Context, TAB, *_Emitter, _OS),
- CurSectionData(0) {}
- ~MCMachOStreamer() {}
-
- MCAssembler &getAssembler() { return Assembler; }
+ raw_ostream &OS, MCCodeEmitter *Emitter)
+ : MCObjectStreamer(Context, TAB, OS, Emitter) {}
const MCExpr *AddValueSymbols(const MCExpr *Value) {
switch (Value->getKind()) {
@@ -86,7 +68,7 @@ public:
}
case MCExpr::SymbolRef:
- Assembler.getOrCreateSymbolData(
+ getAssembler().getOrCreateSymbolData(
cast<MCSymbolRefExpr>(Value)->getSymbol());
break;
@@ -101,7 +83,6 @@ public:
/// @name MCStreamer Interface
/// @{
- virtual void SwitchSection(const MCSection *Section);
virtual void EmitLabel(MCSymbol *Symbol);
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
@@ -152,6 +133,7 @@ public:
}
virtual void EmitInstruction(const MCInst &Inst);
+
virtual void Finish();
/// @}
@@ -159,38 +141,25 @@ public:
} // end anonymous namespace.
-void MCMachOStreamer::SwitchSection(const MCSection *Section) {
- assert(Section && "Cannot switch to a null section!");
-
- // If already in this section, then this is a noop.
- if (Section == CurSection) return;
-
- CurSection = Section;
- CurSectionData = &Assembler.getOrCreateSectionData(*Section);
-}
-
void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) {
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
assert(CurSection && "Cannot emit before setting section!");
- MCSymbolData &SD = Assembler.getOrCreateSymbolData(*Symbol);
+ Symbol->setSection(*CurSection);
- // Update the current atom map, if necessary.
- bool MustCreateFragment = false;
- if (Assembler.isSymbolLinkerVisible(&SD)) {
- CurrentAtomMap[CurSectionData] = &SD;
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
- // We have to create a new fragment, fragments cannot span atoms.
- MustCreateFragment = true;
- }
+ // We have to create a new fragment if this is an atom defining symbol,
+ // fragments cannot span atoms.
+ if (getAssembler().isSymbolLinkerVisible(SD.getSymbol()))
+ new MCDataFragment(getCurrentSectionData());
// FIXME: This is wasteful, we don't necessarily need to create a data
// fragment. Instead, we should mark the symbol as pointing into the data
// fragment if it exists, otherwise we should just queue the label and set its
// fragment pointer when we emit the next fragment.
- MCDataFragment *F =
- MustCreateFragment ? createDataFragment() : getOrCreateDataFragment();
+ MCDataFragment *F = getOrCreateDataFragment();
assert(!SD.getFragment() && "Unexpected fragment on symbol data!");
SD.setFragment(F);
SD.setOffset(F->getContents().size());
@@ -203,14 +172,12 @@ void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) {
// FIXME: Cleanup this code, these bits should be emitted based on semantic
// properties, not on the order of definition, etc.
SD.setFlags(SD.getFlags() & ~SF_ReferenceTypeMask);
-
- Symbol->setSection(*CurSection);
}
void MCMachOStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
switch (Flag) {
case MCAF_SubsectionsViaSymbols:
- Assembler.setSubsectionsViaSymbols(true);
+ getAssembler().setSubsectionsViaSymbols(true);
return;
}
@@ -219,7 +186,7 @@ void MCMachOStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
void MCMachOStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
// FIXME: Lift context changes into super class.
- Assembler.getOrCreateSymbolData(*Symbol);
+ getAssembler().getOrCreateSymbolData(*Symbol);
Symbol->setVariableValue(AddValueSymbols(Value));
}
@@ -232,15 +199,15 @@ void MCMachOStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
// important for matching the string table that 'as' generates.
IndirectSymbolData ISD;
ISD.Symbol = Symbol;
- ISD.SectionData = CurSectionData;
- Assembler.getIndirectSymbols().push_back(ISD);
+ ISD.SectionData = getCurrentSectionData();
+ getAssembler().getIndirectSymbols().push_back(ISD);
return;
}
// Adding a symbol attribute always introduces the symbol, note that an
// important side effect of calling getOrCreateSymbolData here is to register
// the symbol with the assembler.
- MCSymbolData &SD = Assembler.getOrCreateSymbolData(*Symbol);
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
// The implementation of symbol attributes is designed to match 'as', but it
// leaves much to be desired. It doesn't really make sense to arbitrarily add and
@@ -306,6 +273,10 @@ void MCMachOStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
// it has to be in a coalesced section, but this isn't enforced.
SD.setFlags(SD.getFlags() | SF_WeakDefinition);
break;
+
+ case MCSA_WeakDefAutoPrivate:
+ SD.setFlags(SD.getFlags() | SF_WeakDefinition | SF_WeakReference);
+ break;
}
}
@@ -313,7 +284,8 @@ void MCMachOStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
// Encode the 'desc' value into the lowest implementation defined bits.
assert(DescValue == (DescValue & SF_DescFlagsMask) &&
"Invalid .desc value!");
- Assembler.getOrCreateSymbolData(*Symbol).setFlags(DescValue&SF_DescFlagsMask);
+ getAssembler().getOrCreateSymbolData(*Symbol).setFlags(
+ DescValue & SF_DescFlagsMask);
}
void MCMachOStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
@@ -321,14 +293,14 @@ void MCMachOStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
// FIXME: Darwin 'as' does appear to allow redef of a .comm by itself.
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- MCSymbolData &SD = Assembler.getOrCreateSymbolData(*Symbol);
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
SD.setExternal(true);
SD.setCommon(Size, ByteAlignment);
}
void MCMachOStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
unsigned Size, unsigned ByteAlignment) {
- MCSectionData &SectData = Assembler.getOrCreateSectionData(*Section);
+ MCSectionData &SectData = getAssembler().getOrCreateSectionData(*Section);
// The symbol may not be present, in which case this only creates the section.
if (!Symbol)
@@ -338,7 +310,7 @@ void MCMachOStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- MCSymbolData &SD = Assembler.getOrCreateSymbolData(*Symbol);
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
// Emit an align fragment if necessary.
if (ByteAlignment != 1)
@@ -346,8 +318,6 @@ void MCMachOStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
MCFragment *F = new MCFillFragment(0, 0, Size, &SectData);
SD.setFragment(F);
- if (Assembler.isSymbolLinkerVisible(&SD))
- F->setAtom(&SD);
Symbol->setSection(*Section);
@@ -391,13 +361,12 @@ void MCMachOStreamer::EmitValueToAlignment(unsigned ByteAlignment,
unsigned MaxBytesToEmit) {
if (MaxBytesToEmit == 0)
MaxBytesToEmit = ByteAlignment;
- MCFragment *F = new MCAlignFragment(ByteAlignment, Value, ValueSize,
- MaxBytesToEmit, CurSectionData);
- F->setAtom(CurrentAtomMap.lookup(CurSectionData));
+ new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit,
+ getCurrentSectionData());
// Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > CurSectionData->getAlignment())
- CurSectionData->setAlignment(ByteAlignment);
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
}
void MCMachOStreamer::EmitCodeAlignment(unsigned ByteAlignment,
@@ -405,24 +374,21 @@ void MCMachOStreamer::EmitCodeAlignment(unsigned ByteAlignment,
if (MaxBytesToEmit == 0)
MaxBytesToEmit = ByteAlignment;
MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit,
- CurSectionData);
+ getCurrentSectionData());
F->setEmitNops(true);
- F->setAtom(CurrentAtomMap.lookup(CurSectionData));
// Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > CurSectionData->getAlignment())
- CurSectionData->setAlignment(ByteAlignment);
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
}
void MCMachOStreamer::EmitValueToOffset(const MCExpr *Offset,
unsigned char Value) {
- MCFragment *F = new MCOrgFragment(*Offset, Value, CurSectionData);
- F->setAtom(CurrentAtomMap.lookup(CurSectionData));
+ new MCOrgFragment(*Offset, Value, getCurrentSectionData());
}
void MCMachOStreamer::EmitInstToFragment(const MCInst &Inst) {
- MCInstFragment *IF = new MCInstFragment(Inst, CurSectionData);
- IF->setAtom(CurrentAtomMap.lookup(CurSectionData));
+ MCInstFragment *IF = new MCInstFragment(Inst, getCurrentSectionData());
// Add the fixups and data.
//
@@ -431,7 +397,7 @@ void MCMachOStreamer::EmitInstToFragment(const MCInst &Inst) {
SmallVector<MCFixup, 4> Fixups;
SmallString<256> Code;
raw_svector_ostream VecOS(Code);
- Assembler.getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
VecOS.flush();
IF->getCode() = Code;
@@ -444,7 +410,7 @@ void MCMachOStreamer::EmitInstToData(const MCInst &Inst) {
SmallVector<MCFixup, 4> Fixups;
SmallString<256> Code;
raw_svector_ostream VecOS(Code);
- Assembler.getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
VecOS.flush();
// Add the fixups and data.
@@ -461,21 +427,21 @@ void MCMachOStreamer::EmitInstruction(const MCInst &Inst) {
if (Inst.getOperand(i).isExpr())
AddValueSymbols(Inst.getOperand(i).getExpr());
- CurSectionData->setHasInstructions(true);
+ getCurrentSectionData()->setHasInstructions(true);
// If this instruction doesn't need relaxation, just emit it as data.
- if (!Assembler.getBackend().MayNeedRelaxation(Inst)) {
+ if (!getAssembler().getBackend().MayNeedRelaxation(Inst)) {
EmitInstToData(Inst);
return;
}
// Otherwise, if we are relaxing everything, relax the instruction as much as
// possible and emit it as data.
- if (Assembler.getRelaxAll()) {
+ if (getAssembler().getRelaxAll()) {
MCInst Relaxed;
- Assembler.getBackend().RelaxInstruction(Inst, Relaxed);
- while (Assembler.getBackend().MayNeedRelaxation(Relaxed))
- Assembler.getBackend().RelaxInstruction(Relaxed, Relaxed);
+ getAssembler().getBackend().RelaxInstruction(Inst, Relaxed);
+ while (getAssembler().getBackend().MayNeedRelaxation(Relaxed))
+ getAssembler().getBackend().RelaxInstruction(Relaxed, Relaxed);
EmitInstToData(Relaxed);
return;
}
@@ -485,7 +451,36 @@ void MCMachOStreamer::EmitInstruction(const MCInst &Inst) {
}
void MCMachOStreamer::Finish() {
- Assembler.Finish();
+ // We have to set the fragment atom associations so we can relax properly for
+ // Mach-O.
+
+ // First, scan the symbol table to build a lookup table from fragments to
+ // defining symbols.
+ DenseMap<const MCFragment*, MCSymbolData*> DefiningSymbolMap;
+ for (MCAssembler::symbol_iterator it = getAssembler().symbol_begin(),
+ ie = getAssembler().symbol_end(); it != ie; ++it) {
+ if (getAssembler().isSymbolLinkerVisible(it->getSymbol()) &&
+ it->getFragment()) {
+ // An atom defining symbol should never be internal to a fragment.
+ assert(it->getOffset() == 0 && "Invalid offset in atom defining symbol!");
+ DefiningSymbolMap[it->getFragment()] = it;
+ }
+ }
+
+ // Set the fragment atom associations by tracking the last seen atom defining
+ // symbol.
+ for (MCAssembler::iterator it = getAssembler().begin(),
+ ie = getAssembler().end(); it != ie; ++it) {
+ MCSymbolData *CurrentAtom = 0;
+ for (MCSectionData::iterator it2 = it->begin(),
+ ie2 = it->end(); it2 != ie2; ++it2) {
+ if (MCSymbolData *SD = DefiningSymbolMap.lookup(it2))
+ CurrentAtom = SD;
+ it2->setAtom(CurrentAtom);
+ }
+ }
+
+ this->MCObjectStreamer::Finish();
}
MCStreamer *llvm::createMachOStreamer(MCContext &Context, TargetAsmBackend &TAB,
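
The new Finish() above replaces the incremental CurrentAtomMap with a two-pass fixup: first map each fragment to the linker-visible symbol that defines it, then walk each section assigning the most recently seen atom to every fragment. The scan itself is simple enough to model standalone, with plain ints standing in for fragments and symbols:

#include <cstdio>
#include <map>
#include <vector>

int main() {
  // One section with 5 fragments; symbols define atoms at fragments 1 and 3.
  std::vector<int> Fragments = {0, 1, 2, 3, 4};
  std::map<int, int> DefiningSymbol = {{1, 101}, {3, 103}};

  int CurrentAtom = 0;  // 0 == no atom seen yet
  for (int F : Fragments) {
    auto I = DefiningSymbol.find(F);
    if (I != DefiningSymbol.end())
      CurrentAtom = I->second;  // fragments inherit the last atom definer
    std::printf("fragment %d -> atom %d\n", F, CurrentAtom);
  }
}
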
diff --git a/contrib/llvm/lib/MC/MCObjectStreamer.cpp b/contrib/llvm/lib/MC/MCObjectStreamer.cpp
new file mode 100644
index 0000000..d3f7f77
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCObjectStreamer.cpp
@@ -0,0 +1,39 @@
+//===- lib/MC/MCObjectStreamer.cpp - Object File MCStreamer Interface -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCObjectStreamer.h"
+
+#include "llvm/MC/MCAssembler.h"
+using namespace llvm;
+
+MCObjectStreamer::MCObjectStreamer(MCContext &Context, TargetAsmBackend &TAB,
+ raw_ostream &_OS, MCCodeEmitter *_Emitter)
+ : MCStreamer(Context), Assembler(new MCAssembler(Context, TAB,
+ *_Emitter, _OS)),
+ CurSectionData(0)
+{
+}
+
+MCObjectStreamer::~MCObjectStreamer() {
+ delete Assembler;
+}
+
+void MCObjectStreamer::SwitchSection(const MCSection *Section) {
+ assert(Section && "Cannot switch to a null section!");
+
+ // If already in this section, then this is a noop.
+ if (Section == CurSection) return;
+
+ CurSection = Section;
+ CurSectionData = &getAssembler().getOrCreateSectionData(*Section);
+}
+
+void MCObjectStreamer::Finish() {
+ getAssembler().Finish();
+}
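
The new file above is the payoff of the streamer refactor: assembler ownership, section switching, and the default Finish() move into a shared MCObjectStreamer base, and format-specific streamers like the Mach-O one keep only their own emission logic. A skeletal stand-in for that split (abbreviated names, not the real MC interfaces):

struct Assembler {
  void Finish() { /* layout and write the object file */ }
};

class ObjectStreamerBase {
  Assembler *Asm;                     // owned by the base now
public:
  ObjectStreamerBase() : Asm(new Assembler) {}
  virtual ~ObjectStreamerBase() { delete Asm; }
  Assembler &getAssembler() { return *Asm; }
  virtual void Finish() { getAssembler().Finish(); }
};

class MachOLikeStreamer : public ObjectStreamerBase {
public:
  void Finish() override {
    // Mach-O specific pre-pass (atom associations) would run here...
    ObjectStreamerBase::Finish();     // ...before delegating to the base.
  }
};

int main() {
  MachOLikeStreamer S;
  S.Finish();
}
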
diff --git a/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp b/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp
index 1cbe09a..465d983 100644
--- a/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp
+++ b/contrib/llvm/lib/MC/MCParser/AsmLexer.cpp
@@ -23,7 +23,6 @@ using namespace llvm;
AsmLexer::AsmLexer(const MCAsmInfo &_MAI) : MAI(_MAI) {
CurBuf = NULL;
CurPtr = NULL;
- TokStart = 0;
}
AsmLexer::~AsmLexer() {
@@ -40,10 +39,6 @@ void AsmLexer::setBuffer(const MemoryBuffer *buf, const char *ptr) {
TokStart = 0;
}
-SMLoc AsmLexer::getLoc() const {
- return SMLoc::getFromPointer(TokStart);
-}
-
/// ReturnError - Set the error to the specified string at the specified
/// location. This is defined to always return AsmToken::Error.
AsmToken AsmLexer::ReturnError(const char *Loc, const std::string &Msg) {
@@ -229,7 +224,7 @@ StringRef AsmLexer::LexUntilEndOfStatement() {
TokStart = CurPtr;
while (!isAtStartOfComment(*CurPtr) && // Start of line comment.
- *CurPtr != ';' && // End of statement marker.
+ *CurPtr != ';' && // End of statement marker.
*CurPtr != '\n' &&
*CurPtr != '\r' &&
(*CurPtr != 0 || CurPtr != CurBuf->getBufferEnd())) {
diff --git a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
index 4523eab..e0949bd 100644
--- a/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -18,34 +18,85 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetAsmParser.h"
using namespace llvm;
+namespace {
+
+/// \brief Generic implementations of directive handling, etc. which is shared
+/// (or the default, at least) for all assembler parsers.
+class GenericAsmParser : public MCAsmParserExtension {
+public:
+ GenericAsmParser() {}
+
+ virtual void Initialize(MCAsmParser &Parser) {
+ // Call the base implementation.
+ this->MCAsmParserExtension::Initialize(Parser);
+
+ // Debugging directives.
+ Parser.AddDirectiveHandler(this, ".file", MCAsmParser::DirectiveHandler(
+ &GenericAsmParser::ParseDirectiveFile));
+ Parser.AddDirectiveHandler(this, ".line", MCAsmParser::DirectiveHandler(
+ &GenericAsmParser::ParseDirectiveLine));
+ Parser.AddDirectiveHandler(this, ".loc", MCAsmParser::DirectiveHandler(
+ &GenericAsmParser::ParseDirectiveLoc));
+ }
+
+ bool ParseDirectiveFile(StringRef, SMLoc DirectiveLoc); // ".file"
+ bool ParseDirectiveLine(StringRef, SMLoc DirectiveLoc); // ".line"
+ bool ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc); // ".loc"
+};
+
+}
+
+namespace llvm {
+
+extern MCAsmParserExtension *createDarwinAsmParser();
+extern MCAsmParserExtension *createELFAsmParser();
+
+}
enum { DEFAULT_ADDRSPACE = 0 };
-AsmParser::AsmParser(SourceMgr &_SM, MCContext &_Ctx, MCStreamer &_Out,
- const MCAsmInfo &_MAI)
- : Lexer(_MAI), Ctx(_Ctx), Out(_Out), SrcMgr(_SM), TargetParser(0),
- CurBuffer(0) {
+AsmParser::AsmParser(const Target &T, SourceMgr &_SM, MCContext &_Ctx,
+ MCStreamer &_Out, const MCAsmInfo &_MAI)
+ : Lexer(_MAI), Ctx(_Ctx), Out(_Out), SrcMgr(_SM),
+ GenericParser(new GenericAsmParser), PlatformParser(0),
+ TargetParser(0), CurBuffer(0) {
Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer));
-
- // Debugging directives.
- AddDirectiveHandler(".file", &AsmParser::ParseDirectiveFile);
- AddDirectiveHandler(".line", &AsmParser::ParseDirectiveLine);
- AddDirectiveHandler(".loc", &AsmParser::ParseDirectiveLoc);
-}
+ // Initialize the generic parser.
+ GenericParser->Initialize(*this);
+ // Initialize the platform / file format parser.
+ //
+ // FIXME: This is a hack, we need to (majorly) clean up how these objects are
+ // created.
+ if (_MAI.hasSubsectionsViaSymbols()) {
+ PlatformParser = createDarwinAsmParser();
+ PlatformParser->Initialize(*this);
+ } else {
+ PlatformParser = createELFAsmParser();
+ PlatformParser->Initialize(*this);
+ }
+}
AsmParser::~AsmParser() {
+ delete PlatformParser;
+ delete GenericParser;
+}
+
+void AsmParser::setTargetParser(TargetAsmParser &P) {
+ assert(!TargetParser && "Target parser is already initialized!");
+ TargetParser = &P;
+ TargetParser->Initialize(*this);
}
void AsmParser::Warning(SMLoc L, const Twine &Msg) {
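
The constructor changes above turn hardcoded directive dispatch into a registration scheme: each extension registers member-function handlers keyed by directive name during Initialize(). A self-contained sketch of that mechanism with simplified stand-ins for MCAsmParser and MCAsmParserExtension:

#include <cstdio>
#include <map>
#include <string>

class GenericExtension;
typedef bool (GenericExtension::*DirectiveHandler)(const std::string &);

class Parser {
  std::map<std::string, std::pair<GenericExtension *, DirectiveHandler>>
      Handlers;
public:
  void AddDirectiveHandler(GenericExtension *Ext, const std::string &Directive,
                           DirectiveHandler Handler) {
    Handlers[Directive] = std::make_pair(Ext, Handler);
  }
  bool ParseDirective(const std::string &Directive);
};

class GenericExtension {
public:
  bool ParseDirectiveFile(const std::string &D) {
    std::printf("handled %s\n", D.c_str());
    return true;
  }
  void Initialize(Parser &P) {
    P.AddDirectiveHandler(this, ".file",
                          &GenericExtension::ParseDirectiveFile);
  }
};

bool Parser::ParseDirective(const std::string &Directive) {
  auto I = Handlers.find(Directive);
  if (I == Handlers.end())
    return false;   // unknown directive, caller reports the error
  return (I->second.first->*I->second.second)(Directive);
}

int main() {
  Parser P;
  GenericExtension Ext;
  Ext.Initialize(P);
  return P.ParseDirective(".file") ? 0 : 1;
}
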
@@ -57,11 +108,6 @@ bool AsmParser::Error(SMLoc L, const Twine &Msg) {
return true;
}
-bool AsmParser::TokError(const char *Msg) {
- PrintMessage(Lexer.getLoc(), Msg, "error");
- return true;
-}
-
void AsmParser::PrintMessage(SMLoc Loc, const std::string &Msg,
const char *Type) const {
SrcMgr.PrintMessage(Loc, Msg, Type);
@@ -163,11 +209,6 @@ bool AsmParser::ParseParenExpr(const MCExpr *&Res, SMLoc &EndLoc) {
return false;
}
-MCSymbol *AsmParser::CreateSymbol(StringRef Name) {
- // FIXME: Inline into callers.
- return Ctx.GetOrCreateSymbol(Name);
-}
-
/// ParsePrimaryExpr - Parse a primary expression and return it.
/// primaryexpr ::= (parenexpr
/// primaryexpr ::= symbol
@@ -188,7 +229,7 @@ bool AsmParser::ParsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
case AsmToken::Identifier: {
// This is a symbol reference.
std::pair<StringRef, StringRef> Split = getTok().getIdentifier().split('@');
- MCSymbol *Sym = CreateSymbol(Split.first);
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Split.first);
// Mark the symbol as used in an expression.
Sym->setUsedInExpr(true);
@@ -454,8 +495,8 @@ bool AsmParser::ParseStatement() {
IDVal = getTok().getString();
Lex(); // Consume the integer token to be used as an identifier token.
if (Lexer.getKind() != AsmToken::Colon) {
- if (!TheCondState.Ignore)
- return TokError("unexpected token at start of statement");
+ if (!TheCondState.Ignore)
+ return TokError("unexpected token at start of statement");
}
}
}
@@ -498,7 +539,7 @@ bool AsmParser::ParseStatement() {
// implicitly marked as external.
MCSymbol *Sym;
if (LocalLabelVal == -1)
- Sym = CreateSymbol(IDVal);
+ Sym = getContext().GetOrCreateSymbol(IDVal);
else
Sym = Ctx.CreateDirectionalLocalSymbol(LocalLabelVal);
if (!Sym->isUndefined() || Sym->isVariable())
@@ -530,158 +571,6 @@ bool AsmParser::ParseStatement() {
// Otherwise, we have a normal instruction or directive.
if (IDVal[0] == '.') {
- // FIXME: This should be driven based on a hash lookup and callback.
- if (IDVal == ".section")
- return ParseDirectiveDarwinSection();
- if (IDVal == ".text")
- // FIXME: This changes behavior based on the -static flag to the
- // assembler.
- return ParseDirectiveSectionSwitch("__TEXT", "__text",
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS);
- if (IDVal == ".const")
- return ParseDirectiveSectionSwitch("__TEXT", "__const");
- if (IDVal == ".static_const")
- return ParseDirectiveSectionSwitch("__TEXT", "__static_const");
- if (IDVal == ".cstring")
- return ParseDirectiveSectionSwitch("__TEXT","__cstring",
- MCSectionMachO::S_CSTRING_LITERALS);
- if (IDVal == ".literal4")
- return ParseDirectiveSectionSwitch("__TEXT", "__literal4",
- MCSectionMachO::S_4BYTE_LITERALS,
- 4);
- if (IDVal == ".literal8")
- return ParseDirectiveSectionSwitch("__TEXT", "__literal8",
- MCSectionMachO::S_8BYTE_LITERALS,
- 8);
- if (IDVal == ".literal16")
- return ParseDirectiveSectionSwitch("__TEXT","__literal16",
- MCSectionMachO::S_16BYTE_LITERALS,
- 16);
- if (IDVal == ".constructor")
- return ParseDirectiveSectionSwitch("__TEXT","__constructor");
- if (IDVal == ".destructor")
- return ParseDirectiveSectionSwitch("__TEXT","__destructor");
- if (IDVal == ".fvmlib_init0")
- return ParseDirectiveSectionSwitch("__TEXT","__fvmlib_init0");
- if (IDVal == ".fvmlib_init1")
- return ParseDirectiveSectionSwitch("__TEXT","__fvmlib_init1");
-
- // FIXME: The assembler manual claims that this has the self modify code
- // flag, at least on x86-32, but that does not appear to be correct.
- if (IDVal == ".symbol_stub")
- return ParseDirectiveSectionSwitch("__TEXT","__symbol_stub",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
- // FIXME: Different on PPC and ARM.
- 0, 16);
- // FIXME: PowerPC only?
- if (IDVal == ".picsymbol_stub")
- return ParseDirectiveSectionSwitch("__TEXT","__picsymbol_stub",
- MCSectionMachO::S_SYMBOL_STUBS |
- MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
- 0, 26);
- if (IDVal == ".data")
- return ParseDirectiveSectionSwitch("__DATA", "__data");
- if (IDVal == ".static_data")
- return ParseDirectiveSectionSwitch("__DATA", "__static_data");
-
- // FIXME: The section names of these two are misspelled in the assembler
- // manual.
- if (IDVal == ".non_lazy_symbol_pointer")
- return ParseDirectiveSectionSwitch("__DATA", "__nl_symbol_ptr",
- MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS,
- 4);
- if (IDVal == ".lazy_symbol_pointer")
- return ParseDirectiveSectionSwitch("__DATA", "__la_symbol_ptr",
- MCSectionMachO::S_LAZY_SYMBOL_POINTERS,
- 4);
-
- if (IDVal == ".dyld")
- return ParseDirectiveSectionSwitch("__DATA", "__dyld");
- if (IDVal == ".mod_init_func")
- return ParseDirectiveSectionSwitch("__DATA", "__mod_init_func",
- MCSectionMachO::S_MOD_INIT_FUNC_POINTERS,
- 4);
- if (IDVal == ".mod_term_func")
- return ParseDirectiveSectionSwitch("__DATA", "__mod_term_func",
- MCSectionMachO::S_MOD_TERM_FUNC_POINTERS,
- 4);
- if (IDVal == ".const_data")
- return ParseDirectiveSectionSwitch("__DATA", "__const");
-
-
- if (IDVal == ".objc_class")
- return ParseDirectiveSectionSwitch("__OBJC", "__class",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_meta_class")
- return ParseDirectiveSectionSwitch("__OBJC", "__meta_class",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_cat_cls_meth")
- return ParseDirectiveSectionSwitch("__OBJC", "__cat_cls_meth",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_cat_inst_meth")
- return ParseDirectiveSectionSwitch("__OBJC", "__cat_inst_meth",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_protocol")
- return ParseDirectiveSectionSwitch("__OBJC", "__protocol",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_string_object")
- return ParseDirectiveSectionSwitch("__OBJC", "__string_object",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_cls_meth")
- return ParseDirectiveSectionSwitch("__OBJC", "__cls_meth",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_inst_meth")
- return ParseDirectiveSectionSwitch("__OBJC", "__inst_meth",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_cls_refs")
- return ParseDirectiveSectionSwitch("__OBJC", "__cls_refs",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP |
- MCSectionMachO::S_LITERAL_POINTERS,
- 4);
- if (IDVal == ".objc_message_refs")
- return ParseDirectiveSectionSwitch("__OBJC", "__message_refs",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP |
- MCSectionMachO::S_LITERAL_POINTERS,
- 4);
- if (IDVal == ".objc_symbols")
- return ParseDirectiveSectionSwitch("__OBJC", "__symbols",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_category")
- return ParseDirectiveSectionSwitch("__OBJC", "__category",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_class_vars")
- return ParseDirectiveSectionSwitch("__OBJC", "__class_vars",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_instance_vars")
- return ParseDirectiveSectionSwitch("__OBJC", "__instance_vars",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_module_info")
- return ParseDirectiveSectionSwitch("__OBJC", "__module_info",
- MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
- if (IDVal == ".objc_class_names")
- return ParseDirectiveSectionSwitch("__TEXT", "__cstring",
- MCSectionMachO::S_CSTRING_LITERALS);
- if (IDVal == ".objc_meth_var_types")
- return ParseDirectiveSectionSwitch("__TEXT", "__cstring",
- MCSectionMachO::S_CSTRING_LITERALS);
- if (IDVal == ".objc_meth_var_names")
- return ParseDirectiveSectionSwitch("__TEXT", "__cstring",
- MCSectionMachO::S_CSTRING_LITERALS);
- if (IDVal == ".objc_selector_strs")
- return ParseDirectiveSectionSwitch("__OBJC", "__selector_strs",
- MCSectionMachO::S_CSTRING_LITERALS);
-
- if (IDVal == ".tdata")
- return ParseDirectiveSectionSwitch("__DATA", "__thread_data",
- MCSectionMachO::S_THREAD_LOCAL_REGULAR);
- if (IDVal == ".tlv")
- return ParseDirectiveSectionSwitch("__DATA", "__thread_vars",
- MCSectionMachO::S_THREAD_LOCAL_VARIABLES);
- if (IDVal == ".thread_init_func")
- return ParseDirectiveSectionSwitch("__DATA", "__thread_init",
- MCSectionMachO::S_THREAD_LOCAL_INIT_FUNCTION_POINTERS);
-
// Assembler features
if (IDVal == ".set")
return ParseDirectiveSet();
@@ -756,36 +645,25 @@ bool AsmParser::ParseStatement() {
return ParseDirectiveSymbolAttribute(MCSA_WeakDefinition);
if (IDVal == ".weak_reference")
return ParseDirectiveSymbolAttribute(MCSA_WeakReference);
+ if (IDVal == ".weak_def_can_be_hidden")
+ return ParseDirectiveSymbolAttribute(MCSA_WeakDefAutoPrivate);
if (IDVal == ".comm")
return ParseDirectiveComm(/*IsLocal=*/false);
if (IDVal == ".lcomm")
return ParseDirectiveComm(/*IsLocal=*/true);
- if (IDVal == ".zerofill")
- return ParseDirectiveDarwinZerofill();
- if (IDVal == ".desc")
- return ParseDirectiveDarwinSymbolDesc();
- if (IDVal == ".lsym")
- return ParseDirectiveDarwinLsym();
- if (IDVal == ".tbss")
- return ParseDirectiveDarwinTBSS();
-
- if (IDVal == ".subsections_via_symbols")
- return ParseDirectiveDarwinSubsectionsViaSymbols();
+
if (IDVal == ".abort")
return ParseDirectiveAbort();
if (IDVal == ".include")
return ParseDirectiveInclude();
- if (IDVal == ".dump")
- return ParseDirectiveDarwinDumpOrLoad(IDLoc, /*IsDump=*/true);
- if (IDVal == ".load")
- return ParseDirectiveDarwinDumpOrLoad(IDLoc, /*IsLoad=*/false);
-
- // Look up the handler in the handler table,
- bool(AsmParser::*Handler)(StringRef, SMLoc) = DirectiveMap[IDVal];
- if (Handler)
- return (this->*Handler)(IDVal, IDLoc);
-
+
+ // Look up the handler in the handler table.
+ std::pair<MCAsmParserExtension*, DirectiveHandler> Handler =
+ DirectiveMap.lookup(IDVal);
+ if (Handler.first)
+ return (Handler.first->*Handler.second)(IDVal, IDLoc);
+
// Target hook for parsing target specific directives.
if (!getTargetParser().ParseDirective(ID))
return false;
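
The pair-valued lookup above is what lets any MCAsmParserExtension own a directive. A minimal sketch of the pattern, using a hypothetical '.foo' directive; the registration and accessor calls mirror the DarwinAsmParser added later in this patch:

    class FooAsmParser : public MCAsmParserExtension {
    public:
      virtual void Initialize(MCAsmParser &Parser) {
        this->MCAsmParserExtension::Initialize(Parser);
        Parser.AddDirectiveHandler(this, ".foo",
            MCAsmParser::DirectiveHandler(&FooAsmParser::ParseDirectiveFoo));
      }

      bool ParseDirectiveFoo(StringRef, SMLoc) {
        if (getLexer().isNot(AsmToken::EndOfStatement))
          return TokError("unexpected token in '.foo' directive");
        Lex();
        return false;   // handled, no error
      }
    };
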
@@ -834,12 +712,11 @@ bool AsmParser::ParseStatement() {
return HadError;
}
-bool AsmParser::ParseAssignment(const StringRef &Name) {
+bool AsmParser::ParseAssignment(StringRef Name) {
// FIXME: Use better location, we should use proper tokens.
SMLoc EqualLoc = Lexer.getLoc();
const MCExpr *Value;
- SMLoc StartLoc = Lexer.getLoc();
if (ParseExpression(Value))
return true;
@@ -867,7 +744,7 @@ bool AsmParser::ParseAssignment(const StringRef &Name) {
return Error(EqualLoc, "invalid reassignment of non-absolute variable '" +
Name + "'");
} else
- Sym = CreateSymbol(Name);
+ Sym = getContext().GetOrCreateSymbol(Name);
// FIXME: Handle '.'.
@@ -902,90 +779,15 @@ bool AsmParser::ParseDirectiveSet() {
if (ParseIdentifier(Name))
return TokError("expected identifier after '.set' directive");
- if (Lexer.isNot(AsmToken::Comma))
+ if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in '.set'");
Lex();
return ParseAssignment(Name);
}
-/// ParseDirectiveSection:
-/// ::= .section identifier (',' identifier)*
-/// FIXME: This should actually parse out the segment, section, attributes and
-/// sizeof_stub fields.
-bool AsmParser::ParseDirectiveDarwinSection() {
- SMLoc Loc = Lexer.getLoc();
-
- StringRef SectionName;
- if (ParseIdentifier(SectionName))
- return Error(Loc, "expected identifier after '.section' directive");
-
- // Verify there is a following comma.
- if (!Lexer.is(AsmToken::Comma))
- return TokError("unexpected token in '.section' directive");
-
- std::string SectionSpec = SectionName;
- SectionSpec += ",";
-
- // Add all the tokens until the end of the line, ParseSectionSpecifier will
- // handle this.
- StringRef EOL = Lexer.LexUntilEndOfStatement();
- SectionSpec.append(EOL.begin(), EOL.end());
-
- Lex();
- if (Lexer.isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in '.section' directive");
- Lex();
-
-
- StringRef Segment, Section;
- unsigned TAA, StubSize;
- std::string ErrorStr =
- MCSectionMachO::ParseSectionSpecifier(SectionSpec, Segment, Section,
- TAA, StubSize);
-
- if (!ErrorStr.empty())
- return Error(Loc, ErrorStr.c_str());
-
- // FIXME: Arch specific.
- bool isText = Segment == "__TEXT"; // FIXME: Hack.
- Out.SwitchSection(Ctx.getMachOSection(Segment, Section, TAA, StubSize,
- isText ? SectionKind::getText()
- : SectionKind::getDataRel()));
- return false;
-}
-
-/// ParseDirectiveSectionSwitch -
-bool AsmParser::ParseDirectiveSectionSwitch(const char *Segment,
- const char *Section,
- unsigned TAA, unsigned Align,
- unsigned StubSize) {
- if (Lexer.isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in section switching directive");
- Lex();
-
- // FIXME: Arch specific.
- bool isText = StringRef(Segment) == "__TEXT"; // FIXME: Hack.
- Out.SwitchSection(Ctx.getMachOSection(Segment, Section, TAA, StubSize,
- isText ? SectionKind::getText()
- : SectionKind::getDataRel()));
-
- // Set the implicit alignment, if any.
- //
- // FIXME: This isn't really what 'as' does; I think it just uses the implicit
- // alignment on the section (e.g., if one manually inserts bytes into the
- // section, then just issueing the section switch directive will not realign
- // the section. However, this is arguably more reasonable behavior, and there
- // is no good reason for someone to intentionally emit incorrectly sized
- // values into the implicitly aligned sections.
- if (Align)
- Out.EmitValueToAlignment(Align, 0, 1, 0);
-
- return false;
-}
-
bool AsmParser::ParseEscapedString(std::string &Data) {
- assert(Lexer.is(AsmToken::String) && "Unexpected current token!");
+ assert(getLexer().is(AsmToken::String) && "Unexpected current token!");
Data = "";
StringRef Str = getTok().getStringContents();
@@ -1045,25 +847,25 @@ bool AsmParser::ParseEscapedString(std::string &Data) {
/// ParseDirectiveAscii:
/// ::= ( .ascii | .asciz ) [ "string" ( , "string" )* ]
bool AsmParser::ParseDirectiveAscii(bool ZeroTerminated) {
- if (Lexer.isNot(AsmToken::EndOfStatement)) {
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
for (;;) {
- if (Lexer.isNot(AsmToken::String))
+ if (getLexer().isNot(AsmToken::String))
return TokError("expected string in '.ascii' or '.asciz' directive");
-
+
std::string Data;
if (ParseEscapedString(Data))
return true;
-
- Out.EmitBytes(Data, DEFAULT_ADDRSPACE);
+
+ getStreamer().EmitBytes(Data, DEFAULT_ADDRSPACE);
if (ZeroTerminated)
- Out.EmitBytes(StringRef("\0", 1), DEFAULT_ADDRSPACE);
-
+ getStreamer().EmitBytes(StringRef("\0", 1), DEFAULT_ADDRSPACE);
+
Lex();
-
- if (Lexer.is(AsmToken::EndOfStatement))
+
+ if (getLexer().is(AsmToken::EndOfStatement))
break;
- if (Lexer.isNot(AsmToken::Comma))
+ if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in '.ascii' or '.asciz' directive");
Lex();
}
@@ -1076,24 +878,24 @@ bool AsmParser::ParseDirectiveAscii(bool ZeroTerminated) {
/// ParseDirectiveValue
/// ::= (.byte | .short | ... ) [ expression (, expression)* ]
bool AsmParser::ParseDirectiveValue(unsigned Size) {
- if (Lexer.isNot(AsmToken::EndOfStatement)) {
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
for (;;) {
const MCExpr *Value;
- SMLoc ATTRIBUTE_UNUSED StartLoc = Lexer.getLoc();
+ SMLoc ATTRIBUTE_UNUSED StartLoc = getLexer().getLoc();
if (ParseExpression(Value))
return true;
// Special case constant expressions to match code generator.
if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value))
- Out.EmitIntValue(MCE->getValue(), Size, DEFAULT_ADDRSPACE);
+ getStreamer().EmitIntValue(MCE->getValue(), Size, DEFAULT_ADDRSPACE);
else
- Out.EmitValue(Value, Size, DEFAULT_ADDRSPACE);
+ getStreamer().EmitValue(Value, Size, DEFAULT_ADDRSPACE);
- if (Lexer.is(AsmToken::EndOfStatement))
+ if (getLexer().is(AsmToken::EndOfStatement))
break;
// FIXME: Improve diagnostic.
- if (Lexer.isNot(AsmToken::Comma))
+ if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in directive");
Lex();
}
@@ -1111,18 +913,15 @@ bool AsmParser::ParseDirectiveSpace() {
return true;
int64_t FillExpr = 0;
- bool HasFillExpr = false;
- if (Lexer.isNot(AsmToken::EndOfStatement)) {
- if (Lexer.isNot(AsmToken::Comma))
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in '.space' directive");
Lex();
if (ParseAbsoluteExpression(FillExpr))
return true;
- HasFillExpr = true;
-
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.space' directive");
}
@@ -1132,7 +931,7 @@ bool AsmParser::ParseDirectiveSpace() {
return TokError("invalid number of bytes in '.space' directive");
// FIXME: Sometimes the fill expr is 'nop' if it isn't supplied, instead of 0.
- Out.EmitFill(NumBytes, FillExpr, DEFAULT_ADDRSPACE);
+ getStreamer().EmitFill(NumBytes, FillExpr, DEFAULT_ADDRSPACE);
return false;
}
@@ -1144,7 +943,7 @@ bool AsmParser::ParseDirectiveFill() {
if (ParseAbsoluteExpression(NumValues))
return true;
- if (Lexer.isNot(AsmToken::Comma))
+ if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in '.fill' directive");
Lex();
@@ -1152,7 +951,7 @@ bool AsmParser::ParseDirectiveFill() {
if (ParseAbsoluteExpression(FillSize))
return true;
- if (Lexer.isNot(AsmToken::Comma))
+ if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in '.fill' directive");
Lex();
@@ -1160,7 +959,7 @@ bool AsmParser::ParseDirectiveFill() {
if (ParseAbsoluteExpression(FillExpr))
return true;
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.fill' directive");
Lex();
@@ -1169,7 +968,7 @@ bool AsmParser::ParseDirectiveFill() {
return TokError("invalid '.fill' size, expected 1, 2, 4, or 8");
for (uint64_t i = 0, e = NumValues; i != e; ++i)
- Out.EmitIntValue(FillExpr, FillSize, DEFAULT_ADDRSPACE);
+ getStreamer().EmitIntValue(FillExpr, FillSize, DEFAULT_ADDRSPACE);
return false;
}
@@ -1178,21 +977,20 @@ bool AsmParser::ParseDirectiveFill() {
/// ::= .org expression [ , expression ]
bool AsmParser::ParseDirectiveOrg() {
const MCExpr *Offset;
- SMLoc StartLoc = Lexer.getLoc();
if (ParseExpression(Offset))
return true;
// Parse optional fill expression.
int64_t FillExpr = 0;
- if (Lexer.isNot(AsmToken::EndOfStatement)) {
- if (Lexer.isNot(AsmToken::Comma))
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in '.org' directive");
Lex();
if (ParseAbsoluteExpression(FillExpr))
return true;
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.org' directive");
}
@@ -1200,7 +998,7 @@ bool AsmParser::ParseDirectiveOrg() {
// FIXME: Only limited forms of relocatable expressions are accepted here, it
// has to be relative to the current section.
- Out.EmitValueToOffset(Offset, FillExpr);
+ getStreamer().EmitValueToOffset(Offset, FillExpr);
return false;
}
@@ -1208,7 +1006,7 @@ bool AsmParser::ParseDirectiveOrg() {
/// ParseDirectiveAlign
/// ::= {.align, ...} expression [ , expression [ , expression ]]
bool AsmParser::ParseDirectiveAlign(bool IsPow2, unsigned ValueSize) {
- SMLoc AlignmentLoc = Lexer.getLoc();
+ SMLoc AlignmentLoc = getLexer().getLoc();
int64_t Alignment;
if (ParseAbsoluteExpression(Alignment))
return true;
@@ -1217,30 +1015,30 @@ bool AsmParser::ParseDirectiveAlign(bool IsPow2, unsigned ValueSize) {
bool HasFillExpr = false;
int64_t FillExpr = 0;
int64_t MaxBytesToFill = 0;
- if (Lexer.isNot(AsmToken::EndOfStatement)) {
- if (Lexer.isNot(AsmToken::Comma))
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in directive");
Lex();
// The fill expression can be omitted while specifying a maximum number of
// alignment bytes, e.g:
// .align 3,,4
- if (Lexer.isNot(AsmToken::Comma)) {
+ if (getLexer().isNot(AsmToken::Comma)) {
HasFillExpr = true;
if (ParseAbsoluteExpression(FillExpr))
return true;
}
- if (Lexer.isNot(AsmToken::EndOfStatement)) {
- if (Lexer.isNot(AsmToken::Comma))
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in directive");
Lex();
- MaxBytesLoc = Lexer.getLoc();
+ MaxBytesLoc = getLexer().getLoc();
if (ParseAbsoluteExpression(MaxBytesToFill))
return true;
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in directive");
}
}
@@ -1282,14 +1080,14 @@ bool AsmParser::ParseDirectiveAlign(bool IsPow2, unsigned ValueSize) {
// FIXME: This should be using a target hook.
bool UseCodeAlign = false;
if (const MCSectionMachO *S = dyn_cast<MCSectionMachO>(
- Out.getCurrentSection()))
+ getStreamer().getCurrentSection()))
UseCodeAlign = S->hasAttribute(MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS);
if ((!HasFillExpr || Lexer.getMAI().getTextAlignFillValue() == FillExpr) &&
ValueSize == 1 && UseCodeAlign) {
- Out.EmitCodeAlignment(Alignment, MaxBytesToFill);
+ getStreamer().EmitCodeAlignment(Alignment, MaxBytesToFill);
} else {
// FIXME: Target specific behavior about how the "extra" bytes are filled.
- Out.EmitValueToAlignment(Alignment, FillExpr, ValueSize, MaxBytesToFill);
+ getStreamer().EmitValueToAlignment(Alignment, FillExpr, ValueSize, MaxBytesToFill);
}
return false;
@@ -1298,21 +1096,21 @@ bool AsmParser::ParseDirectiveAlign(bool IsPow2, unsigned ValueSize) {
/// ParseDirectiveSymbolAttribute
/// ::= { ".globl", ".weak", ... } [ identifier ( , identifier )* ]
bool AsmParser::ParseDirectiveSymbolAttribute(MCSymbolAttr Attr) {
- if (Lexer.isNot(AsmToken::EndOfStatement)) {
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
for (;;) {
StringRef Name;
if (ParseIdentifier(Name))
return TokError("expected identifier in directive");
- MCSymbol *Sym = CreateSymbol(Name);
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
- Out.EmitSymbolAttribute(Sym, Attr);
+ getStreamer().EmitSymbolAttribute(Sym, Attr);
- if (Lexer.is(AsmToken::EndOfStatement))
+ if (getLexer().is(AsmToken::EndOfStatement))
break;
- if (Lexer.isNot(AsmToken::Comma))
+ if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in directive");
Lex();
}
@@ -1330,20 +1128,20 @@ bool AsmParser::ParseDirectiveELFType() {
return TokError("expected identifier in directive");
// Handle the identifier as the key symbol.
- MCSymbol *Sym = CreateSymbol(Name);
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
- if (Lexer.isNot(AsmToken::Comma))
+ if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in '.type' directive");
Lex();
- if (Lexer.isNot(AsmToken::At))
+ if (getLexer().isNot(AsmToken::At))
return TokError("expected '@' before type");
Lex();
StringRef Type;
SMLoc TypeLoc;
- TypeLoc = Lexer.getLoc();
+ TypeLoc = getLexer().getLoc();
if (ParseIdentifier(Type))
return TokError("expected symbol type in directive");
@@ -1358,42 +1156,12 @@ bool AsmParser::ParseDirectiveELFType() {
if (Attr == MCSA_Invalid)
return Error(TypeLoc, "unsupported attribute in '.type' directive");
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.type' directive");
Lex();
- Out.EmitSymbolAttribute(Sym, Attr);
-
- return false;
-}
-
-/// ParseDirectiveDarwinSymbolDesc
-/// ::= .desc identifier , expression
-bool AsmParser::ParseDirectiveDarwinSymbolDesc() {
- StringRef Name;
- if (ParseIdentifier(Name))
- return TokError("expected identifier in directive");
-
- // Handle the identifier as the key symbol.
- MCSymbol *Sym = CreateSymbol(Name);
-
- if (Lexer.isNot(AsmToken::Comma))
- return TokError("unexpected token in '.desc' directive");
- Lex();
-
- SMLoc DescLoc = Lexer.getLoc();
- int64_t DescValue;
- if (ParseAbsoluteExpression(DescValue))
- return true;
-
- if (Lexer.isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in '.desc' directive");
-
- Lex();
-
- // Set the n_desc field of this Symbol to this DescValue
- Out.EmitSymbolDesc(Sym, DescValue);
+ getStreamer().EmitSymbolAttribute(Sym, Attr);
return false;
}
@@ -1401,28 +1169,28 @@ bool AsmParser::ParseDirectiveDarwinSymbolDesc() {
/// ParseDirectiveComm
/// ::= ( .comm | .lcomm ) identifier , size_expression [ , align_expression ]
bool AsmParser::ParseDirectiveComm(bool IsLocal) {
- SMLoc IDLoc = Lexer.getLoc();
+ SMLoc IDLoc = getLexer().getLoc();
StringRef Name;
if (ParseIdentifier(Name))
return TokError("expected identifier in directive");
// Handle the identifier as the key symbol.
- MCSymbol *Sym = CreateSymbol(Name);
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
- if (Lexer.isNot(AsmToken::Comma))
+ if (getLexer().isNot(AsmToken::Comma))
return TokError("unexpected token in directive");
Lex();
int64_t Size;
- SMLoc SizeLoc = Lexer.getLoc();
+ SMLoc SizeLoc = getLexer().getLoc();
if (ParseAbsoluteExpression(Size))
return true;
int64_t Pow2Alignment = 0;
SMLoc Pow2AlignmentLoc;
- if (Lexer.is(AsmToken::Comma)) {
+ if (getLexer().is(AsmToken::Comma)) {
Lex();
- Pow2AlignmentLoc = Lexer.getLoc();
+ Pow2AlignmentLoc = getLexer().getLoc();
if (ParseAbsoluteExpression(Pow2Alignment))
return true;
@@ -1434,7 +1202,7 @@ bool AsmParser::ParseDirectiveComm(bool IsLocal) {
}
}
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.comm' or '.lcomm' directive");
Lex();
@@ -1458,168 +1226,14 @@ bool AsmParser::ParseDirectiveComm(bool IsLocal) {
// '.lcomm' is equivalent to '.zerofill'.
// Create the Symbol as a common or local common with Size and Pow2Alignment
if (IsLocal) {
- Out.EmitZerofill(Ctx.getMachOSection("__DATA", "__bss",
- MCSectionMachO::S_ZEROFILL, 0,
- SectionKind::getBSS()),
- Sym, Size, 1 << Pow2Alignment);
+ getStreamer().EmitZerofill(Ctx.getMachOSection(
+ "__DATA", "__bss", MCSectionMachO::S_ZEROFILL,
+ 0, SectionKind::getBSS()),
+ Sym, Size, 1 << Pow2Alignment);
return false;
}
- Out.EmitCommonSymbol(Sym, Size, 1 << Pow2Alignment);
- return false;
-}
-
-/// ParseDirectiveDarwinZerofill
-/// ::= .zerofill segname , sectname [, identifier , size_expression [
-/// , align_expression ]]
-bool AsmParser::ParseDirectiveDarwinZerofill() {
- StringRef Segment;
- if (ParseIdentifier(Segment))
- return TokError("expected segment name after '.zerofill' directive");
-
- if (Lexer.isNot(AsmToken::Comma))
- return TokError("unexpected token in directive");
- Lex();
-
- StringRef Section;
- if (ParseIdentifier(Section))
- return TokError("expected section name after comma in '.zerofill' "
- "directive");
-
- // If this is the end of the line all that was wanted was to create the
- // the section but with no symbol.
- if (Lexer.is(AsmToken::EndOfStatement)) {
- // Create the zerofill section but no symbol
- Out.EmitZerofill(Ctx.getMachOSection(Segment, Section,
- MCSectionMachO::S_ZEROFILL, 0,
- SectionKind::getBSS()));
- return false;
- }
-
- if (Lexer.isNot(AsmToken::Comma))
- return TokError("unexpected token in directive");
- Lex();
-
- SMLoc IDLoc = Lexer.getLoc();
- StringRef IDStr;
- if (ParseIdentifier(IDStr))
- return TokError("expected identifier in directive");
-
- // handle the identifier as the key symbol.
- MCSymbol *Sym = CreateSymbol(IDStr);
-
- if (Lexer.isNot(AsmToken::Comma))
- return TokError("unexpected token in directive");
- Lex();
-
- int64_t Size;
- SMLoc SizeLoc = Lexer.getLoc();
- if (ParseAbsoluteExpression(Size))
- return true;
-
- int64_t Pow2Alignment = 0;
- SMLoc Pow2AlignmentLoc;
- if (Lexer.is(AsmToken::Comma)) {
- Lex();
- Pow2AlignmentLoc = Lexer.getLoc();
- if (ParseAbsoluteExpression(Pow2Alignment))
- return true;
- }
-
- if (Lexer.isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in '.zerofill' directive");
-
- Lex();
-
- if (Size < 0)
- return Error(SizeLoc, "invalid '.zerofill' directive size, can't be less "
- "than zero");
-
- // NOTE: The alignment in the directive is a power of 2 value, the assembler
- // may internally end up wanting an alignment in bytes.
- // FIXME: Diagnose overflow.
- if (Pow2Alignment < 0)
- return Error(Pow2AlignmentLoc, "invalid '.zerofill' directive alignment, "
- "can't be less than zero");
-
- if (!Sym->isUndefined())
- return Error(IDLoc, "invalid symbol redefinition");
-
- // Create the zerofill Symbol with Size and Pow2Alignment
- //
- // FIXME: Arch specific.
- Out.EmitZerofill(Ctx.getMachOSection(Segment, Section,
- MCSectionMachO::S_ZEROFILL, 0,
- SectionKind::getBSS()),
- Sym, Size, 1 << Pow2Alignment);
-
- return false;
-}
-
-/// ParseDirectiveDarwinTBSS
-/// ::= .tbss identifier, size, align
-bool AsmParser::ParseDirectiveDarwinTBSS() {
- SMLoc IDLoc = Lexer.getLoc();
- StringRef Name;
- if (ParseIdentifier(Name))
- return TokError("expected identifier in directive");
-
- // Handle the identifier as the key symbol.
- MCSymbol *Sym = CreateSymbol(Name);
-
- if (Lexer.isNot(AsmToken::Comma))
- return TokError("unexpected token in directive");
- Lex();
-
- int64_t Size;
- SMLoc SizeLoc = Lexer.getLoc();
- if (ParseAbsoluteExpression(Size))
- return true;
-
- int64_t Pow2Alignment = 0;
- SMLoc Pow2AlignmentLoc;
- if (Lexer.is(AsmToken::Comma)) {
- Lex();
- Pow2AlignmentLoc = Lexer.getLoc();
- if (ParseAbsoluteExpression(Pow2Alignment))
- return true;
- }
-
- if (Lexer.isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in '.tbss' directive");
-
- Lex();
-
- if (Size < 0)
- return Error(SizeLoc, "invalid '.tbss' directive size, can't be less than"
- "zero");
-
- // FIXME: Diagnose overflow.
- if (Pow2Alignment < 0)
- return Error(Pow2AlignmentLoc, "invalid '.tbss' alignment, can't be less"
- "than zero");
-
- if (!Sym->isUndefined())
- return Error(IDLoc, "invalid symbol redefinition");
-
- Out.EmitTBSSSymbol(Ctx.getMachOSection("__DATA", "__thread_bss",
- MCSectionMachO::S_THREAD_LOCAL_ZEROFILL,
- 0, SectionKind::getThreadBSS()),
- Sym, Size, 1 << Pow2Alignment);
-
- return false;
-}
-
-/// ParseDirectiveDarwinSubsectionsViaSymbols
-/// ::= .subsections_via_symbols
-bool AsmParser::ParseDirectiveDarwinSubsectionsViaSymbols() {
- if (Lexer.isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in '.subsections_via_symbols' directive");
-
- Lex();
-
- Out.EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
-
+ getStreamer().EmitCommonSymbol(Sym, Size, 1 << Pow2Alignment);
return false;
}
@@ -1627,11 +1241,11 @@ bool AsmParser::ParseDirectiveDarwinSubsectionsViaSymbols() {
/// ::= .abort [ "abort_string" ]
bool AsmParser::ParseDirectiveAbort() {
// FIXME: Use loc from directive.
- SMLoc Loc = Lexer.getLoc();
+ SMLoc Loc = getLexer().getLoc();
StringRef Str = "";
- if (Lexer.isNot(AsmToken::EndOfStatement)) {
- if (Lexer.isNot(AsmToken::String))
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ if (getLexer().isNot(AsmToken::String))
return TokError("expected string in '.abort' directive");
Str = getTok().getString();
@@ -1639,7 +1253,7 @@ bool AsmParser::ParseDirectiveAbort() {
Lex();
}
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.abort' directive");
Lex();
@@ -1653,48 +1267,17 @@ bool AsmParser::ParseDirectiveAbort() {
return false;
}
-/// ParseDirectiveLsym
-/// ::= .lsym identifier , expression
-bool AsmParser::ParseDirectiveDarwinLsym() {
- StringRef Name;
- if (ParseIdentifier(Name))
- return TokError("expected identifier in directive");
-
- // Handle the identifier as the key symbol.
- MCSymbol *Sym = CreateSymbol(Name);
-
- if (Lexer.isNot(AsmToken::Comma))
- return TokError("unexpected token in '.lsym' directive");
- Lex();
-
- const MCExpr *Value;
- SMLoc StartLoc = Lexer.getLoc();
- if (ParseExpression(Value))
- return true;
-
- if (Lexer.isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in '.lsym' directive");
-
- Lex();
-
- // We don't currently support this directive.
- //
- // FIXME: Diagnostic location!
- (void) Sym;
- return TokError("directive '.lsym' is unsupported");
-}
-
/// ParseDirectiveInclude
/// ::= .include "filename"
bool AsmParser::ParseDirectiveInclude() {
- if (Lexer.isNot(AsmToken::String))
+ if (getLexer().isNot(AsmToken::String))
return TokError("expected string in '.include' directive");
std::string Filename = getTok().getString();
- SMLoc IncludeLoc = Lexer.getLoc();
+ SMLoc IncludeLoc = getLexer().getLoc();
Lex();
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.include' directive");
// Strip the quotes.
@@ -1712,29 +1295,6 @@ bool AsmParser::ParseDirectiveInclude() {
return false;
}
-/// ParseDirectiveDarwinDumpOrLoad
-/// ::= ( .dump | .load ) "filename"
-bool AsmParser::ParseDirectiveDarwinDumpOrLoad(SMLoc IDLoc, bool IsDump) {
- if (Lexer.isNot(AsmToken::String))
- return TokError("expected string in '.dump' or '.load' directive");
-
- Lex();
-
- if (Lexer.isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in '.dump' or '.load' directive");
-
- Lex();
-
- // FIXME: If/when .dump and .load are implemented they will be done in the
- // the assembly parser and not have any need for an MCStreamer API.
- if (IsDump)
- Warning(IDLoc, "ignoring directive .dump for now");
- else
- Warning(IDLoc, "ignoring directive .load for now");
-
- return false;
-}
-
/// ParseDirectiveIf
/// ::= .if expression
bool AsmParser::ParseDirectiveIf(SMLoc DirectiveLoc) {
@@ -1748,7 +1308,7 @@ bool AsmParser::ParseDirectiveIf(SMLoc DirectiveLoc) {
if (ParseAbsoluteExpression(ExprValue))
return true;
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.if' directive");
Lex();
@@ -1781,7 +1341,7 @@ bool AsmParser::ParseDirectiveElseIf(SMLoc DirectiveLoc) {
if (ParseAbsoluteExpression(ExprValue))
return true;
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.elseif' directive");
Lex();
@@ -1795,7 +1355,7 @@ bool AsmParser::ParseDirectiveElseIf(SMLoc DirectiveLoc) {
/// ParseDirectiveElse
/// ::= .else
bool AsmParser::ParseDirectiveElse(SMLoc DirectiveLoc) {
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.else' directive");
Lex();
@@ -1819,7 +1379,7 @@ bool AsmParser::ParseDirectiveElse(SMLoc DirectiveLoc) {
/// ParseDirectiveEndIf
/// ::= .endif
bool AsmParser::ParseDirectiveEndIf(SMLoc DirectiveLoc) {
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.endif' directive");
Lex();
@@ -1838,40 +1398,40 @@ bool AsmParser::ParseDirectiveEndIf(SMLoc DirectiveLoc) {
/// ParseDirectiveFile
/// ::= .file [number] string
-bool AsmParser::ParseDirectiveFile(StringRef, SMLoc DirectiveLoc) {
+bool GenericAsmParser::ParseDirectiveFile(StringRef, SMLoc DirectiveLoc) {
// FIXME: I'm not sure what this is.
int64_t FileNumber = -1;
- if (Lexer.is(AsmToken::Integer)) {
+ if (getLexer().is(AsmToken::Integer)) {
FileNumber = getTok().getIntVal();
Lex();
-
+
if (FileNumber < 1)
return TokError("file number less than one");
}
- if (Lexer.isNot(AsmToken::String))
+ if (getLexer().isNot(AsmToken::String))
return TokError("unexpected token in '.file' directive");
-
+
StringRef Filename = getTok().getString();
Filename = Filename.substr(1, Filename.size()-2);
Lex();
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.file' directive");
if (FileNumber == -1)
- Out.EmitFileDirective(Filename);
+ getStreamer().EmitFileDirective(Filename);
else
- Out.EmitDwarfFileDirective(FileNumber, Filename);
-
+ getStreamer().EmitDwarfFileDirective(FileNumber, Filename);
+
return false;
}
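
Schematically, the two emission paths above map onto the two accepted forms of the directive (a sketch; streamer calls as in the code):

    // .file "foo.s"     =>  getStreamer().EmitFileDirective("foo.s")
    // .file 1 "foo.s"   =>  getStreamer().EmitDwarfFileDirective(1, "foo.s")
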
/// ParseDirectiveLine
/// ::= .line [number]
-bool AsmParser::ParseDirectiveLine(StringRef, SMLoc DirectiveLoc) {
- if (Lexer.isNot(AsmToken::EndOfStatement)) {
- if (Lexer.isNot(AsmToken::Integer))
+bool GenericAsmParser::ParseDirectiveLine(StringRef, SMLoc DirectiveLoc) {
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ if (getLexer().isNot(AsmToken::Integer))
return TokError("unexpected token in '.line' directive");
int64_t LineNumber = getTok().getIntVal();
@@ -1881,8 +1441,8 @@ bool AsmParser::ParseDirectiveLine(StringRef, SMLoc DirectiveLoc) {
// FIXME: Do something with the .line.
}
- if (Lexer.isNot(AsmToken::EndOfStatement))
- return TokError("unexpected token in '.file' directive");
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.line' directive");
return false;
}
@@ -1890,8 +1450,8 @@ bool AsmParser::ParseDirectiveLine(StringRef, SMLoc DirectiveLoc) {
/// ParseDirectiveLoc
/// ::= .loc number [number [number]]
-bool AsmParser::ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc) {
- if (Lexer.isNot(AsmToken::Integer))
+bool GenericAsmParser::ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc) {
+ if (getLexer().isNot(AsmToken::Integer))
return TokError("unexpected token in '.loc' directive");
// FIXME: What are these fields?
@@ -1900,16 +1460,16 @@ bool AsmParser::ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc) {
// FIXME: Validate file.
Lex();
- if (Lexer.isNot(AsmToken::EndOfStatement)) {
- if (Lexer.isNot(AsmToken::Integer))
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ if (getLexer().isNot(AsmToken::Integer))
return TokError("unexpected token in '.loc' directive");
int64_t Param2 = getTok().getIntVal();
(void) Param2;
Lex();
- if (Lexer.isNot(AsmToken::EndOfStatement)) {
- if (Lexer.isNot(AsmToken::Integer))
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ if (getLexer().isNot(AsmToken::Integer))
return TokError("unexpected token in '.loc' directive");
int64_t Param3 = getTok().getIntVal();
@@ -1920,7 +1480,7 @@ bool AsmParser::ParseDirectiveLoc(StringRef, SMLoc DirectiveLoc) {
}
}
- if (Lexer.isNot(AsmToken::EndOfStatement))
+ if (getLexer().isNot(AsmToken::EndOfStatement))
return TokError("unexpected token in '.file' directive");
return false;
diff --git a/contrib/llvm/lib/MC/MCParser/CMakeLists.txt b/contrib/llvm/lib/MC/MCParser/CMakeLists.txt
index a5c0818..25a7bf4 100644
--- a/contrib/llvm/lib/MC/MCParser/CMakeLists.txt
+++ b/contrib/llvm/lib/MC/MCParser/CMakeLists.txt
@@ -1,7 +1,10 @@
add_llvm_library(LLVMMCParser
AsmLexer.cpp
AsmParser.cpp
+ DarwinAsmParser.cpp
+ ELFAsmParser.cpp
MCAsmLexer.cpp
MCAsmParser.cpp
+ MCAsmParserExtension.cpp
TargetAsmParser.cpp
)
diff --git a/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
new file mode 100644
index 0000000..7d8639e
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCParser/DarwinAsmParser.cpp
@@ -0,0 +1,758 @@
+//===- DarwinAsmParser.cpp - Darwin (Mach-O) Assembly Parser --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCParser/MCAsmParserExtension.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+using namespace llvm;
+
+namespace {
+
+/// \brief Implementation of directive handling which is shared across all
+/// Darwin targets.
+class DarwinAsmParser : public MCAsmParserExtension {
+ bool ParseSectionSwitch(const char *Segment, const char *Section,
+ unsigned TAA = 0, unsigned ImplicitAlign = 0,
+ unsigned StubSize = 0);
+
+public:
+ DarwinAsmParser() {}
+
+ virtual void Initialize(MCAsmParser &Parser) {
+ // Call the base implementation.
+ this->MCAsmParserExtension::Initialize(Parser);
+
+ Parser.AddDirectiveHandler(this, ".desc", MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseDirectiveDesc));
+ Parser.AddDirectiveHandler(this, ".lsym", MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseDirectiveLsym));
+ Parser.AddDirectiveHandler(this, ".subsections_via_symbols",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseDirectiveSubsectionsViaSymbols));
+ Parser.AddDirectiveHandler(this, ".dump", MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseDirectiveDumpOrLoad));
+ Parser.AddDirectiveHandler(this, ".load", MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseDirectiveDumpOrLoad));
+ Parser.AddDirectiveHandler(this, ".section", MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseDirectiveSection));
+ Parser.AddDirectiveHandler(this, ".secure_log_unique",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseDirectiveSecureLogUnique));
+ Parser.AddDirectiveHandler(this, ".secure_log_reset",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseDirectiveSecureLogReset));
+ Parser.AddDirectiveHandler(this, ".tbss",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseDirectiveTBSS));
+ Parser.AddDirectiveHandler(this, ".zerofill",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseDirectiveZerofill));
+
+ // Special section directives.
+ Parser.AddDirectiveHandler(this, ".const",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveConst));
+ Parser.AddDirectiveHandler(this, ".const_data",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveConstData));
+ Parser.AddDirectiveHandler(this, ".constructor",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveConstructor));
+ Parser.AddDirectiveHandler(this, ".cstring",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveCString));
+ Parser.AddDirectiveHandler(this, ".data",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveData));
+ Parser.AddDirectiveHandler(this, ".destructor",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveDestructor));
+ Parser.AddDirectiveHandler(this, ".dyld",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveDyld));
+ Parser.AddDirectiveHandler(this, ".fvmlib_init0",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveFVMLibInit0));
+ Parser.AddDirectiveHandler(this, ".fvmlib_init1",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveFVMLibInit1));
+ Parser.AddDirectiveHandler(this, ".lazy_symbol_pointer",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveLazySymbolPointers));
+ Parser.AddDirectiveHandler(this, ".literal16",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveLiteral16));
+ Parser.AddDirectiveHandler(this, ".literal4",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveLiteral4));
+ Parser.AddDirectiveHandler(this, ".literal8",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveLiteral8));
+ Parser.AddDirectiveHandler(this, ".mod_init_func",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveModInitFunc));
+ Parser.AddDirectiveHandler(this, ".mod_term_func",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveModTermFunc));
+ Parser.AddDirectiveHandler(this, ".non_lazy_symbol_pointer",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveNonLazySymbolPointers));
+ Parser.AddDirectiveHandler(this, ".objc_cat_cls_meth",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCCatClsMeth));
+ Parser.AddDirectiveHandler(this, ".objc_cat_inst_meth",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCCatInstMeth));
+ Parser.AddDirectiveHandler(this, ".objc_category",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCCategory));
+ Parser.AddDirectiveHandler(this, ".objc_class",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCClass));
+ Parser.AddDirectiveHandler(this, ".objc_class_names",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCClassNames));
+ Parser.AddDirectiveHandler(this, ".objc_class_vars",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCClassVars));
+ Parser.AddDirectiveHandler(this, ".objc_cls_meth",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCClsMeth));
+ Parser.AddDirectiveHandler(this, ".objc_cls_refs",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCClsRefs));
+ Parser.AddDirectiveHandler(this, ".objc_inst_meth",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCInstMeth));
+ Parser.AddDirectiveHandler(this, ".objc_instance_vars",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCInstanceVars));
+ Parser.AddDirectiveHandler(this, ".objc_message_refs",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCMessageRefs));
+ Parser.AddDirectiveHandler(this, ".objc_meta_class",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCMetaClass));
+ Parser.AddDirectiveHandler(this, ".objc_meth_var_names",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCMethVarNames));
+ Parser.AddDirectiveHandler(this, ".objc_meth_var_types",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCMethVarTypes));
+ Parser.AddDirectiveHandler(this, ".objc_module_info",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCModuleInfo));
+ Parser.AddDirectiveHandler(this, ".objc_protocol",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCProtocol));
+ Parser.AddDirectiveHandler(this, ".objc_selector_strs",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCSelectorStrs));
+ Parser.AddDirectiveHandler(this, ".objc_string_object",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCStringObject));
+ Parser.AddDirectiveHandler(this, ".objc_symbols",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveObjCSymbols));
+ Parser.AddDirectiveHandler(this, ".picsymbol_stub",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectivePICSymbolStub));
+ Parser.AddDirectiveHandler(this, ".static_const",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveStaticConst));
+ Parser.AddDirectiveHandler(this, ".static_data",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveStaticData));
+ Parser.AddDirectiveHandler(this, ".symbol_stub",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveSymbolStub));
+ Parser.AddDirectiveHandler(this, ".tdata",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveTData));
+ Parser.AddDirectiveHandler(this, ".text",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveText));
+ Parser.AddDirectiveHandler(this, ".thread_init_func",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveThreadInitFunc));
+ Parser.AddDirectiveHandler(this, ".tlv",
+ MCAsmParser::DirectiveHandler(
+ &DarwinAsmParser::ParseSectionDirectiveTLV));
+ }
+
+ bool ParseDirectiveDesc(StringRef, SMLoc);
+ bool ParseDirectiveDumpOrLoad(StringRef, SMLoc);
+ bool ParseDirectiveLsym(StringRef, SMLoc);
+ bool ParseDirectiveSection();
+ bool ParseDirectiveSecureLogReset(StringRef, SMLoc);
+ bool ParseDirectiveSecureLogUnique(StringRef, SMLoc);
+ bool ParseDirectiveSubsectionsViaSymbols(StringRef, SMLoc);
+ bool ParseDirectiveTBSS(StringRef, SMLoc);
+ bool ParseDirectiveZerofill(StringRef, SMLoc);
+
+ // Named Section Directive
+ bool ParseSectionDirectiveConst(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT", "__const");
+ }
+ bool ParseSectionDirectiveStaticConst(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT", "__static_const");
+ }
+ bool ParseSectionDirectiveCString(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT","__cstring",
+ MCSectionMachO::S_CSTRING_LITERALS);
+ }
+ bool ParseSectionDirectiveLiteral4(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT", "__literal4",
+ MCSectionMachO::S_4BYTE_LITERALS, 4);
+ }
+ bool ParseSectionDirectiveLiteral8(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT", "__literal8",
+ MCSectionMachO::S_8BYTE_LITERALS, 8);
+ }
+ bool ParseSectionDirectiveLiteral16(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT","__literal16",
+ MCSectionMachO::S_16BYTE_LITERALS, 16);
+ }
+ bool ParseSectionDirectiveConstructor(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT","__constructor");
+ }
+ bool ParseSectionDirectiveDestructor(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT","__destructor");
+ }
+ bool ParseSectionDirectiveFVMLibInit0(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT","__fvmlib_init0");
+ }
+ bool ParseSectionDirectiveFVMLibInit1(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT","__fvmlib_init1");
+ }
+ bool ParseSectionDirectiveSymbolStub(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT","__symbol_stub",
+ MCSectionMachO::S_SYMBOL_STUBS |
+ MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS,
+ // FIXME: Different on PPC and ARM.
+ 0, 16);
+ }
+ bool ParseSectionDirectivePICSymbolStub(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT","__picsymbol_stub",
+ MCSectionMachO::S_SYMBOL_STUBS |
+ MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS, 0, 26);
+ }
+ bool ParseSectionDirectiveData(StringRef, SMLoc) {
+ return ParseSectionSwitch("__DATA", "__data");
+ }
+ bool ParseSectionDirectiveStaticData(StringRef, SMLoc) {
+ return ParseSectionSwitch("__DATA", "__static_data");
+ }
+ bool ParseSectionDirectiveNonLazySymbolPointers(StringRef, SMLoc) {
+ return ParseSectionSwitch("__DATA", "__nl_symbol_ptr",
+ MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS, 4);
+ }
+ bool ParseSectionDirectiveLazySymbolPointers(StringRef, SMLoc) {
+ return ParseSectionSwitch("__DATA", "__la_symbol_ptr",
+ MCSectionMachO::S_LAZY_SYMBOL_POINTERS, 4);
+ }
+ bool ParseSectionDirectiveDyld(StringRef, SMLoc) {
+ return ParseSectionSwitch("__DATA", "__dyld");
+ }
+ bool ParseSectionDirectiveModInitFunc(StringRef, SMLoc) {
+ return ParseSectionSwitch("__DATA", "__mod_init_func",
+ MCSectionMachO::S_MOD_INIT_FUNC_POINTERS, 4);
+ }
+ bool ParseSectionDirectiveModTermFunc(StringRef, SMLoc) {
+ return ParseSectionSwitch("__DATA", "__mod_term_func",
+ MCSectionMachO::S_MOD_TERM_FUNC_POINTERS, 4);
+ }
+ bool ParseSectionDirectiveConstData(StringRef, SMLoc) {
+ return ParseSectionSwitch("__DATA", "__const");
+ }
+ bool ParseSectionDirectiveObjCClass(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__class",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCMetaClass(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__meta_class",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCCatClsMeth(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__cat_cls_meth",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCCatInstMeth(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__cat_inst_meth",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCProtocol(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__protocol",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCStringObject(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__string_object",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCClsMeth(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__cls_meth",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCInstMeth(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__inst_meth",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCClsRefs(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__cls_refs",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP |
+ MCSectionMachO::S_LITERAL_POINTERS, 4);
+ }
+ bool ParseSectionDirectiveObjCMessageRefs(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__message_refs",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP |
+ MCSectionMachO::S_LITERAL_POINTERS, 4);
+ }
+ bool ParseSectionDirectiveObjCSymbols(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__symbols",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCCategory(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__category",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCClassVars(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__class_vars",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCInstanceVars(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__instance_vars",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCModuleInfo(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__module_info",
+ MCSectionMachO::S_ATTR_NO_DEAD_STRIP);
+ }
+ bool ParseSectionDirectiveObjCClassNames(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT", "__cstring",
+ MCSectionMachO::S_CSTRING_LITERALS);
+ }
+ bool ParseSectionDirectiveObjCMethVarTypes(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT", "__cstring",
+ MCSectionMachO::S_CSTRING_LITERALS);
+ }
+ bool ParseSectionDirectiveObjCMethVarNames(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT", "__cstring",
+ MCSectionMachO::S_CSTRING_LITERALS);
+ }
+ bool ParseSectionDirectiveObjCSelectorStrs(StringRef, SMLoc) {
+ return ParseSectionSwitch("__OBJC", "__selector_strs",
+ MCSectionMachO::S_CSTRING_LITERALS);
+ }
+ bool ParseSectionDirectiveTData(StringRef, SMLoc) {
+ return ParseSectionSwitch("__DATA", "__thread_data",
+ MCSectionMachO::S_THREAD_LOCAL_REGULAR);
+ }
+ bool ParseSectionDirectiveText(StringRef, SMLoc) {
+ return ParseSectionSwitch("__TEXT", "__text",
+ MCSectionMachO::S_ATTR_PURE_INSTRUCTIONS);
+ }
+ bool ParseSectionDirectiveTLV(StringRef, SMLoc) {
+ return ParseSectionSwitch("__DATA", "__thread_vars",
+ MCSectionMachO::S_THREAD_LOCAL_VARIABLES);
+ }
+ bool ParseSectionDirectiveThreadInitFunc(StringRef, SMLoc) {
+ return ParseSectionSwitch("__DATA", "__thread_init",
+ MCSectionMachO::S_THREAD_LOCAL_INIT_FUNCTION_POINTERS);
+ }
+
+};
+
+}
+
+bool DarwinAsmParser::ParseSectionSwitch(const char *Segment,
+ const char *Section,
+ unsigned TAA, unsigned Align,
+ unsigned StubSize) {
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in section switching directive");
+ Lex();
+
+ // FIXME: Arch specific.
+ bool isText = StringRef(Segment) == "__TEXT"; // FIXME: Hack.
+ getStreamer().SwitchSection(getContext().getMachOSection(
+ Segment, Section, TAA, StubSize,
+ isText ? SectionKind::getText()
+ : SectionKind::getDataRel()));
+
+ // Set the implicit alignment, if any.
+ //
+ // FIXME: This isn't really what 'as' does; I think it just uses the implicit
+ // alignment on the section (e.g., if one manually inserts bytes into the
+  // section, then just issuing the section switch directive will not realign
+  // the section). However, this is arguably more reasonable behavior, and there
+ // is no good reason for someone to intentionally emit incorrectly sized
+ // values into the implicitly aligned sections.
+ if (Align)
+ getStreamer().EmitValueToAlignment(Align, 0, 1, 0);
+
+ return false;
+}
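+
+// Each named-section directive registered above boils down to one such call;
+// for instance '.literal8' (arguments copied from the handler table) both
+// switches sections and pads to an 8-byte boundary:
+//
+//   ParseSectionSwitch("__TEXT", "__literal8",
+//                      MCSectionMachO::S_8BYTE_LITERALS, /*ImplicitAlign=*/8);
+//
+// ...which ends by emitting:
+//   getStreamer().EmitValueToAlignment(8, 0, 1, 0);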
+
+/// ParseDirectiveDesc
+/// ::= .desc identifier , expression
+bool DarwinAsmParser::ParseDirectiveDesc(StringRef, SMLoc) {
+ StringRef Name;
+ if (getParser().ParseIdentifier(Name))
+ return TokError("expected identifier in directive");
+
+ // Handle the identifier as the key symbol.
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in '.desc' directive");
+ Lex();
+
+ int64_t DescValue;
+ if (getParser().ParseAbsoluteExpression(DescValue))
+ return true;
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.desc' directive");
+
+ Lex();
+
+ // Set the n_desc field of this Symbol to this DescValue
+ getStreamer().EmitSymbolDesc(Sym, DescValue);
+
+ return false;
+}
+
+/// ParseDirectiveDumpOrLoad
+/// ::= ( .dump | .load ) "filename"
+bool DarwinAsmParser::ParseDirectiveDumpOrLoad(StringRef Directive,
+ SMLoc IDLoc) {
+ bool IsDump = Directive == ".dump";
+ if (getLexer().isNot(AsmToken::String))
+ return TokError("expected string in '.dump' or '.load' directive");
+
+ Lex();
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.dump' or '.load' directive");
+
+ Lex();
+
+ // FIXME: If/when .dump and .load are implemented they will be done in the
+  // assembly parser and not have any need for an MCStreamer API.
+ if (IsDump)
+ Warning(IDLoc, "ignoring directive .dump for now");
+ else
+ Warning(IDLoc, "ignoring directive .load for now");
+
+ return false;
+}
+
+/// ParseDirectiveLsym
+/// ::= .lsym identifier , expression
+bool DarwinAsmParser::ParseDirectiveLsym(StringRef, SMLoc) {
+ StringRef Name;
+ if (getParser().ParseIdentifier(Name))
+ return TokError("expected identifier in directive");
+
+ // Handle the identifier as the key symbol.
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in '.lsym' directive");
+ Lex();
+
+ const MCExpr *Value;
+ if (getParser().ParseExpression(Value))
+ return true;
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.lsym' directive");
+
+ Lex();
+
+ // We don't currently support this directive.
+ //
+ // FIXME: Diagnostic location!
+ (void) Sym;
+ return TokError("directive '.lsym' is unsupported");
+}
+
+/// ParseDirectiveSection:
+/// ::= .section identifier (',' identifier)*
+bool DarwinAsmParser::ParseDirectiveSection() {
+ SMLoc Loc = getLexer().getLoc();
+
+ StringRef SectionName;
+ if (getParser().ParseIdentifier(SectionName))
+ return Error(Loc, "expected identifier after '.section' directive");
+
+ // Verify there is a following comma.
+ if (!getLexer().is(AsmToken::Comma))
+ return TokError("unexpected token in '.section' directive");
+
+ std::string SectionSpec = SectionName;
+ SectionSpec += ",";
+
+ // Add all the tokens until the end of the line, ParseSectionSpecifier will
+ // handle this.
+ StringRef EOL = getLexer().LexUntilEndOfStatement();
+ SectionSpec.append(EOL.begin(), EOL.end());
+
+ Lex();
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.section' directive");
+ Lex();
+
+
+ StringRef Segment, Section;
+ unsigned TAA, StubSize;
+ std::string ErrorStr =
+ MCSectionMachO::ParseSectionSpecifier(SectionSpec, Segment, Section,
+ TAA, StubSize);
+
+ if (!ErrorStr.empty())
+ return Error(Loc, ErrorStr.c_str());
+
+ // FIXME: Arch specific.
+ bool isText = Segment == "__TEXT"; // FIXME: Hack.
+ getStreamer().SwitchSection(getContext().getMachOSection(
+ Segment, Section, TAA, StubSize,
+ isText ? SectionKind::getText()
+ : SectionKind::getDataRel()));
+ return false;
+}
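+
+// Illustrative sketch (not part of the original change): the specifier string
+// rebuilt above is handed to MCSectionMachO::ParseSectionSpecifier, so e.g.
+//
+//   .section __TEXT,__cstring,cstring_literals
+//
+// switches the streamer to the (__TEXT,__cstring) Mach-O section.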
+
+/// ParseDirectiveSecureLogUnique
+/// ::= .secure_log_unique "log message"
+bool DarwinAsmParser::ParseDirectiveSecureLogUnique(StringRef, SMLoc IDLoc) {
+ std::string LogMessage;
+
+ if (getLexer().isNot(AsmToken::String))
+ LogMessage = "";
+ else {
+ LogMessage = getTok().getString();
+ Lex();
+ }
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.secure_log_unique' directive");
+
+ if (getContext().getSecureLogUsed())
+ return Error(IDLoc, ".secure_log_unique specified multiple times");
+
+ char *SecureLogFile = getContext().getSecureLogFile();
+ if (SecureLogFile == NULL)
+ return Error(IDLoc, ".secure_log_unique used but AS_SECURE_LOG_FILE "
+ "environment variable unset.");
+
+ raw_ostream *OS = getContext().getSecureLog();
+ if (OS == NULL) {
+ std::string Err;
+ OS = new raw_fd_ostream(SecureLogFile, Err, raw_fd_ostream::F_Append);
+ if (!Err.empty()) {
+ delete OS;
+ return Error(IDLoc, Twine("can't open secure log file: ") +
+ SecureLogFile + " (" + Err + ")");
+ }
+ getContext().setSecureLog(OS);
+ }
+
+ int CurBuf = getSourceManager().FindBufferContainingLoc(IDLoc);
+ *OS << getSourceManager().getBufferInfo(CurBuf).Buffer->getBufferIdentifier()
+ << ":" << getSourceManager().FindLineNumber(IDLoc, CurBuf) << ":"
+ << LogMessage << "\n";
+
+ getContext().setSecureLogUsed(true);
+
+ return false;
+}
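+
+// Illustrative only: with AS_SECURE_LOG_FILE=/tmp/as.log in the environment,
+// a '.secure_log_unique "hello"' on line 7 of foo.s appends a line of the
+// form <file>:<line>:<message> (here, starting "foo.s:7:") to the log; a
+// second use without an intervening .secure_log_reset is an error.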
+
+/// ParseDirectiveSecureLogReset
+/// ::= .secure_log_reset
+bool DarwinAsmParser::ParseDirectiveSecureLogReset(StringRef, SMLoc IDLoc) {
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.secure_log_reset' directive");
+
+ Lex();
+
+ getContext().setSecureLogUsed(false);
+
+ return false;
+}
+
+/// ParseDirectiveSubsectionsViaSymbols
+/// ::= .subsections_via_symbols
+bool DarwinAsmParser::ParseDirectiveSubsectionsViaSymbols(StringRef, SMLoc) {
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.subsections_via_symbols' directive");
+
+ Lex();
+
+ getStreamer().EmitAssemblerFlag(MCAF_SubsectionsViaSymbols);
+
+ return false;
+}
+
+/// ParseDirectiveTBSS
+/// ::= .tbss identifier, size, align
+bool DarwinAsmParser::ParseDirectiveTBSS(StringRef, SMLoc) {
+ SMLoc IDLoc = getLexer().getLoc();
+ StringRef Name;
+ if (getParser().ParseIdentifier(Name))
+ return TokError("expected identifier in directive");
+
+ // Handle the identifier as the key symbol.
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in directive");
+ Lex();
+
+ int64_t Size;
+ SMLoc SizeLoc = getLexer().getLoc();
+ if (getParser().ParseAbsoluteExpression(Size))
+ return true;
+
+ int64_t Pow2Alignment = 0;
+ SMLoc Pow2AlignmentLoc;
+ if (getLexer().is(AsmToken::Comma)) {
+ Lex();
+ Pow2AlignmentLoc = getLexer().getLoc();
+ if (getParser().ParseAbsoluteExpression(Pow2Alignment))
+ return true;
+ }
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.tbss' directive");
+
+ Lex();
+
+ if (Size < 0)
+ return Error(SizeLoc, "invalid '.tbss' directive size, can't be less than "
+ "zero");
+
+ // FIXME: Diagnose overflow.
+ if (Pow2Alignment < 0)
+ return Error(Pow2AlignmentLoc, "invalid '.tbss' alignment, can't be less "
+ "than zero");
+
+ if (!Sym->isUndefined())
+ return Error(IDLoc, "invalid symbol redefinition");
+
+ getStreamer().EmitTBSSSymbol(getContext().getMachOSection(
+ "__DATA", "__thread_bss",
+ MCSectionMachO::S_THREAD_LOCAL_ZEROFILL,
+ 0, SectionKind::getThreadBSS()),
+ Sym, Size, 1 << Pow2Alignment);
+
+ return false;
+}
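+
+// Illustrative only: '.tbss _tls_buf, 8, 3' creates the undefined symbol
+// _tls_buf in (__DATA,__thread_bss) with size 8 and alignment 1 << 3 == 8.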
+
+/// ParseDirectiveZerofill
+/// ::= .zerofill segname , sectname [, identifier , size_expression [
+/// , align_expression ]]
+bool DarwinAsmParser::ParseDirectiveZerofill(StringRef, SMLoc) {
+ StringRef Segment;
+ if (getParser().ParseIdentifier(Segment))
+ return TokError("expected segment name after '.zerofill' directive");
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in directive");
+ Lex();
+
+ StringRef Section;
+ if (getParser().ParseIdentifier(Section))
+ return TokError("expected section name after comma in '.zerofill' "
+ "directive");
+
+ // If this is the end of the line, all that was wanted was to create the
+ // section but with no symbol.
+ if (getLexer().is(AsmToken::EndOfStatement)) {
+ // Create the zerofill section but no symbol
+ getStreamer().EmitZerofill(getContext().getMachOSection(
+ Segment, Section, MCSectionMachO::S_ZEROFILL,
+ 0, SectionKind::getBSS()));
+ return false;
+ }
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in directive");
+ Lex();
+
+ SMLoc IDLoc = getLexer().getLoc();
+ StringRef IDStr;
+ if (getParser().ParseIdentifier(IDStr))
+ return TokError("expected identifier in directive");
+
+ // Handle the identifier as the key symbol.
+ MCSymbol *Sym = getContext().GetOrCreateSymbol(IDStr);
+
+ if (getLexer().isNot(AsmToken::Comma))
+ return TokError("unexpected token in directive");
+ Lex();
+
+ int64_t Size;
+ SMLoc SizeLoc = getLexer().getLoc();
+ if (getParser().ParseAbsoluteExpression(Size))
+ return true;
+
+ int64_t Pow2Alignment = 0;
+ SMLoc Pow2AlignmentLoc;
+ if (getLexer().is(AsmToken::Comma)) {
+ Lex();
+ Pow2AlignmentLoc = getLexer().getLoc();
+ if (getParser().ParseAbsoluteExpression(Pow2Alignment))
+ return true;
+ }
+
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '.zerofill' directive");
+
+ Lex();
+
+ if (Size < 0)
+ return Error(SizeLoc, "invalid '.zerofill' directive size, can't be less "
+ "than zero");
+
+ // NOTE: The alignment in the directive is a power of 2 value, the assembler
+ // may internally end up wanting an alignment in bytes.
+ // FIXME: Diagnose overflow.
+ if (Pow2Alignment < 0)
+ return Error(Pow2AlignmentLoc, "invalid '.zerofill' directive alignment, "
+ "can't be less than zero");
+
+ if (!Sym->isUndefined())
+ return Error(IDLoc, "invalid symbol redefinition");
+
+ // Create the zerofill Symbol with Size and Pow2Alignment
+ //
+ // FIXME: Arch specific.
+ getStreamer().EmitZerofill(getContext().getMachOSection(
+ Segment, Section, MCSectionMachO::S_ZEROFILL,
+ 0, SectionKind::getBSS()),
+ Sym, Size, 1 << Pow2Alignment);
+
+ return false;
+}
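+
+// Illustrative only: '.zerofill __DATA, __bss, _scratch, 16, 2' emits
+// _scratch into the (__DATA,__bss) zerofill section with size 16 and
+// alignment 1 << 2 == 4, while '.zerofill __DATA, __bss' alone just creates
+// the section.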
+
+namespace llvm {
+
+MCAsmParserExtension *createDarwinAsmParser() {
+ return new DarwinAsmParser;
+}
+
+}
diff --git a/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
new file mode 100644
index 0000000..7a54dd3
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCParser/ELFAsmParser.cpp
@@ -0,0 +1,68 @@
+//===- ELFAsmParser.cpp - ELF Assembly Parser -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCParser/MCAsmParserExtension.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+using namespace llvm;
+
+namespace {
+
+class ELFAsmParser : public MCAsmParserExtension {
+ bool ParseSectionSwitch(StringRef Section, unsigned Type,
+ unsigned Flags, SectionKind Kind);
+
+public:
+ ELFAsmParser() {}
+
+ virtual void Initialize(MCAsmParser &Parser) {
+ // Call the base implementation.
+ this->MCAsmParserExtension::Initialize(Parser);
+
+ Parser.AddDirectiveHandler(this, ".data", MCAsmParser::DirectiveHandler(
+ &ELFAsmParser::ParseSectionDirectiveData));
+ Parser.AddDirectiveHandler(this, ".text", MCAsmParser::DirectiveHandler(
+ &ELFAsmParser::ParseSectionDirectiveText));
+ }
+
+ bool ParseSectionDirectiveData(StringRef, SMLoc) {
+ return ParseSectionSwitch(".data", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_WRITE | MCSectionELF::SHF_ALLOC,
+ SectionKind::getDataRel());
+ }
+ bool ParseSectionDirectiveText(StringRef, SMLoc) {
+ return ParseSectionSwitch(".text", MCSectionELF::SHT_PROGBITS,
+ MCSectionELF::SHF_EXECINSTR |
+ MCSectionELF::SHF_ALLOC, SectionKind::getText());
+ }
+};
+
+}
+
+bool ELFAsmParser::ParseSectionSwitch(StringRef Section, unsigned Type,
+ unsigned Flags, SectionKind Kind) {
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in section switching directive");
+ Lex();
+
+ getStreamer().SwitchSection(getContext().getELFSection(
+ Section, Type, Flags, Kind));
+
+ return false;
+}
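+
+// Illustrative only: with the handlers registered in Initialize above, a bare
+// '.text' switches to an SHT_PROGBITS section flagged SHF_EXECINSTR|SHF_ALLOC,
+// and '.data' to one flagged SHF_WRITE|SHF_ALLOC.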
+
+namespace llvm {
+
+MCAsmParserExtension *createELFAsmParser() {
+ return new ELFAsmParser;
+}
+
+}
diff --git a/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp b/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp
index e5b2955..dceece7 100644
--- a/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp
+++ b/contrib/llvm/lib/MC/MCParser/MCAsmLexer.cpp
@@ -12,12 +12,16 @@
using namespace llvm;
-MCAsmLexer::MCAsmLexer() : CurTok(AsmToken::Error, StringRef()) {
+MCAsmLexer::MCAsmLexer() : CurTok(AsmToken::Error, StringRef()), TokStart(0) {
}
MCAsmLexer::~MCAsmLexer() {
}
+SMLoc MCAsmLexer::getLoc() const {
+ return SMLoc::getFromPointer(TokStart);
+}
+
SMLoc AsmToken::getLoc() const {
return SMLoc::getFromPointer(Str.data());
}
diff --git a/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp b/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp
index b8c2054..bee3064 100644
--- a/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp
+++ b/contrib/llvm/lib/MC/MCParser/MCAsmParser.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/Support/SourceMgr.h"
@@ -23,6 +24,11 @@ const AsmToken &MCAsmParser::getTok() {
return getLexer().getTok();
}
+bool MCAsmParser::TokError(const char *Msg) {
+ Error(getLexer().getLoc(), Msg);
+ return true;
+}
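+
+// Illustrative only: directive parsers use this to report an error at the
+// current token and unwind in one statement, e.g.
+//
+//   if (getLexer().isNot(AsmToken::Comma))
+//     return TokError("unexpected token in directive");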
+
bool MCAsmParser::ParseExpression(const MCExpr *&Res) {
SMLoc L;
return ParseExpression(Res, L);
diff --git a/contrib/llvm/lib/MC/MCParser/MCAsmParserExtension.cpp b/contrib/llvm/lib/MC/MCParser/MCAsmParserExtension.cpp
new file mode 100644
index 0000000..c30d306
--- /dev/null
+++ b/contrib/llvm/lib/MC/MCParser/MCAsmParserExtension.cpp
@@ -0,0 +1,21 @@
+//===-- MCAsmParserExtension.cpp - Asm Parser Hooks -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCParser/MCAsmParserExtension.h"
+using namespace llvm;
+
+MCAsmParserExtension::MCAsmParserExtension() {
+}
+
+MCAsmParserExtension::~MCAsmParserExtension() {
+}
+
+void MCAsmParserExtension::Initialize(MCAsmParser &Parser) {
+ this->Parser = &Parser;
+}
diff --git a/contrib/llvm/lib/MC/MCSectionCOFF.cpp b/contrib/llvm/lib/MC/MCSectionCOFF.cpp
index d57bb0c..eb53160 100644
--- a/contrib/llvm/lib/MC/MCSectionCOFF.cpp
+++ b/contrib/llvm/lib/MC/MCSectionCOFF.cpp
@@ -44,28 +44,28 @@ void MCSectionCOFF::PrintSwitchToSection(const MCAsmInfo &MAI,
OS << 'w';
else
OS << 'r';
- if (getCharacteristics() & MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE)
+ if (getCharacteristics() & COFF::IMAGE_SCN_MEM_DISCARDABLE)
OS << 'n';
OS << "\"\n";
- if (getCharacteristics() & MCSectionCOFF::IMAGE_SCN_LNK_COMDAT) {
+ if (getCharacteristics() & COFF::IMAGE_SCN_LNK_COMDAT) {
switch (Selection) {
- case IMAGE_COMDAT_SELECT_NODUPLICATES:
+ case COFF::IMAGE_COMDAT_SELECT_NODUPLICATES:
OS << "\t.linkonce one_only\n";
break;
- case IMAGE_COMDAT_SELECT_ANY:
+ case COFF::IMAGE_COMDAT_SELECT_ANY:
OS << "\t.linkonce discard\n";
break;
- case IMAGE_COMDAT_SELECT_SAME_SIZE:
+ case COFF::IMAGE_COMDAT_SELECT_SAME_SIZE:
OS << "\t.linkonce same_size\n";
break;
- case IMAGE_COMDAT_SELECT_EXACT_MATCH:
+ case COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH:
OS << "\t.linkonce same_contents\n";
break;
// NOTE: as of binutils 2.20, there is no way to specify select largest
// with the .linkonce directive. For now, we treat it as an invalid
// comdat selection value.
- case IMAGE_COMDAT_SELECT_LARGEST:
+ case COFF::IMAGE_COMDAT_SELECT_LARGEST:
// OS << "\t.linkonce largest\n";
// break;
default:
diff --git a/contrib/llvm/lib/MC/MachObjectWriter.cpp b/contrib/llvm/lib/MC/MachObjectWriter.cpp
index 3207e99..7ca0951 100644
--- a/contrib/llvm/lib/MC/MachObjectWriter.cpp
+++ b/contrib/llvm/lib/MC/MachObjectWriter.cpp
@@ -33,6 +33,7 @@ static unsigned getFixupKindLog2Size(unsigned Kind) {
default: llvm_unreachable("invalid fixup kind!");
case X86::reloc_pcrel_1byte:
case FK_Data_1: return 0;
+ case X86::reloc_pcrel_2byte:
case FK_Data_2: return 1;
case X86::reloc_pcrel_4byte:
case X86::reloc_riprel_4byte:
@@ -47,6 +48,7 @@ static bool isFixupKindPCRel(unsigned Kind) {
default:
return false;
case X86::reloc_pcrel_1byte:
+ case X86::reloc_pcrel_2byte:
case X86::reloc_pcrel_4byte:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
@@ -738,6 +740,51 @@ public:
Relocations[Fragment->getParent()].push_back(MRE);
}
+ void RecordTLVPRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ assert(Target.getSymA()->getKind() == MCSymbolRefExpr::VK_TLVP &&
+ !Is64Bit &&
+ "Should only be called with a 32-bit TLVP relocation!");
+
+ unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
+ uint32_t Value = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
+ unsigned IsPCRel = 0;
+
+ // Get the symbol data.
+ MCSymbolData *SD_A = &Asm.getSymbolData(Target.getSymA()->getSymbol());
+ unsigned Index = SD_A->getIndex();
+
+ // We're only going to have a second symbol in pic mode and it'll be a
+ // subtraction from the picbase. For 32-bit pic the addend is the difference
+ // between the picbase and the next address. For 32-bit static the addend
+ // is zero.
+ if (Target.getSymB()) {
+ // If this is a subtraction then we're pcrel.
+ uint32_t FixupAddress =
+ Layout.getFragmentAddress(Fragment) + Fixup.getOffset();
+ MCSymbolData *SD_B = &Asm.getSymbolData(Target.getSymB()->getSymbol());
+ IsPCRel = 1;
+ FixedValue = (FixupAddress - Layout.getSymbolAddress(SD_B) +
+ Target.getConstant());
+ FixedValue += 1 << Log2Size;
+ } else {
+ FixedValue = 0;
+ }
+
+ // struct relocation_info (8 bytes)
+ MachRelocationEntry MRE;
+ MRE.Word0 = Value;
+ MRE.Word1 = ((Index << 0) |
+ (IsPCRel << 24) |
+ (Log2Size << 25) |
+ (1 << 27) | // Extern
+ (RIT_TLV << 28)); // Type
+ Relocations[Fragment->getParent()].push_back(MRE);
+ }
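+
+ // Illustrative note (an assumption about typical input, not part of this
+ // change): 32-bit PIC code references a TLV as _var@TLVP minus a picbase
+ // label, so the SymB branch above marks the fixup pc-relative and biases
+ // FixedValue by 1 << Log2Size because the displacement is measured from the
+ // end of the fixup.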
+
void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
MCValue Target, uint64_t &FixedValue) {
@@ -749,6 +796,12 @@ public:
unsigned IsPCRel = isFixupKindPCRel(Fixup.getKind());
unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
+ // If this is a 32-bit TLVP reloc it's handled a bit differently.
+ if (Target.getSymA()->getKind() == MCSymbolRefExpr::VK_TLVP) {
+ RecordTLVPRelocation(Asm, Layout, Fragment, Fixup, Target, FixedValue);
+ return;
+ }
+
// If this is a difference or a defined symbol plus an offset, then we need
// a scattered relocation entry.
// Differences always require scattered relocations.
@@ -772,7 +825,6 @@ public:
// See <reloc.h>.
uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
- uint32_t Value = 0;
unsigned Index = 0;
unsigned IsExtern = 0;
unsigned Type = 0;
@@ -783,7 +835,6 @@ public:
// FIXME: Currently, these are never generated (see code below). I cannot
// find a case where they are actually emitted.
Type = RIT_Vanilla;
- Value = 0;
} else {
// Check whether we need an external or internal relocation.
if (doesSymbolRequireExternRelocation(SD)) {
@@ -794,11 +845,9 @@ public:
// undefined. This occurs with weak definitions, for example.
if (!SD->Symbol->isUndefined())
FixedValue -= Layout.getSymbolAddress(SD);
- Value = 0;
} else {
// The index is the section ordinal (1-based).
Index = SD->getFragment()->getParent()->getOrdinal() + 1;
- Value = Layout.getSymbolAddress(SD);
}
Type = RIT_Vanilla;
@@ -898,7 +947,7 @@ public:
const MCSymbol &Symbol = it->getSymbol();
// Ignore non-linker visible symbols.
- if (!Asm.isSymbolLinkerVisible(it))
+ if (!Asm.isSymbolLinkerVisible(it->getSymbol()))
continue;
if (!it->isExternal() && !Symbol.isUndefined())
@@ -934,7 +983,7 @@ public:
const MCSymbol &Symbol = it->getSymbol();
// Ignore non-linker visible symbols.
- if (!Asm.isSymbolLinkerVisible(it))
+ if (!Asm.isSymbolLinkerVisible(it->getSymbol()))
continue;
if (it->isExternal() || Symbol.isUndefined())
diff --git a/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp b/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp
new file mode 100644
index 0000000..6804766
--- /dev/null
+++ b/contrib/llvm/lib/MC/WinCOFFObjectWriter.cpp
@@ -0,0 +1,71 @@
+//===-- llvm/MC/WinCOFFObjectWriter.cpp -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of a Win32 COFF object file writer.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "WinCOFFObjectWriter"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCAsmLayout.h"
+using namespace llvm;
+
+namespace {
+
+ class WinCOFFObjectWriter : public MCObjectWriter {
+ public:
+ WinCOFFObjectWriter(raw_ostream &OS);
+
+ // MCObjectWriter interface implementation.
+
+ void ExecutePostLayoutBinding(MCAssembler &Asm);
+
+ void RecordRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup,
+ MCValue Target,
+ uint64_t &FixedValue);
+
+ void WriteObject(const MCAssembler &Asm, const MCAsmLayout &Layout);
+ };
+}
+
+WinCOFFObjectWriter::WinCOFFObjectWriter(raw_ostream &OS)
+ : MCObjectWriter(OS, true) {
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// MCObjectWriter interface implementations
+
+void WinCOFFObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm) {
+}
+
+void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup,
+ MCValue Target,
+ uint64_t &FixedValue) {
+}
+
+void WinCOFFObjectWriter::WriteObject(const MCAssembler &Asm,
+ const MCAsmLayout &Layout) {
+}
+
+//------------------------------------------------------------------------------
+// WinCOFFObjectWriter factory function
+
+namespace llvm {
+ MCObjectWriter *createWinCOFFObjectWriter(raw_ostream &OS) {
+ return new WinCOFFObjectWriter(OS);
+ }
+}
diff --git a/contrib/llvm/lib/MC/WinCOFFStreamer.cpp b/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
new file mode 100644
index 0000000..1030cdb
--- /dev/null
+++ b/contrib/llvm/lib/MC/WinCOFFStreamer.cpp
@@ -0,0 +1,198 @@
+//===-- llvm/MC/WinCOFFStreamer.cpp -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of a Win32 COFF object file streamer.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "WinCOFFStreamer"
+
+#include "llvm/MC/MCObjectStreamer.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCSectionCOFF.h"
+#include "llvm/Target/TargetAsmBackend.h"
+#include "llvm/Support/COFF.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define dbg_notimpl(x) \
+ do { dbgs() << "not implemented, " << __FUNCTION__ << " (" << x << ")"; \
+ abort(); } while (false)
+
+namespace {
+class WinCOFFStreamer : public MCObjectStreamer {
+public:
+ WinCOFFStreamer(MCContext &Context,
+ TargetAsmBackend &TAB,
+ MCCodeEmitter &CE,
+ raw_ostream &OS);
+
+ // MCStreamer interface
+
+ virtual void EmitLabel(MCSymbol *Symbol);
+ virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+ virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
+ virtual void EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute);
+ virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
+ virtual void BeginCOFFSymbolDef(MCSymbol const *Symbol);
+ virtual void EmitCOFFSymbolStorageClass(int StorageClass);
+ virtual void EmitCOFFSymbolType(int Type);
+ virtual void EndCOFFSymbolDef();
+ virtual void EmitELFSize(MCSymbol *Symbol, const MCExpr *Value);
+ virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment);
+ virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size);
+ virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
+ unsigned Size, unsigned ByteAlignment);
+ virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment);
+ virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
+ virtual void EmitValue(const MCExpr *Value, unsigned Size,
+ unsigned AddrSpace);
+ virtual void EmitGPRel32Value(const MCExpr *Value);
+ virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value,
+ unsigned ValueSize, unsigned MaxBytesToEmit);
+ virtual void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit);
+ virtual void EmitValueToOffset(const MCExpr *Offset, unsigned char Value);
+ virtual void EmitFileDirective(StringRef Filename);
+ virtual void EmitDwarfFileDirective(unsigned FileNo, StringRef Filename);
+ virtual void EmitInstruction(const MCInst &Instruction);
+ virtual void Finish();
+};
+} // end anonymous namespace.
+
+WinCOFFStreamer::WinCOFFStreamer(MCContext &Context,
+ TargetAsmBackend &TAB,
+ MCCodeEmitter &CE,
+ raw_ostream &OS)
+ : MCObjectStreamer(Context, TAB, OS, &CE) {
+}
+
+// MCStreamer interface
+
+void WinCOFFStreamer::EmitLabel(MCSymbol *Symbol) {
+}
+
+void WinCOFFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
+ dbg_notimpl("Flag = " << Flag);
+}
+
+void WinCOFFStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
+}
+
+void WinCOFFStreamer::EmitSymbolAttribute(MCSymbol *Symbol,
+ MCSymbolAttr Attribute) {
+}
+
+void WinCOFFStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
+ dbg_notimpl("Symbol = " << Symbol->getName() << ", DescValue = "<< DescValue);
+}
+
+void WinCOFFStreamer::BeginCOFFSymbolDef(MCSymbol const *Symbol) {
+}
+
+void WinCOFFStreamer::EmitCOFFSymbolStorageClass(int StorageClass) {
+}
+
+void WinCOFFStreamer::EmitCOFFSymbolType(int Type) {
+}
+
+void WinCOFFStreamer::EndCOFFSymbolDef() {
+}
+
+void WinCOFFStreamer::EmitELFSize(MCSymbol *Symbol, const MCExpr *Value) {
+ dbg_notimpl("Symbol = " << Symbol->getName() << ", Value = " << *Value);
+}
+
+void WinCOFFStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) {
+}
+
+void WinCOFFStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size) {
+}
+
+void WinCOFFStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
+ unsigned Size, unsigned ByteAlignment) {
+ MCSectionCOFF const *SectionCOFF =
+ static_cast<MCSectionCOFF const *>(Section);
+
+ dbg_notimpl("Section = " << SectionCOFF->getSectionName() << ", Symbol = " <<
+ Symbol->getName() << ", Size = " << Size << ", ByteAlignment = "
+ << ByteAlignment);
+}
+
+void WinCOFFStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment) {
+ MCSectionCOFF const *SectionCOFF =
+ static_cast<MCSectionCOFF const *>(Section);
+
+ dbg_notimpl("Section = " << SectionCOFF->getSectionName() << ", Symbol = " <<
+ Symbol->getName() << ", Size = " << Size << ", ByteAlignment = "
+ << ByteAlignment);
+}
+
+void WinCOFFStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
+}
+
+void WinCOFFStreamer::EmitValue(const MCExpr *Value, unsigned Size,
+ unsigned AddrSpace) {
+}
+
+void WinCOFFStreamer::EmitGPRel32Value(const MCExpr *Value) {
+ dbg_notimpl("Value = '" << *Value);
+}
+
+void WinCOFFStreamer::EmitValueToAlignment(unsigned ByteAlignment,
+ int64_t Value,
+ unsigned ValueSize,
+ unsigned MaxBytesToEmit) {
+}
+
+void WinCOFFStreamer::EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit) {
+}
+
+void WinCOFFStreamer::EmitValueToOffset(const MCExpr *Offset,
+ unsigned char Value) {
+ dbg_notimpl("Offset = '" << *Offset << "', Value = " << Value);
+}
+
+void WinCOFFStreamer::EmitFileDirective(StringRef Filename) {
+ // Ignore for now, linkers don't care, and proper debug
+ // info will be a much larger effort.
+}
+
+void WinCOFFStreamer::EmitDwarfFileDirective(unsigned FileNo,
+ StringRef Filename) {
+ dbg_notimpl("FileNo = " << FileNo << ", Filename = '" << Filename << "'");
+}
+
+void WinCOFFStreamer::EmitInstruction(const MCInst &Instruction) {
+}
+
+void WinCOFFStreamer::Finish() {
+ MCObjectStreamer::Finish();
+}
+
+namespace llvm {
+ MCStreamer *createWinCOFFStreamer(MCContext &Context,
+ TargetAsmBackend &TAB,
+ MCCodeEmitter &CE,
+ raw_ostream &OS) {
+ return new WinCOFFStreamer(Context, TAB, CE, OS);
+ }
+}
diff --git a/contrib/llvm/lib/Support/APFloat.cpp b/contrib/llvm/lib/Support/APFloat.cpp
index 485bf4d..2e78557 100644
--- a/contrib/llvm/lib/Support/APFloat.cpp
+++ b/contrib/llvm/lib/Support/APFloat.cpp
@@ -761,7 +761,7 @@ APFloat::APFloat(const fltSemantics &ourSemantics,
makeNaN();
}
-APFloat::APFloat(const fltSemantics &ourSemantics, const StringRef& text)
+APFloat::APFloat(const fltSemantics &ourSemantics, StringRef text)
{
assertArithmeticOK(ourSemantics);
initialize(&ourSemantics);
@@ -2185,8 +2185,7 @@ APFloat::convertFromZeroExtendedInteger(const integerPart *parts,
}
APFloat::opStatus
-APFloat::convertFromHexadecimalString(const StringRef &s,
- roundingMode rounding_mode)
+APFloat::convertFromHexadecimalString(StringRef s, roundingMode rounding_mode)
{
lostFraction lost_fraction = lfExactlyZero;
integerPart *significand;
@@ -2361,7 +2360,7 @@ APFloat::roundSignificandWithExponent(const integerPart *decSigParts,
}
APFloat::opStatus
-APFloat::convertFromDecimalString(const StringRef &str, roundingMode rounding_mode)
+APFloat::convertFromDecimalString(StringRef str, roundingMode rounding_mode)
{
decimalInfo D;
opStatus fs;
@@ -2471,7 +2470,7 @@ APFloat::convertFromDecimalString(const StringRef &str, roundingMode rounding_mo
}
APFloat::opStatus
-APFloat::convertFromString(const StringRef &str, roundingMode rounding_mode)
+APFloat::convertFromString(StringRef str, roundingMode rounding_mode)
{
assertArithmeticOK(*semantics);
assert(!str.empty() && "Invalid string length");
diff --git a/contrib/llvm/lib/Support/APInt.cpp b/contrib/llvm/lib/Support/APInt.cpp
index 1341d21..262fa42 100644
--- a/contrib/llvm/lib/Support/APInt.cpp
+++ b/contrib/llvm/lib/Support/APInt.cpp
@@ -102,7 +102,7 @@ APInt::APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[])
clearUnusedBits();
}
-APInt::APInt(unsigned numbits, const StringRef& Str, uint8_t radix)
+APInt::APInt(unsigned numbits, StringRef Str, uint8_t radix)
: BitWidth(numbits), VAL(0) {
assert(BitWidth && "Bitwidth too small");
fromString(numbits, Str, radix);
@@ -613,7 +613,7 @@ APInt& APInt::flip(unsigned bitPosition) {
return *this;
}
-unsigned APInt::getBitsNeeded(const StringRef& str, uint8_t radix) {
+unsigned APInt::getBitsNeeded(StringRef str, uint8_t radix) {
assert(!str.empty() && "Invalid string length");
assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) &&
"Radix should be 2, 8, 10, or 16!");
@@ -2046,7 +2046,7 @@ void APInt::udivrem(const APInt &LHS, const APInt &RHS,
divide(LHS, lhsWords, RHS, rhsWords, &Quotient, &Remainder);
}
-void APInt::fromString(unsigned numbits, const StringRef& str, uint8_t radix) {
+void APInt::fromString(unsigned numbits, StringRef str, uint8_t radix) {
// Check our assumptions here
assert(!str.empty() && "Invalid string length");
assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) &&
diff --git a/contrib/llvm/lib/Support/CMakeLists.txt b/contrib/llvm/lib/Support/CMakeLists.txt
index f1347f9..366d2f7 100644
--- a/contrib/llvm/lib/Support/CMakeLists.txt
+++ b/contrib/llvm/lib/Support/CMakeLists.txt
@@ -8,6 +8,7 @@ add_llvm_library(LLVMSupport
ConstantRange.cpp
Debug.cpp
DeltaAlgorithm.cpp
+ DAGDeltaAlgorithm.cpp
Dwarf.cpp
ErrorHandling.cpp
FileUtilities.cpp
diff --git a/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp b/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp
new file mode 100644
index 0000000..8145664
--- /dev/null
+++ b/contrib/llvm/lib/Support/DAGDeltaAlgorithm.cpp
@@ -0,0 +1,357 @@
+//===--- DAGDeltaAlgorithm.cpp - A DAG Minimization Algorithm --*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The algorithm we use attempts to exploit the dependency information by
+// minimizing top-down. We start by constructing an initial root set R, and
+// then iteratively:
+//
+// 1. Minimize the set R using the test predicate:
+// P'(S) = P(S union pred*(S))
+//
+// 2. Extend R to R' = R union pred(R).
+//
+// until a fixed point is reached.
+//
+// The idea is that we want to quickly prune entire portions of the graph, so we
+// try to find high-level nodes that can be eliminated with all of their
+// dependents.
+//
+// FIXME: The current algorithm doesn't actually provide a strong guarantee
+// about the minimality of the result. The problem is that after adding nodes to
+// the required set, we no longer consider them for elimination. For strictly
+// well formed predicates, this doesn't happen, but it commonly occurs in
+// practice when there are unmodelled dependencies. I believe we can resolve
+// this by allowing the required set to be minimized as well, but need more test
+// cases first.
+//
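+// A small worked example (illustrative, not part of this change): for changes
+// {1,2,3,4} with dependencies 1 -> 2, 2 -> 3 and 1 -> 4, the roots (changes
+// with no successors) are {3,4}. Testing the candidate set {3} really
+// evaluates P({1,2,3}), since pred*(3) = {1,2}; whatever subset of the roots
+// survives minimization joins the required set, and the next iteration works
+// on its immediate predecessors, e.g. pred({3}) = {2}.
+//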
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/DAGDeltaAlgorithm.h"
+#include "llvm/ADT/DeltaAlgorithm.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <map>
+using namespace llvm;
+
+namespace {
+
+class DAGDeltaAlgorithmImpl {
+ friend class DeltaActiveSetHelper;
+
+public:
+ typedef DAGDeltaAlgorithm::change_ty change_ty;
+ typedef DAGDeltaAlgorithm::changeset_ty changeset_ty;
+ typedef DAGDeltaAlgorithm::changesetlist_ty changesetlist_ty;
+ typedef DAGDeltaAlgorithm::edge_ty edge_ty;
+
+private:
+ typedef std::vector<change_ty>::iterator pred_iterator_ty;
+ typedef std::vector<change_ty>::iterator succ_iterator_ty;
+ typedef std::set<change_ty>::iterator pred_closure_iterator_ty;
+ typedef std::set<change_ty>::iterator succ_closure_iterator_ty;
+
+ DAGDeltaAlgorithm &DDA;
+
+ const changeset_ty &Changes;
+ const std::vector<edge_ty> &Dependencies;
+
+ std::vector<change_ty> Roots;
+
+ /// Cache of failed test results. Successful test results are never cached
+ /// since we always reduce following a success. We maintain an independent
+ /// cache from that used by the individual delta passes because we may get
+ /// hits across multiple individual delta invocations.
+ mutable std::set<changeset_ty> FailedTestsCache;
+
+ // FIXME: Gross.
+ std::map<change_ty, std::vector<change_ty> > Predecessors;
+ std::map<change_ty, std::vector<change_ty> > Successors;
+
+ std::map<change_ty, std::set<change_ty> > PredClosure;
+ std::map<change_ty, std::set<change_ty> > SuccClosure;
+
+private:
+ pred_iterator_ty pred_begin(change_ty Node) {
+ assert(Predecessors.count(Node) && "Invalid node!");
+ return Predecessors[Node].begin();
+ }
+ pred_iterator_ty pred_end(change_ty Node) {
+ assert(Predecessors.count(Node) && "Invalid node!");
+ return Predecessors[Node].end();
+ }
+
+ pred_closure_iterator_ty pred_closure_begin(change_ty Node) {
+ assert(PredClosure.count(Node) && "Invalid node!");
+ return PredClosure[Node].begin();
+ }
+ pred_closure_iterator_ty pred_closure_end(change_ty Node) {
+ assert(PredClosure.count(Node) && "Invalid node!");
+ return PredClosure[Node].end();
+ }
+
+ succ_iterator_ty succ_begin(change_ty Node) {
+ assert(Successors.count(Node) && "Invalid node!");
+ return Successors[Node].begin();
+ }
+ succ_iterator_ty succ_end(change_ty Node) {
+ assert(Successors.count(Node) && "Invalid node!");
+ return Successors[Node].end();
+ }
+
+ succ_closure_iterator_ty succ_closure_begin(change_ty Node) {
+ assert(SuccClosure.count(Node) && "Invalid node!");
+ return SuccClosure[Node].begin();
+ }
+ succ_closure_iterator_ty succ_closure_end(change_ty Node) {
+ assert(SuccClosure.count(Node) && "Invalid node!");
+ return SuccClosure[Node].end();
+ }
+
+ void UpdatedSearchState(const changeset_ty &Changes,
+ const changesetlist_ty &Sets,
+ const changeset_ty &Required) {
+ DDA.UpdatedSearchState(Changes, Sets, Required);
+ }
+
+ /// ExecuteOneTest - Execute a single test predicate on the change set \arg S.
+ bool ExecuteOneTest(const changeset_ty &S) {
+ // Check dependencies invariant.
+ DEBUG({
+ for (changeset_ty::const_iterator it = S.begin(),
+ ie = S.end(); it != ie; ++it)
+ for (succ_iterator_ty it2 = succ_begin(*it),
+ ie2 = succ_end(*it); it2 != ie2; ++it2)
+ assert(S.count(*it2) && "Attempt to run invalid changeset!");
+ });
+
+ return DDA.ExecuteOneTest(S);
+ }
+
+public:
+ DAGDeltaAlgorithmImpl(DAGDeltaAlgorithm &_DDA,
+ const changeset_ty &_Changes,
+ const std::vector<edge_ty> &_Dependencies);
+
+ changeset_ty Run();
+
+ /// GetTestResult - Get the test result for the active set \arg Changes with
+ /// \arg Required changes from the cache, executing the test if necessary.
+ ///
+ /// \param Changes - The set of active changes being minimized, which should
+ /// have their pred closure included in the test.
+ /// \param Required - The set of changes which have previously been
+ /// established to be required.
+ /// \return - The test result.
+ bool GetTestResult(const changeset_ty &Changes, const changeset_ty &Required);
+};
+
+/// Helper object for minimizing an active set of changes.
+class DeltaActiveSetHelper : public DeltaAlgorithm {
+ DAGDeltaAlgorithmImpl &DDAI;
+
+ const changeset_ty &Required;
+
+protected:
+ /// UpdatedSearchState - Callback used when the search state changes.
+ virtual void UpdatedSearchState(const changeset_ty &Changes,
+ const changesetlist_ty &Sets) {
+ DDAI.UpdatedSearchState(Changes, Sets, Required);
+ }
+
+ virtual bool ExecuteOneTest(const changeset_ty &S) {
+ return DDAI.GetTestResult(S, Required);
+ }
+
+public:
+ DeltaActiveSetHelper(DAGDeltaAlgorithmImpl &_DDAI,
+ const changeset_ty &_Required)
+ : DDAI(_DDAI), Required(_Required) {}
+};
+
+}
+
+DAGDeltaAlgorithmImpl::DAGDeltaAlgorithmImpl(DAGDeltaAlgorithm &_DDA,
+ const changeset_ty &_Changes,
+ const std::vector<edge_ty>
+ &_Dependencies)
+ : DDA(_DDA),
+ Changes(_Changes),
+ Dependencies(_Dependencies)
+{
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it) {
+ Predecessors.insert(std::make_pair(*it, std::vector<change_ty>()));
+ Successors.insert(std::make_pair(*it, std::vector<change_ty>()));
+ }
+ for (std::vector<edge_ty>::const_iterator it = Dependencies.begin(),
+ ie = Dependencies.end(); it != ie; ++it) {
+ Predecessors[it->second].push_back(it->first);
+ Successors[it->first].push_back(it->second);
+ }
+
+ // Compute the roots.
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it)
+ if (succ_begin(*it) == succ_end(*it))
+ Roots.push_back(*it);
+
+ // Pre-compute the closure of the successor relation.
+ std::vector<change_ty> Worklist(Roots.begin(), Roots.end());
+ while (!Worklist.empty()) {
+ change_ty Change = Worklist.back();
+ Worklist.pop_back();
+
+ std::set<change_ty> &ChangeSuccs = SuccClosure[Change];
+ for (pred_iterator_ty it = pred_begin(Change),
+ ie = pred_end(Change); it != ie; ++it) {
+ SuccClosure[*it].insert(Change);
+ SuccClosure[*it].insert(ChangeSuccs.begin(), ChangeSuccs.end());
+ Worklist.push_back(*it);
+ }
+ }
+
+ // Invert to form the predecessor closure map.
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it)
+ PredClosure.insert(std::make_pair(*it, std::set<change_ty>()));
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it)
+ for (succ_closure_iterator_ty it2 = succ_closure_begin(*it),
+ ie2 = succ_closure_end(*it); it2 != ie2; ++it2)
+ PredClosure[*it2].insert(*it);
+
+ // Dump useful debug info.
+ DEBUG({
+ llvm::errs() << "-- DAGDeltaAlgorithmImpl --\n";
+ llvm::errs() << "Changes: [";
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it) {
+ if (it != Changes.begin()) llvm::errs() << ", ";
+ llvm::errs() << *it;
+
+ if (succ_begin(*it) != succ_end(*it)) {
+ llvm::errs() << "(";
+ for (succ_iterator_ty it2 = succ_begin(*it),
+ ie2 = succ_end(*it); it2 != ie2; ++it2) {
+ if (it2 != succ_begin(*it)) llvm::errs() << ", ";
+ llvm::errs() << "->" << *it2;
+ }
+ llvm::errs() << ")";
+ }
+ }
+ llvm::errs() << "]\n";
+
+ llvm::errs() << "Roots: [";
+ for (std::vector<change_ty>::const_iterator it = Roots.begin(),
+ ie = Roots.end(); it != ie; ++it) {
+ if (it != Roots.begin()) llvm::errs() << ", ";
+ llvm::errs() << *it;
+ }
+ llvm::errs() << "]\n";
+
+ llvm::errs() << "Predecessor Closure:\n";
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it) {
+ llvm::errs() << format(" %-4d: [", *it);
+ for (pred_closure_iterator_ty it2 = pred_closure_begin(*it),
+ ie2 = pred_closure_end(*it); it2 != ie2; ++it2) {
+ if (it2 != pred_closure_begin(*it)) llvm::errs() << ", ";
+ llvm::errs() << *it2;
+ }
+ llvm::errs() << "]\n";
+ }
+
+ llvm::errs() << "Successor Closure:\n";
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it) {
+ llvm::errs() << format(" %-4d: [", *it);
+ for (succ_closure_iterator_ty it2 = succ_closure_begin(*it),
+ ie2 = succ_closure_end(*it); it2 != ie2; ++it2) {
+ if (it2 != succ_closure_begin(*it)) llvm::errs() << ", ";
+ llvm::errs() << *it2;
+ }
+ llvm::errs() << "]\n";
+ }
+
+ llvm::errs() << "\n\n";
+ });
+}
+
+bool DAGDeltaAlgorithmImpl::GetTestResult(const changeset_ty &Changes,
+ const changeset_ty &Required) {
+ changeset_ty Extended(Required);
+ Extended.insert(Changes.begin(), Changes.end());
+ for (changeset_ty::const_iterator it = Changes.begin(),
+ ie = Changes.end(); it != ie; ++it)
+ Extended.insert(pred_closure_begin(*it), pred_closure_end(*it));
+
+ if (FailedTestsCache.count(Extended))
+ return false;
+
+ bool Result = ExecuteOneTest(Extended);
+ if (!Result)
+ FailedTestsCache.insert(Extended);
+
+ return Result;
+}
+
+DAGDeltaAlgorithm::changeset_ty
+DAGDeltaAlgorithmImpl::Run() {
+ // The current set of changes we are minimizing, starting at the roots.
+ changeset_ty CurrentSet(Roots.begin(), Roots.end());
+
+ // The set of required changes.
+ changeset_ty Required;
+
+ // Iterate until the active set of changes is empty. Convergence is guaranteed
+ // assuming the input is a DAG.
+ //
+ // Invariant: CurrentSet intersect Required == {}
+ // Invariant: Required == (Required union succ*(Required))
+ while (!CurrentSet.empty()) {
+ DEBUG({
+ llvm::errs() << "DAG_DD - " << CurrentSet.size() << " active changes, "
+ << Required.size() << " required changes\n";
+ });
+
+ // Minimize the current set of changes.
+ DeltaActiveSetHelper Helper(*this, Required);
+ changeset_ty CurrentMinSet = Helper.Run(CurrentSet);
+
+ // Update the set of required changes. Since
+ // CurrentMinSet subset CurrentSet
+ // and after the last iteration,
+ // succ(CurrentSet) subset Required
+ // then
+ // succ(CurrentMinSet) subset Required
+ // and our invariant on Required is maintained.
+ Required.insert(CurrentMinSet.begin(), CurrentMinSet.end());
+
+ // Replace the current set with the predecessors of the minimized set of
+ // active changes.
+ CurrentSet.clear();
+ for (changeset_ty::const_iterator it = CurrentMinSet.begin(),
+ ie = CurrentMinSet.end(); it != ie; ++it)
+ CurrentSet.insert(pred_begin(*it), pred_end(*it));
+
+ // FIXME: We could enforce CurrentSet intersect Required == {} here if we
+ // wanted to protect against cyclic graphs.
+ }
+
+ return Required;
+}
+
+DAGDeltaAlgorithm::changeset_ty
+DAGDeltaAlgorithm::Run(const changeset_ty &Changes,
+ const std::vector<edge_ty> &Dependencies) {
+ return DAGDeltaAlgorithmImpl(*this, Changes, Dependencies).Run();
+}
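+
+// Minimal usage sketch (illustrative; the test predicate and helper are
+// assumptions, not part of this change): a client subclasses
+// DAGDeltaAlgorithm, implements ExecuteOneTest, and calls Run with the change
+// set plus the dependency edges:
+//
+//   struct MyDelta : public DAGDeltaAlgorithm {
+//     virtual bool ExecuteOneTest(const changeset_ty &S) {
+//       return StillFails(S); // hypothetical predicate
+//     }
+//   };
+//   // MyDelta().Run(Changes, Dependencies) yields a minimized failing set.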
diff --git a/contrib/llvm/lib/Support/DeltaAlgorithm.cpp b/contrib/llvm/lib/Support/DeltaAlgorithm.cpp
index d176548..9e52874 100644
--- a/contrib/llvm/lib/Support/DeltaAlgorithm.cpp
+++ b/contrib/llvm/lib/Support/DeltaAlgorithm.cpp
@@ -30,10 +30,10 @@ void DeltaAlgorithm::Split(const changeset_ty &S, changesetlist_ty &Res) {
// FIXME: This is really slow.
changeset_ty LHS, RHS;
- unsigned idx = 0;
+ unsigned idx = 0, N = S.size() / 2;
for (changeset_ty::const_iterator it = S.begin(),
ie = S.end(); it != ie; ++it, ++idx)
- ((idx & 1) ? LHS : RHS).insert(*it);
+ ((idx < N) ? LHS : RHS).insert(*it);
if (!LHS.empty())
Res.push_back(LHS);
if (!RHS.empty())
diff --git a/contrib/llvm/lib/Support/Dwarf.cpp b/contrib/llvm/lib/Support/Dwarf.cpp
index c19c2d6..96ce9d3 100644
--- a/contrib/llvm/lib/Support/Dwarf.cpp
+++ b/contrib/llvm/lib/Support/Dwarf.cpp
@@ -86,8 +86,8 @@ const char *llvm::dwarf::TagString(unsigned Tag) {
///
const char *llvm::dwarf::ChildrenString(unsigned Children) {
switch (Children) {
- case DW_CHILDREN_no: return "CHILDREN_no";
- case DW_CHILDREN_yes: return "CHILDREN_yes";
+ case DW_CHILDREN_no: return "DW_CHILDREN_no";
+ case DW_CHILDREN_yes: return "DW_CHILDREN_yes";
}
return 0;
}
@@ -207,27 +207,27 @@ const char *llvm::dwarf::AttributeString(unsigned Attribute) {
///
const char *llvm::dwarf::FormEncodingString(unsigned Encoding) {
switch (Encoding) {
- case DW_FORM_addr: return "FORM_addr";
- case DW_FORM_block2: return "FORM_block2";
- case DW_FORM_block4: return "FORM_block4";
- case DW_FORM_data2: return "FORM_data2";
- case DW_FORM_data4: return "FORM_data4";
- case DW_FORM_data8: return "FORM_data8";
- case DW_FORM_string: return "FORM_string";
- case DW_FORM_block: return "FORM_block";
- case DW_FORM_block1: return "FORM_block1";
- case DW_FORM_data1: return "FORM_data1";
- case DW_FORM_flag: return "FORM_flag";
- case DW_FORM_sdata: return "FORM_sdata";
- case DW_FORM_strp: return "FORM_strp";
- case DW_FORM_udata: return "FORM_udata";
- case DW_FORM_ref_addr: return "FORM_ref_addr";
- case DW_FORM_ref1: return "FORM_ref1";
- case DW_FORM_ref2: return "FORM_ref2";
- case DW_FORM_ref4: return "FORM_ref4";
- case DW_FORM_ref8: return "FORM_ref8";
- case DW_FORM_ref_udata: return "FORM_ref_udata";
- case DW_FORM_indirect: return "FORM_indirect";
+ case DW_FORM_addr: return "DW_FORM_addr";
+ case DW_FORM_block2: return "DW_FORM_block2";
+ case DW_FORM_block4: return "DW_FORM_block4";
+ case DW_FORM_data2: return "DW_FORM_data2";
+ case DW_FORM_data4: return "DW_FORM_data4";
+ case DW_FORM_data8: return "DW_FORM_data8";
+ case DW_FORM_string: return "DW_FORM_string";
+ case DW_FORM_block: return "DW_FORM_block";
+ case DW_FORM_block1: return "DW_FORM_block1";
+ case DW_FORM_data1: return "DW_FORM_data1";
+ case DW_FORM_flag: return "DW_FORM_flag";
+ case DW_FORM_sdata: return "DW_FORM_sdata";
+ case DW_FORM_strp: return "DW_FORM_strp";
+ case DW_FORM_udata: return "DW_FORM_udata";
+ case DW_FORM_ref_addr: return "DW_FORM_ref_addr";
+ case DW_FORM_ref1: return "DW_FORM_ref1";
+ case DW_FORM_ref2: return "DW_FORM_ref2";
+ case DW_FORM_ref4: return "DW_FORM_ref4";
+ case DW_FORM_ref8: return "DW_FORM_ref8";
+ case DW_FORM_ref_udata: return "DW_FORM_ref_udata";
+ case DW_FORM_indirect: return "DW_FORM_indirect";
}
return 0;
}
@@ -236,72 +236,159 @@ const char *llvm::dwarf::FormEncodingString(unsigned Encoding) {
/// encoding.
const char *llvm::dwarf::OperationEncodingString(unsigned Encoding) {
switch (Encoding) {
- case DW_OP_addr: return "OP_addr";
- case DW_OP_deref: return "OP_deref";
- case DW_OP_const1u: return "OP_const1u";
- case DW_OP_const1s: return "OP_const1s";
- case DW_OP_const2u: return "OP_const2u";
- case DW_OP_const2s: return "OP_const2s";
- case DW_OP_const4u: return "OP_const4u";
- case DW_OP_const4s: return "OP_const4s";
- case DW_OP_const8u: return "OP_const8u";
- case DW_OP_const8s: return "OP_const8s";
- case DW_OP_constu: return "OP_constu";
- case DW_OP_consts: return "OP_consts";
- case DW_OP_dup: return "OP_dup";
- case DW_OP_drop: return "OP_drop";
- case DW_OP_over: return "OP_over";
- case DW_OP_pick: return "OP_pick";
- case DW_OP_swap: return "OP_swap";
- case DW_OP_rot: return "OP_rot";
- case DW_OP_xderef: return "OP_xderef";
- case DW_OP_abs: return "OP_abs";
- case DW_OP_and: return "OP_and";
- case DW_OP_div: return "OP_div";
- case DW_OP_minus: return "OP_minus";
- case DW_OP_mod: return "OP_mod";
- case DW_OP_mul: return "OP_mul";
- case DW_OP_neg: return "OP_neg";
- case DW_OP_not: return "OP_not";
- case DW_OP_or: return "OP_or";
- case DW_OP_plus: return "OP_plus";
- case DW_OP_plus_uconst: return "OP_plus_uconst";
- case DW_OP_shl: return "OP_shl";
- case DW_OP_shr: return "OP_shr";
- case DW_OP_shra: return "OP_shra";
- case DW_OP_xor: return "OP_xor";
- case DW_OP_skip: return "OP_skip";
- case DW_OP_bra: return "OP_bra";
- case DW_OP_eq: return "OP_eq";
- case DW_OP_ge: return "OP_ge";
- case DW_OP_gt: return "OP_gt";
- case DW_OP_le: return "OP_le";
- case DW_OP_lt: return "OP_lt";
- case DW_OP_ne: return "OP_ne";
- case DW_OP_lit0: return "OP_lit0";
- case DW_OP_lit1: return "OP_lit1";
- case DW_OP_lit31: return "OP_lit31";
- case DW_OP_reg0: return "OP_reg0";
- case DW_OP_reg1: return "OP_reg1";
- case DW_OP_reg31: return "OP_reg31";
- case DW_OP_breg0: return "OP_breg0";
- case DW_OP_breg1: return "OP_breg1";
- case DW_OP_breg31: return "OP_breg31";
- case DW_OP_regx: return "OP_regx";
- case DW_OP_fbreg: return "OP_fbreg";
- case DW_OP_bregx: return "OP_bregx";
- case DW_OP_piece: return "OP_piece";
- case DW_OP_deref_size: return "OP_deref_size";
- case DW_OP_xderef_size: return "OP_xderef_size";
- case DW_OP_nop: return "OP_nop";
- case DW_OP_push_object_address: return "OP_push_object_address";
- case DW_OP_call2: return "OP_call2";
- case DW_OP_call4: return "OP_call4";
- case DW_OP_call_ref: return "OP_call_ref";
- case DW_OP_form_tls_address: return "OP_form_tls_address";
- case DW_OP_call_frame_cfa: return "OP_call_frame_cfa";
- case DW_OP_lo_user: return "OP_lo_user";
- case DW_OP_hi_user: return "OP_hi_user";
+ case DW_OP_addr: return "DW_OP_addr";
+ case DW_OP_deref: return "DW_OP_deref";
+ case DW_OP_const1u: return "DW_OP_const1u";
+ case DW_OP_const1s: return "DW_OP_const1s";
+ case DW_OP_const2u: return "DW_OP_const2u";
+ case DW_OP_const2s: return "DW_OP_const2s";
+ case DW_OP_const4u: return "DW_OP_const4u";
+ case DW_OP_const4s: return "DW_OP_const4s";
+ case DW_OP_const8u: return "DW_OP_const8u";
+ case DW_OP_const8s: return "DW_OP_const8s";
+ case DW_OP_constu: return "DW_OP_constu";
+ case DW_OP_consts: return "DW_OP_consts";
+ case DW_OP_dup: return "DW_OP_dup";
+ case DW_OP_drop: return "DW_OP_drop";
+ case DW_OP_over: return "DW_OP_over";
+ case DW_OP_pick: return "DW_OP_pick";
+ case DW_OP_swap: return "DW_OP_swap";
+ case DW_OP_rot: return "DW_OP_rot";
+ case DW_OP_xderef: return "DW_OP_xderef";
+ case DW_OP_abs: return "DW_OP_abs";
+ case DW_OP_and: return "DW_OP_and";
+ case DW_OP_div: return "DW_OP_div";
+ case DW_OP_minus: return "DW_OP_minus";
+ case DW_OP_mod: return "DW_OP_mod";
+ case DW_OP_mul: return "DW_OP_mul";
+ case DW_OP_neg: return "DW_OP_neg";
+ case DW_OP_not: return "DW_OP_not";
+ case DW_OP_or: return "DW_OP_or";
+ case DW_OP_plus: return "DW_OP_plus";
+ case DW_OP_plus_uconst: return "DW_OP_plus_uconst";
+ case DW_OP_shl: return "DW_OP_shl";
+ case DW_OP_shr: return "DW_OP_shr";
+ case DW_OP_shra: return "DW_OP_shra";
+ case DW_OP_xor: return "DW_OP_xor";
+ case DW_OP_skip: return "DW_OP_skip";
+ case DW_OP_bra: return "DW_OP_bra";
+ case DW_OP_eq: return "DW_OP_eq";
+ case DW_OP_ge: return "DW_OP_ge";
+ case DW_OP_gt: return "DW_OP_gt";
+ case DW_OP_le: return "DW_OP_le";
+ case DW_OP_lt: return "DW_OP_lt";
+ case DW_OP_ne: return "DW_OP_ne";
+ case DW_OP_lit0: return "DW_OP_lit0";
+ case DW_OP_lit1: return "DW_OP_lit1";
+ case DW_OP_lit2: return "DW_OP_lit2";
+ case DW_OP_lit3: return "DW_OP_lit3";
+ case DW_OP_lit4: return "DW_OP_lit4";
+ case DW_OP_lit5: return "DW_OP_lit5";
+ case DW_OP_lit6: return "DW_OP_lit6";
+ case DW_OP_lit7: return "DW_OP_lit7";
+ case DW_OP_lit8: return "DW_OP_lit8";
+ case DW_OP_lit9: return "DW_OP_lit9";
+ case DW_OP_lit10: return "DW_OP_lit10";
+ case DW_OP_lit11: return "DW_OP_lit11";
+ case DW_OP_lit12: return "DW_OP_lit12";
+ case DW_OP_lit13: return "DW_OP_lit13";
+ case DW_OP_lit14: return "DW_OP_lit14";
+ case DW_OP_lit15: return "DW_OP_lit15";
+ case DW_OP_lit16: return "DW_OP_lit16";
+ case DW_OP_lit17: return "DW_OP_lit17";
+ case DW_OP_lit18: return "DW_OP_lit18";
+ case DW_OP_lit19: return "DW_OP_lit19";
+ case DW_OP_lit20: return "DW_OP_lit20";
+ case DW_OP_lit21: return "DW_OP_lit21";
+ case DW_OP_lit22: return "DW_OP_lit22";
+ case DW_OP_lit23: return "DW_OP_lit23";
+ case DW_OP_lit24: return "DW_OP_lit24";
+ case DW_OP_lit25: return "DW_OP_lit25";
+ case DW_OP_lit26: return "DW_OP_lit26";
+ case DW_OP_lit27: return "DW_OP_lit27";
+ case DW_OP_lit28: return "DW_OP_lit28";
+ case DW_OP_lit29: return "DW_OP_lit29";
+ case DW_OP_lit30: return "DW_OP_lit30";
+ case DW_OP_lit31: return "DW_OP_lit31";
+ case DW_OP_reg0: return "DW_OP_reg0";
+ case DW_OP_reg1: return "DW_OP_reg1";
+ case DW_OP_reg2: return "DW_OP_reg2";
+ case DW_OP_reg3: return "DW_OP_reg3";
+ case DW_OP_reg4: return "DW_OP_reg4";
+ case DW_OP_reg5: return "DW_OP_reg5";
+ case DW_OP_reg6: return "DW_OP_reg6";
+ case DW_OP_reg7: return "DW_OP_reg7";
+ case DW_OP_reg8: return "DW_OP_reg8";
+ case DW_OP_reg9: return "DW_OP_reg9";
+ case DW_OP_reg10: return "DW_OP_reg10";
+ case DW_OP_reg11: return "DW_OP_reg11";
+ case DW_OP_reg12: return "DW_OP_reg12";
+ case DW_OP_reg13: return "DW_OP_reg13";
+ case DW_OP_reg14: return "DW_OP_reg14";
+ case DW_OP_reg15: return "DW_OP_reg15";
+ case DW_OP_reg16: return "DW_OP_reg16";
+ case DW_OP_reg17: return "DW_OP_reg17";
+ case DW_OP_reg18: return "DW_OP_reg18";
+ case DW_OP_reg19: return "DW_OP_reg19";
+ case DW_OP_reg20: return "DW_OP_reg20";
+ case DW_OP_reg21: return "DW_OP_reg21";
+ case DW_OP_reg22: return "DW_OP_reg22";
+ case DW_OP_reg23: return "DW_OP_reg23";
+ case DW_OP_reg24: return "DW_OP_reg24";
+ case DW_OP_reg25: return "DW_OP_reg25";
+ case DW_OP_reg26: return "DW_OP_reg26";
+ case DW_OP_reg27: return "DW_OP_reg27";
+ case DW_OP_reg28: return "DW_OP_reg28";
+ case DW_OP_reg29: return "DW_OP_reg29";
+ case DW_OP_reg30: return "DW_OP_reg30";
+ case DW_OP_reg31: return "DW_OP_reg31";
+ case DW_OP_breg0: return "DW_OP_breg0";
+ case DW_OP_breg1: return "DW_OP_breg1";
+ case DW_OP_breg2: return "DW_OP_breg2";
+ case DW_OP_breg3: return "DW_OP_breg3";
+ case DW_OP_breg4: return "DW_OP_breg4";
+ case DW_OP_breg5: return "DW_OP_breg5";
+ case DW_OP_breg6: return "DW_OP_breg6";
+ case DW_OP_breg7: return "DW_OP_breg7";
+ case DW_OP_breg8: return "DW_OP_breg8";
+ case DW_OP_breg9: return "DW_OP_breg9";
+ case DW_OP_breg10: return "DW_OP_breg10";
+ case DW_OP_breg11: return "DW_OP_breg11";
+ case DW_OP_breg12: return "DW_OP_breg12";
+ case DW_OP_breg13: return "DW_OP_breg13";
+ case DW_OP_breg14: return "DW_OP_breg14";
+ case DW_OP_breg15: return "DW_OP_breg15";
+ case DW_OP_breg16: return "DW_OP_breg16";
+ case DW_OP_breg17: return "DW_OP_breg17";
+ case DW_OP_breg18: return "DW_OP_breg18";
+ case DW_OP_breg19: return "DW_OP_breg19";
+ case DW_OP_breg20: return "DW_OP_breg20";
+ case DW_OP_breg21: return "DW_OP_breg21";
+ case DW_OP_breg22: return "DW_OP_breg22";
+ case DW_OP_breg23: return "DW_OP_breg23";
+ case DW_OP_breg24: return "DW_OP_breg24";
+ case DW_OP_breg25: return "DW_OP_breg25";
+ case DW_OP_breg26: return "DW_OP_breg26";
+ case DW_OP_breg27: return "DW_OP_breg27";
+ case DW_OP_breg28: return "DW_OP_breg28";
+ case DW_OP_breg29: return "DW_OP_breg29";
+ case DW_OP_breg30: return "DW_OP_breg30";
+ case DW_OP_breg31: return "DW_OP_breg31";
+ case DW_OP_regx: return "DW_OP_regx";
+ case DW_OP_fbreg: return "DW_OP_fbreg";
+ case DW_OP_bregx: return "DW_OP_bregx";
+ case DW_OP_piece: return "DW_OP_piece";
+ case DW_OP_deref_size: return "DW_OP_deref_size";
+ case DW_OP_xderef_size: return "DW_OP_xderef_size";
+ case DW_OP_nop: return "DW_OP_nop";
+ case DW_OP_push_object_address: return "DW_OP_push_object_address";
+ case DW_OP_call2: return "DW_OP_call2";
+ case DW_OP_call4: return "DW_OP_call4";
+ case DW_OP_call_ref: return "DW_OP_call_ref";
+ case DW_OP_form_tls_address: return "DW_OP_form_tls_address";
+ case DW_OP_call_frame_cfa: return "DW_OP_call_frame_cfa";
+ case DW_OP_lo_user: return "DW_OP_lo_user";
+ case DW_OP_hi_user: return "DW_OP_hi_user";
}
return 0;
}
@@ -310,23 +397,23 @@ const char *llvm::dwarf::OperationEncodingString(unsigned Encoding) {
/// encoding.
const char *llvm::dwarf::AttributeEncodingString(unsigned Encoding) {
switch (Encoding) {
- case DW_ATE_address: return "ATE_address";
- case DW_ATE_boolean: return "ATE_boolean";
- case DW_ATE_complex_float: return "ATE_complex_float";
- case DW_ATE_float: return "ATE_float";
- case DW_ATE_signed: return "ATE_signed";
- case DW_ATE_signed_char: return "ATE_signed_char";
- case DW_ATE_unsigned: return "ATE_unsigned";
- case DW_ATE_unsigned_char: return "ATE_unsigned_char";
- case DW_ATE_imaginary_float: return "ATE_imaginary_float";
- case DW_ATE_packed_decimal: return "ATE_packed_decimal";
- case DW_ATE_numeric_string: return "ATE_numeric_string";
- case DW_ATE_edited: return "ATE_edited";
- case DW_ATE_signed_fixed: return "ATE_signed_fixed";
- case DW_ATE_unsigned_fixed: return "ATE_unsigned_fixed";
- case DW_ATE_decimal_float: return "ATE_decimal_float";
- case DW_ATE_lo_user: return "ATE_lo_user";
- case DW_ATE_hi_user: return "ATE_hi_user";
+ case DW_ATE_address: return "DW_ATE_address";
+ case DW_ATE_boolean: return "DW_ATE_boolean";
+ case DW_ATE_complex_float: return "DW_ATE_complex_float";
+ case DW_ATE_float: return "DW_ATE_float";
+ case DW_ATE_signed: return "DW_ATE_signed";
+ case DW_ATE_signed_char: return "DW_ATE_signed_char";
+ case DW_ATE_unsigned: return "DW_ATE_unsigned";
+ case DW_ATE_unsigned_char: return "DW_ATE_unsigned_char";
+ case DW_ATE_imaginary_float: return "DW_ATE_imaginary_float";
+ case DW_ATE_packed_decimal: return "DW_ATE_packed_decimal";
+ case DW_ATE_numeric_string: return "DW_ATE_numeric_string";
+ case DW_ATE_edited: return "DW_ATE_edited";
+ case DW_ATE_signed_fixed: return "DW_ATE_signed_fixed";
+ case DW_ATE_unsigned_fixed: return "DW_ATE_unsigned_fixed";
+ case DW_ATE_decimal_float: return "DW_ATE_decimal_float";
+ case DW_ATE_lo_user: return "DW_ATE_lo_user";
+ case DW_ATE_hi_user: return "DW_ATE_hi_user";
}
return 0;
}
@@ -335,11 +422,11 @@ const char *llvm::dwarf::AttributeEncodingString(unsigned Encoding) {
/// attribute.
const char *llvm::dwarf::DecimalSignString(unsigned Sign) {
switch (Sign) {
- case DW_DS_unsigned: return "DS_unsigned";
- case DW_DS_leading_overpunch: return "DS_leading_overpunch";
- case DW_DS_trailing_overpunch: return "DS_trailing_overpunch";
- case DW_DS_leading_separate: return "DS_leading_separate";
- case DW_DS_trailing_separate: return "DS_trailing_separate";
+ case DW_DS_unsigned: return "DW_DS_unsigned";
+ case DW_DS_leading_overpunch: return "DW_DS_leading_overpunch";
+ case DW_DS_trailing_overpunch: return "DW_DS_trailing_overpunch";
+ case DW_DS_leading_separate: return "DW_DS_leading_separate";
+ case DW_DS_trailing_separate: return "DW_DS_trailing_separate";
}
return 0;
}
@@ -348,11 +435,11 @@ const char *llvm::dwarf::DecimalSignString(unsigned Sign) {
///
const char *llvm::dwarf::EndianityString(unsigned Endian) {
switch (Endian) {
- case DW_END_default: return "END_default";
- case DW_END_big: return "END_big";
- case DW_END_little: return "END_little";
- case DW_END_lo_user: return "END_lo_user";
- case DW_END_hi_user: return "END_hi_user";
+ case DW_END_default: return "DW_END_default";
+ case DW_END_big: return "DW_END_big";
+ case DW_END_little: return "DW_END_little";
+ case DW_END_lo_user: return "DW_END_lo_user";
+ case DW_END_hi_user: return "DW_END_hi_user";
}
return 0;
}
@@ -362,9 +449,9 @@ const char *llvm::dwarf::EndianityString(unsigned Endian) {
const char *llvm::dwarf::AccessibilityString(unsigned Access) {
switch (Access) {
// Accessibility codes
- case DW_ACCESS_public: return "ACCESS_public";
- case DW_ACCESS_protected: return "ACCESS_protected";
- case DW_ACCESS_private: return "ACCESS_private";
+ case DW_ACCESS_public: return "DW_ACCESS_public";
+ case DW_ACCESS_protected: return "DW_ACCESS_protected";
+ case DW_ACCESS_private: return "DW_ACCESS_private";
}
return 0;
}
@@ -373,9 +460,9 @@ const char *llvm::dwarf::AccessibilityString(unsigned Access) {
///
const char *llvm::dwarf::VisibilityString(unsigned Visibility) {
switch (Visibility) {
- case DW_VIS_local: return "VIS_local";
- case DW_VIS_exported: return "VIS_exported";
- case DW_VIS_qualified: return "VIS_qualified";
+ case DW_VIS_local: return "DW_VIS_local";
+ case DW_VIS_exported: return "DW_VIS_exported";
+ case DW_VIS_qualified: return "DW_VIS_qualified";
}
return 0;
}
@@ -384,9 +471,9 @@ const char *llvm::dwarf::VisibilityString(unsigned Visibility) {
///
const char *llvm::dwarf::VirtualityString(unsigned Virtuality) {
switch (Virtuality) {
- case DW_VIRTUALITY_none: return "VIRTUALITY_none";
- case DW_VIRTUALITY_virtual: return "VIRTUALITY_virtual";
- case DW_VIRTUALITY_pure_virtual: return "VIRTUALITY_pure_virtual";
+ case DW_VIRTUALITY_none: return "DW_VIRTUALITY_none";
+ case DW_VIRTUALITY_virtual: return "DW_VIRTUALITY_virtual";
+ case DW_VIRTUALITY_pure_virtual: return "DW_VIRTUALITY_pure_virtual";
}
return 0;
}
@@ -395,27 +482,27 @@ const char *llvm::dwarf::VirtualityString(unsigned Virtuality) {
///
const char *llvm::dwarf::LanguageString(unsigned Language) {
switch (Language) {
- case DW_LANG_C89: return "LANG_C89";
- case DW_LANG_C: return "LANG_C";
- case DW_LANG_Ada83: return "LANG_Ada83";
- case DW_LANG_C_plus_plus: return "LANG_C_plus_plus";
- case DW_LANG_Cobol74: return "LANG_Cobol74";
- case DW_LANG_Cobol85: return "LANG_Cobol85";
- case DW_LANG_Fortran77: return "LANG_Fortran77";
- case DW_LANG_Fortran90: return "LANG_Fortran90";
- case DW_LANG_Pascal83: return "LANG_Pascal83";
- case DW_LANG_Modula2: return "LANG_Modula2";
- case DW_LANG_Java: return "LANG_Java";
- case DW_LANG_C99: return "LANG_C99";
- case DW_LANG_Ada95: return "LANG_Ada95";
- case DW_LANG_Fortran95: return "LANG_Fortran95";
- case DW_LANG_PLI: return "LANG_PLI";
- case DW_LANG_ObjC: return "LANG_ObjC";
- case DW_LANG_ObjC_plus_plus: return "LANG_ObjC_plus_plus";
- case DW_LANG_UPC: return "LANG_UPC";
- case DW_LANG_D: return "LANG_D";
- case DW_LANG_lo_user: return "LANG_lo_user";
- case DW_LANG_hi_user: return "LANG_hi_user";
+ case DW_LANG_C89: return "DW_LANG_C89";
+ case DW_LANG_C: return "DW_LANG_C";
+ case DW_LANG_Ada83: return "DW_LANG_Ada83";
+ case DW_LANG_C_plus_plus: return "DW_LANG_C_plus_plus";
+ case DW_LANG_Cobol74: return "DW_LANG_Cobol74";
+ case DW_LANG_Cobol85: return "DW_LANG_Cobol85";
+ case DW_LANG_Fortran77: return "DW_LANG_Fortran77";
+ case DW_LANG_Fortran90: return "DW_LANG_Fortran90";
+ case DW_LANG_Pascal83: return "DW_LANG_Pascal83";
+ case DW_LANG_Modula2: return "DW_LANG_Modula2";
+ case DW_LANG_Java: return "DW_LANG_Java";
+ case DW_LANG_C99: return "DW_LANG_C99";
+ case DW_LANG_Ada95: return "DW_LANG_Ada95";
+ case DW_LANG_Fortran95: return "DW_LANG_Fortran95";
+ case DW_LANG_PLI: return "DW_LANG_PLI";
+ case DW_LANG_ObjC: return "DW_LANG_ObjC";
+ case DW_LANG_ObjC_plus_plus: return "DW_LANG_ObjC_plus_plus";
+ case DW_LANG_UPC: return "DW_LANG_UPC";
+ case DW_LANG_D: return "DW_LANG_D";
+ case DW_LANG_lo_user: return "DW_LANG_lo_user";
+ case DW_LANG_hi_user: return "DW_LANG_hi_user";
}
return 0;
}
@@ -424,10 +511,10 @@ const char *llvm::dwarf::LanguageString(unsigned Language) {
///
const char *llvm::dwarf::CaseString(unsigned Case) {
switch (Case) {
- case DW_ID_case_sensitive: return "ID_case_sensitive";
- case DW_ID_up_case: return "ID_up_case";
- case DW_ID_down_case: return "ID_down_case";
- case DW_ID_case_insensitive: return "ID_case_insensitive";
+ case DW_ID_case_sensitive: return "DW_ID_case_sensitive";
+ case DW_ID_up_case: return "DW_ID_up_case";
+ case DW_ID_down_case: return "DW_ID_down_case";
+ case DW_ID_case_insensitive: return "DW_ID_case_insensitive";
}
return 0;
}
@@ -436,11 +523,11 @@ const char *llvm::dwarf::CaseString(unsigned Case) {
///
const char *llvm::dwarf::ConventionString(unsigned Convention) {
switch (Convention) {
- case DW_CC_normal: return "CC_normal";
- case DW_CC_program: return "CC_program";
- case DW_CC_nocall: return "CC_nocall";
- case DW_CC_lo_user: return "CC_lo_user";
- case DW_CC_hi_user: return "CC_hi_user";
+ case DW_CC_normal: return "DW_CC_normal";
+ case DW_CC_program: return "DW_CC_program";
+ case DW_CC_nocall: return "DW_CC_nocall";
+ case DW_CC_lo_user: return "DW_CC_lo_user";
+ case DW_CC_hi_user: return "DW_CC_hi_user";
}
return 0;
}
@@ -449,10 +536,10 @@ const char *llvm::dwarf::ConventionString(unsigned Convention) {
///
const char *llvm::dwarf::InlineCodeString(unsigned Code) {
switch (Code) {
- case DW_INL_not_inlined: return "INL_not_inlined";
- case DW_INL_inlined: return "INL_inlined";
- case DW_INL_declared_not_inlined: return "INL_declared_not_inlined";
- case DW_INL_declared_inlined: return "INL_declared_inlined";
+ case DW_INL_not_inlined: return "DW_INL_not_inlined";
+ case DW_INL_inlined: return "DW_INL_inlined";
+ case DW_INL_declared_not_inlined: return "DW_INL_declared_not_inlined";
+ case DW_INL_declared_inlined: return "DW_INL_declared_inlined";
}
return 0;
}
@@ -461,8 +548,8 @@ const char *llvm::dwarf::InlineCodeString(unsigned Code) {
///
const char *llvm::dwarf::ArrayOrderString(unsigned Order) {
switch (Order) {
- case DW_ORD_row_major: return "ORD_row_major";
- case DW_ORD_col_major: return "ORD_col_major";
+ case DW_ORD_row_major: return "DW_ORD_row_major";
+ case DW_ORD_col_major: return "DW_ORD_col_major";
}
return 0;
}
@@ -471,8 +558,8 @@ const char *llvm::dwarf::ArrayOrderString(unsigned Order) {
/// descriptor.
const char *llvm::dwarf::DiscriminantString(unsigned Discriminant) {
switch (Discriminant) {
- case DW_DSC_label: return "DSC_label";
- case DW_DSC_range: return "DSC_range";
+ case DW_DSC_label: return "DW_DSC_label";
+ case DW_DSC_range: return "DW_DSC_range";
}
return 0;
}
@@ -481,18 +568,18 @@ const char *llvm::dwarf::DiscriminantString(unsigned Discriminant) {
///
const char *llvm::dwarf::LNStandardString(unsigned Standard) {
switch (Standard) {
- case DW_LNS_copy: return "LNS_copy";
- case DW_LNS_advance_pc: return "LNS_advance_pc";
- case DW_LNS_advance_line: return "LNS_advance_line";
- case DW_LNS_set_file: return "LNS_set_file";
- case DW_LNS_set_column: return "LNS_set_column";
- case DW_LNS_negate_stmt: return "LNS_negate_stmt";
- case DW_LNS_set_basic_block: return "LNS_set_basic_block";
- case DW_LNS_const_add_pc: return "LNS_const_add_pc";
- case DW_LNS_fixed_advance_pc: return "LNS_fixed_advance_pc";
- case DW_LNS_set_prologue_end: return "LNS_set_prologue_end";
- case DW_LNS_set_epilogue_begin: return "LNS_set_epilogue_begin";
- case DW_LNS_set_isa: return "LNS_set_isa";
+ case DW_LNS_copy: return "DW_LNS_copy";
+ case DW_LNS_advance_pc: return "DW_LNS_advance_pc";
+ case DW_LNS_advance_line: return "DW_LNS_advance_line";
+ case DW_LNS_set_file: return "DW_LNS_set_file";
+ case DW_LNS_set_column: return "DW_LNS_set_column";
+ case DW_LNS_negate_stmt: return "DW_LNS_negate_stmt";
+ case DW_LNS_set_basic_block: return "DW_LNS_set_basic_block";
+ case DW_LNS_const_add_pc: return "DW_LNS_const_add_pc";
+ case DW_LNS_fixed_advance_pc: return "DW_LNS_fixed_advance_pc";
+ case DW_LNS_set_prologue_end: return "DW_LNS_set_prologue_end";
+ case DW_LNS_set_epilogue_begin: return "DW_LNS_set_epilogue_begin";
+ case DW_LNS_set_isa: return "DW_LNS_set_isa";
}
return 0;
}
@@ -502,11 +589,11 @@ const char *llvm::dwarf::LNStandardString(unsigned Standard) {
const char *llvm::dwarf::LNExtendedString(unsigned Encoding) {
switch (Encoding) {
// Line Number Extended Opcode Encodings
- case DW_LNE_end_sequence: return "LNE_end_sequence";
- case DW_LNE_set_address: return "LNE_set_address";
- case DW_LNE_define_file: return "LNE_define_file";
- case DW_LNE_lo_user: return "LNE_lo_user";
- case DW_LNE_hi_user: return "LNE_hi_user";
+ case DW_LNE_end_sequence: return "DW_LNE_end_sequence";
+ case DW_LNE_set_address: return "DW_LNE_set_address";
+ case DW_LNE_define_file: return "DW_LNE_define_file";
+ case DW_LNE_lo_user: return "DW_LNE_lo_user";
+ case DW_LNE_hi_user: return "DW_LNE_hi_user";
}
return 0;
}
@@ -516,11 +603,11 @@ const char *llvm::dwarf::LNExtendedString(unsigned Encoding) {
const char *llvm::dwarf::MacinfoString(unsigned Encoding) {
switch (Encoding) {
// Macinfo Type Encodings
- case DW_MACINFO_define: return "MACINFO_define";
- case DW_MACINFO_undef: return "MACINFO_undef";
- case DW_MACINFO_start_file: return "MACINFO_start_file";
- case DW_MACINFO_end_file: return "MACINFO_end_file";
- case DW_MACINFO_vendor_ext: return "MACINFO_vendor_ext";
+ case DW_MACINFO_define: return "DW_MACINFO_define";
+ case DW_MACINFO_undef: return "DW_MACINFO_undef";
+ case DW_MACINFO_start_file: return "DW_MACINFO_start_file";
+ case DW_MACINFO_end_file: return "DW_MACINFO_end_file";
+ case DW_MACINFO_vendor_ext: return "DW_MACINFO_vendor_ext";
}
return 0;
}
@@ -529,33 +616,33 @@ const char *llvm::dwarf::MacinfoString(unsigned Encoding) {
/// encodings.
const char *llvm::dwarf::CallFrameString(unsigned Encoding) {
switch (Encoding) {
- case DW_CFA_advance_loc: return "CFA_advance_loc";
- case DW_CFA_offset: return "CFA_offset";
- case DW_CFA_restore: return "CFA_restore";
- case DW_CFA_set_loc: return "CFA_set_loc";
- case DW_CFA_advance_loc1: return "CFA_advance_loc1";
- case DW_CFA_advance_loc2: return "CFA_advance_loc2";
- case DW_CFA_advance_loc4: return "CFA_advance_loc4";
- case DW_CFA_offset_extended: return "CFA_offset_extended";
- case DW_CFA_restore_extended: return "CFA_restore_extended";
- case DW_CFA_undefined: return "CFA_undefined";
- case DW_CFA_same_value: return "CFA_same_value";
- case DW_CFA_register: return "CFA_register";
- case DW_CFA_remember_state: return "CFA_remember_state";
- case DW_CFA_restore_state: return "CFA_restore_state";
- case DW_CFA_def_cfa: return "CFA_def_cfa";
- case DW_CFA_def_cfa_register: return "CFA_def_cfa_register";
- case DW_CFA_def_cfa_offset: return "CFA_def_cfa_offset";
- case DW_CFA_def_cfa_expression: return "CFA_def_cfa_expression";
- case DW_CFA_expression: return "CFA_expression";
- case DW_CFA_offset_extended_sf: return "CFA_offset_extended_sf";
- case DW_CFA_def_cfa_sf: return "CFA_def_cfa_sf";
- case DW_CFA_def_cfa_offset_sf: return "CFA_def_cfa_offset_sf";
- case DW_CFA_val_offset: return "CFA_val_offset";
- case DW_CFA_val_offset_sf: return "CFA_val_offset_sf";
- case DW_CFA_val_expression: return "CFA_val_expression";
- case DW_CFA_lo_user: return "CFA_lo_user";
- case DW_CFA_hi_user: return "CFA_hi_user";
+ case DW_CFA_advance_loc: return "DW_CFA_advance_loc";
+ case DW_CFA_offset: return "DW_CFA_offset";
+ case DW_CFA_restore: return "DW_CFA_restore";
+ case DW_CFA_set_loc: return "DW_CFA_set_loc";
+ case DW_CFA_advance_loc1: return "DW_CFA_advance_loc1";
+ case DW_CFA_advance_loc2: return "DW_CFA_advance_loc2";
+ case DW_CFA_advance_loc4: return "DW_CFA_advance_loc4";
+ case DW_CFA_offset_extended: return "DW_CFA_offset_extended";
+ case DW_CFA_restore_extended: return "DW_CFA_restore_extended";
+ case DW_CFA_undefined: return "DW_CFA_undefined";
+ case DW_CFA_same_value: return "DW_CFA_same_value";
+ case DW_CFA_register: return "DW_CFA_register";
+ case DW_CFA_remember_state: return "DW_CFA_remember_state";
+ case DW_CFA_restore_state: return "DW_CFA_restore_state";
+ case DW_CFA_def_cfa: return "DW_CFA_def_cfa";
+ case DW_CFA_def_cfa_register: return "DW_CFA_def_cfa_register";
+ case DW_CFA_def_cfa_offset: return "DW_CFA_def_cfa_offset";
+ case DW_CFA_def_cfa_expression: return "DW_CFA_def_cfa_expression";
+ case DW_CFA_expression: return "DW_CFA_expression";
+ case DW_CFA_offset_extended_sf: return "DW_CFA_offset_extended_sf";
+ case DW_CFA_def_cfa_sf: return "DW_CFA_def_cfa_sf";
+ case DW_CFA_def_cfa_offset_sf: return "DW_CFA_def_cfa_offset_sf";
+ case DW_CFA_val_offset: return "DW_CFA_val_offset";
+ case DW_CFA_val_offset_sf: return "DW_CFA_val_offset_sf";
+ case DW_CFA_val_expression: return "DW_CFA_val_expression";
+ case DW_CFA_lo_user: return "DW_CFA_lo_user";
+ case DW_CFA_hi_user: return "DW_CFA_hi_user";
}
return 0;
}
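
The renames above make every string table return the full DW_-prefixed tag name. A minimal usage sketch (the helper below is hypothetical, not part of the patch):

    #include <cstdio>
    #include "llvm/Support/Dwarf.h"

    // Print a DWARF source-language code; the string functions return 0
    // (null) for codes they do not know, so fall back to the raw value.
    static void printLanguage(unsigned Lang) {
      if (const char *S = llvm::dwarf::LanguageString(Lang))
        std::printf("%s\n", S);                  // e.g. "DW_LANG_C99"
      else
        std::printf("unknown language 0x%x\n", Lang);
    }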
diff --git a/contrib/llvm/lib/Support/FileUtilities.cpp b/contrib/llvm/lib/Support/FileUtilities.cpp
index 095395f..1bde2fe 100644
--- a/contrib/llvm/lib/Support/FileUtilities.cpp
+++ b/contrib/llvm/lib/Support/FileUtilities.cpp
@@ -51,7 +51,15 @@ static const char *BackupNumber(const char *Pos, const char *FirstChar) {
if (!isNumberChar(*Pos)) return Pos;
// Otherwise, return to the start of the number.
+ bool HasPeriod = false;
while (Pos > FirstChar && isNumberChar(Pos[-1])) {
+ // Backup over at most one period.
+ if (Pos[-1] == '.') {
+ if (HasPeriod)
+ break;
+ HasPeriod = true;
+ }
+
--Pos;
if (Pos > FirstChar && isSignedChar(Pos[0]) && !isExponentChar(Pos[-1]))
break;
@@ -204,16 +212,16 @@ int llvm::DiffFilesWithTolerance(const sys::PathWithStatus &FileA,
const char *F1P = File1Start;
const char *F2P = File2Start;
- if (A_size == B_size) {
- // Are the buffers identical? Common case: Handle this efficiently.
- if (std::memcmp(File1Start, File2Start, A_size) == 0)
- return 0;
+ // Are the buffers identical? Common case: Handle this efficiently.
+ if (A_size == B_size &&
+ std::memcmp(File1Start, File2Start, A_size) == 0)
+ return 0;
- if (AbsTol == 0 && RelTol == 0) {
- if (Error)
- *Error = "Files differ without tolerance allowance";
- return 1; // Files different!
- }
+ // Otherwise, if no tolerances are set, the files differ.
+ if (AbsTol == 0 && RelTol == 0) {
+ if (Error)
+ *Error = "Files differ without tolerance allowance";
+ return 1; // Files different!
}
bool CompareFailed = false;
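
The BackupNumber change above stops the scanner from crossing a second decimal point, so adjacent numbers such as "1.2.3" are no longer merged into one token. A simplified standalone sketch of that rule (isNumberChar and the sign/exponent handling reduced to digits and '.'):

    #include <cctype>

    // Walk left from Pos to the start of a decimal number, crossing at
    // most one '.'; a second period is taken to begin a new number.
    static const char *backupNumber(const char *Pos, const char *First) {
      bool HasPeriod = false;
      while (Pos > First &&
             (std::isdigit((unsigned char)Pos[-1]) || Pos[-1] == '.')) {
        if (Pos[-1] == '.') {
          if (HasPeriod) break;
          HasPeriod = true;
        }
        --Pos;
      }
      return Pos;
    }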
diff --git a/contrib/llvm/lib/Support/FoldingSet.cpp b/contrib/llvm/lib/Support/FoldingSet.cpp
index 3f467fe..b8dca33 100644
--- a/contrib/llvm/lib/Support/FoldingSet.cpp
+++ b/contrib/llvm/lib/Support/FoldingSet.cpp
@@ -175,6 +175,14 @@ static void **GetBucketFor(const FoldingSetNodeID &ID,
return Buckets + BucketNum;
}
+/// AllocateBuckets - Allocate zero-initialized bucket memory.
+static void **AllocateBuckets(unsigned NumBuckets) {
+ void **Buckets = static_cast<void**>(calloc(NumBuckets+1, sizeof(void*)));
+ // Set the very last bucket to be a non-null "pointer".
+ Buckets[NumBuckets] = reinterpret_cast<void*>(-1);
+ return Buckets;
+}
+
//===----------------------------------------------------------------------===//
// FoldingSetImpl Implementation
@@ -182,11 +190,11 @@ FoldingSetImpl::FoldingSetImpl(unsigned Log2InitSize) {
assert(5 < Log2InitSize && Log2InitSize < 32 &&
"Initial hash table size out of range");
NumBuckets = 1 << Log2InitSize;
- Buckets = new void*[NumBuckets+1];
- clear();
+ Buckets = AllocateBuckets(NumBuckets);
+ NumNodes = 0;
}
FoldingSetImpl::~FoldingSetImpl() {
- delete [] Buckets;
+ free(Buckets);
}
void FoldingSetImpl::clear() {
// Set all but the last bucket to null pointers.
@@ -207,8 +215,8 @@ void FoldingSetImpl::GrowHashTable() {
NumBuckets <<= 1;
// Clear out new buckets.
- Buckets = new void*[NumBuckets+1];
- clear();
+ Buckets = AllocateBuckets(NumBuckets);
+ NumNodes = 0;
// Walk the old buckets, rehashing nodes into their new place.
FoldingSetNodeID ID;
@@ -227,7 +235,7 @@ void FoldingSetImpl::GrowHashTable() {
}
}
- delete[] OldBuckets;
+ free(OldBuckets);
}
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
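
The AllocateBuckets helper above relies on a sentinel: one extra slot past the table is set to a non-null value so bucket walks can stop without a separate bounds check. A reduced sketch of the pattern (allocation-failure handling omitted):

    #include <cstdlib>

    // N zeroed buckets plus one sentinel slot holding a non-null marker.
    static void **allocateBuckets(unsigned N) {
      void **B = static_cast<void**>(calloc(N + 1, sizeof(void*)));
      B[N] = reinterpret_cast<void*>(-1);   // end-of-table sentinel
      return B;
    }

    // A scan for the next used bucket can now halt on the sentinel:
    //   while (*B == 0) ++B;   // never runs past the table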
diff --git a/contrib/llvm/lib/Support/MemoryBuffer.cpp b/contrib/llvm/lib/Support/MemoryBuffer.cpp
index 2b95089..542162d 100644
--- a/contrib/llvm/lib/Support/MemoryBuffer.cpp
+++ b/contrib/llvm/lib/Support/MemoryBuffer.cpp
@@ -14,6 +14,7 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/System/Errno.h"
#include "llvm/System/Path.h"
#include "llvm/System/Process.h"
@@ -37,22 +38,7 @@ using namespace llvm;
// MemoryBuffer implementation itself.
//===----------------------------------------------------------------------===//
-MemoryBuffer::~MemoryBuffer() {
- if (MustDeleteBuffer)
- free((void*)BufferStart);
-}
-
-/// initCopyOf - Initialize this source buffer with a copy of the specified
-/// memory range. We make the copy so that we can null terminate it
-/// successfully.
-void MemoryBuffer::initCopyOf(const char *BufStart, const char *BufEnd) {
- size_t Size = BufEnd-BufStart;
- BufferStart = (char *)malloc(Size+1);
- BufferEnd = BufferStart+Size;
- memcpy(const_cast<char*>(BufferStart), BufStart, Size);
- *const_cast<char*>(BufferEnd) = 0; // Null terminate buffer.
- MustDeleteBuffer = true;
-}
+MemoryBuffer::~MemoryBuffer() { }
/// init - Initialize this MemoryBuffer as a reference to externally allocated
/// memory, memory that we know is already null terminated.
@@ -60,27 +46,38 @@ void MemoryBuffer::init(const char *BufStart, const char *BufEnd) {
assert(BufEnd[0] == 0 && "Buffer is not null terminated!");
BufferStart = BufStart;
BufferEnd = BufEnd;
- MustDeleteBuffer = false;
}
//===----------------------------------------------------------------------===//
// MemoryBufferMem implementation.
//===----------------------------------------------------------------------===//
+/// CopyStringRef - Copies contents of a StringRef into a block of memory and
+/// null-terminates it.
+static void CopyStringRef(char *Memory, StringRef Data) {
+ memcpy(Memory, Data.data(), Data.size());
+ Memory[Data.size()] = 0; // Null terminate string.
+}
+
+/// GetNamedBuffer - Allocates a new MemoryBuffer with Name copied after it.
+template <typename T>
+static T* GetNamedBuffer(StringRef Buffer, StringRef Name) {
+ char *Mem = static_cast<char*>(operator new(sizeof(T) + Name.size() + 1));
+ CopyStringRef(Mem + sizeof(T), Name);
+ return new (Mem) T(Buffer);
+}
+
namespace {
+/// MemoryBufferMem - Named MemoryBuffer pointing to a block of memory.
class MemoryBufferMem : public MemoryBuffer {
- std::string FileID;
public:
- MemoryBufferMem(StringRef InputData, StringRef FID, bool Copy = false)
- : FileID(FID) {
- if (!Copy)
- init(InputData.data(), InputData.data()+InputData.size());
- else
- initCopyOf(InputData.data(), InputData.data()+InputData.size());
+ MemoryBufferMem(StringRef InputData) {
+ init(InputData.begin(), InputData.end());
}
-
+
virtual const char *getBufferIdentifier() const {
- return FileID.c_str();
+ // The name is stored after the class itself.
+ return reinterpret_cast<const char*>(this + 1);
}
};
}
@@ -88,42 +85,55 @@ public:
/// getMemBuffer - Open the specified memory range as a MemoryBuffer. Note
/// that EndPtr[0] must be a null byte and be accessible!
MemoryBuffer *MemoryBuffer::getMemBuffer(StringRef InputData,
- const char *BufferName) {
- return new MemoryBufferMem(InputData, BufferName);
+ StringRef BufferName) {
+ return GetNamedBuffer<MemoryBufferMem>(InputData, BufferName);
}
/// getMemBufferCopy - Open the specified memory range as a MemoryBuffer,
/// copying the contents and taking ownership of it. This has no requirements
/// on EndPtr[0].
MemoryBuffer *MemoryBuffer::getMemBufferCopy(StringRef InputData,
- const char *BufferName) {
- return new MemoryBufferMem(InputData, BufferName, true);
+ StringRef BufferName) {
+ MemoryBuffer *Buf = getNewUninitMemBuffer(InputData.size(), BufferName);
+ if (!Buf) return 0;
+ memcpy(const_cast<char*>(Buf->getBufferStart()), InputData.data(),
+ InputData.size());
+ return Buf;
}
/// getNewUninitMemBuffer - Allocate a new MemoryBuffer of the specified size
-/// that is completely initialized to zeros. Note that the caller should
-/// initialize the memory allocated by this method. The memory is owned by
-/// the MemoryBuffer object.
+/// that is not initialized. Note that the caller should initialize the
+/// memory allocated by this method. The memory is owned by the MemoryBuffer
+/// object.
MemoryBuffer *MemoryBuffer::getNewUninitMemBuffer(size_t Size,
StringRef BufferName) {
- char *Buf = (char *)malloc(Size+1);
- if (!Buf) return 0;
- Buf[Size] = 0;
- MemoryBufferMem *SB = new MemoryBufferMem(StringRef(Buf, Size), BufferName);
- // The memory for this buffer is owned by the MemoryBuffer.
- SB->MustDeleteBuffer = true;
- return SB;
+ // Allocate space for the MemoryBuffer, the data and the name. It is important
+ // that MemoryBuffer and data are aligned so PointerIntPair works with them.
+ size_t AlignedStringLen =
+ RoundUpToAlignment(sizeof(MemoryBufferMem) + BufferName.size() + 1,
+ sizeof(void*)); // TODO: Is sizeof(void*) enough?
+ size_t RealLen = AlignedStringLen + Size + 1;
+ char *Mem = static_cast<char*>(operator new(RealLen, std::nothrow));
+ if (!Mem) return 0;
+
+ // The name is stored after the class itself.
+ CopyStringRef(Mem + sizeof(MemoryBufferMem), BufferName);
+
+ // The buffer begins after the name and must be aligned.
+ char *Buf = Mem + AlignedStringLen;
+ Buf[Size] = 0; // Null terminate buffer.
+
+ return new (Mem) MemoryBufferMem(StringRef(Buf, Size));
}
/// getNewMemBuffer - Allocate a new MemoryBuffer of the specified size that
/// is completely initialized to zeros, so the caller need not initialize
/// the memory allocated by this method. The memory is owned by
/// the MemoryBuffer object.
-MemoryBuffer *MemoryBuffer::getNewMemBuffer(size_t Size,
- const char *BufferName) {
+MemoryBuffer *MemoryBuffer::getNewMemBuffer(size_t Size, StringRef BufferName) {
MemoryBuffer *SB = getNewUninitMemBuffer(Size, BufferName);
if (!SB) return 0;
- memset(const_cast<char*>(SB->getBufferStart()), 0, Size+1);
+ memset(const_cast<char*>(SB->getBufferStart()), 0, Size);
return SB;
}
@@ -137,7 +147,16 @@ MemoryBuffer *MemoryBuffer::getFileOrSTDIN(StringRef Filename,
int64_t FileSize,
struct stat *FileInfo) {
if (Filename == "-")
- return getSTDIN();
+ return getSTDIN(ErrStr);
+ return getFile(Filename, ErrStr, FileSize, FileInfo);
+}
+
+MemoryBuffer *MemoryBuffer::getFileOrSTDIN(const char *Filename,
+ std::string *ErrStr,
+ int64_t FileSize,
+ struct stat *FileInfo) {
+ if (strcmp(Filename, "-") == 0)
+ return getSTDIN(ErrStr);
return getFile(Filename, ErrStr, FileSize, FileInfo);
}
@@ -149,18 +168,11 @@ namespace {
/// MemoryBufferMMapFile - This represents a file that was mapped in with the
/// sys::Path::MapInFilePages method. When destroyed, it calls the
/// sys::Path::UnMapFilePages method.
-class MemoryBufferMMapFile : public MemoryBuffer {
- std::string Filename;
+class MemoryBufferMMapFile : public MemoryBufferMem {
public:
- MemoryBufferMMapFile(StringRef filename, const char *Pages, uint64_t Size)
- : Filename(filename) {
- init(Pages, Pages+Size);
- }
-
- virtual const char *getBufferIdentifier() const {
- return Filename.c_str();
- }
-
+ MemoryBufferMMapFile(StringRef Buffer)
+ : MemoryBufferMem(Buffer) { }
+
~MemoryBufferMMapFile() {
sys::Path::UnMapFilePages(getBufferStart(), getBufferSize());
}
@@ -170,19 +182,24 @@ public:
class FileCloser {
int FD;
public:
- FileCloser(int FD) : FD(FD) {}
+ explicit FileCloser(int FD) : FD(FD) {}
~FileCloser() { ::close(FD); }
};
}
MemoryBuffer *MemoryBuffer::getFile(StringRef Filename, std::string *ErrStr,
int64_t FileSize, struct stat *FileInfo) {
- int OpenFlags = 0;
+ SmallString<256> PathBuf(Filename.begin(), Filename.end());
+ return MemoryBuffer::getFile(PathBuf.c_str(), ErrStr, FileSize, FileInfo);
+}
+
+MemoryBuffer *MemoryBuffer::getFile(const char *Filename, std::string *ErrStr,
+ int64_t FileSize, struct stat *FileInfo) {
+ int OpenFlags = O_RDONLY;
#ifdef O_BINARY
OpenFlags |= O_BINARY; // Open input file in binary mode on win32.
#endif
- SmallString<256> PathBuf(Filename.begin(), Filename.end());
- int FD = ::open(PathBuf.c_str(), O_RDONLY|OpenFlags);
+ int FD = ::open(Filename, OpenFlags);
if (FD == -1) {
if (ErrStr) *ErrStr = sys::StrError();
return 0;
@@ -213,8 +230,8 @@ MemoryBuffer *MemoryBuffer::getFile(StringRef Filename, std::string *ErrStr,
if (FileSize >= 4096*4 &&
(FileSize & (sys::Process::GetPageSize()-1)) != 0) {
if (const char *Pages = sys::Path::MapInFilePages(FD, FileSize)) {
- // Close the file descriptor, now that the whole file is in memory.
- return new MemoryBufferMMapFile(Filename, Pages, FileSize);
+ return GetNamedBuffer<MemoryBufferMMapFile>(StringRef(Pages, FileSize),
+ Filename);
}
}
@@ -254,34 +271,27 @@ MemoryBuffer *MemoryBuffer::getFile(StringRef Filename, std::string *ErrStr,
// MemoryBuffer::getSTDIN implementation.
//===----------------------------------------------------------------------===//
-namespace {
-class STDINBufferFile : public MemoryBuffer {
-public:
- virtual const char *getBufferIdentifier() const {
- return "<stdin>";
- }
-};
-}
-
-MemoryBuffer *MemoryBuffer::getSTDIN() {
- char Buffer[4096*4];
-
- std::vector<char> FileData;
-
+MemoryBuffer *MemoryBuffer::getSTDIN(std::string *ErrStr) {
// Read in all of the data from stdin, we cannot mmap stdin.
//
// FIXME: That isn't necessarily true, we should try to mmap stdin and
// fall back if it fails.
sys::Program::ChangeStdinToBinary();
- size_t ReadBytes;
+
+ const ssize_t ChunkSize = 4096*4;
+ SmallString<ChunkSize> Buffer;
+ ssize_t ReadBytes;
+ // Read into Buffer until we hit EOF.
do {
- ReadBytes = fread(Buffer, sizeof(char), sizeof(Buffer), stdin);
- FileData.insert(FileData.end(), Buffer, Buffer+ReadBytes);
- } while (ReadBytes == sizeof(Buffer));
-
- FileData.push_back(0); // &FileData[Size] is invalid. So is &*FileData.end().
- size_t Size = FileData.size();
- MemoryBuffer *B = new STDINBufferFile();
- B->initCopyOf(&FileData[0], &FileData[Size-1]);
- return B;
+ Buffer.reserve(Buffer.size() + ChunkSize);
+ ReadBytes = read(0, Buffer.end(), ChunkSize);
+ if (ReadBytes == -1) {
+ if (errno == EINTR) continue;
+ if (ErrStr) *ErrStr = sys::StrError();
+ return 0;
+ }
+ Buffer.set_size(Buffer.size() + ReadBytes);
+ } while (ReadBytes != 0);
+
+ return getMemBufferCopy(Buffer, "<stdin>");
}
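
Several of the changes above use one trick: allocate the object, its identifier string, and (for getNewUninitMemBuffer) the data itself in a single block, with the string stored directly after the object. A minimal sketch of that layout using a hypothetical stand-in type:

    #include <cstring>
    #include <new>

    struct Named {   // stand-in for MemoryBufferMem; the real class is above
      const char *name() const {
        return reinterpret_cast<const char *>(this + 1);
      }
    };

    // One allocation holds the object followed by its null-terminated name.
    static Named *makeNamed(const char *Name, size_t Len) {
      char *Mem = static_cast<char *>(operator new(sizeof(Named) + Len + 1));
      std::memcpy(Mem + sizeof(Named), Name, Len);
      Mem[sizeof(Named) + Len] = 0;
      return new (Mem) Named();
    }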
diff --git a/contrib/llvm/lib/Support/PrettyStackTrace.cpp b/contrib/llvm/lib/Support/PrettyStackTrace.cpp
index 7a04a53..a99ab2f 100644
--- a/contrib/llvm/lib/Support/PrettyStackTrace.cpp
+++ b/contrib/llvm/lib/Support/PrettyStackTrace.cpp
@@ -12,11 +12,17 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Config/config.h" // Get autoconf configuration settings
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/System/Signals.h"
#include "llvm/System/ThreadLocal.h"
#include "llvm/ADT/SmallString.h"
+
+#ifdef HAVE_CRASHREPORTERCLIENT_H
+#include <CrashReporterClient.h>
+#endif
+
using namespace llvm;
namespace llvm {
@@ -48,8 +54,17 @@ static void PrintCurStackTrace(raw_ostream &OS) {
OS.flush();
}
-// Integrate with crash reporter.
-#ifdef __APPLE__
+// Integrate with crash reporter libraries.
+#if defined (__APPLE__) && defined (HAVE_CRASHREPORTERCLIENT_H)
+// If any clients of llvm try to link to libCrashReporterClient.a themselves,
+// only one crash info struct will be used.
+extern "C" {
+CRASH_REPORTER_CLIENT_HIDDEN
+struct crashreporter_annotations_t gCRAnnotations
+ __attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION)))
+ = { CRASHREPORTER_ANNOTATIONS_VERSION, 0, 0, 0, 0 };
+}
+#elif defined (__APPLE__)
static const char *__crashreporter_info__ = 0;
asm(".desc ___crashreporter_info__, 0x10");
#endif
@@ -71,7 +86,11 @@ static void CrashHandler(void *Cookie) {
}
if (!TmpStr.empty()) {
+#ifndef HAVE_CRASHREPORTERCLIENT_H
__crashreporter_info__ = strdup(std::string(TmpStr.str()).c_str());
+#else
+ CRSetCrashLogMessage(std::string(TmpStr.str()).c_str());
+#endif
errs() << TmpStr.str();
}
diff --git a/contrib/llvm/lib/Support/Regex.cpp b/contrib/llvm/lib/Support/Regex.cpp
index a7631de..309ffb0 100644
--- a/contrib/llvm/lib/Support/Regex.cpp
+++ b/contrib/llvm/lib/Support/Regex.cpp
@@ -19,7 +19,7 @@
#include <string>
using namespace llvm;
-Regex::Regex(const StringRef &regex, unsigned Flags) {
+Regex::Regex(StringRef regex, unsigned Flags) {
unsigned flags = 0;
preg = new llvm_regex();
preg->re_endp = regex.end();
@@ -52,7 +52,7 @@ unsigned Regex::getNumMatches() const {
return preg->re_nsub;
}
-bool Regex::match(const StringRef &String, SmallVectorImpl<StringRef> *Matches){
+bool Regex::match(StringRef String, SmallVectorImpl<StringRef> *Matches){
unsigned nmatch = Matches ? preg->re_nsub+1 : 0;
// pmatch needs to have at least one element.
diff --git a/contrib/llvm/lib/Support/SmallPtrSet.cpp b/contrib/llvm/lib/Support/SmallPtrSet.cpp
index 68938fa..504e649 100644
--- a/contrib/llvm/lib/Support/SmallPtrSet.cpp
+++ b/contrib/llvm/lib/Support/SmallPtrSet.cpp
@@ -166,10 +166,13 @@ void SmallPtrSetImpl::Grow() {
}
}
-SmallPtrSetImpl::SmallPtrSetImpl(const SmallPtrSetImpl& that) {
+SmallPtrSetImpl::SmallPtrSetImpl(const void **SmallStorage,
+ const SmallPtrSetImpl& that) {
+ SmallArray = SmallStorage;
+
// If we're becoming small, prepare to insert into our stack space
if (that.isSmall()) {
- CurArray = &SmallArray[0];
+ CurArray = SmallArray;
// Otherwise, allocate new heap space (unless we were the same size)
} else {
CurArray = (const void**)malloc(sizeof(void*) * (that.CurArraySize+1));
@@ -197,7 +200,7 @@ void SmallPtrSetImpl::CopyFrom(const SmallPtrSetImpl &RHS) {
if (RHS.isSmall()) {
if (!isSmall())
free(CurArray);
- CurArray = &SmallArray[0];
+ CurArray = SmallArray;
// Otherwise, allocate new heap space (unless we were the same size)
} else if (CurArraySize != RHS.CurArraySize) {
if (isSmall())
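
The constructor change above lets the size-erased implementation class receive the derived object's inline array instead of assuming its own. A compact sketch of that shape (element copying and the heap path elided):

    // The template owns the inline storage; the non-template base only
    // sees a pointer to it, so a copy can point CurArray at the new
    // object's own stack space when the source is small.
    struct Impl {
      const void **SmallArray;
      const void **CurArray;
      Impl(const void **Storage, const Impl &/*That*/) {
        SmallArray = Storage;
        CurArray = Storage;   // "small" case; heap case elided
      }
    };

    template <unsigned N>
    struct Set : Impl {
      const void *Storage[N];
      Set(const Set &That) : Impl(Storage, That) {}
    };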
diff --git a/contrib/llvm/lib/Support/SmallVector.cpp b/contrib/llvm/lib/Support/SmallVector.cpp
index 6821382..2e17af8 100644
--- a/contrib/llvm/lib/Support/SmallVector.cpp
+++ b/contrib/llvm/lib/Support/SmallVector.cpp
@@ -21,15 +21,18 @@ void SmallVectorBase::grow_pod(size_t MinSizeInBytes, size_t TSize) {
size_t NewCapacityInBytes = 2 * capacity_in_bytes();
if (NewCapacityInBytes < MinSizeInBytes)
NewCapacityInBytes = MinSizeInBytes;
- void *NewElts = operator new(NewCapacityInBytes);
-
- // Copy the elements over. No need to run dtors on PODs.
- memcpy(NewElts, this->BeginX, CurSizeBytes);
-
- // If this wasn't grown from the inline copy, deallocate the old space.
- if (!this->isSmall())
- operator delete(this->BeginX);
-
+
+ void *NewElts;
+ if (this->isSmall()) {
+ NewElts = malloc(NewCapacityInBytes);
+
+ // Copy the elements over. No need to run dtors on PODs.
+ memcpy(NewElts, this->BeginX, CurSizeBytes);
+ } else {
+ // If this wasn't grown from the inline copy, grow the allocated space.
+ NewElts = realloc(this->BeginX, NewCapacityInBytes);
+ }
+
this->EndX = (char*)NewElts+CurSizeBytes;
this->BeginX = NewElts;
this->CapacityX = (char*)this->BeginX + NewCapacityInBytes;
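
The rewritten grow_pod above distinguishes the first escape from inline storage (which must malloc and copy) from later growth (which can realloc and possibly extend in place). A reduced sketch of that split:

    #include <cstdlib>
    #include <cstring>

    // Grow a POD buffer that may still live in inline (stack) storage.
    static void *growPod(void *Begin, size_t Used, size_t NewCap,
                         bool IsInline) {
      if (IsInline) {
        void *Heap = malloc(NewCap);
        memcpy(Heap, Begin, Used);   // PODs: no destructors to run
        return Heap;                 // inline storage is not freed
      }
      return realloc(Begin, NewCap); // may extend in place, no copy
    }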
diff --git a/contrib/llvm/lib/Support/StringPool.cpp b/contrib/llvm/lib/Support/StringPool.cpp
index 1ee917f..ff607cf 100644
--- a/contrib/llvm/lib/Support/StringPool.cpp
+++ b/contrib/llvm/lib/Support/StringPool.cpp
@@ -22,7 +22,7 @@ StringPool::~StringPool() {
assert(InternTable.empty() && "PooledStringPtr leaked!");
}
-PooledStringPtr StringPool::intern(const StringRef &Key) {
+PooledStringPtr StringPool::intern(StringRef Key) {
table_t::iterator I = InternTable.find(Key);
if (I != InternTable.end())
return PooledStringPtr(&*I);
diff --git a/contrib/llvm/lib/Support/Timer.cpp b/contrib/llvm/lib/Support/Timer.cpp
index 784b77c..44ee177 100644
--- a/contrib/llvm/lib/Support/Timer.cpp
+++ b/contrib/llvm/lib/Support/Timer.cpp
@@ -236,11 +236,13 @@ static Timer &getNamedRegionTimer(StringRef Name) {
return T;
}
-NamedRegionTimer::NamedRegionTimer(StringRef Name)
- : TimeRegion(getNamedRegionTimer(Name)) {}
+NamedRegionTimer::NamedRegionTimer(StringRef Name,
+ bool Enabled)
+ : TimeRegion(!Enabled ? 0 : &getNamedRegionTimer(Name)) {}
-NamedRegionTimer::NamedRegionTimer(StringRef Name, StringRef GroupName)
- : TimeRegion(NamedGroupedTimers->get(Name, GroupName)) {}
+NamedRegionTimer::NamedRegionTimer(StringRef Name, StringRef GroupName,
+ bool Enabled)
+ : TimeRegion(!Enabled ? 0 : &NamedGroupedTimers->get(Name, GroupName)) {}
//===----------------------------------------------------------------------===//
// TimerGroup Implementation
diff --git a/contrib/llvm/lib/Support/Triple.cpp b/contrib/llvm/lib/Support/Triple.cpp
index 9796ca5..6a70449 100644
--- a/contrib/llvm/lib/Support/Triple.cpp
+++ b/contrib/llvm/lib/Support/Triple.cpp
@@ -104,6 +104,7 @@ const char *Triple::getOSTypeName(OSType Kind) {
case Solaris: return "solaris";
case Win32: return "win32";
case Haiku: return "haiku";
+ case Minix: return "minix";
}
return "<invalid>";
@@ -326,7 +327,9 @@ void Triple::Parse() const {
else if (OSName.startswith("win32"))
OS = Win32;
else if (OSName.startswith("haiku"))
- OS = Haiku;
+ OS = Haiku;
+ else if (OSName.startswith("minix"))
+ OS = Minix;
else
OS = UnknownOS;
diff --git a/contrib/llvm/lib/Support/raw_ostream.cpp b/contrib/llvm/lib/Support/raw_ostream.cpp
index 11cf0ec..8054ae6 100644
--- a/contrib/llvm/lib/Support/raw_ostream.cpp
+++ b/contrib/llvm/lib/Support/raw_ostream.cpp
@@ -427,10 +427,9 @@ raw_fd_ostream::~raw_fd_ostream() {
void raw_fd_ostream::write_impl(const char *Ptr, size_t Size) {
assert(FD >= 0 && "File already closed.");
pos += Size;
- ssize_t ret;
do {
- ret = ::write(FD, Ptr, Size);
+ ssize_t ret = ::write(FD, Ptr, Size);
if (ret < 0) {
// If it's a recoverable error, swallow it and retry the write.
@@ -482,7 +481,7 @@ uint64_t raw_fd_ostream::seek(uint64_t off) {
}
size_t raw_fd_ostream::preferred_buffer_size() const {
-#if !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(_MINIX)
+#if !defined(_MSC_VER) && !defined(__MINGW32__) && !defined(__minix)
// Windows and Minix have no st_blksize.
assert(FD >= 0 && "File not yet open!");
struct stat statbuf;
@@ -496,8 +495,9 @@ size_t raw_fd_ostream::preferred_buffer_size() const {
return 0;
// Return the preferred block size.
return statbuf.st_blksize;
-#endif
+#else
return raw_ostream::preferred_buffer_size();
+#endif
}
raw_ostream &raw_fd_ostream::changeColor(enum Colors colors, bool bold,
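
The write_impl loop above retries when ::write reports a recoverable error. A self-contained sketch of that retry pattern (EINTR/EAGAIN chosen as the recoverable cases for illustration):

    #include <cerrno>
    #include <unistd.h>

    // Write the whole buffer, retrying interrupted or transient failures.
    static bool writeAll(int FD, const char *Ptr, size_t Size) {
      while (Size) {
        ssize_t Ret = ::write(FD, Ptr, Size);
        if (Ret < 0) {
          if (errno == EINTR || errno == EAGAIN)
            continue;                // recoverable: retry the write
          return false;              // anything else is fatal
        }
        Ptr += Ret;
        Size -= (size_t)Ret;
      }
      return true;
    }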
diff --git a/contrib/llvm/lib/System/Disassembler.cpp b/contrib/llvm/lib/System/Disassembler.cpp
index bad427a..139e3be 100644
--- a/contrib/llvm/lib/System/Disassembler.cpp
+++ b/contrib/llvm/lib/System/Disassembler.cpp
@@ -44,33 +44,29 @@ std::string llvm::sys::disassembleBuffer(uint8_t* start, size_t length,
uint64_t pc) {
std::stringstream res;
-#if defined (__i386__) || defined (__amd64__) || defined (__x86_64__)
+#if (defined (__i386__) || defined (__amd64__) || defined (__x86_64__)) \
+ && USE_UDIS86
unsigned bits;
# if defined(__i386__)
bits = 32;
# else
bits = 64;
# endif
-
-# if USE_UDIS86
+
ud_t ud_obj;
-
+
ud_init(&ud_obj);
ud_set_input_buffer(&ud_obj, start, length);
ud_set_mode(&ud_obj, bits);
ud_set_pc(&ud_obj, pc);
ud_set_syntax(&ud_obj, UD_SYN_ATT);
-
+
res << std::setbase(16)
<< std::setw(bits/4);
-
+
while (ud_disassemble(&ud_obj)) {
res << ud_insn_off(&ud_obj) << ":\t" << ud_insn_asm(&ud_obj) << "\n";
}
-# else
- res << "No disassembler available. See configure help for options.\n";
-# endif
-
#else
res << "No disassembler available. See configure help for options.\n";
#endif
diff --git a/contrib/llvm/lib/System/Path.cpp b/contrib/llvm/lib/System/Path.cpp
index 6844530..1235257 100644
--- a/contrib/llvm/lib/System/Path.cpp
+++ b/contrib/llvm/lib/System/Path.cpp
@@ -136,26 +136,23 @@ sys::IdentifyFileType(const char *magic, unsigned length) {
bool
Path::isArchive() const {
- if (canRead())
- return hasMagicNumber("!<arch>\012");
- return false;
+ return hasMagicNumber("!<arch>\012");
}
bool
Path::isDynamicLibrary() const {
- if (canRead()) {
- std::string Magic;
- if (getMagicNumber(Magic, 64))
- switch (IdentifyFileType(Magic.c_str(),
- static_cast<unsigned>(Magic.length()))) {
- default: return false;
- case Mach_O_FixedVirtualMemorySharedLib_FileType:
- case Mach_O_DynamicallyLinkedSharedLib_FileType:
- case Mach_O_DynamicallyLinkedSharedLibStub_FileType:
- case ELF_SharedObject_FileType:
- case COFF_FileType: return true;
- }
- }
+ std::string Magic;
+ if (getMagicNumber(Magic, 64))
+ switch (IdentifyFileType(Magic.c_str(),
+ static_cast<unsigned>(Magic.length()))) {
+ default: return false;
+ case Mach_O_FixedVirtualMemorySharedLib_FileType:
+ case Mach_O_DynamicallyLinkedSharedLib_FileType:
+ case Mach_O_DynamicallyLinkedSharedLibStub_FileType:
+ case ELF_SharedObject_FileType:
+ case COFF_FileType: return true;
+ }
+
return false;
}
diff --git a/contrib/llvm/lib/System/Unix/Path.inc b/contrib/llvm/lib/System/Unix/Path.inc
index 74596dc..bc104a3 100644
--- a/contrib/llvm/lib/System/Unix/Path.inc
+++ b/contrib/llvm/lib/System/Unix/Path.inc
@@ -421,10 +421,8 @@ bool Path::getMagicNumber(std::string &Magic, unsigned len) const {
return false;
ssize_t bytes_read = ::read(fd, Buf, len);
::close(fd);
- if (ssize_t(len) != bytes_read) {
- Magic.clear();
+ if (ssize_t(len) != bytes_read)
return false;
- }
Magic.assign(Buf, len);
return true;
}
@@ -890,14 +888,19 @@ Path::makeUnique(bool reuse_current, std::string* ErrMsg) {
#else
// Okay, looks like we have to do it all by our lonesome.
static unsigned FCounter = 0;
- unsigned offset = path.size() + 1;
- while ( FCounter < 999999 && exists()) {
- sprintf(FNBuffer+offset,"%06u",++FCounter);
+ // Try to initialize with unique value.
+ if (FCounter == 0) FCounter = ((unsigned)getpid() & 0xFFFF) << 8;
+ char* pos = strstr(FNBuffer, "XXXXXX");
+ do {
+ if (++FCounter > 0xFFFFFF) {
+ return MakeErrMsg(ErrMsg,
+ path + ": can't make unique filename: too many files");
+ }
+ sprintf(pos, "%06X", FCounter);
path = FNBuffer;
- }
- if (FCounter > 999999)
- return MakeErrMsg(ErrMsg,
- path + ": can't make unique filename: too many files");
+ } while (exists());
+ // POSSIBLE SECURITY BUG: An attacker can easily guess the name and exploit
+ // LLVM.
#endif
return false;
}
diff --git a/contrib/llvm/lib/System/Unix/Program.inc b/contrib/llvm/lib/System/Unix/Program.inc
index 358415f..0209f5a 100644
--- a/contrib/llvm/lib/System/Unix/Program.inc
+++ b/contrib/llvm/lib/System/Unix/Program.inc
@@ -310,12 +310,9 @@ Program::Wait(unsigned secondsToWait,
// fact of having a handler at all causes the wait below to return with EINTR,
// unlike if we used SIG_IGN.
if (secondsToWait) {
-#ifndef __HAIKU__
- Act.sa_sigaction = 0;
-#endif
+ memset(&Act, 0, sizeof(Act));
Act.sa_handler = TimeOutHandler;
sigemptyset(&Act.sa_mask);
- Act.sa_flags = 0;
sigaction(SIGALRM, &Act, &Old);
alarm(secondsToWait);
}
diff --git a/contrib/llvm/lib/System/Unix/Signals.inc b/contrib/llvm/lib/System/Unix/Signals.inc
index 9548816..1e74647 100644
--- a/contrib/llvm/lib/System/Unix/Signals.inc
+++ b/contrib/llvm/lib/System/Unix/Signals.inc
@@ -111,6 +111,14 @@ static void UnregisterHandlers() {
}
+/// RemoveFilesToRemove - Process the FilesToRemove list. This function
+/// should be called with the SignalsMutex lock held.
+static void RemoveFilesToRemove() {
+ while (!FilesToRemove.empty()) {
+ FilesToRemove.back().eraseFromDisk(true);
+ FilesToRemove.pop_back();
+ }
+}
// SignalHandler - The signal handler that runs.
static RETSIGTYPE SignalHandler(int Sig) {
@@ -126,10 +134,7 @@ static RETSIGTYPE SignalHandler(int Sig) {
sigprocmask(SIG_UNBLOCK, &SigMask, 0);
SignalsMutex.acquire();
- while (!FilesToRemove.empty()) {
- FilesToRemove.back().eraseFromDisk(true);
- FilesToRemove.pop_back();
- }
+ RemoveFilesToRemove();
if (std::find(IntSigs, IntSigsEnd, Sig) != IntSigsEnd) {
if (InterruptFunction) {
@@ -153,7 +158,9 @@ static RETSIGTYPE SignalHandler(int Sig) {
}
void llvm::sys::RunInterruptHandlers() {
- SignalHandler(SIGINT);
+ SignalsMutex.acquire();
+ RemoveFilesToRemove();
+ SignalsMutex.release();
}
void llvm::sys::SetInterruptFunction(void (*IF)()) {
diff --git a/contrib/llvm/lib/System/Win32/Path.inc b/contrib/llvm/lib/System/Win32/Path.inc
index 5a0052f..379527d 100644
--- a/contrib/llvm/lib/System/Win32/Path.inc
+++ b/contrib/llvm/lib/System/Win32/Path.inc
@@ -281,12 +281,6 @@ Path Path::GetMainExecutable(const char *argv0, void *MainAddr) {
// FIXME: the above set of functions don't map to Windows very well.
-bool
-Path::isRootDirectory() const {
- size_t len = path.size();
- return len > 0 && path[len-1] == '/';
-}
-
StringRef Path::getDirname() const {
return getDirnameCharSep(path, "/");
}
diff --git a/contrib/llvm/lib/System/Win32/Signals.inc b/contrib/llvm/lib/System/Win32/Signals.inc
index a3a393c..d6db71b 100644
--- a/contrib/llvm/lib/System/Win32/Signals.inc
+++ b/contrib/llvm/lib/System/Win32/Signals.inc
@@ -283,7 +283,7 @@ static LONG WINAPI LLVMUnhandledExceptionFilter(LPEXCEPTION_POINTERS ep) {
#ifdef _MSC_VER
if (ExitOnUnhandledExceptions)
- _exit(-3);
+ _exit(-3);
#endif
// Allow dialog box to pop up allowing choice to start debugger.
diff --git a/contrib/llvm/lib/Target/ARM/ARM.h b/contrib/llvm/lib/Target/ARM/ARM.h
index ae7ae59..14825a7 100644
--- a/contrib/llvm/lib/Target/ARM/ARM.h
+++ b/contrib/llvm/lib/Target/ARM/ARM.h
@@ -90,10 +90,6 @@ inline static const char *ARMCondCodeToString(ARMCC::CondCodes CC) {
}
}
-/// ModelWithRegSequence - Return true if isel should use REG_SEQUENCE to model
-/// operations involving sub-registers.
-bool ModelWithRegSequence();
-
FunctionPass *createARMISelDag(ARMBaseTargetMachine &TM,
CodeGenOpt::Level OptLevel);
diff --git a/contrib/llvm/lib/Target/ARM/ARM.td b/contrib/llvm/lib/Target/ARM/ARM.td
index f1e6a9f..fa64d6c 100644
--- a/contrib/llvm/lib/Target/ARM/ARM.td
+++ b/contrib/llvm/lib/Target/ARM/ARM.td
@@ -48,6 +48,8 @@ def FeatureHWDiv : SubtargetFeature<"hwdiv", "HasHardwareDivide", "true",
"Enable divide instructions">;
def FeatureT2ExtractPack: SubtargetFeature<"t2xtpk", "HasT2ExtractPack", "true",
"Enable Thumb2 extract and pack instructions">;
+def FeatureSlowFPBrcc : SubtargetFeature<"slow-fp-brcc", "SlowFPBrcc", "true",
+ "FP compare + branch is slow">;
// Some processors have multiply-accumulate instructions that don't
// play nicely with other VFP instructions, and it's generally better
@@ -129,7 +131,7 @@ def : Processor<"arm1156t2f-s", ARMV6Itineraries,
// V7 Processors.
def : Processor<"cortex-a8", CortexA8Itineraries,
[ArchV7A, FeatureThumb2, FeatureNEON, FeatureHasSlowVMLx,
- FeatureNEONForFP, FeatureT2ExtractPack]>;
+ FeatureSlowFPBrcc, FeatureNEONForFP, FeatureT2ExtractPack]>;
def : Processor<"cortex-a9", CortexA9Itineraries,
[ArchV7A, FeatureThumb2, FeatureNEON, FeatureT2ExtractPack]>;
def : ProcNoItin<"cortex-m3", [ArchV7M, FeatureThumb2, FeatureHWDiv]>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMAddressingModes.h b/contrib/llvm/lib/Target/ARM/ARMAddressingModes.h
index e68354a..92a13f1 100644
--- a/contrib/llvm/lib/Target/ARM/ARMAddressingModes.h
+++ b/contrib/llvm/lib/Target/ARM/ARMAddressingModes.h
@@ -519,7 +519,70 @@ namespace ARM_AM {
//
// This is stored in two operands [regaddr, align]. The first is the
// address register. The second operand is the value of the alignment
- // specifier to use or zero if no explicit alignment.
+ // specifier in bytes or zero if no explicit alignment.
+ // Valid alignments depend on the specific instruction.
+
+ //===--------------------------------------------------------------------===//
+ // NEON Modified Immediates
+ //===--------------------------------------------------------------------===//
+ //
+ // Several NEON instructions (e.g., VMOV) take a "modified immediate"
+ // vector operand, where a small immediate encoded in the instruction
+ // specifies a full NEON vector value. These modified immediates are
+ // represented here as encoded integers. The low 8 bits hold the immediate
+ // value; bit 12 holds the "Op" field of the instruction, and bits 11-8 hold
+ // the "Cmode" field of the instruction. The interfaces below treat the
+ // Op and Cmode values as a single 5-bit value.
+
+ static inline unsigned createNEONModImm(unsigned OpCmode, unsigned Val) {
+ return (OpCmode << 8) | Val;
+ }
+ static inline unsigned getNEONModImmOpCmode(unsigned ModImm) {
+ return (ModImm >> 8) & 0x1f;
+ }
+ static inline unsigned getNEONModImmVal(unsigned ModImm) {
+ return ModImm & 0xff;
+ }
+
+ /// decodeNEONModImm - Decode a NEON modified immediate value into the
+ /// element value and the element size in bits. (If the element size is
+ /// smaller than the vector, it is splatted into all the elements.)
+ static inline uint64_t decodeNEONModImm(unsigned ModImm, unsigned &EltBits) {
+ unsigned OpCmode = getNEONModImmOpCmode(ModImm);
+ unsigned Imm8 = getNEONModImmVal(ModImm);
+ uint64_t Val = 0;
+
+ if (OpCmode == 0xe) {
+ // 8-bit vector elements
+ Val = Imm8;
+ EltBits = 8;
+ } else if ((OpCmode & 0xc) == 0x8) {
+ // 16-bit vector elements
+ unsigned ByteNum = (OpCmode & 0x6) >> 1;
+ Val = Imm8 << (8 * ByteNum);
+ EltBits = 16;
+ } else if ((OpCmode & 0x8) == 0) {
+ // 32-bit vector elements, zero with one byte set
+ unsigned ByteNum = (OpCmode & 0x6) >> 1;
+ Val = Imm8 << (8 * ByteNum);
+ EltBits = 32;
+ } else if ((OpCmode & 0xe) == 0xc) {
+ // 32-bit vector elements, one byte with low bits set
+ unsigned ByteNum = 1 + (OpCmode & 0x1);
+ Val = (Imm8 << (8 * ByteNum)) | (0xffff >> (8 * (2 - ByteNum)));
+ EltBits = 32;
+ } else if (OpCmode == 0x1e) {
+ // 64-bit vector elements
+ for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) {
+ if ((ModImm >> ByteNum) & 1)
+ Val |= (uint64_t)0xff << (8 * ByteNum);
+ }
+ EltBits = 64;
+ } else {
+ assert(false && "Unsupported NEON immediate");
+ }
+ return Val;
+ }
} // end namespace ARM_AM
} // end namespace llvm
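
A worked example of the modified-immediate layout documented above (values chosen for illustration): OpCmode 0x8 selects 16-bit elements with the immediate in the low byte.

    // Round trip through the helpers above:
    unsigned ModImm = ARM_AM::createNEONModImm(0x8, 0x42);  // == 0x842
    unsigned EltBits;
    uint64_t Val = ARM_AM::decodeNEONModImm(ModImm, EltBits);
    // OpCmode 0x8, Imm8 0x42 => Val == 0x42 and EltBits == 16; the 16-bit
    // element 0x0042 is splatted across the whole vector.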
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 2528854..49c16f3 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -56,7 +56,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineInstr *MI = MBBI;
MachineFunction &MF = *MI->getParent()->getParent();
- unsigned TSFlags = MI->getDesc().TSFlags;
+ uint64_t TSFlags = MI->getDesc().TSFlags;
bool isPre = false;
switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
default: return NULL;
@@ -199,9 +199,9 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
bool
ARMBaseInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const {
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
if (CSI.empty())
return false;
@@ -227,8 +227,9 @@ ARMBaseInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
// Insert the spill to the stack frame. The register is killed at the spill
//
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
storeRegToStackSlot(MBB, MI, Reg, isKill,
- CSI[i].getFrameIdx(), CSI[i].getRegClass(), TRI);
+ CSI[i].getFrameIdx(), RC, TRI);
}
return true;
}
@@ -347,10 +348,8 @@ unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc argument
- DebugLoc dl;
-
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
int BOpc = !AFI->isThumbFunction()
? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
@@ -364,17 +363,17 @@ ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
if (FBB == 0) {
if (Cond.empty()) // Unconditional branch?
- BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
else
- BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
+ BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
return 1;
}
// Two-way conditional branch.
- BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
+ BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
- BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
return 2;
}
@@ -487,7 +486,7 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
// Basic size info comes from the TSFlags field.
const TargetInstrDesc &TID = MI->getDesc();
- unsigned TSFlags = TID.TSFlags;
+ uint64_t TSFlags = TID.TSFlags;
unsigned Opc = MI->getOpcode();
switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
@@ -524,11 +523,11 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
return 10;
case ARM::Int_eh_sjlj_setjmp:
case ARM::Int_eh_sjlj_setjmp_nofp:
- return 24;
+ return 20;
case ARM::tInt_eh_sjlj_setjmp:
case ARM::t2Int_eh_sjlj_setjmp:
case ARM::t2Int_eh_sjlj_setjmp_nofp:
- return 14;
+ return 12;
case ARM::BR_JTr:
case ARM::BR_JTm:
case ARM::BR_JTadd:
@@ -595,6 +594,7 @@ ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
return true;
}
case ARM::MOVr:
+ case ARM::MOVr_TC:
case ARM::tMOVr:
case ARM::tMOVgpr2tgpr:
case ARM::tMOVtgpr2gpr:
@@ -693,75 +693,44 @@ ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
return 0;
}
-bool
-ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
- // tGPR is used sometimes in ARM instructions that need to avoid using
- // certain registers. Just treat it as GPR here.
- if (DestRC == ARM::tGPRRegisterClass)
- DestRC = ARM::GPRRegisterClass;
- if (SrcRC == ARM::tGPRRegisterClass)
- SrcRC = ARM::GPRRegisterClass;
-
- // Allow DPR / DPR_VFP2 / DPR_8 cross-class copies.
- if (DestRC == ARM::DPR_8RegisterClass)
- DestRC = ARM::DPR_VFP2RegisterClass;
- if (SrcRC == ARM::DPR_8RegisterClass)
- SrcRC = ARM::DPR_VFP2RegisterClass;
-
- // Allow QPR / QPR_VFP2 / QPR_8 cross-class copies.
- if (DestRC == ARM::QPR_VFP2RegisterClass ||
- DestRC == ARM::QPR_8RegisterClass)
- DestRC = ARM::QPRRegisterClass;
- if (SrcRC == ARM::QPR_VFP2RegisterClass ||
- SrcRC == ARM::QPR_8RegisterClass)
- SrcRC = ARM::QPRRegisterClass;
-
- // Allow QQPR / QQPR_VFP2 cross-class copies.
- if (DestRC == ARM::QQPR_VFP2RegisterClass)
- DestRC = ARM::QQPRRegisterClass;
- if (SrcRC == ARM::QQPR_VFP2RegisterClass)
- SrcRC = ARM::QQPRRegisterClass;
-
- // Disallow copies of unequal sizes.
- if (DestRC != SrcRC && DestRC->getSize() != SrcRC->getSize())
- return false;
-
- if (DestRC == ARM::GPRRegisterClass) {
- if (SrcRC == ARM::SPRRegisterClass)
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVRS), DestReg)
- .addReg(SrcReg));
- else
- AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr),
- DestReg).addReg(SrcReg)));
- } else {
- unsigned Opc;
-
- if (DestRC == ARM::SPRRegisterClass)
- Opc = (SrcRC == ARM::GPRRegisterClass ? ARM::VMOVSR : ARM::VMOVS);
- else if (DestRC == ARM::DPRRegisterClass)
- Opc = ARM::VMOVD;
- else if (DestRC == ARM::DPR_VFP2RegisterClass ||
- SrcRC == ARM::DPR_VFP2RegisterClass)
- // Always use neon reg-reg move if source or dest is NEON-only regclass.
- Opc = ARM::VMOVDneon;
- else if (DestRC == ARM::QPRRegisterClass)
- Opc = ARM::VMOVQ;
- else if (DestRC == ARM::QQPRRegisterClass)
- Opc = ARM::VMOVQQ;
- else if (DestRC == ARM::QQQQPRRegisterClass)
- Opc = ARM::VMOVQQQQ;
- else
- return false;
-
- AddDefaultPred(BuildMI(MBB, I, DL, get(Opc), DestReg).addReg(SrcReg));
+void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ bool GPRDest = ARM::GPRRegClass.contains(DestReg);
+ bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
+
+ if (GPRDest && GPRSrc) {
+ AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))));
+ return;
}
- return true;
+ bool SPRDest = ARM::SPRRegClass.contains(DestReg);
+ bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
+
+ unsigned Opc;
+ if (SPRDest && SPRSrc)
+ Opc = ARM::VMOVS;
+ else if (GPRDest && SPRSrc)
+ Opc = ARM::VMOVRS;
+ else if (SPRDest && GPRSrc)
+ Opc = ARM::VMOVSR;
+ else if (ARM::DPRRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::VMOVD;
+ else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::VMOVQ;
+ else if (ARM::QQPRRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::VMOVQQ;
+ else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg))
+ Opc = ARM::VMOVQQQQ;
+ else
+ llvm_unreachable("Impossible reg-to-reg copy");
+
+ MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
+ MIB.addReg(SrcReg, getKillRegState(KillSrc));
+ if (Opc != ARM::VMOVQQ && Opc != ARM::VMOVQQQQ)
+ AddDefaultPred(MIB);
}
static const
@@ -795,30 +764,34 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
// tGPR is used sometimes in ARM instructions that need to avoid using
// certain registers. Just treat it as GPR here.
- if (RC == ARM::tGPRRegisterClass)
+ if (RC == ARM::tGPRRegisterClass || RC == ARM::tcGPRRegisterClass)
RC = ARM::GPRRegisterClass;
- if (RC == ARM::GPRRegisterClass) {
+ switch (RC->getID()) {
+ case ARM::GPRRegClassID:
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
- } else if (RC == ARM::SPRRegisterClass) {
+ break;
+ case ARM::SPRRegClassID:
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- } else if (RC == ARM::DPRRegisterClass ||
- RC == ARM::DPR_VFP2RegisterClass ||
- RC == ARM::DPR_8RegisterClass) {
+ break;
+ case ARM::DPRRegClassID:
+ case ARM::DPR_VFP2RegClassID:
+ case ARM::DPR_8RegClassID:
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
.addReg(SrcReg, getKillRegState(isKill))
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- } else if (RC == ARM::QPRRegisterClass ||
- RC == ARM::QPR_VFP2RegisterClass ||
- RC == ARM::QPR_8RegisterClass) {
+ break;
+ case ARM::QPRRegClassID:
+ case ARM::QPR_VFP2RegClassID:
+ case ARM::QPR_8RegClassID:
// FIXME: Neon instructions should support predicates
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q))
- .addFrameIndex(FI).addImm(128)
+ .addFrameIndex(FI).addImm(16)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO));
} else {
@@ -828,12 +801,14 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
.addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
.addMemOperand(MMO));
}
- } else if (RC == ARM::QQPRRegisterClass || RC == ARM::QQPR_VFP2RegisterClass){
+ break;
+ case ARM::QQPRRegClassID:
+ case ARM::QQPR_VFP2RegClassID:
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
// FIXME: It's possible to only store part of the QQ register if the
// spilled def has a sub-register index.
- MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VST2q32))
- .addFrameIndex(FI).addImm(128);
+ MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VST1d64Q))
+ .addFrameIndex(FI).addImm(16);
MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
@@ -850,8 +825,8 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
}
- } else {
- assert(RC == ARM::QQQQPRRegisterClass && "Unknown regclass!");
+ break;
+ case ARM::QQQQPRRegClassID: {
MachineInstrBuilder MIB =
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMD))
.addFrameIndex(FI)
@@ -865,6 +840,10 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
+ break;
+ }
+ default:
+ llvm_unreachable("Unknown regclass!");
}
}
@@ -886,26 +865,30 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
// tGPR is used sometimes in ARM instructions that need to avoid using
// certain registers. Just treat it as GPR here.
- if (RC == ARM::tGPRRegisterClass)
+ if (RC == ARM::tGPRRegisterClass || RC == ARM::tcGPRRegisterClass)
RC = ARM::GPRRegisterClass;
- if (RC == ARM::GPRRegisterClass) {
+ switch (RC->getID()) {
+ case ARM::GPRRegClassID:
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
- } else if (RC == ARM::SPRRegisterClass) {
+ break;
+ case ARM::SPRRegClassID:
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- } else if (RC == ARM::DPRRegisterClass ||
- RC == ARM::DPR_VFP2RegisterClass ||
- RC == ARM::DPR_8RegisterClass) {
+ break;
+ case ARM::DPRRegClassID:
+ case ARM::DPR_VFP2RegClassID:
+ case ARM::DPR_8RegClassID:
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
- } else if (RC == ARM::QPRRegisterClass ||
- RC == ARM::QPR_VFP2RegisterClass ||
- RC == ARM::QPR_8RegisterClass) {
+ break;
+ case ARM::QPRRegClassID:
+ case ARM::QPR_VFP2RegClassID:
+ case ARM::QPR_8RegClassID:
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q), DestReg)
- .addFrameIndex(FI).addImm(128)
+ .addFrameIndex(FI).addImm(16)
.addMemOperand(MMO));
} else {
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQ), DestReg)
@@ -913,14 +896,16 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
.addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
.addMemOperand(MMO));
}
- } else if (RC == ARM::QQPRRegisterClass || RC == ARM::QQPR_VFP2RegisterClass){
+ break;
+ case ARM::QQPRRegClassID:
+ case ARM::QQPR_VFP2RegClassID:
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
- MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLD2q32));
+ MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLD1d64Q));
MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
- AddDefaultPred(MIB.addFrameIndex(FI).addImm(128).addMemOperand(MMO));
+ AddDefaultPred(MIB.addFrameIndex(FI).addImm(16).addMemOperand(MMO));
} else {
MachineInstrBuilder MIB =
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMD))
@@ -932,21 +917,25 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
}
- } else {
- assert(RC == ARM::QQQQPRRegisterClass && "Unknown regclass!");
- MachineInstrBuilder MIB =
- AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMD))
- .addFrameIndex(FI)
- .addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4)))
- .addMemOperand(MMO);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::Define, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::Define, TRI);
- AddDReg(MIB, DestReg, ARM::dsub_7, RegState::Define, TRI);
+ break;
+ case ARM::QQQQPRRegClassID: {
+ MachineInstrBuilder MIB =
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMD))
+ .addFrameIndex(FI)
+ .addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4)))
+ .addMemOperand(MMO);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::Define, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::Define, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::Define, TRI);
+ AddDReg(MIB, DestReg, ARM::dsub_7, RegState::Define, TRI);
+ break;
+ }
+ default:
+ llvm_unreachable("Unknown regclass!");
}
}
@@ -960,223 +949,6 @@ ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
return &*MIB;
}
-MachineInstr *ARMBaseInstrInfo::
-foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops, int FI) const {
- if (Ops.size() != 1) return NULL;
-
- unsigned OpNum = Ops[0];
- unsigned Opc = MI->getOpcode();
- MachineInstr *NewMI = NULL;
- if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
- // If it is updating CPSR, then it cannot be folded.
- if (MI->getOperand(4).getReg() == ARM::CPSR && !MI->getOperand(4).isDead())
- return NULL;
- unsigned Pred = MI->getOperand(2).getImm();
- unsigned PredReg = MI->getOperand(3).getReg();
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned SrcSubReg = MI->getOperand(1).getSubReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- if (Opc == ARM::MOVr)
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
- .addReg(SrcReg,
- getKillRegState(isKill) | getUndefRegState(isUndef),
- SrcSubReg)
- .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
- else // ARM::t2MOVr
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
- .addReg(SrcReg,
- getKillRegState(isKill) | getUndefRegState(isUndef),
- SrcSubReg)
- .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned DstSubReg = MI->getOperand(0).getSubReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- if (Opc == ARM::MOVr)
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
- .addReg(DstReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef), DstSubReg)
- .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
- else // ARM::t2MOVr
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
- .addReg(DstReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef), DstSubReg)
- .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
- }
- } else if (Opc == ARM::tMOVgpr2gpr ||
- Opc == ARM::tMOVtgpr2gpr ||
- Opc == ARM::tMOVgpr2tgpr) {
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned SrcSubReg = MI->getOperand(1).getSubReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
- .addReg(SrcReg,
- getKillRegState(isKill) | getUndefRegState(isUndef),
- SrcSubReg)
- .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned DstSubReg = MI->getOperand(0).getSubReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
- .addReg(DstReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef),
- DstSubReg)
- .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
- }
- } else if (Opc == ARM::VMOVS) {
- unsigned Pred = MI->getOperand(2).getImm();
- unsigned PredReg = MI->getOperand(3).getReg();
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned SrcSubReg = MI->getOperand(1).getSubReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRS))
- .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef),
- SrcSubReg)
- .addFrameIndex(FI)
- .addImm(0).addImm(Pred).addReg(PredReg);
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned DstSubReg = MI->getOperand(0).getSubReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRS))
- .addReg(DstReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef),
- DstSubReg)
- .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
- }
- } else if (Opc == ARM::VMOVD || Opc == ARM::VMOVDneon) {
- unsigned Pred = MI->getOperand(2).getImm();
- unsigned PredReg = MI->getOperand(3).getReg();
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned SrcSubReg = MI->getOperand(1).getSubReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRD))
- .addReg(SrcReg,
- getKillRegState(isKill) | getUndefRegState(isUndef),
- SrcSubReg)
- .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned DstSubReg = MI->getOperand(0).getSubReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRD))
- .addReg(DstReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef),
- DstSubReg)
- .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
- }
- } else if (Opc == ARM::VMOVQ) {
- MachineFrameInfo &MFI = *MF.getFrameInfo();
- unsigned Pred = MI->getOperand(2).getImm();
- unsigned PredReg = MI->getOperand(3).getReg();
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- unsigned SrcSubReg = MI->getOperand(1).getSubReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- if (MFI.getObjectAlignment(FI) >= 16 &&
- getRegisterInfo().canRealignStack(MF)) {
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VST1q))
- .addFrameIndex(FI).addImm(128)
- .addReg(SrcReg,
- getKillRegState(isKill) | getUndefRegState(isUndef),
- SrcSubReg)
- .addImm(Pred).addReg(PredReg);
- } else {
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTMQ))
- .addReg(SrcReg,
- getKillRegState(isKill) | getUndefRegState(isUndef),
- SrcSubReg)
- .addFrameIndex(FI).addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
- .addImm(Pred).addReg(PredReg);
- }
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- unsigned DstSubReg = MI->getOperand(0).getSubReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- if (MFI.getObjectAlignment(FI) >= 16 &&
- getRegisterInfo().canRealignStack(MF)) {
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLD1q))
- .addReg(DstReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef),
- DstSubReg)
- .addFrameIndex(FI).addImm(128).addImm(Pred).addReg(PredReg);
- } else {
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDMQ))
- .addReg(DstReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef),
- DstSubReg)
- .addFrameIndex(FI).addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
- .addImm(Pred).addReg(PredReg);
- }
- }
- }
-
- return NewMI;
-}
-
-MachineInstr*
-ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- // FIXME
- return 0;
-}
-
-bool
-ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const {
- if (Ops.size() != 1) return false;
-
- unsigned Opc = MI->getOpcode();
- if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
- // If it is updating CPSR, then it cannot be folded.
- return MI->getOperand(4).getReg() != ARM::CPSR ||
- MI->getOperand(4).isDead();
- } else if (Opc == ARM::tMOVgpr2gpr ||
- Opc == ARM::tMOVtgpr2gpr ||
- Opc == ARM::tMOVgpr2tgpr) {
- return true;
- } else if (Opc == ARM::VMOVS || Opc == ARM::VMOVD ||
- Opc == ARM::VMOVDneon || Opc == ARM::VMOVQ) {
- return true;
- }
-
- // FIXME: VMOVQQ and VMOVQQQQ?
-
- return false;
-}
-
/// Create a copy of a const pool value. Update CPI to the new index and return
/// the label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
@@ -1211,17 +983,12 @@ reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const {
- if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
- DestReg = TRI->getSubReg(DestReg, SubIdx);
- SubIdx = 0;
- }
-
+ const TargetRegisterInfo &TRI) const {
unsigned Opcode = Orig->getOpcode();
switch (Opcode) {
default: {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
- MI->getOperand(0).setReg(DestReg);
+ MI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
MBB.insert(I, MI);
break;
}
@@ -1237,9 +1004,6 @@ reMaterialize(MachineBasicBlock &MBB,
break;
}
}
-
- MachineInstr *NewMI = prior(I);
- NewMI->getOperand(0).setSubReg(SubIdx);
}
MachineInstr *
@@ -1291,6 +1055,165 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
+/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
+/// determine if two loads are loading from the same base address. It should
+/// only return true if the base pointers are the same and the only difference
+/// between the two addresses is the offset. It also returns the offsets by
+/// reference.
+bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
+ int64_t &Offset1,
+ int64_t &Offset2) const {
+ // Don't worry about Thumb: just ARM and Thumb2.
+ if (Subtarget.isThumb1Only()) return false;
+
+ if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
+ return false;
+
+ switch (Load1->getMachineOpcode()) {
+ default:
+ return false;
+ case ARM::LDR:
+ case ARM::LDRB:
+ case ARM::LDRD:
+ case ARM::LDRH:
+ case ARM::LDRSB:
+ case ARM::LDRSH:
+ case ARM::VLDRD:
+ case ARM::VLDRS:
+ case ARM::t2LDRi8:
+ case ARM::t2LDRDi8:
+ case ARM::t2LDRSHi8:
+ case ARM::t2LDRi12:
+ case ARM::t2LDRSHi12:
+ break;
+ }
+
+ switch (Load2->getMachineOpcode()) {
+ default:
+ return false;
+ case ARM::LDR:
+ case ARM::LDRB:
+ case ARM::LDRD:
+ case ARM::LDRH:
+ case ARM::LDRSB:
+ case ARM::LDRSH:
+ case ARM::VLDRD:
+ case ARM::VLDRS:
+ case ARM::t2LDRi8:
+ case ARM::t2LDRDi8:
+ case ARM::t2LDRSHi8:
+ case ARM::t2LDRi12:
+ case ARM::t2LDRSHi12:
+ break;
+ }
+
+ // Check if base addresses and chain operands match.
+ if (Load1->getOperand(0) != Load2->getOperand(0) ||
+ Load1->getOperand(4) != Load2->getOperand(4))
+ return false;
+
+ // Index should be Reg0.
+ if (Load1->getOperand(3) != Load2->getOperand(3))
+ return false;
+
+ // Determine the offsets.
+ if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
+ isa<ConstantSDNode>(Load2->getOperand(1))) {
+ Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
+ Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
+ return true;
+ }
+
+ return false;
+}
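The rule above, restated as a minimal self-contained sketch; the Load record below is a hypothetical stand-in for the SDNode operands (base, optional constant offset, index register, chain):

#include <cstdint>
#include <optional>

// Hypothetical stand-in for the SDNode operands checked above.
struct Load {
  int Base;                       // operand 0: base pointer
  std::optional<int64_t> Offset;  // operand 1, when it is a constant
  int Index;                      // operand 3: index register (Reg0 if none)
  int Chain;                      // operand 4: chain
};

// Same base, index, and chain, and both offsets constant; the offsets
// are returned by reference, as in areLoadsFromSameBasePtr above.
static bool sameBasePtr(const Load &L1, const Load &L2,
                        int64_t &Off1, int64_t &Off2) {
  if (L1.Base != L2.Base || L1.Chain != L2.Chain || L1.Index != L2.Index)
    return false;
  if (!L1.Offset || !L2.Offset)
    return false;
  Off1 = *L1.Offset;
  Off2 = *L2.Offset;
  return true;
}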
+
+/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
+/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
+/// be scheduled together. On some targets, if two loads are loading from
+/// addresses in the same cache line, it's better if they are scheduled
+/// together. This function takes two integers that represent the load offsets
+/// from the common base address. It returns true if it decides it's desirable
+/// to schedule the two loads together. "NumLoads" is the number of loads that
+/// have already been scheduled after Load1.
+bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+ int64_t Offset1, int64_t Offset2,
+ unsigned NumLoads) const {
+ // Don't worry about Thumb: just ARM and Thumb2.
+ if (Subtarget.isThumb1Only()) return false;
+
+ assert(Offset2 > Offset1);
+
+ if ((Offset2 - Offset1) / 8 > 64)
+ return false;
+
+ if (Load1->getMachineOpcode() != Load2->getMachineOpcode())
+ return false; // FIXME: overly conservative?
+
+ // Four loads in a row should be sufficient.
+ if (NumLoads >= 3)
+ return false;
+
+ return true;
+}
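The same clustering policy as a standalone sketch, assuming the caller has already normalized the offsets so that Offset2 > Offset1:

#include <cassert>
#include <cstdint>

// Mirrors the heuristic above: reject offsets too far apart, require
// matching opcodes, and allow at most four loads in a cluster.
static bool shouldCluster(unsigned Opc1, unsigned Opc2,
                          int64_t Offset1, int64_t Offset2,
                          unsigned NumLoads) {
  assert(Offset2 > Offset1 && "offsets must be pre-sorted");
  if ((Offset2 - Offset1) / 8 > 64)
    return false;
  if (Opc1 != Opc2)
    return false;          // conservative, as the FIXME above notes
  return NumLoads < 3;     // Load1 plus at most three already scheduled
}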
+
+bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const {
+ // Debug info is never a scheduling boundary. It's necessary to be explicit
+ // due to the special treatment of IT instructions below, otherwise a
+ // dbg_value followed by an IT will result in the IT instruction being
+ // considered a scheduling hazard, which is wrong. It should be the actual
+ // instruction preceding the dbg_value instruction(s), just like it is
+ // when debug info is not present.
+ if (MI->isDebugValue())
+ return false;
+
+ // Terminators and labels can't be scheduled around.
+ if (MI->getDesc().isTerminator() || MI->isLabel())
+ return true;
+
+ // Treat the start of the IT block as a scheduling boundary, but schedule
+ // t2IT along with all instructions following it.
+ // FIXME: This is a big hammer. But the alternative is to add all potential
+ // true and anti dependencies to IT block instructions as implicit operands
+ // to the t2IT instruction. The added compile time and complexity do not
+ // seem worth it.
+ MachineBasicBlock::const_iterator I = MI;
+ // Make sure to skip any dbg_value instructions.
+ while (++I != MBB->end() && I->isDebugValue())
+ ;
+ if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
+ return true;
+
+ // Don't attempt to schedule around any instruction that defines
+ // a stack-oriented pointer, as it's unlikely to be profitable. This
+ // saves compile time, because it doesn't require every single
+ // stack slot reference to depend on the instruction that does the
+ // modification.
+ if (MI->definesRegister(ARM::SP))
+ return true;
+
+ return false;
+}
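The dbg_value-skipping idiom above, reduced to a sketch over a plain instruction vector (OpT2IT is a hypothetical stand-in for ARM::t2IT):

#include <cstddef>
#include <vector>

enum Opcode { OpOther, OpT2IT };
struct Instr { bool IsDebugValue; Opcode Op; };

// True if the next non-debug instruction after position I is a t2IT;
// the hook above then reports the instruction at I as a boundary.
static bool nextRealInstrIsIT(const std::vector<Instr> &MBB, size_t I) {
  while (++I != MBB.size() && MBB[I].IsDebugValue)
    ;
  return I != MBB.size() && MBB[I].Op == OpT2IT;
}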
+
+bool ARMBaseInstrInfo::
+isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
+ if (!NumInstrs)
+ return false;
+ if (Subtarget.getCPUString() == "generic")
+ // Generic (and overly aggressive) if-conversion limits for testing.
+ return NumInstrs <= 10;
+ else if (Subtarget.hasV7Ops())
+ return NumInstrs <= 3;
+ return NumInstrs <= 2;
+}
+
+bool ARMBaseInstrInfo::
+isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
+ MachineBasicBlock &FMBB, unsigned NumF) const {
+ return NumT && NumF && NumT <= 2 && NumF <= 2;
+}
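The single-block limits above, collected into one sketch (the two flags are assumptions standing in for the Subtarget queries):

// 10 instructions for the permissive "generic" testing configuration,
// 3 for v7 cores, 2 otherwise; empty blocks are never profitable.
static bool profitableToIfCvt(bool IsGenericCPU, bool HasV7Ops,
                              unsigned NumInstrs) {
  if (!NumInstrs)
    return false;
  if (IsGenericCPU)
    return NumInstrs <= 10;
  if (HasV7Ops)
    return NumInstrs <= 3;
  return NumInstrs <= 2;
}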
+
/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index b566271..89a2db7 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -116,11 +116,25 @@ namespace ARMII {
// Thumb format
ThumbFrm = 24 << FormShift,
- // NEON format
- NEONFrm = 25 << FormShift,
- NEONGetLnFrm = 26 << FormShift,
- NEONSetLnFrm = 27 << FormShift,
- NEONDupFrm = 28 << FormShift,
+ // Miscellaneous format
+ MiscFrm = 25 << FormShift,
+
+ // NEON formats
+ NGetLnFrm = 26 << FormShift,
+ NSetLnFrm = 27 << FormShift,
+ NDupFrm = 28 << FormShift,
+ NLdStFrm = 29 << FormShift,
+ N1RegModImmFrm= 30 << FormShift,
+ N2RegFrm = 31 << FormShift,
+ NVCVTFrm = 32 << FormShift,
+ NVDupLnFrm = 33 << FormShift,
+ N2RegVShLFrm = 34 << FormShift,
+ N2RegVShRFrm = 35 << FormShift,
+ N3RegFrm = 36 << FormShift,
+ N3RegVShFrm = 37 << FormShift,
+ NVExtFrm = 38 << FormShift,
+ NVMulSLFrm = 39 << FormShift,
+ NVTBLFrm = 40 << FormShift,
//===------------------------------------------------------------------===//
// Misc flags.
@@ -213,7 +227,8 @@ public:
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
@@ -258,12 +273,10 @@ public:
virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -283,29 +296,51 @@ public:
const MDNode *MDPtr,
DebugLoc DL) const;
- virtual bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const;
-
virtual void reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo &TRI) const;
MachineInstr *duplicate(MachineInstr *Orig, MachineFunction &MF) const;
virtual bool produceSameValue(const MachineInstr *MI0,
const MachineInstr *MI1) const;
+
+ /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
+ /// determine if two loads are loading from the same base address. It should
+ /// only return true if the base pointers are the same and the only
+ /// difference between the two addresses is the offset. It also returns the
+ /// offsets by reference.
+ virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
+ int64_t &Offset1, int64_t &Offset2)const;
+
+ /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
+ /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
+ /// be scheduled together. On some targets, if two loads are loading from
+ /// addresses in the same cache line, it's better if they are scheduled
+ /// together. This function takes two integers that represent the load offsets
+ /// from the common base address. It returns true if it decides it's desirable
+ /// to schedule the two loads together. "NumLoads" is the number of loads that
+ /// have already been scheduled after Load1.
+ virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+ int64_t Offset1, int64_t Offset2,
+ unsigned NumLoads) const;
+
+ virtual bool isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const;
+
+ virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB,
+ unsigned NumInstrs) const;
+
+ virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
+ MachineBasicBlock &FMBB, unsigned NumF) const;
+
+ virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
+ unsigned NumInstrs) const {
+ return NumInstrs && NumInstrs == 1;
+ }
};
static inline
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 82458d2..182bd99 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -170,56 +170,6 @@ ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
}
-const TargetRegisterClass* const *
-ARMBaseRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- static const TargetRegisterClass * const CalleeSavedRegClasses[] = {
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
-
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- 0
- };
-
- static const TargetRegisterClass * const ThumbCalleeSavedRegClasses[] = {
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::tGPRRegClass,
- &ARM::tGPRRegClass,&ARM::tGPRRegClass,&ARM::tGPRRegClass,
-
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- 0
- };
-
- static const TargetRegisterClass * const DarwinCalleeSavedRegClasses[] = {
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
- &ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
- &ARM::GPRRegClass, &ARM::GPRRegClass,
-
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- 0
- };
-
- static const TargetRegisterClass * const DarwinThumbCalleeSavedRegClasses[] ={
- &ARM::GPRRegClass, &ARM::tGPRRegClass, &ARM::tGPRRegClass,
- &ARM::tGPRRegClass, &ARM::tGPRRegClass, &ARM::GPRRegClass,
- &ARM::GPRRegClass, &ARM::GPRRegClass,
-
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
- 0
- };
-
- if (STI.isThumb1Only()) {
- return STI.isTargetDarwin()
- ? DarwinThumbCalleeSavedRegClasses : ThumbCalleeSavedRegClasses;
- }
- return STI.isTargetDarwin()
- ? DarwinCalleeSavedRegClasses : CalleeSavedRegClasses;
-}
-
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
// FIXME: avoid re-calculating this everytime.
@@ -352,7 +302,7 @@ ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
}
bool
-ARMBaseRegisterInfo::canCombinedSubRegIndex(const TargetRegisterClass *RC,
+ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
SmallVectorImpl<unsigned> &SubIndices,
unsigned &NewSubIdx) const {
@@ -724,6 +674,15 @@ ARMBaseRegisterInfo::estimateRSStackSizeLimit(MachineFunction &MF) const {
I != E; ++I) {
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
if (!I->getOperand(i).isFI()) continue;
+
+ // When using ADDri to get the address of a stack object, 255 is the
+ // largest offset guaranteed to fit in the immediate offset.
+ if (I->getOpcode() == ARM::ADDri) {
+ Limit = std::min(Limit, (1U << 8) - 1);
+ break;
+ }
+
+ // Otherwise check the addressing mode.
switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
case ARMII::AddrMode3:
case ARMII::AddrModeT2_i8:
@@ -765,6 +724,7 @@ ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
SmallVector<unsigned, 4> UnspilledCS1GPRs;
SmallVector<unsigned, 4> UnspilledCS2GPRs;
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
// Spill R4 if Thumb2 function requires stack realignment - it will be used as
// scratch register.
@@ -780,7 +740,6 @@ ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// Don't spill FP if the frame can be eliminated. This is determined
// by scanning the callee-save registers to see if any is used.
const unsigned *CSRegs = getCalleeSavedRegs();
- const TargetRegisterClass* const *CSRegClasses = getCalleeSavedRegClasses();
for (unsigned i = 0; CSRegs[i]; ++i) {
unsigned Reg = CSRegs[i];
bool Spilled = false;
@@ -798,50 +757,50 @@ ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
}
}
- if (CSRegClasses[i] == ARM::GPRRegisterClass ||
- CSRegClasses[i] == ARM::tGPRRegisterClass) {
- if (Spilled) {
- NumGPRSpills++;
+ if (!ARM::GPRRegisterClass->contains(Reg))
+ continue;
- if (!STI.isTargetDarwin()) {
- if (Reg == ARM::LR)
- LRSpilled = true;
- CS1Spilled = true;
- continue;
- }
+ if (Spilled) {
+ NumGPRSpills++;
- // Keep track if LR and any of R4, R5, R6, and R7 is spilled.
- switch (Reg) {
- case ARM::LR:
+ if (!STI.isTargetDarwin()) {
+ if (Reg == ARM::LR)
LRSpilled = true;
- // Fallthrough
- case ARM::R4:
- case ARM::R5:
- case ARM::R6:
- case ARM::R7:
- CS1Spilled = true;
- break;
- default:
- break;
- }
- } else {
- if (!STI.isTargetDarwin()) {
- UnspilledCS1GPRs.push_back(Reg);
- continue;
- }
+ CS1Spilled = true;
+ continue;
+ }
- switch (Reg) {
- case ARM::R4:
- case ARM::R5:
- case ARM::R6:
- case ARM::R7:
- case ARM::LR:
- UnspilledCS1GPRs.push_back(Reg);
- break;
- default:
- UnspilledCS2GPRs.push_back(Reg);
- break;
- }
+ // Keep track of whether LR and any of R4, R5, R6, and R7 are spilled.
+ switch (Reg) {
+ case ARM::LR:
+ LRSpilled = true;
+ // Fallthrough
+ case ARM::R4:
+ case ARM::R5:
+ case ARM::R6:
+ case ARM::R7:
+ CS1Spilled = true;
+ break;
+ default:
+ break;
+ }
+ } else {
+ if (!STI.isTargetDarwin()) {
+ UnspilledCS1GPRs.push_back(Reg);
+ continue;
+ }
+
+ switch (Reg) {
+ case ARM::R4:
+ case ARM::R5:
+ case ARM::R6:
+ case ARM::R7:
+ case ARM::LR:
+ UnspilledCS1GPRs.push_back(Reg);
+ break;
+ default:
+ UnspilledCS2GPRs.push_back(Reg);
+ break;
}
}
}
@@ -862,9 +821,16 @@ ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// offset, make sure a register (or a spill slot) is available for the
// register scavenger. Note that if we're indexing off the frame pointer, the
// effective stack size is 4 bytes larger since the FP points to the stack
- // slot of the previous FP.
- bool BigStack = RS &&
- estimateStackSize(MF) + (hasFP(MF) ? 4 : 0) >= estimateRSStackSizeLimit(MF);
+ // slot of the previous FP. Also, if we have variable sized objects in the
+ // function, stack slot references will often be negative, and some of
+ // our instructions are positive-offset only, so conservatively consider
+ // that case to want a spill slot (or register) as well.
+ // FIXME: We could add logic to be more precise about negative offsets
+ // and which instructions will need a scratch register for them. Is it
+ // worth the effort and added fragility?
+ bool BigStack =
+ (RS && (estimateStackSize(MF) + (hasFP(MF) ? 4:0) >=
+ estimateRSStackSizeLimit(MF))) || MFI->hasVarSizedObjects();
bool ExtraCSSpill = false;
if (BigStack || !CanEliminateFrame || cannotEliminateFrame(MF)) {
@@ -957,7 +923,6 @@ ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// note: Thumb1 functions spill to R12, not the stack. Reserve a slot
// closest to SP or frame pointer.
const TargetRegisterClass *RC = ARM::GPRRegisterClass;
- MachineFrameInfo *MFI = MF.getFrameInfo();
RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
RC->getAlignment(),
false));
@@ -1622,6 +1587,7 @@ emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = prior(MBB.end());
assert(MBBI->getDesc().isReturn() &&
"Can only insert epilog into returning blocks");
+ unsigned RetOpcode = MBBI->getOpcode();
DebugLoc dl = MBBI->getDebugLoc();
MachineFrameInfo *MFI = MF.getFrameInfo();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -1696,6 +1662,39 @@ emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea1Size());
}
+ if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND ||
+ RetOpcode == ARM::TCRETURNri || RetOpcode == ARM::TCRETURNriND) {
+ // Tail call return: adjust the stack pointer and jump to callee.
+ MBBI = prior(MBB.end());
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+
+ // Jump to label or value in register.
+ if (RetOpcode == ARM::TCRETURNdi) {
+ BuildMI(MBB, MBBI, dl,
+ TII.get(STI.isThumb() ? ARM::TAILJMPdt : ARM::TAILJMPd)).
+ addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
+ JumpTarget.getTargetFlags());
+ } else if (RetOpcode == ARM::TCRETURNdiND) {
+ BuildMI(MBB, MBBI, dl,
+ TII.get(STI.isThumb() ? ARM::TAILJMPdNDt : ARM::TAILJMPdND)).
+ addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
+ JumpTarget.getTargetFlags());
+ } else if (RetOpcode == ARM::TCRETURNri) {
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPr)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
+ } else if (RetOpcode == ARM::TCRETURNriND) {
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPrND)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
+ }
+
+ MachineInstr *NewMI = prior(MBBI);
+ for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
+ NewMI->addOperand(MBBI->getOperand(i));
+
+ // Delete the pseudo instruction TCRETURN.
+ MBB.erase(MBBI);
+ }
+
if (VARegSaveSize)
emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize);
}
diff --git a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
index 2c9c82d..f7ee0d5 100644
--- a/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
@@ -69,9 +69,6 @@ public:
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
- const TargetRegisterClass* const*
- getCalleeSavedRegClasses(const MachineFunction *MF = 0) const;
-
BitVector getReservedRegs(const MachineFunction &MF) const;
/// getMatchingSuperRegClass - Return a subclass of the specified register
@@ -81,14 +78,15 @@ public:
getMatchingSuperRegClass(const TargetRegisterClass *A,
const TargetRegisterClass *B, unsigned Idx) const;
- /// canCombinedSubRegIndex - Given a register class and a list of sub-register
- /// indices, return true if it's possible to combine the sub-register indices
- /// into one that corresponds to a larger sub-register. Return the new sub-
- /// register index by reference. Note the new index by be zero if the given
- /// sub-registers combined to form the whole register.
- virtual bool canCombinedSubRegIndex(const TargetRegisterClass *RC,
- SmallVectorImpl<unsigned> &SubIndices,
- unsigned &NewSubIdx) const;
+ /// canCombineSubRegIndices - Given a register class and a list of
+ /// subregister indices, return true if it's possible to combine the
+ /// subregister indices into one that corresponds to a larger
+ /// subregister. Return the new subregister index by reference. Note the
+ /// new index may be zero if the given subregisters can be combined to
+ /// form the whole register.
+ virtual bool canCombineSubRegIndices(const TargetRegisterClass *RC,
+ SmallVectorImpl<unsigned> &SubIndices,
+ unsigned &NewSubIdx) const;
const TargetRegisterClass *getPointerRegClass(unsigned Kind = 0) const;
@@ -150,8 +148,8 @@ public:
virtual bool canSimplifyCallFramePseudos(MachineFunction &MF) const;
virtual void eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I) const;
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const;
virtual unsigned eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, FrameIndexValue *Value = NULL,
diff --git a/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp b/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
index f2730fc..7895cb0 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMCodeEmitter.cpp
@@ -55,6 +55,7 @@ namespace {
const std::vector<MachineConstantPoolEntry> *MCPEs;
const std::vector<MachineJumpTableEntry> *MJTEs;
bool IsPIC;
+ bool IsThumb;
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<MachineModuleInfo>();
@@ -67,8 +68,8 @@ namespace {
: MachineFunctionPass(&ID), JTI(0),
II((const ARMInstrInfo *)tm.getInstrInfo()),
TD(tm.getTargetData()), TM(tm),
- MCE(mce), MCPEs(0), MJTEs(0),
- IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
+ MCE(mce), MCPEs(0), MJTEs(0),
+ IsPIC(TM.getRelocationModel() == Reloc::PIC_), IsThumb(false) {}
/// getBinaryCodeForInstr - This function, generated by the
/// CodeEmitterGenerator using TableGen, produces the binary encoding for
@@ -139,6 +140,12 @@ namespace {
void emitMiscInstruction(const MachineInstr &MI);
+ void emitNEONLaneInstruction(const MachineInstr &MI);
+ void emitNEONDupInstruction(const MachineInstr &MI);
+ void emitNEON1RegModImmInstruction(const MachineInstr &MI);
+ void emitNEON2RegInstruction(const MachineInstr &MI);
+ void emitNEON3RegInstruction(const MachineInstr &MI);
+
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned getMachineOpValue(const MachineInstr &MI,const MachineOperand &MO);
@@ -147,7 +154,8 @@ namespace {
}
/// getMovi32Value - Return binary encoding of operand for movw/movt. If the
- /// machine operand requires relocation, record the relocation and return zero.
+ /// machine operand requires relocation, record the relocation and return
+ /// zero.
unsigned getMovi32Value(const MachineInstr &MI,const MachineOperand &MO,
unsigned Reloc);
unsigned getMovi32Value(const MachineInstr &MI, unsigned OpIdx,
@@ -193,6 +201,7 @@ bool ARMCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
MJTEs = 0;
if (MF.getJumpTableInfo()) MJTEs = &MF.getJumpTableInfo()->getJumpTables();
IsPIC = TM.getRelocationModel() == Reloc::PIC_;
+ IsThumb = MF.getInfo<ARMFunctionInfo>()->isThumbFunction();
JTI->Initialize(MF, IsPIC);
MMI = &getAnalysis<MachineModuleInfo>();
MCE.setModuleInfo(MMI);
@@ -347,7 +356,7 @@ void ARMCodeEmitter::emitInstruction(const MachineInstr &MI) {
MCE.processDebugLoc(MI.getDebugLoc(), true);
- NumEmitted++; // Keep track of the # of mi's emitted
+ ++NumEmitted; // Keep track of the # of mi's emitted
switch (MI.getDesc().TSFlags & ARMII::FormMask) {
default: {
llvm_unreachable("Unhandled instruction encoding format!");
@@ -407,6 +416,23 @@ void ARMCodeEmitter::emitInstruction(const MachineInstr &MI) {
case ARMII::VFPMiscFrm:
emitMiscInstruction(MI);
break;
+ // NEON instructions.
+ case ARMII::NGetLnFrm:
+ case ARMII::NSetLnFrm:
+ emitNEONLaneInstruction(MI);
+ break;
+ case ARMII::NDupFrm:
+ emitNEONDupInstruction(MI);
+ break;
+ case ARMII::N1RegModImmFrm:
+ emitNEON1RegModImmInstruction(MI);
+ break;
+ case ARMII::N2RegFrm:
+ emitNEON2RegInstruction(MI);
+ break;
+ case ARMII::N3RegFrm:
+ emitNEON3RegInstruction(MI);
+ break;
}
MCE.processDebugLoc(MI.getDebugLoc(), false);
}
@@ -1539,4 +1565,144 @@ void ARMCodeEmitter::emitMiscInstruction(const MachineInstr &MI) {
emitWordLE(Binary);
}
+static unsigned encodeNEONRd(const MachineInstr &MI, unsigned OpIdx) {
+ unsigned RegD = MI.getOperand(OpIdx).getReg();
+ unsigned Binary = 0;
+ RegD = ARMRegisterInfo::getRegisterNumbering(RegD);
+ Binary |= (RegD & 0xf) << ARMII::RegRdShift;
+ Binary |= ((RegD >> 4) & 1) << ARMII::D_BitShift;
+ return Binary;
+}
+
+static unsigned encodeNEONRn(const MachineInstr &MI, unsigned OpIdx) {
+ unsigned RegN = MI.getOperand(OpIdx).getReg();
+ unsigned Binary = 0;
+ RegN = ARMRegisterInfo::getRegisterNumbering(RegN);
+ Binary |= (RegN & 0xf) << ARMII::RegRnShift;
+ Binary |= ((RegN >> 4) & 1) << ARMII::N_BitShift;
+ return Binary;
+}
+
+static unsigned encodeNEONRm(const MachineInstr &MI, unsigned OpIdx) {
+ unsigned RegM = MI.getOperand(OpIdx).getReg();
+ unsigned Binary = 0;
+ RegM = ARMRegisterInfo::getRegisterNumbering(RegM);
+ Binary |= (RegM & 0xf);
+ Binary |= ((RegM >> 4) & 1) << ARMII::M_BitShift;
+ return Binary;
+}
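All three encoders split a 5-bit NEON register number the same way: the low four bits go into the Rd/Rn/Rm field and the fifth bit into the separate D/N/M bit. A sketch for the Rd case, with the shift amounts written out as assumptions (the real values come from ARMII):

#include <cassert>
#include <cstdint>

enum { kRegRdShift = 12, kDBitShift = 22 };  // assumed field positions

// Split a 5-bit register number (D0..D31) into Rd[3:0] plus the D bit.
static uint32_t encodeRd(unsigned RegNum) {
  assert(RegNum < 32 && "NEON D register numbers are 5 bits");
  uint32_t Binary = 0;
  Binary |= (RegNum & 0xf) << kRegRdShift;      // low four bits -> Rd
  Binary |= ((RegNum >> 4) & 1) << kDBitShift;  // high bit -> D
  return Binary;
}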
+
+/// convertNEONDataProcToThumb - Convert the ARM mode encoding for a NEON
+/// data-processing instruction to the corresponding Thumb encoding.
+static unsigned convertNEONDataProcToThumb(unsigned Binary) {
+ assert((Binary & 0xfe000000) == 0xf2000000 &&
+ "not an ARM NEON data-processing instruction");
+ unsigned UBit = (Binary >> 24) & 1;
+ return 0xef000000 | (UBit << 28) | (Binary & 0xffffff);
+}
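A worked example of that transform: the low 24 bits are kept, the U bit moves from bit 24 to bit 28, and the 0xf2/0xf3 ARM prefix becomes 0xef/0xff.

#include <cassert>
#include <cstdint>
#include <cstdio>

static uint32_t toThumbNEON(uint32_t Binary) {
  assert((Binary & 0xfe000000) == 0xf2000000 && "not ARM NEON data-proc");
  uint32_t UBit = (Binary >> 24) & 1;
  return 0xef000000 | (UBit << 28) | (Binary & 0xffffff);
}

int main() {
  printf("%08x\n", toThumbNEON(0xf2000000u));  // prints ef000000 (U = 0)
  printf("%08x\n", toThumbNEON(0xf3000000u));  // prints ff000000 (U = 1)
  return 0;
}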
+
+void ARMCodeEmitter::emitNEONLaneInstruction(const MachineInstr &MI) {
+ unsigned Binary = getBinaryCodeForInstr(MI);
+
+ unsigned RegTOpIdx, RegNOpIdx, LnOpIdx;
+ const TargetInstrDesc &TID = MI.getDesc();
+ if ((TID.TSFlags & ARMII::FormMask) == ARMII::NGetLnFrm) {
+ RegTOpIdx = 0;
+ RegNOpIdx = 1;
+ LnOpIdx = 2;
+ } else { // ARMII::NSetLnFrm
+ RegTOpIdx = 2;
+ RegNOpIdx = 0;
+ LnOpIdx = 3;
+ }
+
+ // Set the conditional execution predicate
+ Binary |= (IsThumb ? ARMCC::AL : II->getPredicate(&MI)) << ARMII::CondShift;
+
+ unsigned RegT = MI.getOperand(RegTOpIdx).getReg();
+ RegT = ARMRegisterInfo::getRegisterNumbering(RegT);
+ Binary |= (RegT << ARMII::RegRdShift);
+ Binary |= encodeNEONRn(MI, RegNOpIdx);
+
+ unsigned LaneShift;
+ if ((Binary & (1 << 22)) != 0)
+ LaneShift = 0; // 8-bit elements
+ else if ((Binary & (1 << 5)) != 0)
+ LaneShift = 1; // 16-bit elements
+ else
+ LaneShift = 2; // 32-bit elements
+
+ unsigned Lane = MI.getOperand(LnOpIdx).getImm() << LaneShift;
+ unsigned Opc1 = Lane >> 2;
+ unsigned Opc2 = Lane & 3;
+ assert((Opc1 & 3) == 0 && "out-of-range lane number operand");
+ Binary |= (Opc1 << 21);
+ Binary |= (Opc2 << 5);
+
+ emitWordLE(Binary);
+}
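The lane-packing arithmetic above, isolated as a sketch with the element size passed explicitly instead of being decoded from the opcode bits:

#include <cstdint>

// Scale the lane index by the element size, then split it across the
// opc1 (bit 21) and opc2 (bit 5) fields as the emitter above does.
static uint32_t packLane(unsigned LaneIdx, unsigned ElemBits) {
  unsigned LaneShift = (ElemBits == 8) ? 0 : (ElemBits == 16) ? 1 : 2;
  unsigned Lane = LaneIdx << LaneShift;
  unsigned Opc1 = Lane >> 2;
  unsigned Opc2 = Lane & 3;
  return (Opc1 << 21) | (Opc2 << 5);
}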
+
+void ARMCodeEmitter::emitNEONDupInstruction(const MachineInstr &MI) {
+ unsigned Binary = getBinaryCodeForInstr(MI);
+
+ // Set the conditional execution predicate
+ Binary |= (IsThumb ? ARMCC::AL : II->getPredicate(&MI)) << ARMII::CondShift;
+
+ unsigned RegT = MI.getOperand(1).getReg();
+ RegT = ARMRegisterInfo::getRegisterNumbering(RegT);
+ Binary |= (RegT << ARMII::RegRdShift);
+ Binary |= encodeNEONRn(MI, 0);
+ emitWordLE(Binary);
+}
+
+void ARMCodeEmitter::emitNEON1RegModImmInstruction(const MachineInstr &MI) {
+ unsigned Binary = getBinaryCodeForInstr(MI);
+ // Destination register is encoded in Dd.
+ Binary |= encodeNEONRd(MI, 0);
+ // Immediate fields: Op, Cmode, I, Imm3, Imm4
+ unsigned Imm = MI.getOperand(1).getImm();
+ unsigned Op = (Imm >> 12) & 1;
+ unsigned Cmode = (Imm >> 8) & 0xf;
+ unsigned I = (Imm >> 7) & 1;
+ unsigned Imm3 = (Imm >> 4) & 0x7;
+ unsigned Imm4 = Imm & 0xf;
+ Binary |= (I << 24) | (Imm3 << 16) | (Cmode << 8) | (Op << 5) | Imm4;
+ if (IsThumb)
+ Binary = convertNEONDataProcToThumb(Binary);
+ emitWordLE(Binary);
+}
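The operand here packs five encoding fields into 13 bits; the unpack-and-place step in isolation:

#include <cstdint>

// Split the packed Op:Cmode:I:Imm3:Imm4 immediate and place each field
// at its position in the instruction word, exactly as above.
static uint32_t placeModImm(unsigned Imm) {
  unsigned Op    = (Imm >> 12) & 1;
  unsigned Cmode = (Imm >> 8) & 0xf;
  unsigned I     = (Imm >> 7) & 1;
  unsigned Imm3  = (Imm >> 4) & 0x7;
  unsigned Imm4  = Imm & 0xf;
  return (I << 24) | (Imm3 << 16) | (Cmode << 8) | (Op << 5) | Imm4;
}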
+
+void ARMCodeEmitter::emitNEON2RegInstruction(const MachineInstr &MI) {
+ const TargetInstrDesc &TID = MI.getDesc();
+ unsigned Binary = getBinaryCodeForInstr(MI);
+ // Destination register is encoded in Dd; source register in Dm.
+ unsigned OpIdx = 0;
+ Binary |= encodeNEONRd(MI, OpIdx++);
+ if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+ ++OpIdx;
+ Binary |= encodeNEONRm(MI, OpIdx);
+ if (IsThumb)
+ Binary = convertNEONDataProcToThumb(Binary);
+ // FIXME: This does not handle VDUPfdf or VDUPfqf.
+ emitWordLE(Binary);
+}
+
+void ARMCodeEmitter::emitNEON3RegInstruction(const MachineInstr &MI) {
+ const TargetInstrDesc &TID = MI.getDesc();
+ unsigned Binary = getBinaryCodeForInstr(MI);
+ // Destination register is encoded in Dd; source registers in Dn and Dm.
+ unsigned OpIdx = 0;
+ Binary |= encodeNEONRd(MI, OpIdx++);
+ if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+ ++OpIdx;
+ Binary |= encodeNEONRn(MI, OpIdx++);
+ if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+ ++OpIdx;
+ Binary |= encodeNEONRm(MI, OpIdx);
+ if (IsThumb)
+ Binary = convertNEONDataProcToThumb(Binary);
+ // FIXME: This does not handle VMOVDneon or VMOVQ.
+ emitWordLE(Binary);
+}
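Both register-form emitters step past operands tied to an earlier def (the TIED_TO constraint) before reading the next source register. As a sketch over a hypothetical operand table:

#include <vector>

// Hypothetical operand descriptor: TiedTo >= 0 means this operand is
// constrained to use the same register as an earlier def.
struct OperandDesc { int TiedTo; };

// Advance past tied operands, mirroring the
// getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 checks above.
static unsigned skipTied(const std::vector<OperandDesc> &Ops, unsigned OpIdx) {
  while (OpIdx < Ops.size() && Ops[OpIdx].TiedTo != -1)
    ++OpIdx;
  return OpIdx;
}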
+
#include "ARMGenCodeEmitter.inc"
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index 13d8b74..65a3da6 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -337,7 +337,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
if (CPChange && ++NoCPIters > 30)
llvm_unreachable("Constant Island pass failed to converge!");
DEBUG(dumpBBs());
-
+
// Clear NewWaterList now. If we split a block for branches, it should
// appear as "new water" for the next iteration of constant pool placement.
NewWaterList.clear();
@@ -361,8 +361,8 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &MF) {
// After a while, this might be made debug-only, but it is not expensive.
verify(MF);
- // If LR has been forced spilled and no far jumps (i.e. BL) has been issued.
- // Undo the spill / restore of LR if possible.
+ // If LR has been forced spilled and no far jump (i.e. BL) has been issued,
+ // undo the spill / restore of LR if possible.
if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump())
MadeChange |= UndoLRSpillRestore();
@@ -407,7 +407,7 @@ void ARMConstantIslands::DoInitialPlacement(MachineFunction &MF,
std::vector<CPEntry> CPEs;
CPEs.push_back(CPEntry(CPEMI, i));
CPEntries.push_back(CPEs);
- NumCPEs++;
+ ++NumCPEs;
DEBUG(errs() << "Moved CPI#" << i << " to end of function as #" << i
<< "\n");
}
@@ -418,7 +418,8 @@ void ARMConstantIslands::DoInitialPlacement(MachineFunction &MF,
static bool BBHasFallthrough(MachineBasicBlock *MBB) {
// Get the next machine basic block in the function.
MachineFunction::iterator MBBI = MBB;
- if (llvm::next(MBBI) == MBB->getParent()->end()) // Can't fall off end of function.
+ // Can't fall off end of function.
+ if (llvm::next(MBBI) == MBB->getParent()->end())
return false;
MachineBasicBlock *NextBB = llvm::next(MBBI);
@@ -491,6 +492,8 @@ void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
unsigned MBBSize = 0;
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E; ++I) {
+ if (I->isDebugValue())
+ continue;
// Add instruction size to MBBSize.
MBBSize += TII->GetInstSizeInBytes(I);
@@ -722,7 +725,7 @@ MachineBasicBlock *ARMConstantIslands::SplitBlockBeforeInstr(MachineInstr *MI) {
// correspond to anything in the source.
unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
- NumSplit++;
+ ++NumSplit;
// Update the CFG. All succs of OrigBB are now succs of NewBB.
while (!OrigBB->succ_empty()) {
@@ -945,7 +948,7 @@ bool ARMConstantIslands::DecrementOldEntry(unsigned CPI, MachineInstr *CPEMI) {
if (--CPE->RefCount == 0) {
RemoveDeadCPEMI(CPEMI);
CPE->CPEMI = NULL;
- NumCPEs--;
+ --NumCPEs;
return true;
}
return false;
@@ -1246,7 +1249,7 @@ bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &MF,
U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
.addImm(ID).addConstantPoolIndex(CPI).addImm(Size);
CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
- NumCPEs++;
+ ++NumCPEs;
BBOffsets[NewIsland->getNumber()] = BBOffsets[NewMBB->getNumber()];
// Compensate for .align 2 in thumb mode.
@@ -1369,7 +1372,7 @@ ARMConstantIslands::FixUpUnconditionalBr(MachineFunction &MF, ImmBranch &Br) {
BBSizes[MBB->getNumber()] += 2;
AdjustBBOffsetsAfter(MBB, 2);
HasFarJump = true;
- NumUBrFixed++;
+ ++NumUBrFixed;
DEBUG(errs() << " Changed B to long jump " << *MI);
@@ -1402,7 +1405,7 @@ ARMConstantIslands::FixUpConditionalBr(MachineFunction &MF, ImmBranch &Br) {
MachineInstr *BMI = &MBB->back();
bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
- NumCBrFixed++;
+ ++NumCBrFixed;
if (BMI != MI) {
if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
BMI->getOpcode() == Br.UncondBr) {
@@ -1621,7 +1624,7 @@ bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
// constantpool tables?
MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
if (MJTI == 0) return false;
-
+
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
MachineInstr *MI = T2JumpTables[i];
@@ -1658,15 +1661,25 @@ bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
continue;
unsigned IdxReg = MI->getOperand(1).getReg();
bool IdxRegKill = MI->getOperand(1).isKill();
+
+ // Scan backwards to find the instruction that defines the base
+ // register. Due to post-RA scheduling, we can't count on it
+ // immediately preceding the branch instruction.
MachineBasicBlock::iterator PrevI = MI;
- if (PrevI == MBB->begin())
+ MachineBasicBlock::iterator B = MBB->begin();
+ while (PrevI != B && !PrevI->definesRegister(BaseReg))
+ --PrevI;
+
+ // If for some reason we didn't find it, we can't do anything, so
+ // just skip this one.
+ if (!PrevI->definesRegister(BaseReg))
continue;
- MachineInstr *AddrMI = --PrevI;
+ MachineInstr *AddrMI = PrevI;
bool OptOk = true;
- // Examine the instruction that calculate the jumptable entry address.
- // If it's not the one just before the t2BR_JT, we won't delete it, then
- // it's not worth doing the optimization.
+ // Examine the instruction that calculates the jumptable entry address.
+ // Make sure it only defines the base register and kills any uses
+ // other than the index register.
for (unsigned k = 0, eee = AddrMI->getNumOperands(); k != eee; ++k) {
const MachineOperand &MO = AddrMI->getOperand(k);
if (!MO.isReg() || !MO.getReg())
@@ -1683,9 +1696,14 @@ bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
if (!OptOk)
continue;
- // The previous instruction should be a tLEApcrel or t2LEApcrelJT, we want
+ // Now scan back again to find the tLEApcrel or t2LEApcrelJT instruction
+ // that gave us the initial base register definition.
+ for (--PrevI; PrevI != B && !PrevI->definesRegister(BaseReg); --PrevI)
+ ;
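Both scans use the same backward-walk idiom: step toward the block start until an instruction defining the register is found, then verify the result before using it. A standalone sketch:

#include <vector>

// Hypothetical instruction record: the registers it defines.
struct Instr { std::vector<unsigned> Defs; };

static bool defines(const Instr &I, unsigned Reg) {
  for (unsigned D : I.Defs)
    if (D == Reg)
      return true;
  return false;
}

// Walk backwards from Pos, returning the index of the nearest
// instruction defining Reg, or -1 if the block start is reached first.
static int findDefBackwards(const std::vector<Instr> &MBB, int Pos,
                            unsigned Reg) {
  if (MBB.empty())
    return -1;
  while (Pos > 0 && !defines(MBB[Pos], Reg))
    --Pos;
  return defines(MBB[Pos], Reg) ? Pos : -1;
}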
+
+ // The instruction should be a tLEApcrel or t2LEApcrelJT; we want
// to delete it as well.
- MachineInstr *LeaMI = --PrevI;
+ MachineInstr *LeaMI = PrevI;
if ((LeaMI->getOpcode() != ARM::tLEApcrelJT &&
LeaMI->getOpcode() != ARM::t2LEApcrelJT) ||
LeaMI->getOperand(0).getReg() != BaseReg)
@@ -1729,7 +1747,7 @@ bool ARMConstantIslands::ReorderThumb2JumpTables(MachineFunction &MF) {
MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
if (MJTI == 0) return false;
-
+
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
MachineInstr *MI = T2JumpTables[i];
@@ -1769,7 +1787,7 @@ AdjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB)
{
MachineFunction &MF = *BB->getParent();
- // If it's the destination block is terminated by an unconditional branch,
+ // If the destination block is terminated by an unconditional branch,
// try to move it; otherwise, create a new block following the jump
// table that branches back to the actual target. This is a very simple
// heuristic. FIXME: We can definitely improve it.
diff --git a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
index 6f4eddf..3119b54 100644
--- a/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
+++ b/contrib/llvm/lib/Target/ARM/ARMConstantPoolValue.h
@@ -15,6 +15,7 @@
#define LLVM_TARGET_ARM_CONSTANTPOOLVALUE_H
#include "llvm/CodeGen/MachineConstantPool.h"
+#include <cstddef>
namespace llvm {
diff --git a/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index c87f5d7..9c62597 100644
--- a/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -144,13 +144,15 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
MachineInstrBuilder Even =
AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(),
TII->get(ARM::VMOVQ))
- .addReg(EvenDst, getDefRegState(true) | getDeadRegState(DstIsDead))
- .addReg(EvenSrc, getKillRegState(SrcIsKill)));
+ .addReg(EvenDst,
+ getDefRegState(true) | getDeadRegState(DstIsDead))
+ .addReg(EvenSrc, getKillRegState(SrcIsKill)));
MachineInstrBuilder Odd =
AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(),
TII->get(ARM::VMOVQ))
- .addReg(OddDst, getDefRegState(true) | getDeadRegState(DstIsDead))
- .addReg(OddSrc, getKillRegState(SrcIsKill)));
+ .addReg(OddDst,
+ getDefRegState(true) | getDeadRegState(DstIsDead))
+ .addReg(OddSrc, getKillRegState(SrcIsKill)));
TransferImpOps(MI, Even, Odd);
MI.eraseFromParent();
Modified = true;
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 9baef6b..c84d3ff 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMTargetMachine.h"
@@ -35,11 +36,6 @@
using namespace llvm;
-static cl::opt<bool>
-UseRegSeq("neon-reg-sequence", cl::Hidden,
- cl::desc("Use reg_sequence to model ld / st of multiple neon regs"),
- cl::init(true));
-
//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
@@ -147,6 +143,11 @@ private:
unsigned *DOpcodes, unsigned *QOpcodes0,
unsigned *QOpcodes1);
+ /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
+ /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
+ /// generated to force the table registers to be consecutive.
+ SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);
+
/// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);
@@ -173,24 +174,17 @@ private:
char ConstraintCode,
std::vector<SDValue> &OutOps);
- /// PairDRegs - Form a quad register from a pair of D registers.
- ///
+ // Form pairs of consecutive S, D, or Q registers.
+ SDNode *PairSRegs(EVT VT, SDValue V0, SDValue V1);
SDNode *PairDRegs(EVT VT, SDValue V0, SDValue V1);
-
- /// PairDRegs - Form a quad register pair from a pair of Q registers.
- ///
SDNode *PairQRegs(EVT VT, SDValue V0, SDValue V1);
- /// QuadDRegs - Form a quad register pair from a quad of D registers.
- ///
+ // Form sequences of 4 consecutive S, D, or Q registers.
+ SDNode *QuadSRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
SDNode *QuadDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
-
- /// QuadQRegs - Form 4 consecutive Q registers.
- ///
SDNode *QuadQRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
- /// OctoDRegs - Form 8 consecutive D registers.
- ///
+ // Form sequences of 8 consecutive D registers.
SDNode *OctoDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3,
SDValue V4, SDValue V5, SDValue V6, SDValue V7);
};
@@ -544,10 +538,9 @@ bool ARMDAGToDAGISel::SelectAddrModePC(SDNode *Op, SDValue N,
bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDNode *Op, SDValue N,
SDValue &Base, SDValue &Offset){
// FIXME dl should come from the parent load or store, not the address
- DebugLoc dl = Op->getDebugLoc();
if (N.getOpcode() != ISD::ADD) {
ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
- if (!NC || NC->getZExtValue() != 0)
+ if (!NC || !NC->isNullValue())
return false;
Base = Offset = N;
@@ -788,8 +781,9 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDNode *Op, SDValue N,
if (N.getOpcode() == ISD::ADD) {
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
int RHSC = (int)RHS->getZExtValue();
+ // 8 bits.
if (((RHSC & 0x3) == 0) &&
- ((RHSC >= 0 && RHSC < 0x400) || (RHSC < 0 && RHSC > -0x400))) { // 8 bits.
+ ((RHSC >= 0 && RHSC < 0x400) || (RHSC < 0 && RHSC > -0x400))) {
Base = N.getOperand(0);
OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
return true;
@@ -798,7 +792,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDNode *Op, SDValue N,
} else if (N.getOpcode() == ISD::SUB) {
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
int RHSC = (int)RHS->getZExtValue();
- if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) { // 8 bits.
+ // 8 bits.
+ if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) {
Base = N.getOperand(0);
OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
return true;
@@ -960,22 +955,24 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
return NULL;
}
+/// PairSRegs - Form a D register from a pair of S registers.
+///
+SDNode *ARMDAGToDAGISel::PairSRegs(EVT VT, SDValue V0, SDValue V1) {
+ DebugLoc dl = V0.getNode()->getDebugLoc();
+ SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
+ SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
+ const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
+ return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
+}
+
/// PairDRegs - Form a quad register from a pair of D registers.
///
SDNode *ARMDAGToDAGISel::PairDRegs(EVT VT, SDValue V0, SDValue V1) {
DebugLoc dl = V0.getNode()->getDebugLoc();
SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
- if (llvm::ModelWithRegSequence()) {
- const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
- return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
- }
- SDValue Undef =
- SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0);
- SDNode *Pair = CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
- VT, Undef, V0, SubReg0);
- return CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
- VT, SDValue(Pair, 0), V1, SubReg1);
+ const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
+ return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
}
/// PairQRegs - Form 4 consecutive D registers from a pair of Q registers.
@@ -988,6 +985,19 @@ SDNode *ARMDAGToDAGISel::PairQRegs(EVT VT, SDValue V0, SDValue V1) {
return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
}
+/// QuadSRegs - Form 4 consecutive S registers.
+///
+SDNode *ARMDAGToDAGISel::QuadSRegs(EVT VT, SDValue V0, SDValue V1,
+ SDValue V2, SDValue V3) {
+ DebugLoc dl = V0.getNode()->getDebugLoc();
+ SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
+ SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
+ SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
+ SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
+ const SDValue Ops[] = { V0, SubReg0, V1, SubReg1, V2, SubReg2, V3, SubReg3 };
+ return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 8);
+}
+
/// QuadDRegs - Form 4 consecutive D registers.
///
SDNode *ARMDAGToDAGISel::QuadDRegs(EVT VT, SDValue V0, SDValue V1,
@@ -1088,7 +1098,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
std::vector<EVT> ResTys(NumVecs, VT);
ResTys.push_back(MVT::Other);
SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 5);
- if (!llvm::ModelWithRegSequence() || NumVecs < 2)
+ if (NumVecs < 2)
return VLd;
SDValue RegSeq;
@@ -1129,24 +1139,17 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
Chain = SDValue(VLd, 2 * NumVecs);
// Combine the even and odd subregs to produce the result.
- if (llvm::ModelWithRegSequence()) {
- if (NumVecs == 1) {
- SDNode *Q = PairDRegs(VT, SDValue(VLd, 0), SDValue(VLd, 1));
- ReplaceUses(SDValue(N, 0), SDValue(Q, 0));
- } else {
- SDValue QQ = SDValue(QuadDRegs(MVT::v4i64,
- SDValue(VLd, 0), SDValue(VLd, 1),
- SDValue(VLd, 2), SDValue(VLd, 3)), 0);
- SDValue Q0 = CurDAG->getTargetExtractSubreg(ARM::qsub_0, dl, VT, QQ);
- SDValue Q1 = CurDAG->getTargetExtractSubreg(ARM::qsub_1, dl, VT, QQ);
- ReplaceUses(SDValue(N, 0), Q0);
- ReplaceUses(SDValue(N, 1), Q1);
- }
+ if (NumVecs == 1) {
+ SDNode *Q = PairDRegs(VT, SDValue(VLd, 0), SDValue(VLd, 1));
+ ReplaceUses(SDValue(N, 0), SDValue(Q, 0));
} else {
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDNode *Q = PairDRegs(VT, SDValue(VLd, 2*Vec), SDValue(VLd, 2*Vec+1));
- ReplaceUses(SDValue(N, Vec), SDValue(Q, 0));
- }
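+ // NumVecs == 2: combine all four D results into one QQ register, then
+ // extract the two Q halves for the two destination vectors.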
+ SDValue QQ = SDValue(QuadDRegs(MVT::v4i64,
+ SDValue(VLd, 0), SDValue(VLd, 1),
+ SDValue(VLd, 2), SDValue(VLd, 3)), 0);
+ SDValue Q0 = CurDAG->getTargetExtractSubreg(ARM::qsub_0, dl, VT, QQ);
+ SDValue Q1 = CurDAG->getTargetExtractSubreg(ARM::qsub_1, dl, VT, QQ);
+ ReplaceUses(SDValue(N, 0), Q0);
+ ReplaceUses(SDValue(N, 1), Q1);
}
} else {
// Otherwise, quad registers are loaded with two separate instructions,
@@ -1169,37 +1172,27 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
SDNode *VLdB = CurDAG->getMachineNode(Opc, dl, ResTys, OpsB, 6);
Chain = SDValue(VLdB, NumVecs+1);
- if (llvm::ModelWithRegSequence()) {
- SDValue V0 = SDValue(VLdA, 0);
- SDValue V1 = SDValue(VLdB, 0);
- SDValue V2 = SDValue(VLdA, 1);
- SDValue V3 = SDValue(VLdB, 1);
- SDValue V4 = SDValue(VLdA, 2);
- SDValue V5 = SDValue(VLdB, 2);
- SDValue V6 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,RegVT),
- 0)
- : SDValue(VLdA, 3);
- SDValue V7 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,RegVT),
- 0)
- : SDValue(VLdB, 3);
- SDValue RegSeq = SDValue(OctoDRegs(MVT::v8i64, V0, V1, V2, V3,
- V4, V5, V6, V7), 0);
-
- // Extract out the 3 / 4 Q registers.
- assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDValue Q = CurDAG->getTargetExtractSubreg(ARM::qsub_0+Vec,
- dl, VT, RegSeq);
- ReplaceUses(SDValue(N, Vec), Q);
- }
- } else {
- // Combine the even and odd subregs to produce the result.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDNode *Q = PairDRegs(VT, SDValue(VLdA, Vec), SDValue(VLdB, Vec));
- ReplaceUses(SDValue(N, Vec), SDValue(Q, 0));
- }
+ SDValue V0 = SDValue(VLdA, 0);
+ SDValue V1 = SDValue(VLdB, 0);
+ SDValue V2 = SDValue(VLdA, 1);
+ SDValue V3 = SDValue(VLdB, 1);
+ SDValue V4 = SDValue(VLdA, 2);
+ SDValue V5 = SDValue(VLdB, 2);
+ SDValue V6 = (NumVecs == 3)
+ ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,RegVT), 0)
+ : SDValue(VLdA, 3);
+ SDValue V7 = (NumVecs == 3)
+ ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,RegVT), 0)
+ : SDValue(VLdB, 3);
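+ // For vld3 the fourth D register of each half is unused, so pad it with
+ // undef to complete the QQQQ sequence.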
+ SDValue RegSeq = SDValue(OctoDRegs(MVT::v8i64, V0, V1, V2, V3,
+ V4, V5, V6, V7), 0);
+
+ // Extract out the 3 / 4 Q registers.
+ assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
+ SDValue Q = CurDAG->getTargetExtractSubreg(ARM::qsub_0+Vec,
+ dl, VT, RegSeq);
+ ReplaceUses(SDValue(N, Vec), Q);
}
}
ReplaceUses(SDValue(N, NumVecs), Chain);
@@ -1209,7 +1202,7 @@ SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
unsigned *DOpcodes, unsigned *QOpcodes0,
unsigned *QOpcodes1) {
- assert(NumVecs >=1 && NumVecs <= 4 && "VST NumVecs out-of-range");
+ assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
DebugLoc dl = N->getDebugLoc();
SDValue MemAddr, Align;
@@ -1247,7 +1240,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
Ops.push_back(Align);
if (is64BitVector) {
- if (llvm::ModelWithRegSequence() && NumVecs >= 2) {
+ if (NumVecs >= 2) {
SDValue RegSeq;
SDValue V0 = N->getOperand(0+3);
SDValue V1 = N->getOperand(1+3);
@@ -1292,7 +1285,7 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
// Quad registers are directly supported for VST1 and VST2,
// storing pairs of D regs.
unsigned Opc = QOpcodes0[OpcodeIndex];
- if (llvm::ModelWithRegSequence() && NumVecs == 2) {
+ if (NumVecs == 2) {
// First extract the pair of Q registers.
SDValue Q0 = N->getOperand(3);
SDValue Q1 = N->getOperand(4);
@@ -1330,76 +1323,48 @@ SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
// Otherwise, quad registers are stored with two separate instructions,
// where one stores the even registers and the other stores the odd registers.
- if (llvm::ModelWithRegSequence()) {
- // Form the QQQQ REG_SEQUENCE.
- SDValue V[8];
- for (unsigned Vec = 0, i = 0; Vec < NumVecs; ++Vec, i+=2) {
- V[i] = CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, RegVT,
- N->getOperand(Vec+3));
- V[i+1] = CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, RegVT,
- N->getOperand(Vec+3));
- }
- if (NumVecs == 3)
- V[6] = V[7] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, RegVT), 0);
-
- SDValue RegSeq = SDValue(OctoDRegs(MVT::v8i64, V[0], V[1], V[2], V[3],
- V[4], V[5], V[6], V[7]), 0);
-
- // Store the even D registers.
- assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
- Ops.push_back(Reg0); // post-access address offset
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0+Vec*2, dl,
- RegVT, RegSeq));
- Ops.push_back(Pred);
- Ops.push_back(Reg0); // predicate register
- Ops.push_back(Chain);
- unsigned Opc = QOpcodes0[OpcodeIndex];
- SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
- MVT::Other, Ops.data(), NumVecs+6);
- Chain = SDValue(VStA, 1);
- // Store the odd D registers.
- Ops[0] = SDValue(VStA, 0); // MemAddr
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops[Vec+3] = CurDAG->getTargetExtractSubreg(ARM::dsub_1+Vec*2, dl,
- RegVT, RegSeq);
- Ops[NumVecs+5] = Chain;
- Opc = QOpcodes1[OpcodeIndex];
- SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
- MVT::Other, Ops.data(), NumVecs+6);
- Chain = SDValue(VStB, 1);
- ReplaceUses(SDValue(N, 0), Chain);
- return NULL;
- } else {
- Ops.push_back(Reg0); // post-access address offset
-
- // Store the even subregs.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, RegVT,
- N->getOperand(Vec+3)));
- Ops.push_back(Pred);
- Ops.push_back(Reg0); // predicate register
- Ops.push_back(Chain);
- unsigned Opc = QOpcodes0[OpcodeIndex];
- SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
- MVT::Other, Ops.data(), NumVecs+6);
- Chain = SDValue(VStA, 1);
-
- // Store the odd subregs.
- Ops[0] = SDValue(VStA, 0); // MemAddr
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops[Vec+3] = CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, RegVT,
- N->getOperand(Vec+3));
- Ops[NumVecs+5] = Chain;
- Opc = QOpcodes1[OpcodeIndex];
- SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
- MVT::Other, Ops.data(), NumVecs+6);
- Chain = SDValue(VStB, 1);
- ReplaceUses(SDValue(N, 0), Chain);
- return NULL;
- }
+ // Form the QQQQ REG_SEQUENCE.
+ SDValue V[8];
+ for (unsigned Vec = 0, i = 0; Vec < NumVecs; ++Vec, i+=2) {
+ V[i] = CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, RegVT,
+ N->getOperand(Vec+3));
+ V[i+1] = CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, RegVT,
+ N->getOperand(Vec+3));
+ }
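+ // vst3 supplies only three D registers per half; pad the QQQQ sequence
+ // with undefs, which are never actually stored.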
+ if (NumVecs == 3)
+ V[6] = V[7] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, RegVT), 0);
+
+ SDValue RegSeq = SDValue(OctoDRegs(MVT::v8i64, V[0], V[1], V[2], V[3],
+ V[4], V[5], V[6], V[7]), 0);
+
+ // Store the even D registers.
+ assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
+ Ops.push_back(Reg0); // post-access address offset
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0+Vec*2, dl,
+ RegVT, RegSeq));
+ Ops.push_back(Pred);
+ Ops.push_back(Reg0); // predicate register
+ Ops.push_back(Chain);
+ unsigned Opc = QOpcodes0[OpcodeIndex];
+ SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
+ MVT::Other, Ops.data(), NumVecs+6);
+ Chain = SDValue(VStA, 1);
+
+ // Store the odd D registers.
+ Ops[0] = SDValue(VStA, 0); // MemAddr
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+ Ops[Vec+3] = CurDAG->getTargetExtractSubreg(ARM::dsub_1+Vec*2, dl,
+ RegVT, RegSeq);
+ Ops[NumVecs+5] = Chain;
+ Opc = QOpcodes1[OpcodeIndex];
+ SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
+ MVT::Other, Ops.data(), NumVecs+6);
+ Chain = SDValue(VStB, 1);
+ ReplaceUses(SDValue(N, 0), Chain);
+ return NULL;
}
SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
@@ -1421,13 +1386,11 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
// Quad registers are handled by load/store of subregs. Find the subreg info.
unsigned NumElts = 0;
- int SubregIdx = 0;
bool Even = false;
EVT RegVT = VT;
if (!is64BitVector) {
RegVT = GetNEONSubregVT(VT);
NumElts = RegVT.getVectorNumElements();
- SubregIdx = (Lane < NumElts) ? ARM::dsub_0 : ARM::dsub_1;
Even = Lane < NumElts;
}
@@ -1455,35 +1418,26 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
unsigned Opc = 0;
if (is64BitVector) {
Opc = DOpcodes[OpcodeIndex];
- if (llvm::ModelWithRegSequence()) {
- SDValue RegSeq;
- SDValue V0 = N->getOperand(0+3);
- SDValue V1 = N->getOperand(1+3);
- if (NumVecs == 2) {
- RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
- } else {
- SDValue V2 = N->getOperand(2+3);
- SDValue V3 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
- : N->getOperand(3+3);
- RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
- }
-
- // Now extract the D registers back out.
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, VT,
- RegSeq));
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, VT,
- RegSeq));
- if (NumVecs > 2)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_2, dl, VT,
- RegSeq));
- if (NumVecs > 3)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_3, dl, VT,
- RegSeq));
+ SDValue RegSeq;
+ SDValue V0 = N->getOperand(0+3);
+ SDValue V1 = N->getOperand(1+3);
+ if (NumVecs == 2) {
+ RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
} else {
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(N->getOperand(Vec+3));
+ SDValue V2 = N->getOperand(2+3);
+ SDValue V3 = (NumVecs == 3)
+ ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
+ : N->getOperand(3+3);
+ RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
}
+
+ // Now extract the D registers back out.
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, VT, RegSeq));
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, VT, RegSeq));
+ if (NumVecs > 2)
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_2, dl, VT,RegSeq));
+ if (NumVecs > 3)
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_3, dl, VT,RegSeq));
} else {
// Check if this is loading the even or odd subreg of a Q register.
if (Lane < NumElts) {
@@ -1493,31 +1447,24 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
Opc = QOpcodes1[OpcodeIndex];
}
- if (llvm::ModelWithRegSequence()) {
- SDValue RegSeq;
- SDValue V0 = N->getOperand(0+3);
- SDValue V1 = N->getOperand(1+3);
- if (NumVecs == 2) {
- RegSeq = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
- } else {
- SDValue V2 = N->getOperand(2+3);
- SDValue V3 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
- : N->getOperand(3+3);
- RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
- }
-
- // Extract the subregs of the input vector.
- unsigned SubIdx = Even ? ARM::dsub_0 : ARM::dsub_1;
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(CurDAG->getTargetExtractSubreg(SubIdx+Vec*2, dl, RegVT,
- RegSeq));
+ SDValue RegSeq;
+ SDValue V0 = N->getOperand(0+3);
+ SDValue V1 = N->getOperand(1+3);
+ if (NumVecs == 2) {
+ RegSeq = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
} else {
- // Extract the subregs of the input vector.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
- N->getOperand(Vec+3)));
+ SDValue V2 = N->getOperand(2+3);
+ SDValue V3 = (NumVecs == 3)
+ ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
+ : N->getOperand(3+3);
+ RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
}
+
+ // Extract the subregs of the input vector.
+ unsigned SubIdx = Even ? ARM::dsub_0 : ARM::dsub_1;
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+ Ops.push_back(CurDAG->getTargetExtractSubreg(SubIdx+Vec*2, dl, RegVT,
+ RegSeq));
}
Ops.push_back(getI32Imm(Lane));
Ops.push_back(Pred);
@@ -1531,76 +1478,97 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
ResTys.push_back(MVT::Other);
SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(),NumVecs+6);
- if (llvm::ModelWithRegSequence()) {
- // Form a REG_SEQUENCE to force register allocation.
- SDValue RegSeq;
- if (is64BitVector) {
- SDValue V0 = SDValue(VLdLn, 0);
- SDValue V1 = SDValue(VLdLn, 1);
- if (NumVecs == 2) {
- RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
- } else {
- SDValue V2 = SDValue(VLdLn, 2);
- // If it's a vld3, form a quad D-register but discard the last part.
- SDValue V3 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
- : SDValue(VLdLn, 3);
- RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
- }
+ // Form a REG_SEQUENCE to force register allocation.
+ SDValue RegSeq;
+ if (is64BitVector) {
+ SDValue V0 = SDValue(VLdLn, 0);
+ SDValue V1 = SDValue(VLdLn, 1);
+ if (NumVecs == 2) {
+ RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
} else {
- // For 128-bit vectors, take the 64-bit results of the load and insert them
- // as subregs into the result.
- SDValue V[8];
- for (unsigned Vec = 0, i = 0; Vec < NumVecs; ++Vec, i+=2) {
- if (Even) {
- V[i] = SDValue(VLdLn, Vec);
- V[i+1] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, RegVT), 0);
- } else {
- V[i] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, RegVT), 0);
- V[i+1] = SDValue(VLdLn, Vec);
- }
+ SDValue V2 = SDValue(VLdLn, 2);
+ // If it's a vld3, form a quad D-register but discard the last part.
+ SDValue V3 = (NumVecs == 3)
+ ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
+ : SDValue(VLdLn, 3);
+ RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
+ }
+ } else {
+ // For 128-bit vectors, take the 64-bit results of the load and insert
+ // them as subregs into the result.
+ SDValue V[8];
+ for (unsigned Vec = 0, i = 0; Vec < NumVecs; ++Vec, i+=2) {
+ if (Even) {
+ V[i] = SDValue(VLdLn, Vec);
+ V[i+1] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, RegVT), 0);
+ } else {
+ V[i] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, RegVT), 0);
+ V[i+1] = SDValue(VLdLn, Vec);
}
- if (NumVecs == 3)
- V[6] = V[7] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, RegVT), 0);
-
- if (NumVecs == 2)
- RegSeq = SDValue(QuadDRegs(MVT::v4i64, V[0], V[1], V[2], V[3]), 0);
- else
- RegSeq = SDValue(OctoDRegs(MVT::v8i64, V[0], V[1], V[2], V[3],
- V[4], V[5], V[6], V[7]), 0);
}
+ if (NumVecs == 3)
+ V[6] = V[7] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, RegVT), 0);
- assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
- assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
- unsigned SubIdx = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- ReplaceUses(SDValue(N, Vec),
- CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, RegSeq));
- ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, NumVecs));
- return NULL;
- }
-
- // For a 64-bit vector load to D registers, nothing more needs to be done.
- if (is64BitVector)
- return VLdLn;
-
- // For 128-bit vectors, take the 64-bit results of the load and insert them
- // as subregs into the result.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDValue QuadVec = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
- N->getOperand(Vec+3),
- SDValue(VLdLn, Vec));
- ReplaceUses(SDValue(N, Vec), QuadVec);
+ if (NumVecs == 2)
+ RegSeq = SDValue(QuadDRegs(MVT::v4i64, V[0], V[1], V[2], V[3]), 0);
+ else
+ RegSeq = SDValue(OctoDRegs(MVT::v8i64, V[0], V[1], V[2], V[3],
+ V[4], V[5], V[6], V[7]), 0);
}
- Chain = SDValue(VLdLn, NumVecs);
- ReplaceUses(SDValue(N, NumVecs), Chain);
+ assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
+ assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
+ unsigned SubIdx = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+ ReplaceUses(SDValue(N, Vec),
+ CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, RegSeq));
+ ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, NumVecs));
return NULL;
}
+SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
+ unsigned Opc) {
+ assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
+ DebugLoc dl = N->getDebugLoc();
+ EVT VT = N->getValueType(0);
+ unsigned FirstTblReg = IsExt ? 2 : 1;
+
+ // Form a REG_SEQUENCE to force register allocation.
+ SDValue RegSeq;
+ SDValue V0 = N->getOperand(FirstTblReg + 0);
+ SDValue V1 = N->getOperand(FirstTblReg + 1);
+ if (NumVecs == 2)
+ RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);
+ else {
+ SDValue V2 = N->getOperand(FirstTblReg + 2);
+ // If it's a vtbl3, form a quad D-register and leave the last part as
+ // an undef.
+ SDValue V3 = (NumVecs == 3)
+ ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
+ : N->getOperand(FirstTblReg + 3);
+ RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
+ }
+
+ // Now extract the D registers back out.
+ SmallVector<SDValue, 6> Ops;
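+ // For VTBX the first operand is the vector whose elements are preserved
+ // when a table index is out of range.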
+ if (IsExt)
+ Ops.push_back(N->getOperand(1));
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, VT, RegSeq));
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, VT, RegSeq));
+ if (NumVecs > 2)
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_2, dl, VT, RegSeq));
+ if (NumVecs > 3)
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_3, dl, VT, RegSeq));
+
+ Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
+ Ops.push_back(getAL(CurDAG)); // predicate
+ Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
+ return CurDAG->getMachineNode(Opc, dl, VT, Ops.data(), Ops.size());
+}
+
SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
bool isSigned) {
if (!Subtarget->hasV6T2Ops())
@@ -1954,8 +1922,8 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
if (Subtarget->isThumb()) {
- SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0 };
- return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 5);
+ SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
+ return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
} else {
SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7);
@@ -2015,7 +1983,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops,4);
+ return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32,Ops,4);
} else {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
@@ -2029,7 +1997,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
if (Subtarget->isThumb()) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
- return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops,4);
+ return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32,Ops,4);
} else {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
@@ -2211,6 +2179,22 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
}
+ case ARMISD::BUILD_VECTOR: {
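+ // Lower BUILD_VECTOR of f32/f64 elements to a REG_SEQUENCE node so the
+ // register allocator sees the subregister layout directly.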
+ EVT VecVT = N->getValueType(0);
+ EVT EltVT = VecVT.getVectorElementType();
+ unsigned NumElts = VecVT.getVectorNumElements();
+ if (EltVT.getSimpleVT() == MVT::f64) {
+ assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
+ return PairDRegs(VecVT, N->getOperand(0), N->getOperand(1));
+ }
+ assert(EltVT.getSimpleVT() == MVT::f32 &&
+ "unexpected type for BUILD_VECTOR");
+ if (NumElts == 2)
+ return PairSRegs(VecVT, N->getOperand(0), N->getOperand(1));
+ assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
+ return QuadSRegs(VecVT, N->getOperand(0), N->getOperand(1),
+ N->getOperand(2), N->getOperand(3));
+ }
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN: {
@@ -2342,6 +2326,29 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
break;
}
+ case ISD::INTRINSIC_WO_CHAIN: {
+ unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+ switch (IntNo) {
+ default:
+ break;
+
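+ // NEON table lookups: VTBL writes zero for out-of-range indices, while
+ // VTBX leaves the corresponding destination elements unchanged.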
+ case Intrinsic::arm_neon_vtbl2:
+ return SelectVTBL(N, false, 2, ARM::VTBL2);
+ case Intrinsic::arm_neon_vtbl3:
+ return SelectVTBL(N, false, 3, ARM::VTBL3);
+ case Intrinsic::arm_neon_vtbl4:
+ return SelectVTBL(N, false, 4, ARM::VTBL4);
+
+ case Intrinsic::arm_neon_vtbx2:
+ return SelectVTBL(N, true, 2, ARM::VTBX2);
+ case Intrinsic::arm_neon_vtbx3:
+ return SelectVTBL(N, true, 3, ARM::VTBX3);
+ case Intrinsic::arm_neon_vtbx4:
+ return SelectVTBL(N, true, 4, ARM::VTBX4);
+ }
+ break;
+ }
+
case ISD::CONCAT_VECTORS:
return SelectConcatVector(N);
}
@@ -2367,9 +2374,3 @@ FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
CodeGenOpt::Level OptLevel) {
return new ARMDAGToDAGISel(TM, OptLevel);
}
-
-/// ModelWithRegSequence - Return true if isel should use REG_SEQUENCE to model
-/// operations involving sub-registers.
-bool llvm::ModelWithRegSequence() {
- return UseRegSeq;
-}
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
index b8126a3..0091df7 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -12,6 +12,7 @@
//
//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "arm-isel"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMConstantPoolValue.h"
@@ -40,6 +41,7 @@
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
@@ -47,9 +49,27 @@
#include <sstream>
using namespace llvm;
+STATISTIC(NumTailCalls, "Number of tail calls");
+
+// This option should go away when tail calls fully work.
+static cl::opt<bool>
+EnableARMTailCalls("arm-tail-calls", cl::Hidden,
+ cl::desc("Generate tail calls (TEMPORARY OPTION)."),
+ cl::init(true));
+
static cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
- cl::desc("Generate calls via indirect call instructions."),
+ cl::desc("Generate calls via indirect call instructions"),
+ cl::init(false));
+
+static cl::opt<bool>
+ARMInterworking("arm-interworking", cl::Hidden,
+ cl::desc("Enable / disable ARM interworking (for debugging only)"),
+ cl::init(true));
+
+static cl::opt<bool>
+EnableARMCodePlacement("arm-code-placement", cl::Hidden,
+ cl::desc("Enable code placement pass for ARM"),
cl::init(false));
static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
@@ -94,10 +114,7 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
}
setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
- if (llvm::ModelWithRegSequence())
- setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
- else
- setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand);
setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
@@ -393,13 +410,57 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
// doesn't yet know how to not do that for SjLj.
setExceptionSelectorRegister(ARM::R0);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
- setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
-
- // If the subtarget does not have extract instructions, sign_extend_inreg
- // needs to be expanded. Extract is available in ARM mode on v6 and up,
- // and on most Thumb2 implementations.
- if ((!Subtarget->isThumb() && !Subtarget->hasV6Ops())
- || (Subtarget->isThumb2() && !Subtarget->hasT2ExtractPack())) {
+ // Handle atomics directly for ARMv[67] (except for Thumb1), otherwise
+ // use the default expansion.
+ bool canHandleAtomics =
+ (Subtarget->hasV7Ops() ||
+ (Subtarget->hasV6Ops() && !Subtarget->isThumb1Only()));
+ if (canHandleAtomics) {
+ // membarrier needs custom lowering; the rest are legal and handled
+ // normally.
+ setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
+ } else {
+ // Set them all for expansion, which will force libcalls.
+ setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Expand);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Expand);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i8, Expand);
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i16, Expand);
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i8, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i16, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i8, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i16, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i8, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i16, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i8, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i16, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i8, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i16, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
+ // Since the libcalls include locking, fold in the fences
+ setShouldFoldAtomicFences(true);
+ }
+ // 64-bit versions are always libcalls (for now)
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Expand);
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Expand);
+ setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand);
+
+ // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
+ if (!Subtarget->hasV6Ops()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
}
@@ -412,8 +473,10 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
- setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
+ if (Subtarget->isTargetDarwin()) {
+ setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
+ setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
+ }
setOperationAction(ISD::SETCC, MVT::i32, Expand);
setOperationAction(ISD::SETCC, MVT::f32, Expand);
@@ -474,28 +537,14 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
else
setSchedulingPreference(Sched::Hybrid);
- // FIXME: If-converter should use instruction latency to determine
- // profitability rather than relying on fixed limits.
- if (Subtarget->getCPUString() == "generic") {
- // Generic (and overly aggressive) if-conversion limits.
- setIfCvtBlockSizeLimit(10);
- setIfCvtDupBlockSizeLimit(2);
- } else if (Subtarget->hasV7Ops()) {
- setIfCvtBlockSizeLimit(3);
- setIfCvtDupBlockSizeLimit(1);
- } else if (Subtarget->hasV6Ops()) {
- setIfCvtBlockSizeLimit(2);
- setIfCvtDupBlockSizeLimit(1);
- } else {
- setIfCvtBlockSizeLimit(3);
- setIfCvtDupBlockSizeLimit(2);
- }
-
maxStoresPerMemcpy = 1; //// temporary - rewrite interface to use type
- // Do not enable CodePlacementOpt for now: it currently runs after the
- // ARMConstantIslandPass and messes up branch relaxation and placement
- // of constant islands.
- // benefitFromCodePlacementOpt = true;
+
+ // On ARM, arguments smaller than 4 bytes are extended, so all arguments
+ // are at least 4-byte aligned.
+ setMinStackArgumentAlignment(4);
+
+ if (EnableARMCodePlacement)
+ benefitFromCodePlacementOpt = true;
}
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
@@ -516,6 +565,7 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::CMPZ: return "ARMISD::CMPZ";
case ARMISD::CMPFP: return "ARMISD::CMPFP";
case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
+ case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
case ARMISD::CMOV: return "ARMISD::CMOV";
case ARMISD::CNEG: return "ARMISD::CNEG";
@@ -537,6 +587,8 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
case ARMISD::EH_SJLJ_LONGJMP:return "ARMISD::EH_SJLJ_LONGJMP";
+ case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";
+
case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";
@@ -572,6 +624,8 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu";
case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
+ case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
+ case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
case ARMISD::VDUP: return "ARMISD::VDUP";
case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
case ARMISD::VEXT: return "ARMISD::VEXT";
@@ -581,6 +635,7 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
case ARMISD::VZIP: return "ARMISD::VZIP";
case ARMISD::VUZP: return "ARMISD::VUZP";
case ARMISD::VTRN: return "ARMISD::VTRN";
+ case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
case ARMISD::FMAX: return "ARMISD::FMAX";
case ARMISD::FMIN: return "ARMISD::FMIN";
}
@@ -603,15 +658,33 @@ TargetRegisterClass *ARMTargetLowering::getRegClassFor(EVT VT) const {
/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
- return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 0 : 1;
+ return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 1 : 2;
}
Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
- for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
+ unsigned NumVals = N->getNumValues();
+ if (!NumVals)
+ return Sched::RegPressure;
+
+ for (unsigned i = 0; i != NumVals; ++i) {
EVT VT = N->getValueType(i);
if (VT.isFloatingPoint() || VT.isVector())
return Sched::Latency;
}
+
+ if (!N->isMachineOpcode())
+ return Sched::RegPressure;
+
+ // Loads are scheduled for latency even if the instruction itinerary
+ // is not available.
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
+ if (TID.mayLoad())
+ return Sched::Latency;
+
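+ // Also schedule for latency when the itinerary reports a long stage latency.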
+ const InstrItineraryData &Itins = getTargetMachine().getInstrItineraryData();
+ if (!Itins.isEmpty() && Itins.getStageLatency(TID.getSchedClass()) > 2)
+ return Sched::Latency;
return Sched::RegPressure;
}
@@ -964,11 +1037,28 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
- // ARM target does not yet support tail call optimization.
- isTailCall = false;
+ MachineFunction &MF = DAG.getMachineFunction();
+ bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
+ bool IsSibCall = false;
+ // Temporarily disable tail calls so things don't break.
+ if (!EnableARMTailCalls)
+ isTailCall = false;
+ if (isTailCall) {
+ // Check if it's really possible to do a tail call.
+ isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
+ isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
+ Outs, OutVals, Ins, DAG);
+ // We don't support GuaranteedTailCallOpt for ARM, only automatically
+ // detected sibcalls.
+ if (isTailCall) {
+ ++NumTailCalls;
+ IsSibCall = true;
+ }
+ }
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -981,9 +1071,14 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
+ // For tail calls, memory operands are available in our caller's stack.
+ if (IsSibCall)
+ NumBytes = 0;
+
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
+ if (!IsSibCall)
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
@@ -996,7 +1091,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
i != e;
++i, ++realArgIdx) {
CCValAssign &VA = ArgLocs[i];
- SDValue Arg = Outs[realArgIdx].Val;
+ SDValue Arg = OutVals[realArgIdx];
ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
// Promote the value if needed.
@@ -1044,7 +1139,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
}
} else if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
- } else {
+ } else if (!IsSibCall) {
assert(VA.isMemLoc());
MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
@@ -1059,10 +1154,32 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into the appropriate regs.
SDValue InFlag;
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
- InFlag = Chain.getValue(1);
+ // Tail call byval lowering might overwrite argument registers so in case of
+ // tail call optimization the copies to registers are lowered later.
+ if (!isTailCall)
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ RegsToPass[i].second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
+ // For tail calls lower the arguments to the 'real' stack slot.
+ if (isTailCall) {
+ // Force all the incoming stack arguments to be loaded from the stack
+ // before any new outgoing arguments are stored to the stack, because the
+ // outgoing stack slots may alias the incoming argument stack slots, and
+ // the alias isn't otherwise explicit. This is slightly more conservative
+ // than necessary, because it means that each store effectively depends
+ // on every argument instead of just those arguments it would clobber.
+
+ // Do not flag the preceding CopyToReg nodes together with the following ones.
+ InFlag = SDValue();
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ RegsToPass[i].second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+ InFlag = SDValue();
}
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
@@ -1071,7 +1188,6 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
bool isDirect = false;
bool isARMFunc = false;
bool isLocalARMFunc = false;
- MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
if (EnableARMLongCalls) {
@@ -1117,7 +1233,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
getTargetMachine().getRelocationModel() != Reloc::Static;
isARMFunc = !Subtarget->isThumb() || isStub;
// ARM call to a local ARM function is predicable.
- isLocalARMFunc = !Subtarget->isThumb() && !isExt;
+ isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
// tBX takes a register source operand.
if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
@@ -1134,7 +1250,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
getPointerTy(), Callee, PICLabel);
} else
- Callee = DAG.getTargetGlobalAddress(GV, getPointerTy());
+ Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
isDirect = true;
bool isStub = Subtarget->isTargetDarwin() &&
@@ -1171,11 +1287,6 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
: ARMISD::CALL_NOLINK;
}
- if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb1Only()) {
- // implicit def LR - LR mustn't be allocated as GRP:$dst of CALL_NOLINK
- Chain = DAG.getCopyToReg(Chain, dl, ARM::LR, DAG.getUNDEF(MVT::i32),InFlag);
- InFlag = Chain.getValue(1);
- }
std::vector<SDValue> Ops;
Ops.push_back(Chain);
@@ -1189,9 +1300,13 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (InFlag.getNode())
Ops.push_back(InFlag);
+
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+ if (isTailCall)
+ return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());
+
// Returns a chain and a flag for retval copy to use.
- Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
- &Ops[0], Ops.size());
+ Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
@@ -1205,10 +1320,203 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
dl, DAG, InVals);
}
+/// MatchingStackOffset - Return true if the given stack call argument is
+/// already available in the same position (relatively) of the caller's
+/// incoming argument stack.
+static
+bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
+ MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
+ const ARMInstrInfo *TII) {
+ unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
+ int FI = INT_MAX;
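+ // Find the frame index the argument lives in, either via the vreg def of
+ // a CopyFromReg or directly from a load's address.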
+ if (Arg.getOpcode() == ISD::CopyFromReg) {
+ unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
+ if (!VR || TargetRegisterInfo::isPhysicalRegister(VR))
+ return false;
+ MachineInstr *Def = MRI->getVRegDef(VR);
+ if (!Def)
+ return false;
+ if (!Flags.isByVal()) {
+ if (!TII->isLoadFromStackSlot(Def, FI))
+ return false;
+ } else {
+ return false;
+ }
+ } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
+ if (Flags.isByVal())
+ // ByVal argument is passed in as a pointer but it's now being
+ // dereferenced. e.g.
+ // define @foo(%struct.X* %A) {
+ // tail call @bar(%struct.X* byval %A)
+ // }
+ return false;
+ SDValue Ptr = Ld->getBasePtr();
+ FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
+ if (!FINode)
+ return false;
+ FI = FINode->getIndex();
+ } else
+ return false;
+
+ assert(FI != INT_MAX);
+ if (!MFI->isFixedObjectIndex(FI))
+ return false;
+ return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
+}
+
+/// IsEligibleForTailCallOptimization - Check whether the call is eligible
+/// for tail call optimization. Targets which want to do tail call
+/// optimization should implement this function.
+bool
+ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
+ CallingConv::ID CalleeCC,
+ bool isVarArg,
+ bool isCalleeStructRet,
+ bool isCallerStructRet,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SelectionDAG& DAG) const {
+ const Function *CallerF = DAG.getMachineFunction().getFunction();
+ CallingConv::ID CallerCC = CallerF->getCallingConv();
+ bool CCMatch = CallerCC == CalleeCC;
+
+ // Look for obvious safe cases to perform tail call optimization that do not
+ // require ABI changes. This is what gcc calls sibcall.
+
+ // Do not sibcall optimize vararg calls unless the call site is not passing
+ // any arguments.
+ if (isVarArg && !Outs.empty())
+ return false;
+
+ // Also avoid sibcall optimization if either caller or callee uses struct
+ // return semantics.
+ if (isCalleeStructRet || isCallerStructRet)
+ return false;
+
+ // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo::
+ // emitEpilogue is not ready for them.
+ // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
+ // LR. This means if we need to reload LR, it takes extra instructions,
+ // which outweighs the value of the tail call; but here we don't know yet
+ // whether LR is going to be used. Probably the right approach is to
+ // generate the tail call here and turn it back into CALL/RET in
+ // emitEpilogue if LR is used.
+ if (Subtarget->isThumb1Only())
+ return false;
+
+ // For the moment, we can only do this to functions defined in this
+ // compilation, or to indirect calls. A Thumb B to an ARM function,
+ // or vice versa, is not easily fixed up in the linker unlike BL.
+ // (We could do this by loading the address of the callee into a register;
+ // that is an extra instruction over the direct call and burns a register
+ // as well, so is not likely to be a win.)
+
+ // It might be safe to remove this restriction on non-Darwin.
+
+ // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
+ // but we need to make sure there are enough registers; the only valid
+ // registers are the 4 used for parameters. We don't currently do this
+ // case.
+ if (isa<ExternalSymbolSDNode>(Callee))
+ return false;
+
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ const GlobalValue *GV = G->getGlobal();
+ if (GV->isDeclaration() || GV->isWeakForLinker())
+ return false;
+ }
+
+ // If the calling conventions do not match, then we'd better make sure the
+ // results are returned in the same way as what the caller expects.
+ if (!CCMatch) {
+ SmallVector<CCValAssign, 16> RVLocs1;
+ CCState CCInfo1(CalleeCC, false, getTargetMachine(),
+ RVLocs1, *DAG.getContext());
+ CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
+
+ SmallVector<CCValAssign, 16> RVLocs2;
+ CCState CCInfo2(CallerCC, false, getTargetMachine(),
+ RVLocs2, *DAG.getContext());
+ CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
+
+ if (RVLocs1.size() != RVLocs2.size())
+ return false;
+ for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
+ if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
+ return false;
+ if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
+ return false;
+ if (RVLocs1[i].isRegLoc()) {
+ if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
+ return false;
+ } else {
+ if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
+ return false;
+ }
+ }
+ }
+
+ // If the callee takes no arguments then go on to check the results of the
+ // call.
+ if (!Outs.empty()) {
+ // Check if stack adjustment is needed. For now, do not do this if any
+ // argument is passed on the stack.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
+ ArgLocs, *DAG.getContext());
+ CCInfo.AnalyzeCallOperands(Outs,
+ CCAssignFnForNode(CalleeCC, false, isVarArg));
+ if (CCInfo.getNextStackOffset()) {
+ MachineFunction &MF = DAG.getMachineFunction();
+
+ // Check if the arguments are already laid out in the right way as
+ // the caller's fixed stack objects.
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MachineRegisterInfo *MRI = &MF.getRegInfo();
+ const ARMInstrInfo *TII =
+ ((ARMTargetMachine&)getTargetMachine()).getInstrInfo();
+ for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
+ i != e;
+ ++i, ++realArgIdx) {
+ CCValAssign &VA = ArgLocs[i];
+ EVT RegVT = VA.getLocVT();
+ SDValue Arg = OutVals[realArgIdx];
+ ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
+ if (VA.getLocInfo() == CCValAssign::Indirect)
+ return false;
+ if (VA.needsCustom()) {
+ // f64 and vector types are split into multiple registers or
+ // register/stack-slot combinations. The types will not match
+ // the registers; give up on memory f64 refs until we figure
+ // out what to do about this.
+ if (!VA.isRegLoc())
+ return false;
+ if (!ArgLocs[++i].isRegLoc())
+ return false;
+ if (RegVT == MVT::v2f64) {
+ if (!ArgLocs[++i].isRegLoc())
+ return false;
+ if (!ArgLocs[++i].isRegLoc())
+ return false;
+ }
+ } else if (!VA.isRegLoc()) {
+ if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
+ MFI, MRI, TII))
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
SDValue
ARMTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to a location.
@@ -1239,7 +1547,7 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- SDValue Arg = Outs[realRVLocIdx].Val;
+ SDValue Arg = OutVals[realRVLocIdx];
switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!");
@@ -1477,7 +1785,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
// pair. This is always cheaper.
if (Subtarget->useMovt()) {
return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
- DAG.getTargetGlobalAddress(GV, PtrVT));
+ DAG.getTargetGlobalAddress(GV, dl, PtrVT));
} else {
SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
@@ -1552,9 +1860,7 @@ SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
- SDValue Val = Subtarget->isThumb() ?
- DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::SP, MVT::i32) :
- DAG.getConstant(0, MVT::i32);
+ SDValue Val = DAG.getConstant(0, MVT::i32);
return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(0),
Op.getOperand(1), Val);
}
@@ -1568,8 +1874,7 @@ ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
- const ARMSubtarget *Subtarget)
- const {
+ const ARMSubtarget *Subtarget) const {
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
DebugLoc dl = Op.getDebugLoc();
switch (IntNo) {
@@ -1597,7 +1902,6 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
PseudoSourceValue::getConstantPool(), 0,
false, false, 0);
- SDValue Chain = Result.getValue(1);
if (RelocM == Reloc::PIC_) {
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
@@ -1609,25 +1913,21 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
}
static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
- const ARMSubtarget *Subtarget) {
+ const ARMSubtarget *Subtarget) {
DebugLoc dl = Op.getDebugLoc();
SDValue Op5 = Op.getOperand(5);
- SDValue Res;
unsigned isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue();
- if (isDeviceBarrier) {
- if (Subtarget->hasV7Ops())
- Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other, Op.getOperand(0));
- else
- Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other, Op.getOperand(0),
- DAG.getConstant(0, MVT::i32));
- } else {
- if (Subtarget->hasV7Ops())
- Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
- else
- Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
- DAG.getConstant(0, MVT::i32));
- }
- return Res;
+ // v6 and v7 can both handle barriers directly, but they need to be handled
+ // a bit differently. Thumb1 and pre-v6 ARM mode use a libcall instead and
+ // should never get here.
+ unsigned Opc = isDeviceBarrier ? ARMISD::SYNCBARRIER : ARMISD::MEMBARRIER;
+ if (Subtarget->hasV7Ops())
+ return DAG.getNode(Opc, dl, MVT::Other, Op.getOperand(0));
+ else if (Subtarget->hasV6Ops() && !Subtarget->isThumb1Only())
+ return DAG.getNode(Opc, dl, MVT::Other, Op.getOperand(0),
+ DAG.getConstant(0, MVT::i32));
+ assert(0 && "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
+ return SDValue();
}
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
@@ -1712,7 +2012,7 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
SDValue ArgValue2;
if (NextVA.isMemLoc()) {
MachineFrameInfo *MFI = MF.getFrameInfo();
- int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true, false);
+ int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true);
// Create load node to retrieve arguments from the stack.
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
@@ -1768,8 +2068,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
VA = ArgLocs[++i]; // skip ahead to next loc
SDValue ArgValue2;
if (VA.isMemLoc()) {
- int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(),
- true, false);
+ int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
PseudoSourceValue::getFixedStack(FI), 0,
@@ -1836,8 +2135,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
- int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
- true, false);
+ int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), true);
// Create load nodes to retrieve arguments from the stack.
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
@@ -1868,7 +2166,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
AFI->setVarArgsFrameIndex(
MFI->CreateFixedObject(VARegSaveSize,
ArgOffset + VARegSaveSize - VARegSize,
- true, false));
+ true));
SDValue FIN = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(),
getPointerTy());
@@ -1884,8 +2182,8 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
- PseudoSourceValue::getFixedStack(AFI->getVarArgsFrameIndex()), 0,
- false, false, 0);
+ PseudoSourceValue::getFixedStack(AFI->getVarArgsFrameIndex()),
+ 0, false, false, 0);
MemOps.push_back(Store);
FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
DAG.getConstant(4, getPointerTy()));
@@ -1895,8 +2193,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
&MemOps[0], MemOps.size());
} else
// This will point to the next argument passed via stack.
- AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset,
- true, false));
+ AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true));
}
return Chain;
@@ -1922,7 +2219,7 @@ static bool isFloatingPointZero(SDValue Op) {
/// the given operands.
SDValue
ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDValue &ARMCC, SelectionDAG &DAG,
+ SDValue &ARMcc, SelectionDAG &DAG,
DebugLoc dl) const {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
unsigned C = RHSC->getZExtValue();
@@ -1974,13 +2271,14 @@ ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
CompareType = ARMISD::CMPZ;
break;
}
- ARMCC = DAG.getConstant(CondCode, MVT::i32);
+ ARMcc = DAG.getConstant(CondCode, MVT::i32);
return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS);
}
/// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
-static SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
- DebugLoc dl) {
+SDValue
+ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
+ DebugLoc dl) const {
SDValue Cmp;
if (!isFloatingPointZero(RHS))
Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS);
@@ -1999,59 +2297,184 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
if (LHS.getValueType() == MVT::i32) {
- SDValue ARMCC;
+ SDValue ARMcc;
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, dl);
- return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC, CCR,Cmp);
+ SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
+ return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp);
}
ARMCC::CondCodes CondCode, CondCode2;
FPCCToARMCC(CC, CondCode, CondCode2);
- SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
- SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
+ SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
+ SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
- ARMCC, CCR, Cmp);
+ ARMcc, CCR, Cmp);
if (CondCode2 != ARMCC::AL) {
- SDValue ARMCC2 = DAG.getConstant(CondCode2, MVT::i32);
+ SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32);
// FIXME: Needs another CMP because flag can have but one use.
SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
Result = DAG.getNode(ARMISD::CMOV, dl, VT,
- Result, TrueVal, ARMCC2, CCR, Cmp2);
+ Result, TrueVal, ARMcc2, CCR, Cmp2);
}
return Result;
}
+/// canChangeToInt - Given an fp compare operand, return true if it is suitable
+/// to be morphed into an integer compare sequence.
+static bool canChangeToInt(SDValue Op, bool &SeenZero,
+ const ARMSubtarget *Subtarget) {
+ SDNode *N = Op.getNode();
+ if (!N->hasOneUse())
+ // Otherwise it requires moving the value from fp to integer registers.
+ return false;
+ if (!N->getNumValues())
+ return false;
+ EVT VT = Op.getValueType();
+ if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
+ // f32 case is generally profitable. f64 case only makes sense when vcmpe +
+ // vmrs are very slow, e.g. cortex-a8.
+ return false;
+
+ if (isFloatingPointZero(Op)) {
+ SeenZero = true;
+ return true;
+ }
+ return ISD::isNormalLoad(N);
+}
+
+static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
+ if (isFloatingPointZero(Op))
+ return DAG.getConstant(0, MVT::i32);
+
+ if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
+ return DAG.getLoad(MVT::i32, Op.getDebugLoc(),
+ Ld->getChain(), Ld->getBasePtr(),
+ Ld->getSrcValue(), Ld->getSrcValueOffset(),
+ Ld->isVolatile(), Ld->isNonTemporal(),
+ Ld->getAlignment());
+
+ llvm_unreachable("Unknown VFP cmp argument!");
+}
+
+static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
+ SDValue &RetVal1, SDValue &RetVal2) {
+ if (isFloatingPointZero(Op)) {
+ RetVal1 = DAG.getConstant(0, MVT::i32);
+ RetVal2 = DAG.getConstant(0, MVT::i32);
+ return;
+ }
+
+ if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
+ SDValue Ptr = Ld->getBasePtr();
+ RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
+ Ld->getChain(), Ptr,
+ Ld->getSrcValue(), Ld->getSrcValueOffset(),
+ Ld->isVolatile(), Ld->isNonTemporal(),
+ Ld->getAlignment());
+
+ EVT PtrType = Ptr.getValueType();
+ unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
+ SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(),
+ PtrType, Ptr, DAG.getConstant(4, PtrType));
+ RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(),
+ Ld->getChain(), NewPtr,
+ Ld->getSrcValue(), Ld->getSrcValueOffset() + 4,
+ Ld->isVolatile(), Ld->isNonTemporal(),
+ NewAlign);
+ return;
+ }
+
+ llvm_unreachable("Unknown VFP cmp argument!");
+}
+
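expandf64Toi32 above trades one f64 load for two i32 loads at byte offsets 0 and 4. A host-side sketch of the same split, assuming the little-endian layout used by these ARM targets (splitF64 is a hypothetical name):

    #include <cstdint>
    #include <cstring>

    // The two 32-bit words together carry the f64 bit pattern; on a
    // little-endian target the word at offset 0 is the low half.
    static void splitF64(const double *p, uint32_t &Lo, uint32_t &Hi) {
      std::memcpy(&Lo, reinterpret_cast<const char *>(p) + 0, 4);
      std::memcpy(&Hi, reinterpret_cast<const char *>(p) + 4, 4);
    }
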
+/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
+/// f32 and even f64 comparisons to integer ones.
+SDValue
+ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
+ SDValue LHS = Op.getOperand(2);
+ SDValue RHS = Op.getOperand(3);
+ SDValue Dest = Op.getOperand(4);
+ DebugLoc dl = Op.getDebugLoc();
+
+ bool SeenZero = false;
+ if (canChangeToInt(LHS, SeenZero, Subtarget) &&
+ canChangeToInt(RHS, SeenZero, Subtarget) &&
+ // If one of the operands is zero, it's safe to ignore the NaN case.
+ (FiniteOnlyFPMath() || SeenZero)) {
+ // If unsafe fp math optimization is enabled and there are no other uses of
+ // the CMP operands, and the condition code is EQ or NE, we can optimize it
+ // to an integer comparison.
+ if (CC == ISD::SETOEQ)
+ CC = ISD::SETEQ;
+ else if (CC == ISD::SETUNE)
+ CC = ISD::SETNE;
+
+ SDValue ARMcc;
+ if (LHS.getValueType() == MVT::f32) {
+ LHS = bitcastf32Toi32(LHS, DAG);
+ RHS = bitcastf32Toi32(RHS, DAG);
+ SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
+ SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
+ return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
+ Chain, Dest, ARMcc, CCR, Cmp);
+ }
+
+ SDValue LHS1, LHS2;
+ SDValue RHS1, RHS2;
+ expandf64Toi32(LHS, DAG, LHS1, LHS2);
+ expandf64Toi32(RHS, DAG, RHS1, RHS2);
+ ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
+ ARMcc = DAG.getConstant(CondCode, MVT::i32);
+ SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
+ SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
+ return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7);
+ }
+
+ return SDValue();
+}
+
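The transform rests on IEEE-754 equality coinciding with bit-pattern equality once NaNs are excluded (and, under -enable-unsafe-fp-math, ignoring that +0.0 and -0.0 differ in bits). A stand-alone sketch of the idea behind bitcastf32Toi32 (f32EqAsInt is a hypothetical name):

    #include <cstdint>
    #include <cstring>

    // f32 equality as a 32-bit integer compare; valid when neither input
    // is NaN and signed zeros need not compare equal.
    static bool f32EqAsInt(float a, float b) {
      uint32_t ia, ib;
      std::memcpy(&ia, &a, sizeof ia);
      std::memcpy(&ib, &b, sizeof ib);
      return ia == ib;
    }
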
SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
- SDValue Chain = Op.getOperand(0);
+ SDValue Chain = Op.getOperand(0);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
- SDValue LHS = Op.getOperand(2);
- SDValue RHS = Op.getOperand(3);
- SDValue Dest = Op.getOperand(4);
+ SDValue LHS = Op.getOperand(2);
+ SDValue RHS = Op.getOperand(3);
+ SDValue Dest = Op.getOperand(4);
DebugLoc dl = Op.getDebugLoc();
if (LHS.getValueType() == MVT::i32) {
- SDValue ARMCC;
+ SDValue ARMcc;
+ SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, dl);
return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
- Chain, Dest, ARMCC, CCR,Cmp);
+ Chain, Dest, ARMcc, CCR, Cmp);
}
assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
+
+ if (UnsafeFPMath &&
+ (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
+ CC == ISD::SETNE || CC == ISD::SETUNE)) {
+ SDValue Result = OptimizeVFPBrcond(Op, DAG);
+ if (Result.getNode())
+ return Result;
+ }
+
ARMCC::CondCodes CondCode, CondCode2;
FPCCToARMCC(CC, CondCode, CondCode2);
+ SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
- SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
- SDValue Ops[] = { Chain, Dest, ARMCC, CCR, Cmp };
+ SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
if (CondCode2 != ARMCC::AL) {
- ARMCC = DAG.getConstant(CondCode2, MVT::i32);
- SDValue Ops[] = { Res, Dest, ARMCC, CCR, Res.getValue(1) };
+ ARMcc = DAG.getConstant(CondCode2, MVT::i32);
+ SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
}
return Res;
@@ -2132,7 +2555,7 @@ static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(Opc, dl, VT, Op);
}
-static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
+SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
// Implement fcopysign with a fabs and a conditional fneg.
SDValue Tmp0 = Op.getOperand(0);
SDValue Tmp1 = Op.getOperand(1);
@@ -2140,10 +2563,11 @@ static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
EVT SrcVT = Tmp1.getValueType();
SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0);
- SDValue Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG, dl);
- SDValue ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32);
+ SDValue ARMcc = DAG.getConstant(ARMCC::LT, MVT::i32);
+ SDValue FP0 = DAG.getConstantFP(0.0, SrcVT);
+ SDValue Cmp = getVFPCmp(Tmp1, FP0, DAG, dl);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
- return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMCC, CCR, Cmp);
+ return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMcc, CCR, Cmp);
}
SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
@@ -2206,7 +2630,8 @@ static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
DAG.getConstant(0, MVT::i32));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
DAG.getConstant(1, MVT::i32));
- return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, DstVT,
+ DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
}
// Turn f64->i64 into VMOVRRD.
@@ -2221,51 +2646,18 @@ static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
}
/// getZeroVector - Returns a vector of specified type with all zero elements.
-///
+/// Zero vectors are used to represent vector negation and in those cases
+/// will be implemented with the NEON VNEG instruction. However, VNEG does
+/// not support i64 elements, so sometimes the zero vectors will need to be
+/// explicitly constructed. Regardless, use a canonical VMOV to create the
+/// zero vector.
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
assert(VT.isVector() && "Expected a vector type");
-
- // Zero vectors are used to represent vector negation and in those cases
- // will be implemented with the NEON VNEG instruction. However, VNEG does
- // not support i64 elements, so sometimes the zero vectors will need to be
- // explicitly constructed. For those cases, and potentially other uses in
- // the future, always build zero vectors as <16 x i8> or <8 x i8> bitcasted
- // to their dest type. This ensures they get CSE'd.
- SDValue Vec;
- SDValue Cst = DAG.getTargetConstant(0, MVT::i8);
- SmallVector<SDValue, 8> Ops;
- MVT TVT;
-
- if (VT.getSizeInBits() == 64) {
- Ops.assign(8, Cst); TVT = MVT::v8i8;
- } else {
- Ops.assign(16, Cst); TVT = MVT::v16i8;
- }
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, TVT, &Ops[0], Ops.size());
-
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
-}
-
-/// getOnesVector - Returns a vector of specified type with all bits set.
-///
-static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
- assert(VT.isVector() && "Expected a vector type");
-
- // Always build ones vectors as <16 x i8> or <8 x i8> bitcasted to their
- // dest type. This ensures they get CSE'd.
- SDValue Vec;
- SDValue Cst = DAG.getTargetConstant(0xFF, MVT::i8);
- SmallVector<SDValue, 8> Ops;
- MVT TVT;
-
- if (VT.getSizeInBits() == 64) {
- Ops.assign(8, Cst); TVT = MVT::v8i8;
- } else {
- Ops.assign(16, Cst); TVT = MVT::v16i8;
- }
- Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, TVT, &Ops[0], Ops.size());
-
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
+ // The canonical modified immediate encoding of a zero vector is....0!
+ SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
+ EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
+ SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov);
}
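
The "....0!" remark above follows from how the modified immediate is packed. A sketch, assuming ARM_AM::createNEONModImm packs the value as (OpCmode << 8) | Imm (packNEONModImm is a hypothetical stand-in):

    // Op=0, Cmode=0000, Imm=0 packs to 0, so the canonical VMOV.I32 zero
    // vector really is the encoded value 0 used above.
    static unsigned packNEONModImm(unsigned OpCmode, unsigned Imm) {
      return (OpCmode << 8) | Imm;
    }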
/// LowerShiftRightParts - Lower SRA_PARTS, which returns two
@@ -2279,7 +2671,7 @@ SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
SDValue ShOpLo = Op.getOperand(0);
SDValue ShOpHi = Op.getOperand(1);
SDValue ShAmt = Op.getOperand(2);
- SDValue ARMCC;
+ SDValue ARMcc;
unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
@@ -2295,9 +2687,9 @@ SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
- ARMCC, DAG, dl);
+ ARMcc, DAG, dl);
SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
- SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC,
+ SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
CCR, Cmp);
SDValue Ops[2] = { Lo, Hi };
@@ -2315,7 +2707,7 @@ SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
SDValue ShOpLo = Op.getOperand(0);
SDValue ShOpHi = Op.getOperand(1);
SDValue ShAmt = Op.getOperand(2);
- SDValue ARMCC;
+ SDValue ARMcc;
assert(Op.getOpcode() == ISD::SHL_PARTS);
SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
@@ -2329,9 +2721,9 @@ SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
- ARMCC, DAG, dl);
+ ARMcc, DAG, dl);
SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
- SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMCC,
+ SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
CCR, Cmp);
SDValue Ops[2] = { Lo, Hi };
@@ -2516,89 +2908,138 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
return Result;
}
-/// isVMOVSplat - Check if the specified splat value corresponds to an immediate
-/// VMOV instruction, and if so, return the constant being splatted.
-static SDValue isVMOVSplat(uint64_t SplatBits, uint64_t SplatUndef,
- unsigned SplatBitSize, SelectionDAG &DAG) {
+/// isNEONModifiedImm - Check if the specified splat value corresponds to a
+/// valid vector constant for a NEON instruction with a "modified immediate"
+/// operand (e.g., VMOV). If so, return the encoded value.
+static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
+ unsigned SplatBitSize, SelectionDAG &DAG,
+ EVT &VT, bool is128Bits, bool isVMOV) {
+ unsigned OpCmode, Imm;
+
+ // SplatBitSize is set to the smallest size that splats the vector, so a
+ // zero vector will always have SplatBitSize == 8. However, NEON modified
+ // immediate instructions other than VMOV do not support the 8-bit encoding
+ // of a zero vector, and the default encoding of zero is supposed to be the
+ // 32-bit version.
+ if (SplatBits == 0)
+ SplatBitSize = 32;
+
switch (SplatBitSize) {
case 8:
- // Any 1-byte value is OK.
+ if (!isVMOV)
+ return SDValue();
+ // Any 1-byte value is OK. Op=0, Cmode=1110.
assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
- return DAG.getTargetConstant(SplatBits, MVT::i8);
+ OpCmode = 0xe;
+ Imm = SplatBits;
+ VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
+ break;
case 16:
// NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
- if ((SplatBits & ~0xff) == 0 ||
- (SplatBits & ~0xff00) == 0)
- return DAG.getTargetConstant(SplatBits, MVT::i16);
- break;
+ VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
+ if ((SplatBits & ~0xff) == 0) {
+ // Value = 0x00nn: Op=x, Cmode=100x.
+ OpCmode = 0x8;
+ Imm = SplatBits;
+ break;
+ }
+ if ((SplatBits & ~0xff00) == 0) {
+ // Value = 0xnn00: Op=x, Cmode=101x.
+ OpCmode = 0xa;
+ Imm = SplatBits >> 8;
+ break;
+ }
+ return SDValue();
case 32:
// NEON's 32-bit VMOV supports splat values where:
// * only one byte is nonzero, or
// * the least significant byte is 0xff and the second byte is nonzero, or
// * the least significant 2 bytes are 0xff and the third is nonzero.
- if ((SplatBits & ~0xff) == 0 ||
- (SplatBits & ~0xff00) == 0 ||
- (SplatBits & ~0xff0000) == 0 ||
- (SplatBits & ~0xff000000) == 0)
- return DAG.getTargetConstant(SplatBits, MVT::i32);
+ VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
+ if ((SplatBits & ~0xff) == 0) {
+ // Value = 0x000000nn: Op=x, Cmode=000x.
+ OpCmode = 0;
+ Imm = SplatBits;
+ break;
+ }
+ if ((SplatBits & ~0xff00) == 0) {
+ // Value = 0x0000nn00: Op=x, Cmode=001x.
+ OpCmode = 0x2;
+ Imm = SplatBits >> 8;
+ break;
+ }
+ if ((SplatBits & ~0xff0000) == 0) {
+ // Value = 0x00nn0000: Op=x, Cmode=010x.
+ OpCmode = 0x4;
+ Imm = SplatBits >> 16;
+ break;
+ }
+ if ((SplatBits & ~0xff000000) == 0) {
+ // Value = 0xnn000000: Op=x, Cmode=011x.
+ OpCmode = 0x6;
+ Imm = SplatBits >> 24;
+ break;
+ }
if ((SplatBits & ~0xffff) == 0 &&
- ((SplatBits | SplatUndef) & 0xff) == 0xff)
- return DAG.getTargetConstant(SplatBits | 0xff, MVT::i32);
+ ((SplatBits | SplatUndef) & 0xff) == 0xff) {
+ // Value = 0x0000nnff: Op=x, Cmode=1100.
+ OpCmode = 0xc;
+ Imm = SplatBits >> 8;
+ SplatBits |= 0xff;
+ break;
+ }
if ((SplatBits & ~0xffffff) == 0 &&
- ((SplatBits | SplatUndef) & 0xffff) == 0xffff)
- return DAG.getTargetConstant(SplatBits | 0xffff, MVT::i32);
+ ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
+ // Value = 0x00nnffff: Op=x, Cmode=1101.
+ OpCmode = 0xd;
+ Imm = SplatBits >> 16;
+ SplatBits |= 0xffff;
+ break;
+ }
// Note: there are a few 32-bit splat values (specifically: 00ffff00,
// ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
// VMOV.I32. A (very) minor optimization would be to replicate the value
// and fall through here to test for a valid 64-bit splat. But, then the
// caller would also need to check and handle the change in size.
- break;
+ return SDValue();
case 64: {
+ if (!isVMOV)
+ return SDValue();
// NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
uint64_t BitMask = 0xff;
uint64_t Val = 0;
+ unsigned ImmMask = 1;
+ Imm = 0;
for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
- if (((SplatBits | SplatUndef) & BitMask) == BitMask)
+ if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
Val |= BitMask;
- else if ((SplatBits & BitMask) != 0)
+ Imm |= ImmMask;
+ } else if ((SplatBits & BitMask) != 0) {
return SDValue();
+ }
BitMask <<= 8;
+ ImmMask <<= 1;
}
- return DAG.getTargetConstant(Val, MVT::i64);
- }
-
- default:
- llvm_unreachable("unexpected size for isVMOVSplat");
+ // Op=1, Cmode=1110.
+ OpCmode = 0x1e;
+ SplatBits = Val;
+ VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
break;
}
- return SDValue();
-}
-
-/// getVMOVImm - If this is a build_vector of constants which can be
-/// formed by using a VMOV instruction of the specified element size,
-/// return the constant being splatted. The ByteSize field indicates the
-/// number of bytes of each element [1248].
-SDValue ARM::getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
- BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);
- APInt SplatBits, SplatUndef;
- unsigned SplatBitSize;
- bool HasAnyUndefs;
- if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
- HasAnyUndefs, ByteSize * 8))
- return SDValue();
-
- if (SplatBitSize > ByteSize * 8)
+ default:
+ llvm_unreachable("unexpected size for isNEONModifiedImm");
return SDValue();
+ }
- return isVMOVSplat(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
- SplatBitSize, DAG);
+ unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
+ return DAG.getTargetConstant(EncodedVal, MVT::i32);
}
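
A worked instance of the 32-bit cases above: the splat 0x00410000 has only byte 2 nonzero, so it takes Op=x, Cmode=010x with Imm=0x41. Those single-nonzero-byte cases collapse to a small loop; a sketch under the packing assumption noted earlier (encodeVMOVi32OneByte is hypothetical):

    #include <cstdint>

    // Packed encoding for a 32-bit splat with exactly one nonzero byte,
    // or ~0u when one of the other forms above would be required.
    static uint32_t encodeVMOVi32OneByte(uint32_t Splat) {
      for (unsigned Byte = 0; Byte < 4; ++Byte) {
        uint32_t Mask = 0xffu << (8 * Byte);
        if ((Splat & ~Mask) == 0)
          return ((Byte * 2u) << 8) | (Splat >> (8 * Byte)); // Cmode 000x..011x
      }
      return ~0u;
    }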
static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT,
@@ -2789,43 +3230,6 @@ static bool isVZIP_v_undef_Mask(const SmallVectorImpl<int> &M, EVT VT,
return true;
}
-
-static SDValue BuildSplat(SDValue Val, EVT VT, SelectionDAG &DAG, DebugLoc dl) {
- // Canonicalize all-zeros and all-ones vectors.
- ConstantSDNode *ConstVal = cast<ConstantSDNode>(Val.getNode());
- if (ConstVal->isNullValue())
- return getZeroVector(VT, DAG, dl);
- if (ConstVal->isAllOnesValue())
- return getOnesVector(VT, DAG, dl);
-
- EVT CanonicalVT;
- if (VT.is64BitVector()) {
- switch (Val.getValueType().getSizeInBits()) {
- case 8: CanonicalVT = MVT::v8i8; break;
- case 16: CanonicalVT = MVT::v4i16; break;
- case 32: CanonicalVT = MVT::v2i32; break;
- case 64: CanonicalVT = MVT::v1i64; break;
- default: llvm_unreachable("unexpected splat element type"); break;
- }
- } else {
- assert(VT.is128BitVector() && "unknown splat vector size");
- switch (Val.getValueType().getSizeInBits()) {
- case 8: CanonicalVT = MVT::v16i8; break;
- case 16: CanonicalVT = MVT::v8i16; break;
- case 32: CanonicalVT = MVT::v4i32; break;
- case 64: CanonicalVT = MVT::v2i64; break;
- default: llvm_unreachable("unexpected splat element type"); break;
- }
- }
-
- // Build a canonical splat for this value.
- SmallVector<SDValue, 8> Ops;
- Ops.assign(CanonicalVT.getVectorNumElements(), Val);
- SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, &Ops[0],
- Ops.size());
- return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Res);
-}
-
// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
@@ -2838,10 +3242,26 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
bool HasAnyUndefs;
if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
if (SplatBitSize <= 64) {
- SDValue Val = isVMOVSplat(SplatBits.getZExtValue(),
- SplatUndef.getZExtValue(), SplatBitSize, DAG);
- if (Val.getNode())
- return BuildSplat(Val, VT, DAG, dl);
+ // Check if an immediate VMOV works.
+ EVT VmovVT;
+ SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
+ SplatUndef.getZExtValue(), SplatBitSize,
+ DAG, VmovVT, VT.is128BitVector(), true);
+ if (Val.getNode()) {
+ SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov);
+ }
+
+ // Try an immediate VMVN.
+ uint64_t NegatedImm = (SplatBits.getZExtValue() ^
+ ((1LL << SplatBitSize) - 1));
+ Val = isNEONModifiedImm(NegatedImm,
+ SplatUndef.getZExtValue(), SplatBitSize,
+ DAG, VmovVT, VT.is128BitVector(), false);
+ if (Val.getNode()) {
+ SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov);
+ }
}
}
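
A worked instance of the VMVN fallback above: the 32-bit splat 0xffffff00 is not a valid VMOV immediate, but its complement is. A quick sanity sketch of the negation step:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t SplatBits = 0xffffff00ULL;   // no VMOV.I32 form matches this
      unsigned SplatBitSize = 32;
      uint64_t NegatedImm = SplatBits ^ ((1ULL << SplatBitSize) - 1);
      assert(NegatedImm == 0xff);           // one nonzero byte: use VMVN.I32
      return 0;
    }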
@@ -2883,21 +3303,17 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(ARMISD::VDUP, dl, VT, Value);
// Vectors with 32- or 64-bit elements can be built by directly assigning
- // the subregisters.
+ // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands
+ // will be legalized.
if (EltSize >= 32) {
// Do the expansion with floating-point types, since that is what the VFP
// registers are defined to use, and since i64 is not legal.
EVT EltVT = EVT::getFloatingPointVT(EltSize);
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
- SDValue Val = DAG.getUNDEF(VecVT);
- for (unsigned i = 0; i < NumElts; ++i) {
- SDValue Elt = Op.getOperand(i);
- if (Elt.getOpcode() == ISD::UNDEF)
- continue;
- Elt = DAG.getNode(ISD::BIT_CONVERT, dl, EltVT, Elt);
- Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Val, Elt,
- DAG.getConstant(i, MVT::i32));
- }
+ SmallVector<SDValue, 8> Ops;
+ for (unsigned i = 0; i < NumElts; ++i)
+ Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, EltVT, Op.getOperand(i)));
+ SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val);
}
@@ -2934,7 +3350,9 @@ ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
bool ReverseVEXT;
unsigned Imm, WhichResult;
- return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ return (EltSize >= 32 ||
+ ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
isVREVMask(M, VT, 64) ||
isVREVMask(M, VT, 32) ||
isVREVMask(M, VT, 16) ||
@@ -3032,59 +3450,62 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
// of the same type so that they get CSEd properly.
SVN->getMask(ShuffleMask);
- if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
- int Lane = SVN->getSplatIndex();
- // If this is undef splat, generate it via "just" vdup, if possible.
- if (Lane == -1) Lane = 0;
-
- if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
- return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
+ unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ if (EltSize <= 32) {
+ if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
+ int Lane = SVN->getSplatIndex();
+ // If this is undef splat, generate it via "just" vdup, if possible.
+ if (Lane == -1) Lane = 0;
+
+ if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
+ return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
+ }
+ return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
+ DAG.getConstant(Lane, MVT::i32));
}
- return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
- DAG.getConstant(Lane, MVT::i32));
- }
- bool ReverseVEXT;
- unsigned Imm;
- if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
- if (ReverseVEXT)
- std::swap(V1, V2);
- return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
- DAG.getConstant(Imm, MVT::i32));
- }
-
- if (isVREVMask(ShuffleMask, VT, 64))
- return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
- if (isVREVMask(ShuffleMask, VT, 32))
- return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
- if (isVREVMask(ShuffleMask, VT, 16))
- return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
-
- // Check for Neon shuffles that modify both input vectors in place.
- // If both results are used, i.e., if there are two shuffles with the same
- // source operands and with masks corresponding to both results of one of
- // these operations, DAG memoization will ensure that a single node is
- // used for both shuffles.
- unsigned WhichResult;
- if (isVTRNMask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
- V1, V2).getValue(WhichResult);
- if (isVUZPMask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
- V1, V2).getValue(WhichResult);
- if (isVZIPMask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
- V1, V2).getValue(WhichResult);
+ bool ReverseVEXT;
+ unsigned Imm;
+ if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
+ if (ReverseVEXT)
+ std::swap(V1, V2);
+ return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
+ DAG.getConstant(Imm, MVT::i32));
+ }
- if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
- V1, V1).getValue(WhichResult);
- if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
- V1, V1).getValue(WhichResult);
- if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
- V1, V1).getValue(WhichResult);
+ if (isVREVMask(ShuffleMask, VT, 64))
+ return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
+ if (isVREVMask(ShuffleMask, VT, 32))
+ return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
+ if (isVREVMask(ShuffleMask, VT, 16))
+ return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
+
+ // Check for Neon shuffles that modify both input vectors in place.
+ // If both results are used, i.e., if there are two shuffles with the same
+ // source operands and with masks corresponding to both results of one of
+ // these operations, DAG memoization will ensure that a single node is
+ // used for both shuffles.
+ unsigned WhichResult;
+ if (isVTRNMask(ShuffleMask, VT, WhichResult))
+ return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
+ V1, V2).getValue(WhichResult);
+ if (isVUZPMask(ShuffleMask, VT, WhichResult))
+ return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
+ V1, V2).getValue(WhichResult);
+ if (isVZIPMask(ShuffleMask, VT, WhichResult))
+ return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
+ V1, V2).getValue(WhichResult);
+
+ if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
+ return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
+ V1, V1).getValue(WhichResult);
+ if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
+ return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
+ V1, V1).getValue(WhichResult);
+ if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
+ return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
+ V1, V1).getValue(WhichResult);
+ }
// If the shuffle is not directly supported and it has 4 elements, use
// the PerfectShuffle-generated table to synthesize it from other shuffles.
@@ -3108,8 +3529,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
}
- // Implement shuffles with 32- or 64-bit elements as subreg copies.
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
if (EltSize >= 32) {
// Do the expansion with floating-point types, since that is what the VFP
// registers are defined to use, and since i64 is not legal.
@@ -3117,17 +3537,17 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
V1 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V1);
V2 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V2);
- SDValue Val = DAG.getUNDEF(VecVT);
+ SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i) {
if (ShuffleMask[i] < 0)
- continue;
- SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
- ShuffleMask[i] < (int)NumElts ? V1 : V2,
- DAG.getConstant(ShuffleMask[i] & (NumElts-1),
- MVT::i32));
- Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Val,
- Elt, DAG.getConstant(i, MVT::i32));
+ Ops.push_back(DAG.getUNDEF(EltVT));
+ else
+ Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
+ ShuffleMask[i] < (int)NumElts ? V1 : V2,
+ DAG.getConstant(ShuffleMask[i] & (NumElts-1),
+ MVT::i32)));
}
+ SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val);
}
@@ -3277,7 +3697,12 @@ ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
MF->insert(It, loop1MBB);
MF->insert(It, loop2MBB);
MF->insert(It, exitMBB);
- exitMBB->transferSuccessors(BB);
+
+ // Transfer the remainder of BB and its successor edges to exitMBB.
+ exitMBB->splice(exitMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// thisMBB:
// ...
@@ -3315,7 +3740,7 @@ ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
// ...
BB = exitMBB;
- MF->DeleteMachineInstr(MI); // The instruction is gone now.
+ MI->eraseFromParent(); // The instruction is gone now.
return BB;
}
@@ -3358,7 +3783,12 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MF->insert(It, loopMBB);
MF->insert(It, exitMBB);
- exitMBB->transferSuccessors(BB);
+
+ // Transfer the remainder of BB and its successor edges to exitMBB.
+ exitMBB->splice(exitMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = MF->getRegInfo();
unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
@@ -3403,11 +3833,20 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
// ...
BB = exitMBB;
- MF->DeleteMachineInstr(MI); // The instruction is gone now.
+ MI->eraseFromParent(); // The instruction is gone now.
return BB;
}
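
Both atomic expanders above use the same block-surgery idiom when a pseudo becomes a loop: everything after MI moves into exitMBB, which then inherits BB's successor edges with PHIs updated. A sketch of that idiom, assuming only the MachineBasicBlock interface visible in this patch (splitAroundMI is a hypothetical helper):

    #include "llvm/ADT/STLExtras.h"              // llvm::next
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    using namespace llvm;

    // BB keeps instructions up to and including MI; exitMBB gets the rest
    // plus BB's old successors.
    static void splitAroundMI(MachineInstr *MI, MachineBasicBlock *BB,
                              MachineBasicBlock *exitMBB) {
      exitMBB->splice(exitMBB->begin(), BB,
                      llvm::next(MachineBasicBlock::iterator(MI)),
                      BB->end());
      exitMBB->transferSuccessorsAndUpdatePHIs(BB);
    }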
+static
+MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
+ for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
+ E = MBB->succ_end(); I != E; ++I)
+ if (*I != Succ)
+ return *I;
+ llvm_unreachable("Expecting a BB with two successors!");
+}
+
MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
@@ -3488,22 +3927,21 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineFunction *F = BB->getParent();
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
- BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
- .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
- E = BB->succ_end(); I != E; ++I)
- sinkMBB->addSuccessor(*I);
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while (!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
+ BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB)
+ .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
+
// copy0MBB:
// %FalseValue = ...
// # fallthrough to sinkMBB
@@ -3516,11 +3954,52 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(BB, dl, TII->get(ARM::PHI), MI->getOperand(0).getReg())
+ BuildMI(*BB, BB->begin(), dl,
+ TII->get(ARM::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+ }
+
+ case ARM::BCCi64:
+ case ARM::BCCZi64: {
+ // Compare both parts that make up the double comparison separately for
+ // equality.
+ bool RHSisZero = MI->getOpcode() == ARM::BCCZi64;
+
+ unsigned LHS1 = MI->getOperand(1).getReg();
+ unsigned LHS2 = MI->getOperand(2).getReg();
+ if (RHSisZero) {
+ AddDefaultPred(BuildMI(BB, dl,
+ TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
+ .addReg(LHS1).addImm(0));
+ BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
+ .addReg(LHS2).addImm(0)
+ .addImm(ARMCC::EQ).addReg(ARM::CPSR);
+ } else {
+ unsigned RHS1 = MI->getOperand(3).getReg();
+ unsigned RHS2 = MI->getOperand(4).getReg();
+ AddDefaultPred(BuildMI(BB, dl,
+ TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
+ .addReg(LHS1).addReg(RHS1));
+ BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
+ .addReg(LHS2).addReg(RHS2)
+ .addImm(ARMCC::EQ).addReg(ARM::CPSR);
+ }
+
+ MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB();
+ MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
+ if (MI->getOperand(0).getImm() == ARMCC::NE)
+ std::swap(destMBB, exitMBB);
+
+ BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
+ .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
+ BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2B : ARM::B))
+ .addMBB(exitMBB);
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
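
The BCCi64 expansion above tests 64-bit equality with two 32-bit compares; the second CMP is predicated on EQ, so CPSR reads EQ at the branch only if both halves matched. The semantics in plain C++ (a sketch, not the MI sequence; i64EqByHalves is hypothetical):

    #include <cstdint>

    // CMP lhs1, rhs1; CMPEQ lhs2, rhs2; then Bcc on EQ/NE.
    static bool i64EqByHalves(uint32_t LHS1, uint32_t LHS2,
                              uint32_t RHS1, uint32_t RHS2) {
      return LHS1 == RHS1 && LHS2 == RHS2;
    }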
@@ -3541,7 +4020,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(SrcReg);
unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
? ARM::tMOVtgpr2gpr : ARM::tMOVgpr2gpr;
- BuildMI(BB, dl, TII->get(CopyOpc), ARM::SP)
+ BuildMI(*BB, MI, dl, TII->get(CopyOpc), ARM::SP)
.addReg(SrcReg, getKillRegState(SrcIsKill));
}
@@ -3573,7 +4052,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
NeedPred = true; NeedCC = true; NeedOp3 = true;
break;
}
- MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(OpOpc), ARM::SP);
+ MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(OpOpc), ARM::SP);
if (OpOpc == ARM::tAND)
AddDefaultT1CC(MIB);
MIB.addReg(ARM::SP);
@@ -3589,10 +4068,10 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(DstReg);
unsigned CopyOpc = (RC == ARM::tGPRRegisterClass)
? ARM::tMOVgpr2tgpr : ARM::tMOVgpr2gpr;
- BuildMI(BB, dl, TII->get(CopyOpc))
+ BuildMI(*BB, MI, dl, TII->get(CopyOpc))
.addReg(DstReg, getDefRegState(true) | getDeadRegState(DstIsDead))
.addReg(ARM::SP);
- MF->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
}
@@ -3763,6 +4242,35 @@ static SDValue PerformVMOVRRDCombine(SDNode *N,
return SDValue();
}
+/// PerformVDUPLANECombine - Target-specific dag combine xforms for
+/// ARMISD::VDUPLANE.
+static SDValue PerformVDUPLANECombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
+ // redundant.
+ SDValue Op = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+
+ // Ignore bit_converts.
+ while (Op.getOpcode() == ISD::BIT_CONVERT)
+ Op = Op.getOperand(0);
+ if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
+ return SDValue();
+
+ // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
+ unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits();
+ // The canonical VMOV for a zero vector uses a 32-bit element size.
+ unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ unsigned EltBits;
+ if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
+ EltSize = 8;
+ if (EltSize > VT.getVectorElementType().getSizeInBits())
+ return SDValue();
+
+ SDValue Res = DCI.DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op);
+ return DCI.CombineTo(N, Res, false);
+}
+
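The combine rests on a simple identity: duplicating any lane of a vector whose lanes already hold one value reproduces the vector, so a VDUPLANE fed by a VMOVIMM/VMVNIMM splat folds away. A host-side sketch (vduplane is a hypothetical model of the instruction):

    #include <array>
    #include <cstddef>

    // For a splat input V, vduplane(V, Lane) == V for every Lane.
    template <std::size_t N>
    std::array<int, N> vduplane(const std::array<int, N> &V, std::size_t Lane) {
      std::array<int, N> R;
      R.fill(V[Lane]);
      return R;
    }
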
/// getVShiftImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift operation, where all the elements of the
/// build_vector must have the same constant integer value.
@@ -3893,7 +4401,8 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
// Narrowing shifts require an immediate right shift.
if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
break;
- llvm_unreachable("invalid shift count for narrowing vector shift intrinsic");
+ llvm_unreachable("invalid shift count for narrowing vector shift "
+ "intrinsic");
default:
llvm_unreachable("unhandled vector shift");
@@ -4140,6 +4649,7 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::SUB: return PerformSUBCombine(N, DCI);
case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget);
case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI);
+ case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
case ISD::SHL:
case ISD::SRA:
@@ -4156,14 +4666,13 @@ bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
if (!Subtarget->hasV6Ops())
// Pre-v6 does not support unaligned mem access.
return false;
- else {
- // v6+ may or may not support unaligned mem access depending on the system
- // configuration.
- // FIXME: This is pretty conservative. Should we provide cmdline option to
- // control the behaviour?
- if (!Subtarget->isTargetDarwin())
- return false;
- }
+
+ // v6+ may or may not support unaligned mem access depending on the system
+ // configuration.
+ // FIXME: This is pretty conservative. Should we provide cmdline option to
+ // control the behaviour?
+ if (!Subtarget->isTargetDarwin())
+ return false;
switch (VT.getSimpleVT().SimpleTy) {
default:
@@ -4619,7 +5128,7 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
}
}
if (StringRef("{cc}").equals_lower(Constraint))
- return std::make_pair(0U, ARM::CCRRegisterClass);
+ return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass);
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
@@ -4669,7 +5178,6 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
/// vector. If it is invalid, don't add anything to Ops.
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
char Constraint,
- bool hasMemory,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
SDValue Result(0, 0);
@@ -4818,8 +5326,7 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
Ops.push_back(Result);
return;
}
- return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
- Ops, DAG);
+ return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
bool
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
index 9c7517c..128b72e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -53,6 +53,8 @@ namespace llvm {
CMOV, // ARM conditional move instructions.
CNEG, // ARM conditional negate instructions.
+ BCC_i64,
+
RBIT, // ARM bitreverse instruction
FTOSI, // FP to sint within a FP register.
@@ -70,6 +72,8 @@ namespace llvm {
EH_SJLJ_SETJMP, // SjLj exception handling setjmp.
EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
+ TC_RETURN, // Tail call return pseudo.
+
THREAD_POINTER,
DYN_ALLOC, // Dynamic allocation on the stack.
@@ -120,6 +124,10 @@ namespace llvm {
VGETLANEu, // zero-extend vector extract element
VGETLANEs, // sign-extend vector extract element
+ // Vector move immediate and move negated immediate:
+ VMOVIMM,
+ VMVNIMM,
+
// Vector duplicate:
VDUP,
VDUPLANE,
@@ -133,6 +141,13 @@ namespace llvm {
VUZP, // unzip (deinterleave)
VTRN, // transpose
+ // Operands of the standard BUILD_VECTOR node are not legalized, which
+ // is fine if BUILD_VECTORs are always lowered to shuffles or other
+ // operations, but for ARM some BUILD_VECTORs are legal as-is and their
+ // operands need to be legalized. Define an ARM-specific version of
+ // BUILD_VECTOR for this purpose.
+ BUILD_VECTOR,
+
// Floating-point max and min:
FMAX,
FMIN
@@ -141,12 +156,6 @@ namespace llvm {
/// Define some predicates that are used for node matching.
namespace ARM {
- /// getVMOVImm - If this is a build_vector of constants which can be
- /// formed by using a VMOV instruction of the specified element size,
- /// return the constant being splatted. The ByteSize field indicates the
- /// number of bytes of each element [1248].
- SDValue getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
-
/// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be
/// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd)
/// instruction, returns its 8-bit integer representation. Otherwise,
@@ -189,9 +198,9 @@ namespace llvm {
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
/// isLegalICmpImmediate - Return true if the specified immediate is legal
- /// icmp immediate, that is the target has icmp instructions which can compare
- /// a register against the immediate without having to materialize the
- /// immediate into a register.
+ /// icmp immediate, that is the target has icmp instructions which can
+ /// compare a register against the immediate without having to materialize
+ /// the immediate into a register.
virtual bool isLegalICmpImmediate(int64_t Imm) const;
/// getPreIndexedAddressParts - returns true by value, base pointer and
@@ -232,7 +241,6 @@ namespace llvm {
/// being processed is 'm'.
virtual void LowerAsmOperandForConstraint(SDValue Op,
char ConstraintLetter,
- bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
@@ -282,7 +290,8 @@ namespace llvm {
SDValue &Root, SelectionDAG &DAG,
DebugLoc dl) const;
- CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return, bool isVarArg) const;
+ CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
+ bool isVarArg) const;
SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
DebugLoc dl, SelectionDAG &DAG,
const CCValAssign &VA,
@@ -303,6 +312,7 @@ namespace llvm {
SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
@@ -327,18 +337,36 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
+ /// IsEligibleForTailCallOptimization - Check whether the call is eligible
+ /// for tail call optimization. Targets which want to do tail call
+ /// optimization should implement this function.
+ bool IsEligibleForTailCallOptimization(SDValue Callee,
+ CallingConv::ID CalleeCC,
+ bool isVarArg,
+ bool isCalleeStructRet,
+ bool isCallerStructRet,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SelectionDAG& DAG) const;
virtual SDValue
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
- SDValue &ARMCC, SelectionDAG &DAG, DebugLoc dl) const;
+ SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
+ SDValue getVFPCmp(SDValue LHS, SDValue RHS,
+ SelectionDAG &DAG, DebugLoc dl) const;
+
+ SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;
MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
MachineBasicBlock *BB,
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td b/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
index d487df1..ac568e7 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrFormats.td
@@ -50,27 +50,23 @@ def VFPLdStMulFrm : Format<22>;
def VFPMiscFrm : Format<23>;
def ThumbFrm : Format<24>;
-
-def NEONFrm : Format<25>;
-def NEONGetLnFrm : Format<26>;
-def NEONSetLnFrm : Format<27>;
-def NEONDupFrm : Format<28>;
-
-def MiscFrm : Format<29>;
-def ThumbMiscFrm : Format<30>;
-
-def NLdStFrm : Format<31>;
-def N1RegModImmFrm : Format<32>;
-def N2RegFrm : Format<33>;
-def NVCVTFrm : Format<34>;
-def NVDupLnFrm : Format<35>;
-def N2RegVShLFrm : Format<36>;
-def N2RegVShRFrm : Format<37>;
-def N3RegFrm : Format<38>;
-def N3RegVShFrm : Format<39>;
-def NVExtFrm : Format<40>;
-def NVMulSLFrm : Format<41>;
-def NVTBLFrm : Format<42>;
+def MiscFrm : Format<25>;
+
+def NGetLnFrm : Format<26>;
+def NSetLnFrm : Format<27>;
+def NDupFrm : Format<28>;
+def NLdStFrm : Format<29>;
+def N1RegModImmFrm: Format<30>;
+def N2RegFrm : Format<31>;
+def NVCVTFrm : Format<32>;
+def NVDupLnFrm : Format<33>;
+def N2RegVShLFrm : Format<34>;
+def N2RegVShRFrm : Format<35>;
+def N3RegFrm : Format<36>;
+def N3RegVShFrm : Format<37>;
+def NVExtFrm : Format<38>;
+def NVMulSLFrm : Format<39>;
+def NVTBLFrm : Format<40>;
// Misc flags.
@@ -1653,17 +1649,17 @@ class NVLaneOp<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
class NVGetLane<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
dag oops, dag iops, InstrItinClass itin,
string opc, string dt, string asm, list<dag> pattern>
- : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NEONGetLnFrm, itin,
+ : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NGetLnFrm, itin,
opc, dt, asm, pattern>;
class NVSetLane<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
dag oops, dag iops, InstrItinClass itin,
string opc, string dt, string asm, list<dag> pattern>
- : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NEONSetLnFrm, itin,
+ : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NSetLnFrm, itin,
opc, dt, asm, pattern>;
class NVDup<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
dag oops, dag iops, InstrItinClass itin,
string opc, string dt, string asm, list<dag> pattern>
- : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NEONDupFrm, itin,
+ : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NDupFrm, itin,
opc, dt, asm, pattern>;
// Vector Duplicate Lane (from scalar to all elements)
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
index 85f6b40..ba228ff 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.cpp
@@ -63,7 +63,7 @@ unsigned ARMInstrInfo::getUnindexedOpcode(unsigned Opc) const {
void ARMInstrInfo::
reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SubIdx, const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const {
+ const TargetRegisterInfo &TRI) const {
DebugLoc dl = Orig->getDebugLoc();
unsigned Opcode = Orig->getOpcode();
switch (Opcode) {
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h
index d4199d1..4563ffe 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.h
@@ -35,7 +35,7 @@ public:
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo &TRI) const;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
index f3156d9..51fc152 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -38,6 +38,12 @@ def SDT_ARMBr2JT : SDTypeProfile<0, 4,
[SDTCisPtrTy<0>, SDTCisVT<1, i32>,
SDTCisVT<2, i32>, SDTCisVT<3, i32>]>;
+def SDT_ARMBCC_i64 : SDTypeProfile<0, 6,
+ [SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>, SDTCisVT<2, i32>,
+ SDTCisVT<3, i32>, SDTCisVT<4, i32>,
+ SDTCisVT<5, OtherVT>]>;
+
def SDT_ARMCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
def SDT_ARMPICAdd : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>,
@@ -53,6 +59,8 @@ def SDT_ARMSYNCBARRIERV7 : SDTypeProfile<0, 0, []>;
def SDT_ARMMEMBARRIERV6 : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_ARMSYNCBARRIERV6 : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDT_ARMTCRET : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
+
// Node definitions.
def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>;
def ARMWrapperJT : SDNode<"ARMISD::WrapperJT", SDTIntBinOp>;
@@ -88,6 +96,9 @@ def ARMbrjt : SDNode<"ARMISD::BR_JT", SDT_ARMBrJT,
def ARMbr2jt : SDNode<"ARMISD::BR2_JT", SDT_ARMBr2JT,
[SDNPHasChain]>;
+def ARMBcci64 : SDNode<"ARMISD::BCC_i64", SDT_ARMBCC_i64,
+ [SDNPHasChain]>;
+
def ARMcmp : SDNode<"ARMISD::CMP", SDT_ARMCmp,
[SDNPOutFlag]>;
@@ -117,6 +128,9 @@ def ARMSyncBarrierV6 : SDNode<"ARMISD::SYNCBARRIER", SDT_ARMMEMBARRIERV6,
def ARMrbit : SDNode<"ARMISD::RBIT", SDTIntUnaryOp>;
+def ARMtcret : SDNode<"ARMISD::TC_RETURN", SDT_ARMTCRET,
+ [SDNPHasChain, SDNPOptInFlag, SDNPVariadic]>;
+
//===----------------------------------------------------------------------===//
// ARM Instruction Predicate Definitions.
//
@@ -858,13 +872,13 @@ def LEApcrel : AXI1<0x0, (outs GPR:$dst), (ins i32imm:$label, pred:$p),
Pseudo, IIC_iALUi,
"adr$p\t$dst, #$label", []>;
+} // neverHasSideEffects
def LEApcrelJT : AXI1<0x0, (outs GPR:$dst),
(ins i32imm:$label, nohash_imm:$id, pred:$p),
Pseudo, IIC_iALUi,
"adr$p\t$dst, #${label}_${id}", []> {
let Inst{25} = 1;
}
-} // neverHasSideEffects
//===----------------------------------------------------------------------===//
// Control Flow Instructions.
@@ -1026,6 +1040,74 @@ let isCall = 1,
}
}
+// Tail calls.
+
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
+ // Darwin versions.
+ let Defs = [R0, R1, R2, R3, R9, R12,
+ D0, D1, D2, D3, D4, D5, D6, D7,
+ D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26,
+ D27, D28, D29, D30, D31, PC],
+ Uses = [SP] in {
+ def TCRETURNdi : AInoP<(outs), (ins i32imm:$dst, variable_ops),
+ Pseudo, IIC_Br,
+ "@TC_RETURN","\t$dst", []>, Requires<[IsDarwin]>;
+
+ def TCRETURNri : AInoP<(outs), (ins tcGPR:$dst, variable_ops),
+ Pseudo, IIC_Br,
+ "@TC_RETURN","\t$dst", []>, Requires<[IsDarwin]>;
+
+ def TAILJMPd : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
+ IIC_Br, "b\t$dst @ TAILCALL",
+ []>, Requires<[IsDarwin]>;
+
+ def TAILJMPdt: ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
+ IIC_Br, "b.w\t$dst @ TAILCALL",
+ []>, Requires<[IsDarwin]>;
+
+ def TAILJMPr : AXI<(outs), (ins tcGPR:$dst, variable_ops),
+ BrMiscFrm, IIC_Br, "bx\t$dst @ TAILCALL",
+ []>, Requires<[IsDarwin]> {
+ let Inst{7-4} = 0b0001;
+ let Inst{19-8} = 0b111111111111;
+ let Inst{27-20} = 0b00010010;
+ let Inst{31-28} = 0b1110;
+ }
+ }
+
+ // Non-Darwin versions (the difference is R9).
+ let Defs = [R0, R1, R2, R3, R12,
+ D0, D1, D2, D3, D4, D5, D6, D7,
+ D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26,
+ D27, D28, D29, D30, D31, PC],
+ Uses = [SP] in {
+ def TCRETURNdiND : AInoP<(outs), (ins i32imm:$dst, variable_ops),
+ Pseudo, IIC_Br,
+ "@TC_RETURN","\t$dst", []>, Requires<[IsNotDarwin]>;
+
+ def TCRETURNriND : AInoP<(outs), (ins tcGPR:$dst, variable_ops),
+ Pseudo, IIC_Br,
+ "@TC_RETURN","\t$dst", []>, Requires<[IsNotDarwin]>;
+
+ def TAILJMPdND : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
+ IIC_Br, "b\t$dst @ TAILCALL",
+ []>, Requires<[IsARM, IsNotDarwin]>;
+
+ def TAILJMPdNDt : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
+ IIC_Br, "b.w\t$dst @ TAILCALL",
+ []>, Requires<[IsThumb, IsNotDarwin]>;
+
+ def TAILJMPrND : AXI<(outs), (ins tcGPR:$dst, variable_ops),
+ BrMiscFrm, IIC_Br, "bx\t$dst @ TAILCALL",
+ []>, Requires<[IsNotDarwin]> {
+ let Inst{7-4} = 0b0001;
+ let Inst{19-8} = 0b111111111111;
+ let Inst{27-20} = 0b00010010;
+ let Inst{31-28} = 0b1110;
+ }
+ }
+}
+
let isBranch = 1, isTerminator = 1 in {
// B is "predicable" since it can be xformed into a Bcc.
let isBarrier = 1 in {
@@ -1397,6 +1479,14 @@ def MOVr : AsI1<0b1101, (outs GPR:$dst), (ins GPR:$src), DPFrm, IIC_iMOVr,
let Inst{25} = 0;
}
+// A version for the smaller set of tail call registers.
+let neverHasSideEffects = 1 in
+def MOVr_TC : AsI1<0b1101, (outs tcGPR:$dst), (ins tcGPR:$src), DPFrm,
+ IIC_iMOVr, "mov", "\t$dst, $src", []>, UnaryDP {
+ let Inst{11-4} = 0b00000000;
+ let Inst{25} = 0;
+}
+
def MOVs : AsI1<0b1101, (outs GPR:$dst), (ins so_reg:$src),
DPSoRegFrm, IIC_iMOVsr,
"mov", "\t$dst, $src", [(set GPR:$dst, so_reg:$src)]>, UnaryDP {
@@ -1604,13 +1694,19 @@ def RSCSrs : AXI1<0b0111, (outs GPR:$dst), (ins GPR:$a, so_reg:$b),
}
// (sub X, imm) gets canonicalized to (add X, -imm). Match this form.
+// The assume-no-carry-in form uses the negation of the input since add/sub
+// assume opposite meanings of the carry flag (i.e., carry == !borrow).
+// See the definition of AddWithCarry() in the ARM ARM A2.2.1 for the gory
+// details.
def : ARMPat<(add GPR:$src, so_imm_neg:$imm),
(SUBri GPR:$src, so_imm_neg:$imm)>;
-
-//def : ARMPat<(addc GPR:$src, so_imm_neg:$imm),
-// (SUBSri GPR:$src, so_imm_neg:$imm)>;
-//def : ARMPat<(adde GPR:$src, so_imm_neg:$imm),
-// (SBCri GPR:$src, so_imm_neg:$imm)>;
+def : ARMPat<(addc GPR:$src, so_imm_neg:$imm),
+ (SUBSri GPR:$src, so_imm_neg:$imm)>;
+// The with-carry-in form matches bitwise not instead of the negation.
+// Effectively, the inverse interpretation of the carry flag already accounts
+// for part of the negation.
+def : ARMPat<(adde GPR:$src, so_imm_not:$imm),
+ (SBCri GPR:$src, so_imm_not:$imm)>;
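
The carry convention described above reduces to one identity: since ~imm == -imm - 1 modulo 2^32, x + ~imm + carry == x - imm - (1 - carry), which is exactly SBC's Rn - Op2 - NOT(carry). A quick sanity sketch:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 100, imm = 7;
      for (uint32_t carry = 0; carry <= 1; ++carry)
        assert(x + ~imm + carry == x - imm - (1 - carry));
      return 0;
    }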
// Note: These are implemented in C++ code, because they have to generate
// ADD/SUBrs instructions, which use a complex pattern that a xform function
@@ -2198,6 +2294,22 @@ defm CMNz : AI1_cmp_irs<0b1011, "cmn",
def : ARMPat<(ARMcmpZ GPR:$src, so_imm_neg:$imm),
(CMNzri GPR:$src, so_imm_neg:$imm)>;
+// Pseudo i64 compares for some floating point compares.
+let usesCustomInserter = 1, isBranch = 1, isTerminator = 1,
+ Defs = [CPSR] in {
+def BCCi64 : PseudoInst<(outs),
+ (ins i32imm:$cc, GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, brtarget:$dst),
+ IIC_Br,
+ "${:comment} B\t$dst GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, imm:$cc",
+ [(ARMBcci64 imm:$cc, GPR:$lhs1, GPR:$lhs2, GPR:$rhs1, GPR:$rhs2, bb:$dst)]>;
+
+def BCCZi64 : PseudoInst<(outs),
+ (ins i32imm:$cc, GPR:$lhs1, GPR:$lhs2, brtarget:$dst),
+ IIC_Br,
+ "${:comment} B\t$dst GPR:$lhs1, GPR:$lhs2, 0, 0, imm:$cc",
+ [(ARMBcci64 imm:$cc, GPR:$lhs1, GPR:$lhs2, 0, 0, bb:$dst)]>;
+} // usesCustomInserter
+
// Conditional moves
// FIXME: should be able to write a pattern for ARMcmov, but can't use
@@ -2530,31 +2642,30 @@ let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, D0,
D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30,
- D31 ] in {
+ D31 ], hasSideEffects = 1, isBarrier = 1 in {
def Int_eh_sjlj_setjmp : XI<(outs), (ins GPR:$src, GPR:$val),
AddrModeNone, SizeSpecial, IndexModeNone,
Pseudo, NoItinerary,
- "str\tsp, [$src, #+8] ${:comment} eh_setjmp begin\n\t"
- "add\t$val, pc, #8\n\t"
- "str\t$val, [$src, #+4]\n\t"
- "mov\tr0, #0\n\t"
- "add\tpc, pc, #0\n\t"
- "mov\tr0, #1 ${:comment} eh_setjmp end", "",
+ "add\t$val, pc, #8\t${:comment} eh_setjmp begin\n\t"
+ "str\t$val, [$src, #+4]\n\t"
+ "mov\tr0, #0\n\t"
+ "add\tpc, pc, #0\n\t"
+ "mov\tr0, #1 ${:comment} eh_setjmp end", "",
[(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>,
Requires<[IsARM, HasVFP2]>;
}
let Defs =
- [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR ] in {
+ [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR ],
+ hasSideEffects = 1, isBarrier = 1 in {
def Int_eh_sjlj_setjmp_nofp : XI<(outs), (ins GPR:$src, GPR:$val),
AddrModeNone, SizeSpecial, IndexModeNone,
Pseudo, NoItinerary,
- "str\tsp, [$src, #+8] ${:comment} eh_setjmp begin\n\t"
- "add\t$val, pc, #8\n\t"
- "str\t$val, [$src, #+4]\n\t"
- "mov\tr0, #0\n\t"
- "add\tpc, pc, #0\n\t"
- "mov\tr0, #1 ${:comment} eh_setjmp end", "",
+ "add\t$val, pc, #8\n ${:comment} eh_setjmp begin\n\t"
+ "str\t$val, [$src, #+4]\n\t"
+ "mov\tr0, #0\n\t"
+ "add\tpc, pc, #0\n\t"
+ "mov\tr0, #1 ${:comment} eh_setjmp end", "",
[(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>,
Requires<[IsARM, NoVFP]>;
}
@@ -2621,6 +2732,24 @@ def : ARMPat<(ARMWrapperJT tjumptable:$dst, imm:$id),
// TODO: add,sub,and, 3-instr forms?
+// Tail calls
+def : ARMPat<(ARMtcret tcGPR:$dst),
+ (TCRETURNri tcGPR:$dst)>, Requires<[IsDarwin]>;
+
+def : ARMPat<(ARMtcret (i32 tglobaladdr:$dst)),
+ (TCRETURNdi texternalsym:$dst)>, Requires<[IsDarwin]>;
+
+def : ARMPat<(ARMtcret (i32 texternalsym:$dst)),
+ (TCRETURNdi texternalsym:$dst)>, Requires<[IsDarwin]>;
+
+def : ARMPat<(ARMtcret tcGPR:$dst),
+ (TCRETURNriND tcGPR:$dst)>, Requires<[IsNotDarwin]>;
+
+def : ARMPat<(ARMtcret (i32 tglobaladdr:$dst)),
+ (TCRETURNdiND texternalsym:$dst)>, Requires<[IsNotDarwin]>;
+
+def : ARMPat<(ARMtcret (i32 texternalsym:$dst)),
+ (TCRETURNdiND texternalsym:$dst)>, Requires<[IsNotDarwin]>;
// Direct calls
def : ARMPat<(ARMcall texternalsym:$func), (BL texternalsym:$func)>,
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
index 197ec16..7f7eb98 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -65,6 +65,10 @@ def SDTARMVGETLN : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
def NEONvgetlaneu : SDNode<"ARMISD::VGETLANEu", SDTARMVGETLN>;
def NEONvgetlanes : SDNode<"ARMISD::VGETLANEs", SDTARMVGETLN>;
+def SDTARMVMOVIMM : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
+def NEONvmovImm : SDNode<"ARMISD::VMOVIMM", SDTARMVMOVIMM>;
+def NEONvmvnImm : SDNode<"ARMISD::VMVNIMM", SDTARMVMOVIMM>;
+
def NEONvdup : SDNode<"ARMISD::VDUP", SDTypeProfile<1, 1, [SDTCisVec<0>]>>;
// VDUPLANE can produce a quad-register result from a double-register source,
@@ -94,21 +98,26 @@ def SDTARMFMAX : SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisSameAs<0, 1>,
def NEONfmax : SDNode<"ARMISD::FMAX", SDTARMFMAX>;
def NEONfmin : SDNode<"ARMISD::FMIN", SDTARMFMAX>;
+def NEONimmAllZerosV: PatLeaf<(NEONvmovImm (i32 timm)), [{
+ ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
+ unsigned EltBits;
+ uint64_t EltVal = ARM_AM::decodeNEONModImm(ConstVal->getZExtValue(), EltBits);
+ return (EltBits == 32 && EltVal == 0);
+}]>;
+
+def NEONimmAllOnesV: PatLeaf<(NEONvmovImm (i32 timm)), [{
+ ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
+ unsigned EltBits;
+ uint64_t EltVal = ARM_AM::decodeNEONModImm(ConstVal->getZExtValue(), EltBits);
+ return (EltBits == 8 && EltVal == 0xff);
+}]>;
+
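A NEON modified immediate describes a vector splat compactly as an element value plus an element width, which is what decodeNEONModImm recovers above. A standalone sketch of the two predicates' logic (hypothetical struct; the real decoding lives in ARM_AM):

    #include <cstdint>

    // Hypothetical decoded form of a NEON modified immediate.
    struct DecodedNEONModImm {
      uint64_t EltVal;  // splatted element value
      unsigned EltBits; // element width in bits
    };

    // NEONimmAllZerosV: a v2i32/v4i32 zero vector is the 32-bit splat of 0.
    bool isAllZeros(const DecodedNEONModImm &M) {
      return M.EltBits == 32 && M.EltVal == 0;
    }

    // NEONimmAllOnesV: a v8i8/v16i8 all-ones vector is the 8-bit splat of 0xff.
    bool isAllOnes(const DecodedNEONModImm &M) {
      return M.EltBits == 8 && M.EltVal == 0xff;
    }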
//===----------------------------------------------------------------------===//
// NEON operand definitions
//===----------------------------------------------------------------------===//
-def h8imm : Operand<i8> {
- let PrintMethod = "printHex8ImmOperand";
-}
-def h16imm : Operand<i16> {
- let PrintMethod = "printHex16ImmOperand";
-}
-def h32imm : Operand<i32> {
- let PrintMethod = "printHex32ImmOperand";
-}
-def h64imm : Operand<i64> {
- let PrintMethod = "printHex64ImmOperand";
+def nModImm : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
}
//===----------------------------------------------------------------------===//
@@ -812,11 +821,6 @@ def DSubReg_f64_reg : SDNodeXForm<imm, [{
assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue(), MVT::i32);
}]>;
-def DSubReg_f64_other_reg : SDNodeXForm<imm, [{
- assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
- return CurDAG->getTargetConstant(ARM::dsub_0 + (1 - N->getZExtValue()),
- MVT::i32);
-}]>;
// Extract S sub-registers of Q/D registers.
def SSubReg_f32_reg : SDNodeXForm<imm, [{
@@ -2282,7 +2286,7 @@ def VCEQfq : N3VQ<0,0,0b00,0b1110,0, IIC_VBINQ, "vceq", "f32", v4i32, v4f32,
NEONvceq, 1>;
// For disassembly only.
defm VCEQz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00010, 0, "vceq", "i",
- "$dst, $src, #0">;
+ "$dst, $src, #0">;
// VCGE : Vector Compare Greater Than or Equal
defm VCGEs : N3V_QHS<0, 0, 0b0011, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
@@ -2332,10 +2336,10 @@ defm VTST : N3V_QHS<0, 0, 0b1000, 1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
// Vector Bitwise Operations.
-def vnot8 : PatFrag<(ops node:$in),
- (xor node:$in, (bitconvert (v8i8 immAllOnesV)))>;
-def vnot16 : PatFrag<(ops node:$in),
- (xor node:$in, (bitconvert (v16i8 immAllOnesV)))>;
+def vnotd : PatFrag<(ops node:$in),
+ (xor node:$in, (bitconvert (v8i8 NEONimmAllOnesV)))>;
+def vnotq : PatFrag<(ops node:$in),
+ (xor node:$in, (bitconvert (v16i8 NEONimmAllOnesV)))>;
// VAND : Vector Bitwise AND
@@ -2361,36 +2365,58 @@ def VBICd : N3VX<0, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
(ins DPR:$src1, DPR:$src2), N3RegFrm, IIC_VBINiD,
"vbic", "$dst, $src1, $src2", "",
[(set DPR:$dst, (v2i32 (and DPR:$src1,
- (vnot8 DPR:$src2))))]>;
+ (vnotd DPR:$src2))))]>;
def VBICq : N3VX<0, 0, 0b01, 0b0001, 1, 1, (outs QPR:$dst),
(ins QPR:$src1, QPR:$src2), N3RegFrm, IIC_VBINiQ,
"vbic", "$dst, $src1, $src2", "",
[(set QPR:$dst, (v4i32 (and QPR:$src1,
- (vnot16 QPR:$src2))))]>;
+ (vnotq QPR:$src2))))]>;
// VORN : Vector Bitwise OR NOT
def VORNd : N3VX<0, 0, 0b11, 0b0001, 0, 1, (outs DPR:$dst),
(ins DPR:$src1, DPR:$src2), N3RegFrm, IIC_VBINiD,
"vorn", "$dst, $src1, $src2", "",
[(set DPR:$dst, (v2i32 (or DPR:$src1,
- (vnot8 DPR:$src2))))]>;
+ (vnotd DPR:$src2))))]>;
def VORNq : N3VX<0, 0, 0b11, 0b0001, 1, 1, (outs QPR:$dst),
(ins QPR:$src1, QPR:$src2), N3RegFrm, IIC_VBINiQ,
"vorn", "$dst, $src1, $src2", "",
[(set QPR:$dst, (v4i32 (or QPR:$src1,
- (vnot16 QPR:$src2))))]>;
+ (vnotq QPR:$src2))))]>;
+
+// VMVN : Vector Bitwise NOT (Immediate)
+
+let isReMaterializable = 1 in {
+def VMVNv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 1, 1, (outs DPR:$dst),
+ (ins nModImm:$SIMM), IIC_VMOVImm,
+ "vmvn", "i16", "$dst, $SIMM", "",
+ [(set DPR:$dst, (v4i16 (NEONvmvnImm timm:$SIMM)))]>;
+def VMVNv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 1, 1, (outs QPR:$dst),
+ (ins nModImm:$SIMM), IIC_VMOVImm,
+ "vmvn", "i16", "$dst, $SIMM", "",
+ [(set QPR:$dst, (v8i16 (NEONvmvnImm timm:$SIMM)))]>;
+
+def VMVNv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 1, 1, (outs DPR:$dst),
+ (ins nModImm:$SIMM), IIC_VMOVImm,
+ "vmvn", "i32", "$dst, $SIMM", "",
+ [(set DPR:$dst, (v2i32 (NEONvmvnImm timm:$SIMM)))]>;
+def VMVNv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 1, 1, (outs QPR:$dst),
+ (ins nModImm:$SIMM), IIC_VMOVImm,
+ "vmvn", "i32", "$dst, $SIMM", "",
+ [(set QPR:$dst, (v4i32 (NEONvmvnImm timm:$SIMM)))]>;
+}
// VMVN : Vector Bitwise NOT
def VMVNd : N2VX<0b11, 0b11, 0b00, 0b00, 0b01011, 0, 0,
(outs DPR:$dst), (ins DPR:$src), IIC_VSUBiD,
"vmvn", "$dst, $src", "",
- [(set DPR:$dst, (v2i32 (vnot8 DPR:$src)))]>;
+ [(set DPR:$dst, (v2i32 (vnotd DPR:$src)))]>;
def VMVNq : N2VX<0b11, 0b11, 0b00, 0b00, 0b01011, 1, 0,
(outs QPR:$dst), (ins QPR:$src), IIC_VSUBiD,
"vmvn", "$dst, $src", "",
- [(set QPR:$dst, (v4i32 (vnot16 QPR:$src)))]>;
-def : Pat<(v2i32 (vnot8 DPR:$src)), (VMVNd DPR:$src)>;
-def : Pat<(v4i32 (vnot16 QPR:$src)), (VMVNq QPR:$src)>;
+ [(set QPR:$dst, (v4i32 (vnotq QPR:$src)))]>;
+def : Pat<(v2i32 (vnotd DPR:$src)), (VMVNd DPR:$src)>;
+def : Pat<(v4i32 (vnotq QPR:$src)), (VMVNq QPR:$src)>;
// VBSL : Vector Bitwise Select
def VBSLd : N3VX<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
@@ -2399,14 +2425,14 @@ def VBSLd : N3VX<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
"vbsl", "$dst, $src2, $src3", "$src1 = $dst",
[(set DPR:$dst,
(v2i32 (or (and DPR:$src2, DPR:$src1),
- (and DPR:$src3, (vnot8 DPR:$src1)))))]>;
+ (and DPR:$src3, (vnotd DPR:$src1)))))]>;
def VBSLq : N3VX<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$dst),
(ins QPR:$src1, QPR:$src2, QPR:$src3),
N3RegFrm, IIC_VCNTiQ,
"vbsl", "$dst, $src2, $src3", "$src1 = $dst",
[(set QPR:$dst,
(v4i32 (or (and QPR:$src2, QPR:$src1),
- (and QPR:$src3, (vnot16 QPR:$src1)))))]>;
+ (and QPR:$src3, (vnotq QPR:$src1)))))]>;
// VBIF : Vector Bitwise Insert if False
// like VBSL but with: "vbif $dst, $src3, $src1", "$src2 = $dst",
@@ -2740,20 +2766,19 @@ defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0,
// Vector Negate.
-def vneg : PatFrag<(ops node:$in), (sub immAllZerosV, node:$in)>;
-def vneg8 : PatFrag<(ops node:$in),
- (sub (bitconvert (v8i8 immAllZerosV)), node:$in)>;
-def vneg16 : PatFrag<(ops node:$in),
- (sub (bitconvert (v16i8 immAllZerosV)), node:$in)>;
+def vnegd : PatFrag<(ops node:$in),
+ (sub (bitconvert (v2i32 NEONimmAllZerosV)), node:$in)>;
+def vnegq : PatFrag<(ops node:$in),
+ (sub (bitconvert (v4i32 NEONimmAllZerosV)), node:$in)>;
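These fragments express negation as subtraction from a zero splat, which is exactly VNEG's per-lane behavior. Per element (sketch; unsigned arithmetic to mirror NEON's two's-complement wrap):

    #include <cstdint>

    // Per-lane view of vnegd/vnegq: 0 - x is elementwise negation.
    int32_t laneNeg(int32_t x) {
      return int32_t(0u - uint32_t(x));
    }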
class VNEGD<bits<2> size, string OpcodeStr, string Dt, ValueType Ty>
: N2V<0b11, 0b11, size, 0b01, 0b00111, 0, 0, (outs DPR:$dst), (ins DPR:$src),
IIC_VSHLiD, OpcodeStr, Dt, "$dst, $src", "",
- [(set DPR:$dst, (Ty (vneg8 DPR:$src)))]>;
+ [(set DPR:$dst, (Ty (vnegd DPR:$src)))]>;
class VNEGQ<bits<2> size, string OpcodeStr, string Dt, ValueType Ty>
: N2V<0b11, 0b11, size, 0b01, 0b00111, 1, 0, (outs QPR:$dst), (ins QPR:$src),
IIC_VSHLiD, OpcodeStr, Dt, "$dst, $src", "",
- [(set QPR:$dst, (Ty (vneg16 QPR:$src)))]>;
+ [(set QPR:$dst, (Ty (vnegq QPR:$src)))]>;
// VNEG : Vector Negate (integer)
def VNEGs8d : VNEGD<0b00, "vneg", "s8", v8i8>;
@@ -2773,12 +2798,12 @@ def VNEGf32q : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 1, 0,
"vneg", "f32", "$dst, $src", "",
[(set QPR:$dst, (v4f32 (fneg QPR:$src)))]>;
-def : Pat<(v8i8 (vneg8 DPR:$src)), (VNEGs8d DPR:$src)>;
-def : Pat<(v4i16 (vneg8 DPR:$src)), (VNEGs16d DPR:$src)>;
-def : Pat<(v2i32 (vneg8 DPR:$src)), (VNEGs32d DPR:$src)>;
-def : Pat<(v16i8 (vneg16 QPR:$src)), (VNEGs8q QPR:$src)>;
-def : Pat<(v8i16 (vneg16 QPR:$src)), (VNEGs16q QPR:$src)>;
-def : Pat<(v4i32 (vneg16 QPR:$src)), (VNEGs32q QPR:$src)>;
+def : Pat<(v8i8 (vnegd DPR:$src)), (VNEGs8d DPR:$src)>;
+def : Pat<(v4i16 (vnegd DPR:$src)), (VNEGs16d DPR:$src)>;
+def : Pat<(v2i32 (vnegd DPR:$src)), (VNEGs32d DPR:$src)>;
+def : Pat<(v16i8 (vnegq QPR:$src)), (VNEGs8q QPR:$src)>;
+def : Pat<(v8i16 (vnegq QPR:$src)), (VNEGs16q QPR:$src)>;
+def : Pat<(v4i32 (vnegq QPR:$src)), (VNEGs32q QPR:$src)>;
// VQNEG : Vector Saturating Negate
defm VQNEG : N2VInt_QHS<0b11, 0b11, 0b00, 0b01111, 0,
@@ -2832,77 +2857,42 @@ def VMOVQQQQ : PseudoInst<(outs QQQQPR:$dst), (ins QQQQPR:$src),
// VMOV : Vector Move (Immediate)
-// VMOV_get_imm8 xform function: convert build_vector to VMOV.i8 imm.
-def VMOV_get_imm8 : SDNodeXForm<build_vector, [{
- return ARM::getVMOVImm(N, 1, *CurDAG);
-}]>;
-def vmovImm8 : PatLeaf<(build_vector), [{
- return ARM::getVMOVImm(N, 1, *CurDAG).getNode() != 0;
-}], VMOV_get_imm8>;
-
-// VMOV_get_imm16 xform function: convert build_vector to VMOV.i16 imm.
-def VMOV_get_imm16 : SDNodeXForm<build_vector, [{
- return ARM::getVMOVImm(N, 2, *CurDAG);
-}]>;
-def vmovImm16 : PatLeaf<(build_vector), [{
- return ARM::getVMOVImm(N, 2, *CurDAG).getNode() != 0;
-}], VMOV_get_imm16>;
-
-// VMOV_get_imm32 xform function: convert build_vector to VMOV.i32 imm.
-def VMOV_get_imm32 : SDNodeXForm<build_vector, [{
- return ARM::getVMOVImm(N, 4, *CurDAG);
-}]>;
-def vmovImm32 : PatLeaf<(build_vector), [{
- return ARM::getVMOVImm(N, 4, *CurDAG).getNode() != 0;
-}], VMOV_get_imm32>;
-
-// VMOV_get_imm64 xform function: convert build_vector to VMOV.i64 imm.
-def VMOV_get_imm64 : SDNodeXForm<build_vector, [{
- return ARM::getVMOVImm(N, 8, *CurDAG);
-}]>;
-def vmovImm64 : PatLeaf<(build_vector), [{
- return ARM::getVMOVImm(N, 8, *CurDAG).getNode() != 0;
-}], VMOV_get_imm64>;
-
-// Note: Some of the cmode bits in the following VMOV instructions need to
-// be encoded based on the immed values.
-
let isReMaterializable = 1 in {
def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$dst),
- (ins h8imm:$SIMM), IIC_VMOVImm,
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i8", "$dst, $SIMM", "",
- [(set DPR:$dst, (v8i8 vmovImm8:$SIMM))]>;
+ [(set DPR:$dst, (v8i8 (NEONvmovImm timm:$SIMM)))]>;
def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$dst),
- (ins h8imm:$SIMM), IIC_VMOVImm,
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i8", "$dst, $SIMM", "",
- [(set QPR:$dst, (v16i8 vmovImm8:$SIMM))]>;
+ [(set QPR:$dst, (v16i8 (NEONvmovImm timm:$SIMM)))]>;
-def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,?}, 0, 0, {?}, 1, (outs DPR:$dst),
- (ins h16imm:$SIMM), IIC_VMOVImm,
+def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 0, 1, (outs DPR:$dst),
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i16", "$dst, $SIMM", "",
- [(set DPR:$dst, (v4i16 vmovImm16:$SIMM))]>;
-def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,?}, 0, 1, {?}, 1, (outs QPR:$dst),
- (ins h16imm:$SIMM), IIC_VMOVImm,
+ [(set DPR:$dst, (v4i16 (NEONvmovImm timm:$SIMM)))]>;
+def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 0, 1, (outs QPR:$dst),
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i16", "$dst, $SIMM", "",
- [(set QPR:$dst, (v8i16 vmovImm16:$SIMM))]>;
+ [(set QPR:$dst, (v8i16 (NEONvmovImm timm:$SIMM)))]>;
-def VMOVv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, {?}, 1, (outs DPR:$dst),
- (ins h32imm:$SIMM), IIC_VMOVImm,
+def VMOVv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 0, 1, (outs DPR:$dst),
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i32", "$dst, $SIMM", "",
- [(set DPR:$dst, (v2i32 vmovImm32:$SIMM))]>;
-def VMOVv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, {?}, 1, (outs QPR:$dst),
- (ins h32imm:$SIMM), IIC_VMOVImm,
+ [(set DPR:$dst, (v2i32 (NEONvmovImm timm:$SIMM)))]>;
+def VMOVv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 0, 1, (outs QPR:$dst),
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i32", "$dst, $SIMM", "",
- [(set QPR:$dst, (v4i32 vmovImm32:$SIMM))]>;
+ [(set QPR:$dst, (v4i32 (NEONvmovImm timm:$SIMM)))]>;
def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$dst),
- (ins h64imm:$SIMM), IIC_VMOVImm,
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i64", "$dst, $SIMM", "",
- [(set DPR:$dst, (v1i64 vmovImm64:$SIMM))]>;
+ [(set DPR:$dst, (v1i64 (NEONvmovImm timm:$SIMM)))]>;
def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$dst),
- (ins h64imm:$SIMM), IIC_VMOVImm,
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i64", "$dst, $SIMM", "",
- [(set QPR:$dst, (v2i64 vmovImm64:$SIMM))]>;
+ [(set QPR:$dst, (v2i64 (NEONvmovImm timm:$SIMM)))]>;
} // isReMaterializable
// VMOV : Vector Get Lane (move scalar to ARM core register)
@@ -3122,17 +3112,6 @@ def VDUPfqf : N2V<0b11, 0b11, {?,1}, {0,0}, 0b11000, 1, 0,
IIC_VMOVD, "vdup", "32", "$dst, ${src:lane}", "",
[(set QPR:$dst, (v4f32 (NEONvdup (f32 SPR:$src))))]>;
-def : Pat<(v2i64 (NEONvduplane (v2i64 QPR:$src), imm:$lane)),
- (INSERT_SUBREG QPR:$src,
- (i64 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_f64_reg imm:$lane))),
- (DSubReg_f64_other_reg imm:$lane))>;
-def : Pat<(v2f64 (NEONvduplane (v2f64 QPR:$src), imm:$lane)),
- (INSERT_SUBREG QPR:$src,
- (f64 (EXTRACT_SUBREG QPR:$src,
- (DSubReg_f64_reg imm:$lane))),
- (DSubReg_f64_other_reg imm:$lane))>;
-
// VMOVN : Vector Narrowing Move
defm VMOVN : N2VNInt_HSD<0b11,0b11,0b10,0b00100,0,0, IIC_VMOVD,
"vmovn", "i", int_arm_neon_vmovn>;
@@ -3319,22 +3298,16 @@ let hasExtraSrcRegAllocReq = 1 in {
def VTBL2
: N3V<1,1,0b11,0b1001,0,0, (outs DPR:$dst),
(ins DPR:$tbl1, DPR:$tbl2, DPR:$src), NVTBLFrm, IIC_VTB2,
- "vtbl", "8", "$dst, \\{$tbl1, $tbl2\\}, $src", "",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbl2
- DPR:$tbl1, DPR:$tbl2, DPR:$src)))]>;
+ "vtbl", "8", "$dst, \\{$tbl1, $tbl2\\}, $src", "", []>;
def VTBL3
: N3V<1,1,0b11,0b1010,0,0, (outs DPR:$dst),
(ins DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$src), NVTBLFrm, IIC_VTB3,
- "vtbl", "8", "$dst, \\{$tbl1, $tbl2, $tbl3\\}, $src", "",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbl3
- DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$src)))]>;
+ "vtbl", "8", "$dst, \\{$tbl1, $tbl2, $tbl3\\}, $src", "", []>;
def VTBL4
: N3V<1,1,0b11,0b1011,0,0, (outs DPR:$dst),
(ins DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$src),
NVTBLFrm, IIC_VTB4,
- "vtbl", "8", "$dst, \\{$tbl1, $tbl2, $tbl3, $tbl4\\}, $src", "",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbl4 DPR:$tbl1, DPR:$tbl2,
- DPR:$tbl3, DPR:$tbl4, DPR:$src)))]>;
+ "vtbl", "8", "$dst, \\{$tbl1, $tbl2, $tbl3, $tbl4\\}, $src", "", []>;
} // hasExtraSrcRegAllocReq = 1
// VTBX : Vector Table Extension
@@ -3348,23 +3321,18 @@ let hasExtraSrcRegAllocReq = 1 in {
def VTBX2
: N3V<1,1,0b11,0b1001,1,0, (outs DPR:$dst),
(ins DPR:$orig, DPR:$tbl1, DPR:$tbl2, DPR:$src), NVTBLFrm, IIC_VTBX2,
- "vtbx", "8", "$dst, \\{$tbl1, $tbl2\\}, $src", "$orig = $dst",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbx2
- DPR:$orig, DPR:$tbl1, DPR:$tbl2, DPR:$src)))]>;
+ "vtbx", "8", "$dst, \\{$tbl1, $tbl2\\}, $src", "$orig = $dst", []>;
def VTBX3
: N3V<1,1,0b11,0b1010,1,0, (outs DPR:$dst),
(ins DPR:$orig, DPR:$tbl1, DPR:$tbl2, DPR:$tbl3, DPR:$src),
NVTBLFrm, IIC_VTBX3,
- "vtbx", "8", "$dst, \\{$tbl1, $tbl2, $tbl3\\}, $src", "$orig = $dst",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbx3 DPR:$orig, DPR:$tbl1,
- DPR:$tbl2, DPR:$tbl3, DPR:$src)))]>;
+ "vtbx", "8", "$dst, \\{$tbl1, $tbl2, $tbl3\\}, $src",
+ "$orig = $dst", []>;
def VTBX4
: N3V<1,1,0b11,0b1011,1,0, (outs DPR:$dst), (ins DPR:$orig, DPR:$tbl1,
DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$src), NVTBLFrm, IIC_VTBX4,
"vtbx", "8", "$dst, \\{$tbl1, $tbl2, $tbl3, $tbl4\\}, $src",
- "$orig = $dst",
- [(set DPR:$dst, (v8i8 (int_arm_neon_vtbx4 DPR:$orig, DPR:$tbl1,
- DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$src)))]>;
+ "$orig = $dst", []>;
} // hasExtraSrcRegAllocReq = 1
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
index 40f924b..bc0790d 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb.td
@@ -894,11 +894,11 @@ def tLEApcrel : T1I<(outs tGPR:$dst), (ins i32imm:$label, pred:$p), IIC_iALUi,
"adr$p\t$dst, #$label", []>,
T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10
+} // neverHasSideEffects
def tLEApcrelJT : T1I<(outs tGPR:$dst),
(ins i32imm:$label, nohash_imm:$id, pred:$p),
IIC_iALUi, "adr$p\t$dst, #${label}_${id}", []>,
T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10
-} // neverHasSideEffects
//===----------------------------------------------------------------------===//
// TLS Instructions
@@ -923,18 +923,18 @@ let isCall = 1,
// except for our own input by listing the relevant registers in Defs. By
// doing so, we also cause the prologue/epilogue code to actively preserve
// all of the callee-saved registers, which is exactly what we want.
-// The current SP is passed in $val, and we reuse the reg as a scratch.
+// $val is a scratch register for our use.
let Defs =
- [ R0, R1, R2, R3, R4, R5, R6, R7, R12 ] in {
+ [ R0, R1, R2, R3, R4, R5, R6, R7, R12 ], hasSideEffects = 1,
+ isBarrier = 1 in {
def tInt_eh_sjlj_setjmp : ThumbXI<(outs),(ins tGPR:$src, tGPR:$val),
AddrModeNone, SizeSpecial, NoItinerary,
- "str\t$val, [$src, #8]\t${:comment} begin eh.setjmp\n"
- "\tmov\t$val, pc\n"
- "\tadds\t$val, #7\n"
- "\tstr\t$val, [$src, #4]\n"
- "\tmovs\tr0, #0\n"
- "\tb\t1f\n"
- "\tmovs\tr0, #1\t${:comment} end eh.setjmp\n"
+ "mov\t$val, pc\t${:comment} begin eh.setjmp\n\t"
+ "adds\t$val, #7\n\t"
+ "str\t$val, [$src, #4]\n\t"
+ "movs\tr0, #0\n\t"
+ "b\t1f\n\t"
+ "movs\tr0, #1\t${:comment} end eh.setjmp\n\t"
"1:", "",
[(set R0, (ARMeh_sjlj_setjmp tGPR:$src, tGPR:$val))]>;
}
@@ -1037,7 +1037,8 @@ def : T1Pat<(i32 imm0_255_comp:$src),
// scheduling.
let isReMaterializable = 1 in
def tLDRpci_pic : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr, pclabel:$cp),
- NoItinerary, "${:comment} ldr.n\t$dst, $addr\n$cp:\n\tadd\t$dst, pc",
+ NoItinerary,
+ "${:comment} ldr.n\t$dst, $addr\n$cp:\n\tadd\t$dst, pc",
[(set GPR:$dst, (ARMpic_add (load (ARMWrapper tconstpool:$addr)),
imm:$cp))]>,
Requires<[IsThumb1Only]>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
index b91c089..bbe675e 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -122,6 +122,10 @@ def imm0_255_neg : PatLeaf<(i32 imm), [{
return (uint32_t)(-N->getZExtValue()) < 255;
}], imm_neg_XFORM>;
+def imm0_255_not : PatLeaf<(i32 imm), [{
+ return (uint32_t)(~N->getZExtValue()) < 255;
+}], imm_comp_XFORM>;
+
// Define Thumb2 specific addressing modes.
// t2addrmode_imm12 := reg + imm12
@@ -637,8 +641,7 @@ multiclass T2I_st<bits<2> opcod, string opc, PatFrag opnode> {
multiclass T2I_unary_rrot<bits<3> opcod, string opc, PatFrag opnode> {
def r : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
opc, ".w\t$dst, $src",
- [(set GPR:$dst, (opnode GPR:$src))]>,
- Requires<[HasT2ExtractPack]> {
+ [(set GPR:$dst, (opnode GPR:$src))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-20} = opcod;
@@ -649,8 +652,7 @@ multiclass T2I_unary_rrot<bits<3> opcod, string opc, PatFrag opnode> {
}
def r_rot : T2I<(outs GPR:$dst), (ins GPR:$src, i32imm:$rot), IIC_iUNAsi,
opc, ".w\t$dst, $src, ror $rot",
- [(set GPR:$dst, (opnode (rotr GPR:$src, rot_imm:$rot)))]>,
- Requires<[HasT2ExtractPack]> {
+ [(set GPR:$dst, (opnode (rotr GPR:$src, rot_imm:$rot)))]> {
let Inst{31-27} = 0b11111;
let Inst{26-23} = 0b0100;
let Inst{22-20} = opcod;
@@ -661,8 +663,8 @@ multiclass T2I_unary_rrot<bits<3> opcod, string opc, PatFrag opnode> {
}
}
-// SXTB16 and UXTB16 do not need the .w qualifier.
-multiclass T2I_unary_rrot_nw<bits<3> opcod, string opc, PatFrag opnode> {
+// UXTB16 - Requires T2ExtractPack, does not need the .w qualifier.
+multiclass T2I_unary_rrot_uxtb16<bits<3> opcod, string opc, PatFrag opnode> {
def r : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
opc, "\t$dst, $src",
[(set GPR:$dst, (opnode GPR:$src))]>,
@@ -689,9 +691,9 @@ multiclass T2I_unary_rrot_nw<bits<3> opcod, string opc, PatFrag opnode> {
}
}
-// DO variant - disassembly only, no pattern
-
-multiclass T2I_unary_rrot_DO<bits<3> opcod, string opc> {
+// SXTB16 - Requires T2ExtractPack, does not need the .w qualifier, no pattern
+// supported yet.
+multiclass T2I_unary_rrot_sxtb16<bits<3> opcod, string opc> {
def r : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
opc, "\t$dst, $src", []> {
let Inst{31-27} = 0b11111;
@@ -787,6 +789,7 @@ def t2LEApcrel : T2XI<(outs GPR:$dst), (ins i32imm:$label, pred:$p), IIC_iALUi,
let Inst{19-16} = 0b1111; // Rn
let Inst{15} = 0;
}
+} // neverHasSideEffects
def t2LEApcrelJT : T2XI<(outs GPR:$dst),
(ins i32imm:$label, nohash_imm:$id, pred:$p), IIC_iALUi,
"adr$p.w\t$dst, #${label}_${id}", []> {
@@ -798,7 +801,6 @@ def t2LEApcrelJT : T2XI<(outs GPR:$dst),
let Inst{19-16} = 0b1111; // Rn
let Inst{15} = 0;
}
-} // neverHasSideEffects
// ADD r, sp, {so_imm|i12}
def t2ADDrSPi : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_imm:$imm),
@@ -1330,7 +1332,7 @@ defm t2SXTB : T2I_unary_rrot<0b100, "sxtb",
UnOpFrag<(sext_inreg node:$Src, i8)>>;
defm t2SXTH : T2I_unary_rrot<0b000, "sxth",
UnOpFrag<(sext_inreg node:$Src, i16)>>;
-defm t2SXTB16 : T2I_unary_rrot_DO<0b010, "sxtb16">;
+defm t2SXTB16 : T2I_unary_rrot_sxtb16<0b010, "sxtb16">;
defm t2SXTAB : T2I_bin_rrot<0b100, "sxtab",
BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS, i8))>>;
@@ -1347,13 +1349,13 @@ defm t2UXTB : T2I_unary_rrot<0b101, "uxtb",
UnOpFrag<(and node:$Src, 0x000000FF)>>;
defm t2UXTH : T2I_unary_rrot<0b001, "uxth",
UnOpFrag<(and node:$Src, 0x0000FFFF)>>;
-defm t2UXTB16 : T2I_unary_rrot_nw<0b011, "uxtb16",
+defm t2UXTB16 : T2I_unary_rrot_uxtb16<0b011, "uxtb16",
UnOpFrag<(and node:$Src, 0x00FF00FF)>>;
def : T2Pat<(and (shl GPR:$Src, (i32 8)), 0xFF00FF),
- (t2UXTB16r_rot GPR:$Src, 24)>;
+ (t2UXTB16r_rot GPR:$Src, 24)>, Requires<[HasT2ExtractPack]>;
def : T2Pat<(and (srl GPR:$Src, (i32 8)), 0xFF00FF),
- (t2UXTB16r_rot GPR:$Src, 8)>;
+ (t2UXTB16r_rot GPR:$Src, 8)>, Requires<[HasT2ExtractPack]>;
defm t2UXTAB : T2I_bin_rrot<0b101, "uxtab",
BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
@@ -1393,13 +1395,32 @@ defm t2RSBS : T2I_rbin_s_is <0b1110, "rsb",
BinOpFrag<(subc node:$LHS, node:$RHS)>>;
// (sub X, imm) gets canonicalized to (add X, -imm). Match this form.
+// The assume-no-carry-in form uses the negation of the input since add/sub
+// assume opposite meanings of the carry flag (i.e., carry == !borrow).
+// See the definition of AddWithCarry() in the ARM ARM A2.2.1 for the gory
+// details.
+// The AddedComplexity prefers the first variant over the others since
+// it can be shrunk to a 16-bit wide encoding, while the others cannot.
+let AddedComplexity = 1 in
+def : T2Pat<(add GPR:$src, imm0_255_neg:$imm),
+ (t2SUBri GPR:$src, imm0_255_neg:$imm)>;
+def : T2Pat<(add GPR:$src, t2_so_imm_neg:$imm),
+ (t2SUBri GPR:$src, t2_so_imm_neg:$imm)>;
+def : T2Pat<(add GPR:$src, imm0_4095_neg:$imm),
+ (t2SUBri12 GPR:$src, imm0_4095_neg:$imm)>;
+let AddedComplexity = 1 in
+def : T2Pat<(addc GPR:$src, imm0_255_neg:$imm),
+ (t2SUBSri GPR:$src, imm0_255_neg:$imm)>;
+def : T2Pat<(addc GPR:$src, t2_so_imm_neg:$imm),
+ (t2SUBSri GPR:$src, t2_so_imm_neg:$imm)>;
+// The with-carry-in form matches bitwise not instead of the negation.
+// Effectively, the inverse interpretation of the carry flag already accounts
+// for part of the negation.
let AddedComplexity = 1 in
-def : T2Pat<(add GPR:$src, imm0_255_neg:$imm),
- (t2SUBri GPR:$src, imm0_255_neg:$imm)>;
-def : T2Pat<(add GPR:$src, t2_so_imm_neg:$imm),
- (t2SUBri GPR:$src, t2_so_imm_neg:$imm)>;
-def : T2Pat<(add GPR:$src, imm0_4095_neg:$imm),
- (t2SUBri12 GPR:$src, imm0_4095_neg:$imm)>;
+def : T2Pat<(adde GPR:$src, imm0_255_not:$imm),
+ (t2SBCSri GPR:$src, imm0_255_not:$imm)>;
+def : T2Pat<(adde GPR:$src, t2_so_imm_not:$imm),
+ (t2SBCSri GPR:$src, t2_so_imm_not:$imm)>;
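The new imm0_255_not leaf is the bitwise-not counterpart of imm0_255_neg: an immediate qualifies when its complement fits the encodable 8-bit field. Standalone sketch of the predicate (bound copied verbatim from the PatLeaf above):

    #include <cstdint>

    // Mirrors imm0_255_not: match imm when ~imm fits the small immediate
    // field, so t2SBCSri with ~imm can implement adde with imm.
    bool isImm0_255Not(uint32_t Imm) {
      return (uint32_t)~Imm < 255;
    }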
// Select Bytes -- for disassembly only
@@ -2389,37 +2410,36 @@ let isCall = 1,
// except for our own input by listing the relevant registers in Defs. By
// doing so, we also cause the prologue/epilogue code to actively preserve
// all of the callee-saved registers, which is exactly what we want.
-// The current SP is passed in $val, and we reuse the reg as a scratch.
+// $val is a scratch register for our use.
let Defs =
[ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, D0,
D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30,
- D31 ] in {
+ D31 ], hasSideEffects = 1, isBarrier = 1 in {
def t2Int_eh_sjlj_setjmp : Thumb2XI<(outs), (ins GPR:$src, tGPR:$val),
AddrModeNone, SizeSpecial, NoItinerary,
- "str\t$val, [$src, #8]\t${:comment} begin eh.setjmp\n"
- "\tmov\t$val, pc\n"
- "\tadds\t$val, #7\n"
- "\tstr\t$val, [$src, #4]\n"
- "\tmovs\tr0, #0\n"
- "\tb\t1f\n"
- "\tmovs\tr0, #1\t${:comment} end eh.setjmp\n"
+ "mov\t$val, pc\t${:comment} begin eh.setjmp\n\t"
+ "adds\t$val, #7\n\t"
+ "str\t$val, [$src, #4]\n\t"
+ "movs\tr0, #0\n\t"
+ "b\t1f\n\t"
+ "movs\tr0, #1\t${:comment} end eh.setjmp\n\t"
"1:", "",
[(set R0, (ARMeh_sjlj_setjmp GPR:$src, tGPR:$val))]>,
Requires<[IsThumb2, HasVFP2]>;
}
let Defs =
- [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR ] in {
+ [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR ],
+ hasSideEffects = 1, isBarrier = 1 in {
def t2Int_eh_sjlj_setjmp_nofp : Thumb2XI<(outs), (ins GPR:$src, tGPR:$val),
AddrModeNone, SizeSpecial, NoItinerary,
- "str\t$val, [$src, #8]\t${:comment} begin eh.setjmp\n"
- "\tmov\t$val, pc\n"
- "\tadds\t$val, #7\n"
- "\tstr\t$val, [$src, #4]\n"
- "\tmovs\tr0, #0\n"
- "\tb\t1f\n"
- "\tmovs\tr0, #1\t${:comment} end eh.setjmp\n"
+ "mov\t$val, pc\t${:comment} begin eh.setjmp\n\t"
+ "adds\t$val, #7\n\t"
+ "str\t$val, [$src, #4]\n\t"
+ "movs\tr0, #0\n\t"
+ "b\t1f\n\t"
+ "movs\tr0, #1\t${:comment} end eh.setjmp\n\t"
"1:", "",
[(set R0, (ARMeh_sjlj_setjmp GPR:$src, tGPR:$val))]>,
Requires<[IsThumb2, NoVFP]>;
@@ -2438,7 +2458,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
hasExtraDefRegAllocReq = 1 in
def t2LDM_RET : T2XIt<(outs GPR:$wb), (ins addrmode4:$addr, pred:$p,
reglist:$dsts, variable_ops), IIC_Br,
- "ldm${addr:submode}${p}${addr:wide}\t$addr, $dsts",
+ "ldm${addr:submode}${p}${addr:wide}\t$addr!, $dsts",
"$addr.addr = $wb", []> {
let Inst{31-27} = 0b11101;
let Inst{26-25} = 0b00;
@@ -2529,6 +2549,7 @@ def t2Bcc : T2I<(outs), (ins brtarget:$target), IIC_Br,
// IT block
+let Defs = [ITSTATE] in
def t2IT : Thumb2XI<(outs), (ins it_pred:$cc, it_mask:$mask),
AddrModeNone, Size2Bytes, IIC_iALUx,
"it$mask\t$cc", "", []> {
@@ -2691,7 +2712,8 @@ def : T2Pat<(ARMWrapperJT tjumptable:$dst, imm:$id),
// scheduling.
let canFoldAsLoad = 1, isReMaterializable = 1 in
def t2LDRpci_pic : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr, pclabel:$cp),
- NoItinerary, "${:comment} ldr.w\t$dst, $addr\n$cp:\n\tadd\t$dst, pc",
+ NoItinerary,
+ "${:comment} ldr.w\t$dst, $addr\n$cp:\n\tadd\t$dst, pc",
[(set GPR:$dst, (ARMpic_add (load (ARMWrapper tconstpool:$addr)),
imm:$cp))]>,
Requires<[IsThumb2]>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td b/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
index 54474cf..84c23e1 100644
--- a/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/contrib/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -255,25 +255,25 @@ def VCVTSD : VFPAI<(outs SPR:$dst), (ins DPR:$a), VFPUnaryFrm,
// Between half-precision and single-precision. For disassembly only.
-def VCVTBSH : ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
+def VCVTBSH: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
/* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$dst, $a",
[/* For disassembly only; pattern left blank */]>;
def : ARMPat<(f32_to_f16 SPR:$a),
(i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
-def VCVTBHS : ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
+def VCVTBHS: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
/* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$dst, $a",
[/* For disassembly only; pattern left blank */]>;
def : ARMPat<(f16_to_f32 GPR:$a),
(VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
-def VCVTTSH : ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
+def VCVTTSH: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
/* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$dst, $a",
[/* For disassembly only; pattern left blank */]>;
-def VCVTTHS : ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
+def VCVTTHS: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
/* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$dst, $a",
[/* For disassembly only; pattern left blank */]>;
diff --git a/contrib/llvm/lib/Target/ARM/ARMJITInfo.h b/contrib/llvm/lib/Target/ARM/ARMJITInfo.h
index ff332b7..f5d9eff 100644
--- a/contrib/llvm/lib/Target/ARM/ARMJITInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMJITInfo.h
@@ -143,7 +143,8 @@ namespace llvm {
JumpTableId2AddrMap[JTI] = Addr;
}
- /// getPCLabelAddr - Retrieve the address of the PC label of the specified id.
+ /// getPCLabelAddr - Retrieve the address of the PC label of the
+ /// specified id.
intptr_t getPCLabelAddr(unsigned Id) const {
DenseMap<unsigned, intptr_t>::const_iterator I = PCLabelMap.find(Id);
assert(I != PCLabelMap.end());
diff --git a/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 8585c1e..f80e316 100644
--- a/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -74,11 +74,14 @@ namespace {
private:
struct MemOpQueueEntry {
int Offset;
+ unsigned Reg;
+ bool isKill;
unsigned Position;
MachineBasicBlock::iterator MBBI;
bool Merged;
- MemOpQueueEntry(int o, int p, MachineBasicBlock::iterator i)
- : Offset(o), Position(p), MBBI(i), Merged(false) {}
+ MemOpQueueEntry(int o, unsigned r, bool k, unsigned p,
+ MachineBasicBlock::iterator i)
+ : Offset(o), Reg(r), isKill(k), Position(p), MBBI(i), Merged(false) {}
};
typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
typedef MemOpQueue::iterator MemOpQueueIter;
@@ -128,30 +131,30 @@ namespace {
static int getLoadStoreMultipleOpcode(int Opcode) {
switch (Opcode) {
case ARM::LDR:
- NumLDMGened++;
+ ++NumLDMGened;
return ARM::LDM;
case ARM::STR:
- NumSTMGened++;
+ ++NumSTMGened;
return ARM::STM;
case ARM::t2LDRi8:
case ARM::t2LDRi12:
- NumLDMGened++;
+ ++NumLDMGened;
return ARM::t2LDM;
case ARM::t2STRi8:
case ARM::t2STRi12:
- NumSTMGened++;
+ ++NumSTMGened;
return ARM::t2STM;
case ARM::VLDRS:
- NumVLDMGened++;
+ ++NumVLDMGened;
return ARM::VLDMS;
case ARM::VSTRS:
- NumVSTMGened++;
+ ++NumVSTMGened;
return ARM::VSTMS;
case ARM::VLDRD:
- NumVLDMGened++;
+ ++NumVLDMGened;
return ARM::VLDMD;
case ARM::VSTRD:
- NumVSTMGened++;
+ ++NumVSTMGened;
return ARM::VSTMD;
default: llvm_unreachable("Unhandled opcode!");
}
@@ -264,45 +267,59 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
// MergeOpsUpdate - call MergeOps and update MemOps and merges accordingly on
// success.
-void ARMLoadStoreOpt::
-MergeOpsUpdate(MachineBasicBlock &MBB,
- MemOpQueue &memOps,
- unsigned memOpsBegin,
- unsigned memOpsEnd,
- unsigned insertAfter,
- int Offset,
- unsigned Base,
- bool BaseKill,
- int Opcode,
- ARMCC::CondCodes Pred,
- unsigned PredReg,
- unsigned Scratch,
- DebugLoc dl,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
+void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
+ MemOpQueue &memOps,
+ unsigned memOpsBegin, unsigned memOpsEnd,
+ unsigned insertAfter, int Offset,
+ unsigned Base, bool BaseKill,
+ int Opcode,
+ ARMCC::CondCodes Pred, unsigned PredReg,
+ unsigned Scratch,
+ DebugLoc dl,
+ SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
// First calculate which of the registers should be killed by the merged
// instruction.
- SmallVector<std::pair<unsigned, bool>, 8> Regs;
const unsigned insertPos = memOps[insertAfter].Position;
+
+ SmallSet<unsigned, 4> UnavailRegs;
+ SmallSet<unsigned, 4> KilledRegs;
+ DenseMap<unsigned, unsigned> Killer;
+ for (unsigned i = 0; i < memOpsBegin; ++i) {
+ if (memOps[i].Position < insertPos && memOps[i].isKill) {
+ unsigned Reg = memOps[i].Reg;
+ if (memOps[i].Merged)
+ UnavailRegs.insert(Reg);
+ else {
+ KilledRegs.insert(Reg);
+ Killer[Reg] = i;
+ }
+ }
+ }
+ for (unsigned i = memOpsEnd, e = memOps.size(); i != e; ++i) {
+ if (memOps[i].Position < insertPos && memOps[i].isKill) {
+ unsigned Reg = memOps[i].Reg;
+ KilledRegs.insert(Reg);
+ Killer[Reg] = i;
+ }
+ }
+
+ SmallVector<std::pair<unsigned, bool>, 8> Regs;
for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
- const MachineOperand &MO = memOps[i].MBBI->getOperand(0);
- unsigned Reg = MO.getReg();
- bool isKill = MO.isKill();
+ unsigned Reg = memOps[i].Reg;
+ if (UnavailRegs.count(Reg))
+ // Register is killed before the insertion point, and it's not
+ // feasible to update the kill marker on already-merged
+ // instructions. Abort.
+ return;
// If we are inserting the merged operation after an unmerged operation that
// uses the same register, make sure to transfer any kill flag.
- for (unsigned j = memOpsEnd, e = memOps.size(); !isKill && j != e; ++j)
- if (memOps[j].Position<insertPos) {
- const MachineOperand &MOJ = memOps[j].MBBI->getOperand(0);
- if (MOJ.getReg() == Reg && MOJ.isKill())
- isKill = true;
- }
-
+ bool isKill = memOps[i].isKill || KilledRegs.count(Reg);
Regs.push_back(std::make_pair(Reg, isKill));
}
// Try to do the merge.
MachineBasicBlock::iterator Loc = memOps[insertAfter].MBBI;
- Loc++;
+ ++Loc;
if (!MergeOps(MBB, Loc, Offset, Base, BaseKill, Opcode,
Pred, PredReg, Scratch, dl, Regs))
return;
@@ -311,13 +328,13 @@ MergeOpsUpdate(MachineBasicBlock &MBB,
Merges.push_back(prior(Loc));
for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
// Remove kill flags from any unmerged memops that come before insertPos.
- if (Regs[i-memOpsBegin].second)
- for (unsigned j = memOpsEnd, e = memOps.size(); j != e; ++j)
- if (memOps[j].Position<insertPos) {
- MachineOperand &MOJ = memOps[j].MBBI->getOperand(0);
- if (MOJ.getReg() == Regs[i-memOpsBegin].first && MOJ.isKill())
- MOJ.setIsKill(false);
- }
+ if (Regs[i-memOpsBegin].second) {
+ unsigned Reg = Regs[i-memOpsBegin].first;
+ if (KilledRegs.count(Reg)) {
+ unsigned j = Killer[Reg];
+ memOps[j].MBBI->getOperand(0).setIsKill(false);
+ }
+ }
MBB.erase(memOps[i].MBBI);
memOps[i].Merged = true;
}
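The bookkeeping above boils down to: record, per register, which earlier unmerged memop currently holds the kill flag, then clear that flag once the merged LDM/STM takes ownership of the kill. A reduced sketch with standard containers (not the LLVM types):

    #include <map>

    struct MemOp { unsigned Reg; bool isKill; };

    // Reduced model of the Killer map: when a merged instruction becomes
    // the killer of Reg, drop the kill flag from the earlier memop.
    void clearEarlierKill(std::map<unsigned, MemOp*> &Killer, unsigned Reg) {
      std::map<unsigned, MemOp*>::iterator I = Killer.find(Reg);
      if (I != Killer.end())
        I->second->isKill = false; // merged op now carries the kill
    }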
@@ -517,8 +534,11 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
}
// Try merging with the previous instruction.
- if (MBBI != MBB.begin()) {
+ MachineBasicBlock::iterator BeginMBBI = MBB.begin();
+ if (MBBI != BeginMBBI) {
MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
+ while (PrevMBBI != BeginMBBI && PrevMBBI->isDebugValue())
+ --PrevMBBI;
if (isAM4) {
if (Mode == ARM_AM::ia &&
isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
@@ -541,8 +561,11 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
}
// Try merging with the next instruction.
- if (!DoMerge && MBBI != MBB.end()) {
+ MachineBasicBlock::iterator EndMBBI = MBB.end();
+ if (!DoMerge && MBBI != EndMBBI) {
MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
+ while (NextMBBI != EndMBBI && NextMBBI->isDebugValue())
+ ++NextMBBI;
if (isAM4) {
if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
isMatchingIncrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
@@ -669,8 +692,11 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
unsigned Limit = isAM5 ? 0 : (isAM2 ? 0x1000 : 0x100);
// Try merging with the previous instruction.
- if (MBBI != MBB.begin()) {
+ MachineBasicBlock::iterator BeginMBBI = MBB.begin();
+ if (MBBI != BeginMBBI) {
MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
+ while (PrevMBBI != BeginMBBI && PrevMBBI->isDebugValue())
+ --PrevMBBI;
if (isMatchingDecrement(PrevMBBI, Base, Bytes, Limit, Pred, PredReg)) {
DoMerge = true;
AddSub = ARM_AM::sub;
@@ -685,8 +711,11 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
}
// Try merging with the next instruction.
- if (!DoMerge && MBBI != MBB.end()) {
+ MachineBasicBlock::iterator EndMBBI = MBB.end();
+ if (!DoMerge && MBBI != EndMBBI) {
MachineBasicBlock::iterator NextMBBI = llvm::next(MBBI);
+ while (NextMBBI != EndMBBI && NextMBBI->isDebugValue())
+ ++NextMBBI;
if (!isAM5 &&
isMatchingDecrement(NextMBBI, Base, Bytes, Limit, Pred, PredReg)) {
DoMerge = true;
@@ -759,18 +788,21 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
/// isMemoryOp - Returns true if instruction is a memory operation (that this
/// pass is capable of operating on).
static bool isMemoryOp(const MachineInstr *MI) {
- if (MI->hasOneMemOperand()) {
- const MachineMemOperand *MMO = *MI->memoperands_begin();
+ // When no memory operands are present, conservatively assume unaligned,
+ // volatile, unfoldable.
+ if (!MI->hasOneMemOperand())
+ return false;
- // Don't touch volatile memory accesses - we may be changing their order.
- if (MMO->isVolatile())
- return false;
+ const MachineMemOperand *MMO = *MI->memoperands_begin();
- // Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
- // not.
- if (MMO->getAlignment() < 4)
- return false;
- }
+ // Don't touch volatile memory accesses - we may be changing their order.
+ if (MMO->isVolatile())
+ return false;
+
+ // Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
+ // not.
+ if (MMO->getAlignment() < 4)
+ return false;
// str <undef> could probably be eliminated entirely, but for now we just want
// to avoid making a mess of it.
@@ -898,6 +930,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
if ((EvenRegNum & 1) == 0 && (EvenRegNum + 1) == OddRegNum)
return false;
+ MachineBasicBlock::iterator NewBBI = MBBI;
bool isT2 = Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8;
bool isLd = Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8;
bool EvenDeadKill = isLd ?
@@ -942,6 +975,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
getKillRegState(OddDeadKill) | getUndefRegState(OddUndef));
++NumSTRD2STM;
}
+ NewBBI = llvm::prior(MBBI);
} else {
// Split into two instructions.
assert((!isT2 || !OffReg) &&
@@ -962,14 +996,15 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
OddReg, OddDeadKill, false,
BaseReg, false, BaseUndef, OffReg, false, OffUndef,
Pred, PredReg, TII, isT2);
+ NewBBI = llvm::prior(MBBI);
InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
EvenReg, EvenDeadKill, false,
BaseReg, BaseKill, BaseUndef, OffReg, OffKill, OffUndef,
Pred, PredReg, TII, isT2);
} else {
if (OddReg == EvenReg && EvenDeadKill) {
- // If the two source operands are the same, the kill marker is probably
- // on the first one. e.g.
+ // If the two source operands are the same, the kill marker is
+ // probably on the first one. e.g.
// t2STRDi8 %R5<kill>, %R5, %R9<kill>, 0, 14, %reg0
EvenDeadKill = false;
OddDeadKill = true;
@@ -978,6 +1013,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
EvenReg, EvenDeadKill, EvenUndef,
BaseReg, false, BaseUndef, OffReg, false, OffUndef,
Pred, PredReg, TII, isT2);
+ NewBBI = llvm::prior(MBBI);
InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
OddReg, OddDeadKill, OddUndef,
BaseReg, BaseKill, BaseUndef, OffReg, OffKill, OffUndef,
@@ -989,8 +1025,9 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
++NumSTRD2STR;
}
- MBBI = prior(MBBI);
MBB.erase(MI);
+ MBBI = NewBBI;
+ return true;
}
return false;
}
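The NewBBI changes fix a bookkeeping hazard: instead of reconstructing the resume position with prior(MBBI) after the fact, the position of the newly inserted instruction is captured at insertion time, and the function now reports the change by returning true. Generic shape of the capture-at-insert idea with std::list (sketch, not the MachineBasicBlock API):

    #include <list>

    // Capture the replacement's position when it is inserted, instead of
    // deriving a neighbor from the old position after the erase.
    std::list<int>::iterator replaceElem(std::list<int> &BB,
                                         std::list<int>::iterator Old,
                                         int Repl) {
      std::list<int>::iterator NewIt = BB.insert(Old, Repl); // before Old
      BB.erase(Old);
      return NewIt; // safe point to resume scanning
    }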
@@ -1023,6 +1060,9 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
if (isMemOp) {
int Opcode = MBBI->getOpcode();
unsigned Size = getLSMultipleTransferSize(MBBI);
+ const MachineOperand &MO = MBBI->getOperand(0);
+ unsigned Reg = MO.getReg();
+ bool isKill = MO.isDef() ? false : MO.isKill();
unsigned Base = MBBI->getOperand(1).getReg();
unsigned PredReg = 0;
ARMCC::CondCodes Pred = llvm::getInstrPredicate(MBBI, PredReg);
@@ -1044,8 +1084,8 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
CurrSize = Size;
CurrPred = Pred;
CurrPredReg = PredReg;
- MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
- NumMemOps++;
+ MemOps.push_back(MemOpQueueEntry(Offset, Reg, isKill, Position, MBBI));
+ ++NumMemOps;
Advance = true;
} else {
if (Clobber) {
@@ -1057,15 +1097,17 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
// No need to match PredReg.
// Continue adding to the queue.
if (Offset > MemOps.back().Offset) {
- MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
- NumMemOps++;
+ MemOps.push_back(MemOpQueueEntry(Offset, Reg, isKill,
+ Position, MBBI));
+ ++NumMemOps;
Advance = true;
} else {
for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end();
I != E; ++I) {
if (Offset < I->Offset) {
- MemOps.insert(I, MemOpQueueEntry(Offset, Position, MBBI));
- NumMemOps++;
+ MemOps.insert(I, MemOpQueueEntry(Offset, Reg, isKill,
+ Position, MBBI));
+ ++NumMemOps;
Advance = true;
break;
} else if (Offset == I->Offset) {
@@ -1078,7 +1120,12 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
}
}
- if (Advance) {
+ if (MBBI->isDebugValue()) {
+ ++MBBI;
+ if (MBBI == E)
+ // Reached the end of the block; try merging the memory instructions.
+ TryMerge = true;
+ } else if (Advance) {
++Position;
++MBBI;
if (MBBI == E)
@@ -1279,7 +1326,7 @@ static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
// some day.
SmallSet<unsigned, 4> AddedRegPressure;
while (++I != E) {
- if (MemOps.count(&*I))
+ if (I->isDebugValue() || MemOps.count(&*I))
continue;
const TargetInstrDesc &TID = I->getDesc();
if (TID.isCall() || TID.isTerminator() || TID.hasUnmodeledSideEffects())
@@ -1411,7 +1458,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
std::sort(Ops.begin(), Ops.end(), OffsetCompare());
// The loads / stores of the same base are in order. Scan them from first to
- // last and check for the followins:
+ // last and check for the following:
// 1. Any def of base.
// 2. Any gaps.
while (Ops.size() > 1) {
@@ -1474,7 +1521,8 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
} else {
// This is the new location for the loads / stores.
MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
- while (InsertPos != MBB->end() && MemOps.count(InsertPos))
+ while (InsertPos != MBB->end()
+ && (MemOps.count(InsertPos) || InsertPos->isDebugValue()))
++InsertPos;
// If we are moving a pair of loads / stores, see if it makes sense
@@ -1562,7 +1610,9 @@ ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
break;
}
- MI2LocMap[MI] = Loc++;
+ if (!MI->isDebugValue())
+ MI2LocMap[MI] = ++Loc;
+
if (!isMemoryOp(MI))
continue;
unsigned PredReg = 0;
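A recurring idiom in this file's changes: neighbor scans step over DBG_VALUE instructions so that building with debug info yields the same merges as without. Generic sketch with std::list iterators (assumes the caller is not already at begin()):

    #include <iterator>
    #include <list>

    struct Instr { bool IsDebugValue; };

    // Walk backwards from I over debug values, as the PrevMBBI loops do,
    // stopping at begin() so we never leave the block.
    std::list<Instr>::iterator prevNonDebug(std::list<Instr> &BB,
                                            std::list<Instr>::iterator I) {
      std::list<Instr>::iterator P = std::prev(I);
      while (P != BB.begin() && P->IsDebugValue)
        --P;
      return P;
    }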
diff --git a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
index 0134276..7e57a1c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
@@ -88,6 +88,9 @@ class ARMFunctionInfo : public MachineFunctionInfo {
/// VarArgsFrameIndex - FrameIndex for start of varargs area.
int VarArgsFrameIndex;
+ /// HasITBlocks - True if IT blocks have been inserted.
+ bool HasITBlocks;
+
public:
ARMFunctionInfo() :
isThumb(false),
@@ -97,7 +100,8 @@ public:
FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
GPRCS1Frames(0), GPRCS2Frames(0), DPRCSFrames(0),
- JumpTableUId(0), ConstPoolEntryUId(0), VarArgsFrameIndex(0) {}
+ JumpTableUId(0), ConstPoolEntryUId(0), VarArgsFrameIndex(0),
+ HasITBlocks(false) {}
explicit ARMFunctionInfo(MachineFunction &MF) :
isThumb(MF.getTarget().getSubtarget<ARMSubtarget>().isThumb()),
@@ -108,7 +112,8 @@ public:
GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
GPRCS1Frames(32), GPRCS2Frames(32), DPRCSFrames(32),
SpilledCSRegs(MF.getTarget().getRegisterInfo()->getNumRegs()),
- JumpTableUId(0), ConstPoolEntryUId(0), VarArgsFrameIndex(0) {}
+ JumpTableUId(0), ConstPoolEntryUId(0), VarArgsFrameIndex(0),
+ HasITBlocks(false) {}
bool isThumbFunction() const { return isThumb; }
bool isThumb1OnlyFunction() const { return isThumb && !hasThumb2; }
@@ -229,6 +234,9 @@ public:
int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
+
+ bool hasITBlocks() const { return HasITBlocks; }
+ void setHasITBlocks(bool h) { HasITBlocks = h; }
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
index 6beca8b..d020f3c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
+++ b/contrib/llvm/lib/Target/ARM/ARMRegisterInfo.td
@@ -153,11 +153,11 @@ def Q15 : ARMReg<15, "q15", [D30, D31]>;
// Pseudo 256-bit registers to represent pairs of Q registers. These should
// never be present in the emitted code.
-// These are used for NEON load / store instructions, e.g. vld4, vst3.
-// NOTE: It's possible to define more QQ registers since technical the
-// starting D register number doesn't have to be multiple of 4. e.g.
-// D1, D2, D3, D4 would be a legal quad. But that would make the sub-register
-// stuffs very messy.
+// These are used for NEON load / store instructions, e.g., vld4, vst3.
+// NOTE: It's possible to define more QQ registers since technically the
+// starting D register number doesn't have to be multiple of 4, e.g.,
+// D1, D2, D3, D4 would be a legal quad, but that would make the subregister
+// stuff very messy.
let SubRegIndices = [qsub_0, qsub_1] in {
let CompositeIndices = [(dsub_2 qsub_1, dsub_0), (dsub_3 qsub_1, dsub_1),
(ssub_4 qsub_1, ssub_0), (ssub_5 qsub_1, ssub_1),
@@ -183,7 +183,8 @@ let CompositeIndices = [(qsub_2 qqsub_1, qsub_0), (qsub_3 qqsub_1, qsub_1),
(ssub_8 qqsub_1, ssub_0), (ssub_9 qqsub_1, ssub_1),
(ssub_10 qqsub_1, ssub_2), (ssub_11 qqsub_1, ssub_3),
(ssub_12 qqsub_1, ssub_4), (ssub_13 qqsub_1, ssub_5),
- (ssub_14 qqsub_1, ssub_6), (ssub_15 qqsub_1, ssub_7)] in {
+ (ssub_14 qqsub_1, ssub_6), (ssub_15 qqsub_1, ssub_7)] in
+{
def QQQQ0 : ARMReg<0, "qqqq0", [QQ0, QQ1]>;
def QQQQ1 : ARMReg<1, "qqqq1", [QQ2, QQ3]>;
}
@@ -196,9 +197,9 @@ def QQQQ3 : ARMReg<3, "qqqq3", [QQ6, QQ7]>;
}
// Current Program Status Register.
-def CPSR : ARMReg<0, "cpsr">;
-
-def FPSCR : ARMReg<1, "fpscr">;
+def CPSR : ARMReg<0, "cpsr">;
+def FPSCR : ARMReg<1, "fpscr">;
+def ITSTATE : ARMReg<2, "itstate">;
// Register classes.
//
@@ -348,6 +349,73 @@ def tGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6, R7]> {
}];
}
+// For tail calls, we can't use callee-saved registers, as they are restored
+// to the saved value before the tail call, which would clobber a call address.
+// Note, getMinimalPhysRegClass(R0) returns tGPR because of the names of
+// this class and the preceding one(!). This is what we want.
+def tcGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R9, R12]> {
+ let MethodProtos = [{
+ iterator allocation_order_begin(const MachineFunction &MF) const;
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ // R9 is available.
+ static const unsigned ARM_GPR_R9_TC[] = {
+ ARM::R0, ARM::R1, ARM::R2, ARM::R3,
+ ARM::R9, ARM::R12 };
+ // R9 is not available.
+ static const unsigned ARM_GPR_NOR9_TC[] = {
+ ARM::R0, ARM::R1, ARM::R2, ARM::R3,
+ ARM::R12 };
+
+ // For Thumb1 mode, we don't want to allocate hi regs at all, as we
+ // don't know how to spill them. If we make our prologue/epilogue code
+ // smarter at some point, we can go back to using the above allocation
+ // orders for the Thumb1 instructions that know how to use hi regs.
+ static const unsigned THUMB_GPR_AO_TC[] = {
+ ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
+
+ tcGPRClass::iterator
+ tcGPRClass::allocation_order_begin(const MachineFunction &MF) const {
+ const TargetMachine &TM = MF.getTarget();
+ const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
+ if (Subtarget.isThumb1Only())
+ return THUMB_GPR_AO_TC;
+ if (Subtarget.isTargetDarwin()) {
+ if (Subtarget.isR9Reserved())
+ return ARM_GPR_NOR9_TC;
+ else
+ return ARM_GPR_R9_TC;
+ } else
+ // R9 is either callee-saved or reserved; can't use it.
+ return ARM_GPR_NOR9_TC;
+ }
+
+ tcGPRClass::iterator
+ tcGPRClass::allocation_order_end(const MachineFunction &MF) const {
+ const TargetMachine &TM = MF.getTarget();
+ const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
+ GPRClass::iterator I;
+
+ if (Subtarget.isThumb1Only()) {
+ I = THUMB_GPR_AO_TC + (sizeof(THUMB_GPR_AO_TC)/sizeof(unsigned));
+ return I;
+ }
+
+ if (Subtarget.isTargetDarwin()) {
+ if (Subtarget.isR9Reserved())
+ I = ARM_GPR_NOR9_TC + (sizeof(ARM_GPR_NOR9_TC)/sizeof(unsigned));
+ else
+ I = ARM_GPR_R9_TC + (sizeof(ARM_GPR_R9_TC)/sizeof(unsigned));
+ } else
+ // R9 is either callee-saved or reserved; can't use it.
+ I = ARM_GPR_NOR9_TC + (sizeof(ARM_GPR_NOR9_TC)/sizeof(unsigned));
+ return I;
+ }
+ }];
+}
+
+
// Scalar single precision floating point register class..
def SPR : RegisterClass<"ARM", [f32], 32, [S0, S1, S2, S3, S4, S5, S6, S7, S8,
S9, S10, S11, S12, S13, S14, S15, S16, S17, S18, S19, S20, S21, S22,
@@ -479,4 +547,3 @@ def QQQQPR : RegisterClass<"ARM", [v8i64],
// Condition code registers.
def CCR : RegisterClass<"ARM", [i32], 32, [CPSR]>;
-
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td b/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td
index bbfc0b2..282abca 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleA8.td
@@ -1,10 +1,10 @@
//=- ARMScheduleA8.td - ARM Cortex-A8 Scheduling Definitions -*- tablegen -*-=//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the itinerary class data for the ARM Cortex A8 processors.
@@ -32,50 +32,50 @@ def CortexA8Itineraries : ProcessorItineraries<
InstrItinData<IIC_iALUx , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>]>,
//
// Binary Instructions that produce a result
- InstrItinData<IIC_iALUi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
- InstrItinData<IIC_iALUr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 2]>,
- InstrItinData<IIC_iALUsi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1]>,
- InstrItinData<IIC_iALUsr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1, 1]>,
+ InstrItinData<IIC_iALUi ,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
+ InstrItinData<IIC_iALUr ,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 2]>,
+ InstrItinData<IIC_iALUsi,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1]>,
+ InstrItinData<IIC_iALUsr,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1, 1]>,
//
// Unary Instructions that produce a result
- InstrItinData<IIC_iUNAr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
- InstrItinData<IIC_iUNAsi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iUNAsr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
+ InstrItinData<IIC_iUNAr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
+ InstrItinData<IIC_iUNAsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
+ InstrItinData<IIC_iUNAsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
//
// Compare instructions
- InstrItinData<IIC_iCMPi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2]>,
- InstrItinData<IIC_iCMPr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
- InstrItinData<IIC_iCMPsi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iCMPsr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
+ InstrItinData<IIC_iCMPi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2]>,
+ InstrItinData<IIC_iCMPr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
+ InstrItinData<IIC_iCMPsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
+ InstrItinData<IIC_iCMPsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
//
// Move instructions, unconditional
- InstrItinData<IIC_iMOVi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1]>,
- InstrItinData<IIC_iMOVr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
- InstrItinData<IIC_iMOVsi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
- InstrItinData<IIC_iMOVsr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1, 1]>,
+ InstrItinData<IIC_iMOVi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1]>,
+ InstrItinData<IIC_iMOVr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
+ InstrItinData<IIC_iMOVsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
+ InstrItinData<IIC_iMOVsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1, 1]>,
//
// Move instructions, conditional
- InstrItinData<IIC_iCMOVi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2]>,
- InstrItinData<IIC_iCMOVr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iCMOVsi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
- InstrItinData<IIC_iCMOVsr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
+ InstrItinData<IIC_iCMOVi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2]>,
+ InstrItinData<IIC_iCMOVr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
+ InstrItinData<IIC_iCMOVsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
+ InstrItinData<IIC_iCMOVsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
// Integer multiply pipeline
// Result written in E5, but that is relative to the last cycle of multicycle,
// so we use 6 for those cases
//
InstrItinData<IIC_iMUL16 , [InstrStage<1, [A8_Pipe0]>], [5, 1, 1]>,
- InstrItinData<IIC_iMAC16 , [InstrStage<1, [A8_Pipe1], 0>,
+ InstrItinData<IIC_iMAC16 , [InstrStage<1, [A8_Pipe1], 0>,
InstrStage<2, [A8_Pipe0]>], [6, 1, 1, 4]>,
- InstrItinData<IIC_iMUL32 , [InstrStage<1, [A8_Pipe1], 0>,
+ InstrItinData<IIC_iMUL32 , [InstrStage<1, [A8_Pipe1], 0>,
InstrStage<2, [A8_Pipe0]>], [6, 1, 1]>,
- InstrItinData<IIC_iMAC32 , [InstrStage<1, [A8_Pipe1], 0>,
+ InstrItinData<IIC_iMAC32 , [InstrStage<1, [A8_Pipe1], 0>,
InstrStage<2, [A8_Pipe0]>], [6, 1, 1, 4]>,
- InstrItinData<IIC_iMUL64 , [InstrStage<2, [A8_Pipe1], 0>,
+ InstrItinData<IIC_iMUL64 , [InstrStage<2, [A8_Pipe1], 0>,
InstrStage<3, [A8_Pipe0]>], [6, 6, 1, 1]>,
- InstrItinData<IIC_iMAC64 , [InstrStage<2, [A8_Pipe1], 0>,
+ InstrItinData<IIC_iMAC64 , [InstrStage<2, [A8_Pipe1], 0>,
InstrStage<3, [A8_Pipe0]>], [6, 6, 1, 1]>,
-
+
// Integer load pipeline
//
// loads have an extra cycle of latency, but are fully pipelined
@@ -166,7 +166,7 @@ def CortexA8Itineraries : ProcessorItineraries<
InstrStage<2, [A8_Pipe1]>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
InstrStage<1, [A8_LdSt0]>]>,
-
+
// Branch
//
// no delay slots, so the latency of a branch is unimportant
@@ -276,14 +276,14 @@ def CortexA8Itineraries : ProcessorItineraries<
//
// Single-precision FP Load
// use A8_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpLoad32, [InstrStage<1, [A8_Issue], 0>,
+ InstrItinData<IIC_fpLoad32, [InstrStage<1, [A8_Issue], 0>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
InstrStage<1, [A8_LdSt0], 0>,
InstrStage<1, [A8_NLSPipe]>]>,
//
// Double-precision FP Load
// use A8_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpLoad64, [InstrStage<2, [A8_Issue], 0>,
+ InstrItinData<IIC_fpLoad64, [InstrStage<2, [A8_Issue], 0>,
InstrStage<1, [A8_Pipe0], 0>,
InstrStage<1, [A8_Pipe1]>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
@@ -292,7 +292,7 @@ def CortexA8Itineraries : ProcessorItineraries<
//
// FP Load Multiple
// use A8_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpLoadm, [InstrStage<3, [A8_Issue], 0>,
+ InstrItinData<IIC_fpLoadm, [InstrStage<3, [A8_Issue], 0>,
InstrStage<2, [A8_Pipe0], 0>,
InstrStage<2, [A8_Pipe1]>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
@@ -301,14 +301,14 @@ def CortexA8Itineraries : ProcessorItineraries<
//
// Single-precision FP Store
// use A8_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpStore32,[InstrStage<1, [A8_Issue], 0>,
+ InstrItinData<IIC_fpStore32,[InstrStage<1, [A8_Issue], 0>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
InstrStage<1, [A8_LdSt0], 0>,
InstrStage<1, [A8_NLSPipe]>]>,
//
// Double-precision FP Store
// use A8_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpStore64,[InstrStage<2, [A8_Issue], 0>,
+ InstrItinData<IIC_fpStore64,[InstrStage<2, [A8_Issue], 0>,
InstrStage<1, [A8_Pipe0], 0>,
InstrStage<1, [A8_Pipe1]>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
@@ -317,7 +317,7 @@ def CortexA8Itineraries : ProcessorItineraries<
//
// FP Store Multiple
// use A8_Issue to enforce the 1 load/store per cycle limit
- InstrItinData<IIC_fpStorem, [InstrStage<3, [A8_Issue], 0>,
+ InstrItinData<IIC_fpStorem, [InstrStage<3, [A8_Issue], 0>,
InstrStage<2, [A8_Pipe0], 0>,
InstrStage<2, [A8_Pipe1]>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
@@ -329,35 +329,35 @@ def CortexA8Itineraries : ProcessorItineraries<
//
// VLD1
// FIXME: We don't model this instruction properly
- InstrItinData<IIC_VLD1, [InstrStage<1, [A8_Issue], 0>,
+ InstrItinData<IIC_VLD1, [InstrStage<1, [A8_Issue], 0>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
InstrStage<1, [A8_LdSt0], 0>,
InstrStage<1, [A8_NLSPipe]>]>,
//
// VLD2
// FIXME: We don't model this instruction properly
- InstrItinData<IIC_VLD2, [InstrStage<1, [A8_Issue], 0>,
+ InstrItinData<IIC_VLD2, [InstrStage<1, [A8_Issue], 0>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
InstrStage<1, [A8_LdSt0], 0>,
InstrStage<1, [A8_NLSPipe]>], [2, 2, 1]>,
//
// VLD3
// FIXME: We don't model this instruction properly
- InstrItinData<IIC_VLD3, [InstrStage<1, [A8_Issue], 0>,
+ InstrItinData<IIC_VLD3, [InstrStage<1, [A8_Issue], 0>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
InstrStage<1, [A8_LdSt0], 0>,
InstrStage<1, [A8_NLSPipe]>], [2, 2, 2, 1]>,
//
// VLD4
// FIXME: We don't model this instruction properly
- InstrItinData<IIC_VLD4, [InstrStage<1, [A8_Issue], 0>,
+ InstrItinData<IIC_VLD4, [InstrStage<1, [A8_Issue], 0>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
InstrStage<1, [A8_LdSt0], 0>,
InstrStage<1, [A8_NLSPipe]>], [2, 2, 2, 2, 1]>,
//
// VST
// FIXME: We don't model this instruction properly
- InstrItinData<IIC_VST, [InstrStage<1, [A8_Issue], 0>,
+ InstrItinData<IIC_VST, [InstrStage<1, [A8_Issue], 0>,
InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
InstrStage<1, [A8_LdSt0], 0>,
InstrStage<1, [A8_NLSPipe]>]>,
@@ -600,7 +600,7 @@ def CortexA8Itineraries : ProcessorItineraries<
InstrItinData<IIC_VTB4, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
InstrStage<1, [A8_NLSPipe]>,
InstrStage<1, [A8_NPipe], 0>,
- InstrStage<2, [A8_NLSPipe]>], [4, 2, 2, 3, 3, 1]>,
+ InstrStage<2, [A8_NLSPipe]>],[4, 2, 2, 3, 3, 1]>,
//
// VTBX
InstrItinData<IIC_VTBX1, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
@@ -610,9 +610,9 @@ def CortexA8Itineraries : ProcessorItineraries<
InstrItinData<IIC_VTBX3, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
InstrStage<1, [A8_NLSPipe]>,
InstrStage<1, [A8_NPipe], 0>,
- InstrStage<2, [A8_NLSPipe]>], [4, 1, 2, 2, 3, 1]>,
+ InstrStage<2, [A8_NLSPipe]>],[4, 1, 2, 2, 3, 1]>,
InstrItinData<IIC_VTBX4, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
InstrStage<1, [A8_NLSPipe]>,
InstrStage<1, [A8_NPipe], 0>,
- InstrStage<2, [A8_NLSPipe]>], [4, 1, 2, 2, 3, 3, 1]>
+ InstrStage<2, [A8_NLSPipe]>], [4, 1, 2, 2, 3, 3, 1]>
]>;
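The itinerary entries above pair pipeline occupancy with operand timing: each InstrStage names the functional units an instruction class holds and for how many cycles, and the trailing integer list gives the cycle at which each operand is written or read (result first, then sources). As a minimal sketch of the arithmetic behind the "use 6" comment in the multiply section, assuming the usual reading that the E5 writeback is counted from the last pass of a multicycle operation (hypothetical helper, not an LLVM API):

    unsigned defLatency(unsigned writebackCycle, unsigned passes) {
      // Result appears in E5 of the last pass: 5 for a one-pass multiply,
      // 5 + 1 = 6 for the two-pass MAC and 32-bit multiply entries above.
      return writebackCycle + (passes - 1);
    }

defLatency(5, 1) matches the leading 5 of IIC_iMUL16, and defLatency(5, 2) the leading 6 of IIC_iMAC16 and IIC_iMUL32.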
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td b/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
index 75320d9..df2f896 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleA9.td
@@ -1,10 +1,10 @@
//=- ARMScheduleA9.td - ARM Cortex-A9 Scheduling Definitions -*- tablegen -*-=//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the itinerary class data for the ARM Cortex A9 processors.
@@ -16,7 +16,6 @@
// Reference Manual".
//
// Functional units
-def A9_Issue : FuncUnit; // issue
def A9_Pipe0 : FuncUnit; // pipeline 0
def A9_Pipe1 : FuncUnit; // pipeline 1
def A9_LSPipe : FuncUnit; // LS pipe
@@ -27,7 +26,121 @@ def A9_DRegsN : FuncUnit; // FP register set, NEON side
// Dual issue pipeline represented by A9_Pipe0 | A9_Pipe1
//
def CortexA9Itineraries : ProcessorItineraries<
- [A9_NPipe, A9_DRegsN, A9_DRegsVFP, A9_LSPipe, A9_Pipe0, A9_Pipe1, A9_Issue], [
+ [A9_NPipe, A9_DRegsN, A9_DRegsVFP, A9_LSPipe, A9_Pipe0, A9_Pipe1], [
+ // Two fully-pipelined integer ALU pipelines
+ // FIXME: There are no operand latencies for these instructions at all!
+ //
+ // Move instructions, unconditional
+ InstrItinData<IIC_iMOVi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [1]>,
+ InstrItinData<IIC_iMOVr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [1, 1]>,
+ InstrItinData<IIC_iMOVsi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [1, 1]>,
+ InstrItinData<IIC_iMOVsr , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 2, 1]>,
+ //
+ // No operand cycles
+ InstrItinData<IIC_iALUx , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>]>,
+ //
+ // Binary Instructions that produce a result
+ InstrItinData<IIC_iALUi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2]>,
+ InstrItinData<IIC_iALUr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2, 2]>,
+ InstrItinData<IIC_iALUsi, [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 2, 1]>,
+ InstrItinData<IIC_iALUsr,[InstrStage<3, [A9_Pipe0, A9_Pipe1]>], [2, 2, 1, 1]>,
+ //
+ // Unary Instructions that produce a result
+ InstrItinData<IIC_iUNAr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2]>,
+ InstrItinData<IIC_iUNAsi , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
+ InstrItinData<IIC_iUNAsr , [InstrStage<3, [A9_Pipe0, A9_Pipe1]>], [2, 1, 1]>,
+ //
+ // Compare instructions
+ InstrItinData<IIC_iCMPi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2]>,
+ InstrItinData<IIC_iCMPr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2]>,
+ InstrItinData<IIC_iCMPsi , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
+ InstrItinData<IIC_iCMPsr , [InstrStage<3, [A9_Pipe0, A9_Pipe1]>], [2, 1, 1]>,
+ //
+ // Move instructions, conditional
+ InstrItinData<IIC_iCMOVi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2]>,
+ InstrItinData<IIC_iCMOVr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
+ InstrItinData<IIC_iCMOVsi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
+ InstrItinData<IIC_iCMOVsr , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 1, 1]>,
+
+ // Integer multiply pipeline
+ //
+ InstrItinData<IIC_iMUL16 , [InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<2, [A9_Pipe0]>], [4, 1, 1]>,
+ InstrItinData<IIC_iMAC16 , [InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<2, [A9_Pipe0]>], [4, 1, 1, 2]>,
+ InstrItinData<IIC_iMUL32 , [InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<2, [A9_Pipe0]>], [4, 1, 1]>,
+ InstrItinData<IIC_iMAC32 , [InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<2, [A9_Pipe0]>], [4, 1, 1, 2]>,
+ InstrItinData<IIC_iMUL64 , [InstrStage<2, [A9_Pipe1], 0>,
+ InstrStage<3, [A9_Pipe0]>], [4, 5, 1, 1]>,
+ InstrItinData<IIC_iMAC64 , [InstrStage<2, [A9_Pipe1], 0>,
+ InstrStage<3, [A9_Pipe0]>], [4, 5, 1, 1]>,
+ // Integer load pipeline
+ // FIXME: The timings are some rough approximations
+ //
+ // Immediate offset
+ InstrItinData<IIC_iLoadi , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<1, [A9_LSPipe]>], [3, 1]>,
+ //
+ // Register offset
+ InstrItinData<IIC_iLoadr , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<1, [A9_LSPipe]>], [3, 1, 1]>,
+ //
+ // Scaled register offset
+ InstrItinData<IIC_iLoadsi , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<2, [A9_LSPipe]>], [4, 1, 1]>,
+ //
+ // Immediate offset with update
+ InstrItinData<IIC_iLoadiu , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<2, [A9_LSPipe]>], [3, 2, 1]>,
+ //
+ // Register offset with update
+ InstrItinData<IIC_iLoadru , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<2, [A9_LSPipe]>], [3, 2, 1, 1]>,
+ //
+ // Scaled register offset with update
+ InstrItinData<IIC_iLoadsiu , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<2, [A9_LSPipe]>], [4, 3, 1, 1]>,
+ //
+ // Load multiple
+ InstrItinData<IIC_iLoadm , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<1, [A9_LSPipe]>]>,
+
+ // Integer store pipeline
+ //
+ // Immediate offset
+ InstrItinData<IIC_iStorei , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<1, [A9_LSPipe]>], [3, 1]>,
+ //
+ // Register offset
+ InstrItinData<IIC_iStorer , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<1, [A9_LSPipe]>], [3, 1, 1]>,
+ //
+ // Scaled register offset
+ InstrItinData<IIC_iStoresi , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<2, [A9_LSPipe]>], [3, 1, 1]>,
+ //
+ // Immediate offset with update
+ InstrItinData<IIC_iStoreiu , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<1, [A9_LSPipe]>], [2, 3, 1]>,
+ //
+ // Register offset with update
+ InstrItinData<IIC_iStoreru , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<1, [A9_LSPipe]>], [2, 3, 1, 1]>,
+ //
+ // Scaled register offset with update
+ InstrItinData<IIC_iStoresiu, [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<2, [A9_LSPipe]>], [3, 3, 1, 1]>,
+ //
+ // Store multiple
+ InstrItinData<IIC_iStorem , [InstrStage<1, [A9_Pipe1]>,
+ InstrStage<1, [A9_LSPipe]>]>,
+ // Branch
+ //
+ // no delay slots, so the latency of a branch is unimportant
+ InstrItinData<IIC_Br , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>]>,
+
// VFP and NEON shares the same register file. This means that every VFP
// instruction should wait for full completion of the consecutive NEON
// instruction and vice-versa. We model this behavior with two artificial FUs:
@@ -39,8 +152,8 @@ def CortexA9Itineraries : ProcessorItineraries<
// register file writeback!).
// Every NEON instruction does the same but with FUs swapped.
//
- // Since the reserved FU cannot be acquired this models precisly "cross-domain"
- // stalls.
+ // Since the reserved FU cannot be acquired, this models precisely
+ // "cross-domain" stalls.
// VFP
// Issue through integer pipeline, and execute in NEON unit.
@@ -48,21 +161,21 @@ def CortexA9Itineraries : ProcessorItineraries<
// FP Special Register to Integer Register File Move
InstrItinData<IIC_fpSTAT , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>]>,
//
// Single-precision FP Unary
InstrItinData<IIC_fpUNA32 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
// Extra latency cycles since wbck is 2 cycles
InstrStage<3, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [1, 1]>,
//
// Double-precision FP Unary
InstrItinData<IIC_fpUNA64 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
// Extra latency cycles since wbck is 2 cycles
InstrStage<3, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [1, 1]>,
//
@@ -70,124 +183,124 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_fpCMP32 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
// Extra latency cycles since wbck is 4 cycles
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [1, 1]>,
//
// Double-precision FP Compare
InstrItinData<IIC_fpCMP64 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
// Extra latency cycles since wbck is 4 cycles
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [1, 1]>,
//
// Single to Double FP Convert
InstrItinData<IIC_fpCVTSD , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1]>,
//
// Double to Single FP Convert
InstrItinData<IIC_fpCVTDS , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1]>,
//
// Single to Half FP Convert
InstrItinData<IIC_fpCVTSH , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1]>,
//
// Half to Single FP Convert
InstrItinData<IIC_fpCVTHS , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<3, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [2, 1]>,
//
// Single-Precision FP to Integer Convert
InstrItinData<IIC_fpCVTSI , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1]>,
//
// Double-Precision FP to Integer Convert
InstrItinData<IIC_fpCVTDI , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1]>,
//
// Integer to Single-Precision FP Convert
InstrItinData<IIC_fpCVTIS , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1]>,
//
// Integer to Double-Precision FP Convert
InstrItinData<IIC_fpCVTID , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1]>,
//
// Single-precision FP ALU
InstrItinData<IIC_fpALU32 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1, 1]>,
//
// Double-precision FP ALU
InstrItinData<IIC_fpALU64 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<5, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1, 1]>,
//
// Single-precision FP Multiply
InstrItinData<IIC_fpMUL32 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<6, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [5, 1, 1]>,
//
// Double-precision FP Multiply
InstrItinData<IIC_fpMUL64 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<7, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [6, 1, 1]>,
//
// Single-precision FP MAC
InstrItinData<IIC_fpMAC32 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<9, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [8, 0, 1, 1]>,
//
// Double-precision FP MAC
InstrItinData<IIC_fpMAC64 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<10, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [9, 0, 1, 1]>,
//
// Single-precision FP DIV
InstrItinData<IIC_fpDIV32 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<16, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<10, [A9_NPipe]>], [15, 1, 1]>,
//
// Double-precision FP DIV
InstrItinData<IIC_fpDIV64 , [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<26, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<20, [A9_NPipe]>], [25, 1, 1]>,
//
// Single-precision FP SQRT
InstrItinData<IIC_fpSQRT32, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<18, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<13, [A9_NPipe]>], [17, 1]>,
+ InstrStage<1, [A9_Pipe1]>,
+ InstrStage<13, [A9_NPipe]>], [17, 1]>,
//
// Double-precision FP SQRT
InstrItinData<IIC_fpSQRT64, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<33, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<28, [A9_NPipe]>], [32, 1]>,
//
@@ -195,92 +308,79 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_fpMOVIS, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
// Extra 1 latency cycle since wbck is 2 cycles
InstrStage<3, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [1, 1]>,
//
// Integer to Double-precision Move
InstrItinData<IIC_fpMOVID, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
// Extra 1 latency cycle since wbck is 2 cycles
InstrStage<3, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [1, 1, 1]>,
//
// Single-precision to Integer Move
InstrItinData<IIC_fpMOVSI, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [1, 1]>,
//
// Double-precision to Integer Move
InstrItinData<IIC_fpMOVDI, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [1, 1, 1]>,
//
// Single-precision FP Load
- // use A9_Issue to enforce the 1 load/store per cycle limit
InstrItinData<IIC_fpLoad32, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Issue], 0>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe], 0>,
+ InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<1, [A9_LSPipe]>,
InstrStage<1, [A9_NPipe]>]>,
//
// Double-precision FP Load
- // use A9_Issue to enforce the 1 load/store per cycle limit
InstrItinData<IIC_fpLoad64, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Issue], 0>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe], 0>,
+ InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<1, [A9_LSPipe]>,
InstrStage<1, [A9_NPipe]>]>,
//
// FP Load Multiple
- // use A9_Issue to enforce the 1 load/store per cycle limit
InstrItinData<IIC_fpLoadm, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Issue], 0>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe], 0>,
+ InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<1, [A9_LSPipe]>,
InstrStage<1, [A9_NPipe]>]>,
//
// Single-precision FP Store
- // use A9_Issue to enforce the 1 load/store per cycle limit
InstrItinData<IIC_fpStore32,[InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Issue], 0>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe], 0>,
+ InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<1, [A9_LSPipe]>,
InstrStage<1, [A9_NPipe]>]>,
//
// Double-precision FP Store
- // use A9_Issue to enforce the 1 load/store per cycle limit
InstrItinData<IIC_fpStore64,[InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Issue], 0>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe], 0>,
+ InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<1, [A9_LSPipe]>,
InstrStage<1, [A9_NPipe]>]>,
//
// FP Store Multiple
- // use A9_Issue to enforce the 1 load/store per cycle limit
InstrItinData<IIC_fpStorem, [InstrStage<1, [A9_DRegsVFP], 0, Required>,
InstrStage<2, [A9_DRegsN], 0, Reserved>,
- InstrStage<1, [A9_Issue], 0>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe], 0>,
+ InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<1, [A9_LSPipe]>,
InstrStage<1, [A9_NPipe]>]>,
// NEON
// Issue through integer pipeline, and execute in NEON unit.
- // FIXME: Neon pipeline and LdSt unit are multiplexed.
+ // FIXME: Neon pipeline and LdSt unit are multiplexed.
// Add some syntactic sugar to model this!
// VLD1
// FIXME: We don't model this instruction properly
InstrItinData<IIC_VLD1, [InstrStage<1, [A9_DRegsN], 0, Required>,
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Issue], 0>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe], 0>,
+ InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<1, [A9_LSPipe]>,
InstrStage<1, [A9_NPipe]>]>,
//
// VLD2
@@ -288,9 +388,8 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VLD2, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Issue], 0>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe], 0>,
+ InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<1, [A9_LSPipe]>,
InstrStage<1, [A9_NPipe]>], [2, 2, 1]>,
//
// VLD3
@@ -298,9 +397,8 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VLD3, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Issue], 0>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe], 0>,
+ InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<1, [A9_LSPipe]>,
InstrStage<1, [A9_NPipe]>], [2, 2, 2, 1]>,
//
// VLD4
@@ -308,9 +406,8 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VLD4, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Issue], 0>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe], 0>,
+ InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<1, [A9_LSPipe]>,
InstrStage<1, [A9_NPipe]>], [2, 2, 2, 2, 1]>,
//
// VST
@@ -318,121 +415,120 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VST, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Issue], 0>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<1, [A9_LSPipe], 0>,
+ InstrStage<1, [A9_Pipe1], 0>,
+ InstrStage<1, [A9_LSPipe]>,
InstrStage<1, [A9_NPipe]>]>,
//
// Double-register Integer Unary
InstrItinData<IIC_VUNAiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 2]>,
//
// Quad-register Integer Unary
InstrItinData<IIC_VUNAiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 2]>,
//
// Double-register Integer Q-Unary
InstrItinData<IIC_VQUNAiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1]>,
//
// Quad-register Integer CountQ-Unary
InstrItinData<IIC_VQUNAiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1]>,
//
// Double-register Integer Binary
InstrItinData<IIC_VBINiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [3, 2, 2]>,
//
// Quad-register Integer Binary
InstrItinData<IIC_VBINiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [3, 2, 2]>,
//
// Double-register Integer Subtract
InstrItinData<IIC_VSUBiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [3, 2, 1]>,
//
// Quad-register Integer Subtract
InstrItinData<IIC_VSUBiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [3, 2, 1]>,
//
// Double-register Integer Shift
InstrItinData<IIC_VSHLiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [3, 1, 1]>,
//
// Quad-register Integer Shift
InstrItinData<IIC_VSHLiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [3, 1, 1]>,
//
// Double-register Integer Shift (4 cycle)
InstrItinData<IIC_VSHLi4D, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1, 1]>,
//
// Quad-register Integer Shift (4 cycle)
InstrItinData<IIC_VSHLi4Q, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 1, 1]>,
//
// Double-register Integer Binary (4 cycle)
InstrItinData<IIC_VBINi4D, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 2, 2]>,
//
// Quad-register Integer Binary (4 cycle)
InstrItinData<IIC_VBINi4Q, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 2, 2]>,
//
// Double-register Integer Subtract (4 cycle)
InstrItinData<IIC_VSUBiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 2, 1]>,
//
// Quad-register Integer Subtract (4 cycle)
InstrItinData<IIC_VSUBiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [4, 2, 1]>,
//
@@ -440,7 +536,7 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VCNTiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [3, 2, 2]>,
//
// Quad-register Integer Count
@@ -449,35 +545,35 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VCNTiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [4, 2, 2]>,
//
// Double-register Absolute Difference and Accumulate
InstrItinData<IIC_VABAD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [6, 3, 2, 1]>,
//
// Quad-register Absolute Difference and Accumulate
InstrItinData<IIC_VABAQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [6, 3, 2, 1]>,
//
// Double-register Integer Pair Add Long
InstrItinData<IIC_VPALiD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [6, 3, 1]>,
//
// Quad-register Integer Pair Add Long
InstrItinData<IIC_VPALiQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [6, 3, 1]>,
//
@@ -485,14 +581,14 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VMULi16D, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [6, 2, 2]>,
//
// Quad-register Integer Multiply (.8, .16)
InstrItinData<IIC_VMULi16Q, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [7, 2, 2]>,
//
@@ -500,56 +596,56 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VMULi32D, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [7, 2, 1]>,
//
// Quad-register Integer Multiply (.32)
InstrItinData<IIC_VMULi32Q, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 9 cycles
InstrStage<10, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<4, [A9_NPipe]>], [9, 2, 1]>,
//
// Double-register Integer Multiply-Accumulate (.8, .16)
InstrItinData<IIC_VMACi16D, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [6, 3, 2, 2]>,
//
// Double-register Integer Multiply-Accumulate (.32)
InstrItinData<IIC_VMACi32D, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [7, 3, 2, 1]>,
//
// Quad-register Integer Multiply-Accumulate (.8, .16)
InstrItinData<IIC_VMACi16Q, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [7, 3, 2, 2]>,
//
// Quad-register Integer Multiply-Accumulate (.32)
InstrItinData<IIC_VMACi32Q, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 9 cycles
InstrStage<10, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<4, [A9_NPipe]>], [9, 3, 2, 1]>,
//
// Move Immediate
InstrItinData<IIC_VMOVImm, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [3]>,
//
// Double-register Permute Move
InstrItinData<IIC_VMOVD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// FIXME: all latencies are arbitrary, no information is available
InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_LSPipe]>], [2, 1]>,
//
// Quad-register Permute Move
@@ -558,42 +654,42 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VMOVQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// FIXME: all latencies are arbitrary, no information is available
InstrStage<4, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [3, 1]>,
//
// Integer to Single-precision Move
InstrItinData<IIC_VMOVIS , [InstrStage<1, [A9_DRegsN], 0, Required>,
// FIXME: all latencies are arbitrary, no information is available
InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [2, 1]>,
//
// Integer to Double-precision Move
InstrItinData<IIC_VMOVID , [InstrStage<1, [A9_DRegsN], 0, Required>,
// FIXME: all latencies are arbitrary, no information is available
InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [2, 1, 1]>,
//
// Single-precision to Integer Move
InstrItinData<IIC_VMOVSI , [InstrStage<1, [A9_DRegsN], 0, Required>,
// FIXME: all latencies are arbitrary, no information is available
InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [2, 1]>,
//
// Double-precision to Integer Move
InstrItinData<IIC_VMOVDI , [InstrStage<1, [A9_DRegsN], 0, Required>,
// FIXME: all latencies are arbitrary, no information is available
InstrStage<3, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [2, 2, 1]>,
//
// Integer to Lane Move
InstrItinData<IIC_VMOVISL , [InstrStage<1, [A9_DRegsN], 0, Required>,
// FIXME: all latencies are arbitrary, no information is available
InstrStage<4, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [3, 1, 1]>,
//
@@ -601,7 +697,7 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VUNAD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [5, 2]>,
//
// Quad-register FP Unary
@@ -610,7 +706,7 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VUNAQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [6, 2]>,
//
// Double-register FP Binary
@@ -619,7 +715,7 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VBIND, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [5, 2, 2]>,
//
// Quad-register FP Binary
@@ -630,14 +726,14 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VBINQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 8 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [6, 2, 2]>,
//
// Double-register FP Multiple-Accumulate
InstrItinData<IIC_VMACD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [6, 3, 2, 1]>,
//
// Quad-register FP Multiple-Accumulate
@@ -646,28 +742,28 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VMACQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 9 cycles
InstrStage<10, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<4, [A9_NPipe]>], [8, 4, 2, 1]>,
//
// Double-register Reciprocal Step
InstrItinData<IIC_VRECSD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [6, 2, 2]>,
//
// Quad-register Reciprocal Step
InstrItinData<IIC_VRECSQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 9 cycles
InstrStage<10, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<4, [A9_NPipe]>], [8, 2, 2]>,
//
// Double-register Permute
InstrItinData<IIC_VPERMD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 6 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [2, 2, 1, 1]>,
//
// Quad-register Permute
@@ -676,7 +772,7 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VPERMQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [3, 3, 1, 1]>,
//
// Quad-register Permute (3 cycle issue)
@@ -685,7 +781,7 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VPERMQ3, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 8 cycles
InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<3, [A9_LSPipe]>], [4, 4, 1, 1]>,
//
@@ -693,57 +789,57 @@ def CortexA9Itineraries : ProcessorItineraries<
InstrItinData<IIC_VEXTD, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<7, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<1, [A9_NPipe]>], [2, 1, 1]>,
//
// Quad-register VEXT
InstrItinData<IIC_VEXTQ, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 9 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [3, 1, 1]>,
//
// VTB
InstrItinData<IIC_VTB1, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [3, 2, 1]>,
InstrItinData<IIC_VTB2, [InstrStage<2, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [3, 2, 2, 1]>,
InstrItinData<IIC_VTB3, [InstrStage<2, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 8 cycles
InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<3, [A9_NPipe]>], [4, 2, 2, 3, 1]>,
InstrItinData<IIC_VTB4, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 8 cycles
InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<3, [A9_NPipe]>], [4, 2, 2, 3, 3, 1]>,
//
// VTBX
InstrItinData<IIC_VTBX1, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [3, 1, 2, 1]>,
InstrItinData<IIC_VTBX2, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 7 cycles
InstrStage<8, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<2, [A9_NPipe]>], [3, 1, 2, 2, 1]>,
InstrItinData<IIC_VTBX3, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 8 cycles
InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
+ InstrStage<1, [A9_Pipe1]>,
InstrStage<3, [A9_NPipe]>], [4, 1, 2, 2, 3, 1]>,
InstrItinData<IIC_VTBX4, [InstrStage<1, [A9_DRegsN], 0, Required>,
// Extra latency cycles since wbck is 8 cycles
InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
- InstrStage<1, [A9_Pipe0, A9_Pipe1]>,
- InstrStage<2, [A9_NPipe]>], [4, 1, 2, 2, 3, 3, 1]>
+ InstrStage<1, [A9_Pipe1]>,
+ InstrStage<2, [A9_NPipe]>], [4, 1, 2, 2, 3, 3, 1]>
]>;
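The Required/Reserved pairing used throughout this file has a concrete reading: a VFP instruction acquires its own register-file unit (A9_DRegsVFP, Required) for one cycle and holds the NEON-side unit (A9_DRegsN, Reserved) until its writeback completes, so the next NEON instruction, which must acquire A9_DRegsN, stalls for the remainder. A toy two-domain reservation model under that simplified view (not LLVM's actual scoreboard code):

    #include <algorithm>

    struct Domain { unsigned BusyUntil = 0; };

    // Issue an op that holds `required` for one cycle and keeps `reserved`
    // blocked until its writeback, `latency` cycles later.
    unsigned issue(Domain &required, Domain &reserved,
                   unsigned now, unsigned latency) {
      unsigned start = std::max(now, required.BusyUntil); // cross-domain stall
      required.BusyUntil = start + 1;
      reserved.BusyUntil = start + latency;
      return start;
    }

A NEON op issued right after a VFP divide finds its Required domain blocked until the divide's writeback, which is exactly the cross-domain stall the comment describes.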
diff --git a/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td b/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td
index f813022..08b560c 100644
--- a/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td
+++ b/contrib/llvm/lib/Target/ARM/ARMScheduleV6.td
@@ -16,7 +16,7 @@
// Functional Units
def V6_Pipe : FuncUnit; // pipeline
-// Scheduling information derived from "ARM1176JZF-S Technical Reference Manual".
+// Scheduling information derived from "ARM1176JZF-S Technical Reference Manual"
//
def ARMV6Itineraries : ProcessorItineraries<
[V6_Pipe], [
diff --git a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
index 8332bba..e7d92ed 100644
--- a/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/contrib/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -54,6 +54,9 @@ protected:
/// the VML[AS] instructions are slow (if so, don't use them).
bool SlowVMLx;
+ /// SlowFPBrcc - True if floating point compare + branch is slow.
+ bool SlowFPBrcc;
+
/// IsThumb - True if we are in thumb mode, false if in ARM mode.
bool IsThumb;
@@ -133,6 +136,7 @@ protected:
bool hasDivide() const { return HasHardwareDivide; }
bool hasT2ExtractPack() const { return HasT2ExtractPack; }
bool useVMLx() const {return hasVFP2() && !SlowVMLx; }
+ bool isFPBrccSlow() const { return SlowFPBrcc; }
bool hasFP16() const { return HasFP16; }
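SlowFPBrcc follows the same pattern as the existing SlowVMLx: a per-subtarget tuning bit with a cheap accessor. A sketch of the kind of consumer such a flag typically gates (hypothetical use site, not code from this patch):

    // Prefer a branchless lowering when FP compare + branch is slow.
    bool shouldAvoidFPCompareBranch(const ARMSubtarget &ST) {
      return ST.isFPBrccSlow();
    }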
diff --git a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index b4a9252..09203f9 100644
--- a/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -60,8 +60,10 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, const std::string &TT,
const std::string &FS)
: ARMBaseTargetMachine(T, TT, FS, false), InstrInfo(Subtarget),
DataLayout(Subtarget.isAPCS_ABI() ?
- std::string("e-p:32:32-f64:32:32-i64:32:32-n32") :
- std::string("e-p:32:32-f64:64:64-i64:64:64-n32")),
+ std::string("e-p:32:32-f64:32:32-i64:32:32-"
+ "v128:32:128-v64:32:64-n32") :
+ std::string("e-p:32:32-f64:64:64-i64:64:64-"
+ "v128:64:128-v64:64:64-n32")),
TLInfo(*this),
TSInfo(*this) {
}
@@ -74,9 +76,11 @@ ThumbTargetMachine::ThumbTargetMachine(const Target &T, const std::string &TT,
: ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))),
DataLayout(Subtarget.isAPCS_ABI() ?
std::string("e-p:32:32-f64:32:32-i64:32:32-"
- "i16:16:32-i8:8:32-i1:8:32-a:0:32-n32") :
+ "i16:16:32-i8:8:32-i1:8:32-"
+ "v128:32:128-v64:32:64-a:0:32-n32") :
std::string("e-p:32:32-f64:64:64-i64:64:64-"
- "i16:16:32-i8:8:32-i1:8:32-a:0:32-n32")),
+ "i16:16:32-i8:8:32-i1:8:32-"
+ "v128:64:128-v64:64:64-a:0:32-n32")),
TLInfo(*this),
TSInfo(*this) {
}
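The only change to these layout strings is the insertion of vector specs. Reading the APCS variant field by field, per LLVM's v<size>:<abi>:<pref> data layout grammar:

    // e            little-endian
    // p:32:32      32-bit pointers with 32-bit alignment
    // f64:32:32    doubles need only 32-bit ABI alignment (APCS)
    // v128:32:128  128-bit vectors: 32-bit ABI, 128-bit preferred alignment
    // v64:32:64    64-bit vectors:  32-bit ABI, 64-bit preferred alignment
    // n32          native integer width is 32 bits

The AAPCS strings differ only in raising the ABI alignments to 64 bits.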
@@ -98,6 +102,7 @@ bool ARMBaseTargetMachine::addPreRegAlloc(PassManagerBase &PM,
// FIXME: temporarily disabling load / store optimization pass for Thumb1.
if (OptLevel != CodeGenOpt::None && !Subtarget.isThumb1Only())
PM.add(createARMLoadStoreOptimizationPass(true));
+
return true;
}
@@ -115,21 +120,20 @@ bool ARMBaseTargetMachine::addPreSched2(PassManagerBase &PM,
// proper scheduling.
PM.add(createARMExpandPseudoPass());
- return true;
-}
-
-bool ARMBaseTargetMachine::addPreEmitPass(PassManagerBase &PM,
- CodeGenOpt::Level OptLevel) {
- // FIXME: temporarily disabling load / store optimization pass for Thumb1.
if (OptLevel != CodeGenOpt::None) {
if (!Subtarget.isThumb1Only())
PM.add(createIfConverterPass());
}
-
- if (Subtarget.isThumb2()) {
+ if (Subtarget.isThumb2())
PM.add(createThumb2ITBlockPass());
+
+ return true;
+}
+
+bool ARMBaseTargetMachine::addPreEmitPass(PassManagerBase &PM,
+ CodeGenOpt::Level OptLevel) {
+ if (Subtarget.isThumb2())
PM.add(createThumb2SizeReductionPass());
- }
PM.add(createARMConstantIslandPass());
return true;
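Net effect of the reshuffle: if-conversion and Thumb2 IT-block formation move from the pre-emit hook into addPreSched2, right after pseudo expansion, leaving pre-emit with only the passes that must see near-final code. Paraphrasing the resulting order:

    // addPreSched2:   ARMExpandPseudo
    //                 -> IfConverter        (optimized builds, not Thumb1)
    //                 -> Thumb2ITBlock      (Thumb2 only)
    // addPreEmitPass: Thumb2SizeReduction   (Thumb2 only)
    //                 -> ARMConstantIsland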
diff --git a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index bfa89c4..4b08324 100644
--- a/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/contrib/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -88,7 +88,7 @@ private:
/// its register number, or -1 if there is no match. To allow return values
/// to be used directly in register lists, arm registers have values between
/// 0 and 15.
- int MatchRegisterName(const StringRef &Name);
+ int MatchRegisterName(StringRef Name);
/// }
@@ -97,7 +97,7 @@ public:
ARMAsmParser(const Target &T, MCAsmParser &_Parser)
: TargetAsmParser(T), Parser(_Parser) {}
- virtual bool ParseInstruction(const StringRef &Name, SMLoc NameLoc,
+ virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands);
virtual bool ParseDirective(AsmToken DirectiveID);
@@ -425,7 +425,7 @@ bool ARMAsmParser::ParseMemory(OwningPtr<ARMOperand> &Op) {
const AsmToken &NextTok = Parser.getTok();
if (NextTok.isNot(AsmToken::EndOfStatement)) {
if (NextTok.isNot(AsmToken::Comma))
- return Error(NextTok.getLoc(), "',' expected");
+ return Error(NextTok.getLoc(), "',' expected");
Parser.Lex(); // Eat comma token.
if(ParseMemoryOffsetReg(Negative, OffsetRegShifted, ShiftType,
ShiftAmount, Offset, OffsetIsReg, OffsetRegNum,
@@ -488,7 +488,7 @@ bool ARMAsmParser::ParseMemoryOffsetReg(bool &Negative,
const AsmToken &Tok = Parser.getTok();
if (ParseShift(ShiftType, ShiftAmount, E))
- return Error(Tok.getLoc(), "shift expected");
+ return Error(Tok.getLoc(), "shift expected");
OffsetRegShifted = true;
}
}
@@ -517,7 +517,7 @@ bool ARMAsmParser::ParseShift(ShiftType &St,
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
return true;
- const StringRef &ShiftName = Tok.getString();
+ StringRef ShiftName = Tok.getString();
if (ShiftName == "lsl" || ShiftName == "LSL")
St = Lsl;
else if (ShiftName == "lsr" || ShiftName == "LSR")
@@ -549,7 +549,7 @@ bool ARMAsmParser::ParseShift(ShiftType &St,
}
/// A hack to allow some testing, to be replaced by a real table gen version.
-int ARMAsmParser::MatchRegisterName(const StringRef &Name) {
+int ARMAsmParser::MatchRegisterName(StringRef Name) {
if (Name == "r0" || Name == "R0")
return 0;
else if (Name == "r1" || Name == "R1")
@@ -593,7 +593,7 @@ MatchInstruction(const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
MCInst &Inst) {
ARMOperand &Op0 = *(ARMOperand*)Operands[0];
assert(Op0.Kind == ARMOperand::Token && "First operand not a Token");
- const StringRef &Mnemonic = Op0.getToken();
+ StringRef Mnemonic = Op0.getToken();
if (Mnemonic == "add" ||
Mnemonic == "stmfd" ||
Mnemonic == "str" ||
@@ -658,14 +658,13 @@ bool ARMAsmParser::ParseOperand(OwningPtr<ARMOperand> &Op) {
}
/// Parse an arm instruction mnemonic followed by its operands.
-bool ARMAsmParser::ParseInstruction(const StringRef &Name, SMLoc NameLoc,
+bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
OwningPtr<ARMOperand> Op;
ARMOperand::CreateToken(Op, Name, NameLoc);
Operands.push_back(Op.take());
- SMLoc Loc = Parser.getTok().getLoc();
if (getLexer().isNot(AsmToken::EndOfStatement)) {
// Read the first operand.
@@ -762,16 +761,11 @@ bool ARMAsmParser::ParseDirectiveSyntax(SMLoc L) {
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
return Error(L, "unexpected token in .syntax directive");
- const StringRef &Mode = Tok.getString();
- bool unified_syntax;
- if (Mode == "unified" || Mode == "UNIFIED") {
+ StringRef Mode = Tok.getString();
+ if (Mode == "unified" || Mode == "UNIFIED")
Parser.Lex();
- unified_syntax = true;
- }
- else if (Mode == "divided" || Mode == "DIVIDED") {
+ else if (Mode == "divided" || Mode == "DIVIDED")
Parser.Lex();
- unified_syntax = false;
- }
else
return Error(L, "unrecognized syntax mode in .syntax directive");
@@ -791,15 +785,10 @@ bool ARMAsmParser::ParseDirectiveCode(SMLoc L) {
if (Tok.isNot(AsmToken::Integer))
return Error(L, "unexpected token in .code directive");
int64_t Val = Parser.getTok().getIntVal();
- bool thumb_mode;
- if (Val == 16) {
+ if (Val == 16)
Parser.Lex();
- thumb_mode = true;
- }
- else if (Val == 32) {
+ else if (Val == 32)
Parser.Lex();
- thumb_mode = false;
- }
else
return Error(L, "invalid operand to .code directive");
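The recurring signature change in this file, const StringRef & becoming plain StringRef, reflects that StringRef is a two-word (pointer, length) value: copying it costs the same as passing a reference and drops an indirection. A simplified sketch of the type's shape (illustration only, not the real llvm::StringRef):

    #include <cstddef>

    struct StringRefSketch {
      const char *Data;
      std::size_t Length;
    };

    // Passing by value copies just two machine words.
    bool isR0(StringRefSketch Name) {
      return Name.Length == 2 && Name.Data[0] == 'r' && Name.Data[1] == '0';
    }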
diff --git a/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp b/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
index d95efdb..946f474 100644
--- a/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
@@ -175,23 +175,8 @@ namespace {
raw_ostream &O);
void printVFPf64ImmOperand(const MachineInstr *MI, int OpNum,
raw_ostream &O);
-
- void printHex8ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xff);
- }
- void printHex16ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xffff);
- }
- void printHex32ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xffffffff);
- }
- void printHex64ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm());
- }
+ void printNEONModImmOperand(const MachineInstr *MI, int OpNum,
+ raw_ostream &O);
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
unsigned AsmVariant, const char *ExtraCode,
@@ -322,7 +307,7 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
unsigned DRegLo = TM.getRegisterInfo()->getSubReg(Reg, ARM::dsub_0);
unsigned DRegHi = TM.getRegisterInfo()->getSubReg(Reg, ARM::dsub_1);
O << '{'
- << getRegisterName(DRegLo) << ',' << getRegisterName(DRegHi)
+ << getRegisterName(DRegLo) << ", " << getRegisterName(DRegHi)
<< '}';
} else if (Modifier && strcmp(Modifier, "lane") == 0) {
unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);
@@ -618,7 +603,7 @@ void ARMAsmPrinter::printAddrMode6Operand(const MachineInstr *MI, int Op,
O << "[" << getRegisterName(MO1.getReg());
if (MO2.getImm()) {
// FIXME: Both darwin as and GNU as violate ARM docs here.
- O << ", :" << MO2.getImm();
+ O << ", :" << (MO2.getImm() << 3);
}
O << "]";
}
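The shift by 3 converts the stored alignment to bits: the operand evidently carries the alignment in bytes while the [rN, :imm] syntax expects bits, so an 8-byte-aligned access now prints as ":64" instead of ":8". The same fix is applied to the MC-based printer below.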
@@ -1039,6 +1024,14 @@ void ARMAsmPrinter::printVFPf64ImmOperand(const MachineInstr *MI, int OpNum,
}
}
+void ARMAsmPrinter::printNEONModImmOperand(const MachineInstr *MI, int OpNum,
+ raw_ostream &O) {
+ unsigned EncodedImm = MI->getOperand(OpNum).getImm();
+ unsigned EltBits;
+ uint64_t Val = ARM_AM::decodeNEONModImm(EncodedImm, EltBits);
+ O << "#0x" << utohexstr(Val);
+}
+
bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &O) {
@@ -1064,20 +1057,10 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
printOperand(MI, OpNum, O);
return false;
case 'Q':
- if (TM.getTargetData()->isLittleEndian())
- break;
- // Fallthrough
case 'R':
- if (TM.getTargetData()->isBigEndian())
- break;
- // Fallthrough
- case 'H': // Write second word of DI / DF reference.
- // Verify that this operand has two consecutive registers.
- if (!MI->getOperand(OpNum).isReg() ||
- OpNum+1 == MI->getNumOperands() ||
- !MI->getOperand(OpNum+1).isReg())
- return true;
- ++OpNum; // Return the high-part.
+ case 'H':
+ report_fatal_error("llvm does not support 'Q', 'R', and 'H' modifiers!");
+ return true;
}
}
@@ -1384,11 +1367,11 @@ void ARMAsmPrinter::printInstructionThroughMCStreamer(const MachineInstr *MI) {
} else if (MO.isGlobal()) {
MCSymbol *Symbol = MCInstLowering.GetGlobalAddressSymbol(MO);
const MCSymbolRefExpr *SymRef1 =
- MCSymbolRefExpr::Create(Symbol,
- MCSymbolRefExpr::VK_ARM_LO16, OutContext);
+ MCSymbolRefExpr::Create(Symbol,
+ MCSymbolRefExpr::VK_ARM_LO16, OutContext);
const MCSymbolRefExpr *SymRef2 =
- MCSymbolRefExpr::Create(Symbol,
- MCSymbolRefExpr::VK_ARM_HI16, OutContext);
+ MCSymbolRefExpr::Create(Symbol,
+ MCSymbolRefExpr::VK_ARM_HI16, OutContext);
V1 = MCOperand::CreateExpr(SymRef1);
V2 = MCOperand::CreateExpr(SymRef2);
} else {
diff --git a/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp b/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
index 2b94b76..edc9345 100644
--- a/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
@@ -442,7 +442,7 @@ void ARMInstPrinter::printAddrMode6Operand(const MCInst *MI, unsigned OpNum,
O << "[" << getRegisterName(MO1.getReg());
if (MO2.getImm()) {
// FIXME: Both darwin as and GNU as violate ARM docs here.
- O << ", :" << MO2.getImm();
+ O << ", :" << (MO2.getImm() << 3);
}
O << "]";
}
@@ -779,22 +779,10 @@ void ARMInstPrinter::printVFPf64ImmOperand(const MCInst *MI, unsigned OpNum,
O << '#' << MI->getOperand(OpNum).getImm();
}
-void ARMInstPrinter::printHex8ImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xff);
-}
-
-void ARMInstPrinter::printHex16ImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xffff);
-}
-
-void ARMInstPrinter::printHex32ImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xffffffff);
-}
-
-void ARMInstPrinter::printHex64ImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm());
+void ARMInstPrinter::printNEONModImmOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ unsigned EncodedImm = MI->getOperand(OpNum).getImm();
+ unsigned EltBits;
+ uint64_t Val = ARM_AM::decodeNEONModImm(EncodedImm, EltBits);
+ O << "#0x" << utohexstr(Val);
}
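Both printers now funnel through ARM_AM::decodeNEONModImm, which expands the packed NEON "modified immediate" (op bit, 4-bit cmode, 8-bit imm8, assembled as in decodeN1VImm later in this patch) before printing, rather than masking a pre-expanded value per width. A sketch of the 32-bit-element expansion, mirroring the switch the disassembler deletes below (assumed shape, not the new API's actual body):

    #include <cstdint>

    uint64_t expandModImm32(unsigned Cmode, unsigned Imm8) {
      if (Cmode == 12) return (static_cast<uint64_t>(Imm8) << 8)  | 0xFF;
      if (Cmode == 13) return (static_cast<uint64_t>(Imm8) << 16) | 0xFFFF;
      // Otherwise imm8 sits in byte 0/1/2/3, selected by cmode<2:1>.
      return static_cast<uint64_t>(Imm8) << (8 * ((Cmode >> 1) & 3));
    }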
diff --git a/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h b/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h
index be0b7c1..ddf5047 100644
--- a/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h
+++ b/contrib/llvm/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h
@@ -104,10 +104,7 @@ public:
void printNoHashImmediate(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printVFPf32ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printVFPf64ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printHex8ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printHex16ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printHex32ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printHex64ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printNEONModImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printPCLabel(const MCInst *MI, unsigned OpNum, raw_ostream &O);
// FIXME: Implement.
diff --git a/contrib/llvm/lib/Target/ARM/CMakeLists.txt b/contrib/llvm/lib/Target/ARM/CMakeLists.txt
index 29e66e1..0df3466 100644
--- a/contrib/llvm/lib/Target/ARM/CMakeLists.txt
+++ b/contrib/llvm/lib/Target/ARM/CMakeLists.txt
@@ -33,6 +33,7 @@ add_llvm_target(ARMCodeGen
NEONPreAllocPass.cpp
Thumb1InstrInfo.cpp
Thumb1RegisterInfo.cpp
+ Thumb2HazardRecognizer.cpp
Thumb2ITBlockPass.cpp
Thumb2InstrInfo.cpp
Thumb2RegisterInfo.cpp
diff --git a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp
index adb7795..a07ff28 100644
--- a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp
+++ b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp
@@ -34,7 +34,7 @@
/// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
/// defined with two components:
///
-/// def pred { // Operand PredicateOperand
+/// def pred { // Operand PredicateOperand
/// ValueType Type = OtherVT;
/// string PrintMethod = "printPredicateOperand";
/// string AsmOperandLowerMethod = ?;
@@ -54,7 +54,7 @@
///
/// For the Defs part, in the simple case of only cc_out:$s, we have:
///
-/// def cc_out { // Operand OptionalDefOperand
+/// def cc_out { // Operand OptionalDefOperand
/// ValueType Type = OtherVT;
/// string PrintMethod = "printSBitModifierOperand";
/// string AsmOperandLowerMethod = ?;
@@ -765,7 +765,7 @@ static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
|| Opcode == ARM::SMC || Opcode == ARM::SVC) &&
"Unexpected Opcode");
- assert(NumOps >= 1 && OpInfo[0].RegClass == 0 && "Reg operand expected");
+ assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Reg operand expected");
int Imm32 = 0;
if (Opcode == ARM::SMC) {
@@ -1106,7 +1106,7 @@ static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
(OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
- (OpInfo[OpIdx+2].RegClass == 0) &&
+ (OpInfo[OpIdx+2].RegClass < 0) &&
"Expect 3 reg operands");
// Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
@@ -1201,7 +1201,7 @@ static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
return false;
assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
- (OpInfo[OpIdx+1].RegClass == 0) &&
+ (OpInfo[OpIdx+1].RegClass < 0) &&
"Expect 1 reg operand followed by 1 imm operand");
ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
@@ -1323,7 +1323,7 @@ static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
return false;
assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
- (OpInfo[OpIdx+1].RegClass == 0) &&
+ (OpInfo[OpIdx+1].RegClass < 0) &&
"Expect 1 reg operand followed by 1 imm operand");
ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
@@ -1494,7 +1494,7 @@ static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
// If there is still an operand info left which is an immediate operand, add
// an additional imm5 LSL/ASR operand.
- if (ThreeReg && OpInfo[OpIdx].RegClass == 0
+ if (ThreeReg && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Extract the 5-bit immediate field Inst{11-7}.
unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
@@ -1540,7 +1540,7 @@ static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
// If there is still an operand info left which is an immediate operand, add
// an additional rotate immediate operand.
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Extract the 2-bit rotate field Inst{11-10}.
unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
@@ -1725,7 +1725,7 @@ static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
"Tied to operand expected");
MI.addOperand(MI.getOperand(0));
- assert(OpInfo[2].RegClass == 0 && !OpInfo[2].isPredicate() &&
+ assert(OpInfo[2].RegClass < 0 && !OpInfo[2].isPredicate() &&
!OpInfo[2].isOptionalDef() && "Imm operand expected");
MI.addOperand(MCOperand::CreateImm(fbits));
@@ -1984,7 +1984,7 @@ static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
++OpIdx;
// Extract/decode the f64/f32 immediate.
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// The asm syntax specifies the before-expanded <imm>.
// Not VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
@@ -2077,42 +2077,12 @@ static unsigned decodeLaneIndex(uint32_t insn) {
// imm3 = Inst{18-16}, imm4 = Inst{3-0}
// Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
+ unsigned char op = (insn >> 5) & 1;
unsigned char cmode = (insn >> 8) & 0xF;
unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
((insn >> 16) & 7) << 4 |
(insn & 0xF);
- uint64_t Imm64 = 0;
-
- switch (esize) {
- case ESize8:
- Imm64 = Imm8;
- break;
- case ESize16:
- Imm64 = Imm8 << 8*(cmode >> 1 & 1);
- break;
- case ESize32: {
- if (cmode == 12)
- Imm64 = (Imm8 << 8) | 0xFF;
- else if (cmode == 13)
- Imm64 = (Imm8 << 16) | 0xFFFF;
- else {
- // Imm8 to be shifted left by how many bytes...
- Imm64 = Imm8 << 8*(cmode >> 1 & 3);
- }
- break;
- }
- case ESize64: {
- for (unsigned i = 0; i < 8; ++i)
- if ((Imm8 >> i) & 1)
- Imm64 |= (uint64_t)0xFF << 8*i;
- break;
- }
- default:
- assert(0 && "Unreachable code!");
- return 0;
- }
-
- return Imm64;
+ return (op << 12) | (cmode << 8) | Imm8;
}
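
Note on the hunk above: decodeN1VImm no longer expands the modified immediate itself; it just re-packs op:cmode:imm8 into the 13-bit value that printNEONModImmOperand later hands to ARM_AM::decodeNEONModImm. For readers following Table A7-15, here is a minimal self-contained sketch of the expansion the deleted switch used to perform, keyed on cmode/op instead of an ElemSize argument. The helper name is illustrative, not the in-tree API, and the cmode == 15 cases are omitted:

  #include <cstdint>

  // Illustrative expansion of the 13-bit op:cmode:imm8 encoding.
  static uint64_t expandModImmSketch(unsigned ModImm, unsigned &EltBits) {
    unsigned Op    = (ModImm >> 12) & 1;
    unsigned CMode = (ModImm >> 8) & 0xF;
    uint64_t Imm8  = ModImm & 0xFF;
    if (CMode < 8)  { EltBits = 32; return Imm8 << (8 * ((CMode >> 1) & 3)); }
    if (CMode < 12) { EltBits = 16; return Imm8 << (8 * ((CMode >> 1) & 1)); }
    if (CMode == 12) { EltBits = 32; return (Imm8 << 8)  | 0xFF;   }
    if (CMode == 13) { EltBits = 32; return (Imm8 << 16) | 0xFFFF; }
    if (CMode == 14 && Op == 0) { EltBits = 8; return Imm8; }
    // CMode == 14, Op == 1: each imm8 bit expands to a full byte.
    EltBits = 64;
    uint64_t Val = 0;
    for (unsigned i = 0; i < 8; ++i)
      if ((Imm8 >> i) & 1)
        Val |= (uint64_t)0xFF << (8 * i);
    return Val;
  }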
// A8.6.339 VMUL, VMULL (by scalar)
@@ -2303,7 +2273,7 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
}
assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
- OpInfo[OpIdx + 1].RegClass == 0 && "Addrmode #6 Operands expected");
+ OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
Rn)));
MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
@@ -2320,7 +2290,7 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
"Reg operand expected");
RegClass = OpInfo[OpIdx].RegClass;
- while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
+ while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
MI.addOperand(MCOperand::CreateReg(
getRegisterEnum(B, RegClass, Rd,
UseDRegPair(Opcode))));
@@ -2329,7 +2299,7 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
}
// Handle possible lane index.
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
++OpIdx;
@@ -2340,7 +2310,7 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
// possible TIED_TO DPR/QPR's (ignored), then possible lane index.
RegClass = OpInfo[0].RegClass;
- while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
+ while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
MI.addOperand(MCOperand::CreateReg(
getRegisterEnum(B, RegClass, Rd,
UseDRegPair(Opcode))));
@@ -2355,7 +2325,7 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
}
assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
- OpInfo[OpIdx + 1].RegClass == 0 && "Addrmode #6 Operands expected");
+ OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
Rn)));
MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
@@ -2366,7 +2336,7 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
++OpIdx;
}
- while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
+ while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
"Tied to operand expected");
MI.addOperand(MCOperand::CreateReg(0));
@@ -2374,7 +2344,7 @@ static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
}
// Handle possible lane index.
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
++OpIdx;
@@ -2438,7 +2408,7 @@ static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
assert(NumOps >= 2 &&
(OpInfo[0].RegClass == ARM::DPRRegClassID ||
OpInfo[0].RegClass == ARM::QPRRegClassID) &&
- (OpInfo[1].RegClass == 0) &&
+ (OpInfo[1].RegClass < 0) &&
"Expect 1 reg operand followed by 1 imm operand");
// Qd/Dd = Inst{22:15-12} => NEON Rd
@@ -2552,7 +2522,7 @@ static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
}
// Add the imm operand, if required.
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
unsigned imm = 0xFFFFFFFF;
@@ -2632,7 +2602,7 @@ static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
decodeNEONRm(insn))));
++OpIdx;
- assert(OpInfo[OpIdx].RegClass == 0 && "Imm operand expected");
+ assert(OpInfo[OpIdx].RegClass < 0 && "Imm operand expected");
// Add the imm operand.
@@ -2762,7 +2732,7 @@ static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
getRegisterEnum(B, OpInfo[OpIdx].RegClass, m)));
++OpIdx;
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Add the imm operand.
unsigned Imm = 0;
@@ -2869,15 +2839,9 @@ static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
return true;
}
-static bool DisassembleNEONFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded, BO) {
- assert(0 && "Unreachable code!");
- return false;
-}
-
// Vector Get Lane (move scalar to ARM core register) Instructions.
// VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
-static bool DisassembleNEONGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
+static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
@@ -2887,7 +2851,7 @@ static bool DisassembleNEONGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
OpInfo[0].RegClass == ARM::GPRRegClassID &&
OpInfo[1].RegClass == ARM::DPRRegClassID &&
- OpInfo[2].RegClass == 0 &&
+ OpInfo[2].RegClass < 0 &&
"Expect >= 3 operands with one dst operand");
ElemSize esize =
@@ -2911,7 +2875,7 @@ static bool DisassembleNEONGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
// Vector Set Lane (move ARM core register to scalar) Instructions.
// VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
-static bool DisassembleNEONSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
+static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
@@ -2923,7 +2887,7 @@ static bool DisassembleNEONSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
OpInfo[1].RegClass == ARM::DPRRegClassID &&
TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
OpInfo[2].RegClass == ARM::GPRRegClassID &&
- OpInfo[3].RegClass == 0 &&
+ OpInfo[3].RegClass < 0 &&
"Expect >= 3 operands with one dst operand");
ElemSize esize =
@@ -2950,7 +2914,7 @@ static bool DisassembleNEONSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
// Vector Duplicate Instructions (from ARM core register to all elements).
// VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
-static bool DisassembleNEONDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
+static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
@@ -3090,13 +3054,6 @@ static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
return false;
}
-static bool DisassembleThumbMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded, BO) {
-
- assert(0 && "Unexpected thumb misc. instruction!");
- return false;
-}
-
/// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
/// We divide the disassembly task into different categories, with each one
/// corresponding to a specific instruction encoding format. There could be
@@ -3128,12 +3085,10 @@ static const DisassembleFP FuncPtrs[] = {
&DisassembleVFPLdStMulFrm,
&DisassembleVFPMiscFrm,
&DisassembleThumbFrm,
- &DisassembleNEONFrm,
- &DisassembleNEONGetLnFrm,
- &DisassembleNEONSetLnFrm,
- &DisassembleNEONDupFrm,
&DisassembleMiscFrm,
- &DisassembleThumbMiscFrm,
+ &DisassembleNGetLnFrm,
+ &DisassembleNSetLnFrm,
+ &DisassembleNDupFrm,
// VLD and VST (including one lane) Instructions.
&DisassembleNLdSt,
@@ -3233,7 +3188,8 @@ bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode,
// a pair of TargetOperandInfos with isPredicate() property.
if (NumOpsRemaining >= 2 &&
OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
- OpInfo[Idx].RegClass == 0 && OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
+ OpInfo[Idx].RegClass < 0 &&
+ OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
{
// If we are inside an IT block, get the IT condition bits maintained via
// ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
@@ -3265,7 +3221,8 @@ bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
// a pair of TargetOperandInfos with isPredicate() property.
if (NumOpsRemaining >= 2 &&
OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
- OpInfo[Idx].RegClass == 0 && OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
+ OpInfo[Idx].RegClass < 0 &&
+ OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
{
// If we are inside an IT block, get the IT condition bits maintained via
// ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
diff --git a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.h b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.h
index b1d90df..7d21256 100644
--- a/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.h
+++ b/contrib/llvm/lib/Target/ARM/Disassembler/ARMDisassemblerCore.h
@@ -137,25 +137,25 @@ static inline void setSlice(uint32_t &Bits, unsigned From, unsigned To,
/// Various utilities for checking the target specific flags.
/// A unary data processing instruction doesn't have an Rn operand.
-static inline bool isUnaryDP(unsigned TSFlags) {
+static inline bool isUnaryDP(uint64_t TSFlags) {
return (TSFlags & ARMII::UnaryDP);
}
/// This four-bit field describes the addressing mode used.
/// See also ARMBaseInstrInfo.h.
-static inline unsigned getAddrMode(unsigned TSFlags) {
+static inline unsigned getAddrMode(uint64_t TSFlags) {
return (TSFlags & ARMII::AddrModeMask);
}
/// {IndexModePre, IndexModePost}
/// Only valid for load and store ops.
/// See also ARMBaseInstrInfo.h.
-static inline unsigned getIndexMode(unsigned TSFlags) {
+static inline unsigned getIndexMode(uint64_t TSFlags) {
return (TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
}
/// Pre-/post-indexed operations define an extra $base_wb in the OutOperandList.
-static inline bool isPrePostLdSt(unsigned TSFlags) {
+static inline bool isPrePostLdSt(uint64_t TSFlags) {
return (TSFlags & ARMII::IndexModeMask) != 0;
}
diff --git a/contrib/llvm/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h b/contrib/llvm/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h
index 4b2e308..4b7a0bf 100644
--- a/contrib/llvm/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h
+++ b/contrib/llvm/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h
@@ -395,7 +395,7 @@ static bool DisassembleThumb1General(MCInst &MI, unsigned Opcode, uint32_t insn,
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::tGPRRegClassID,
getT1tRm(insn))));
} else {
- assert(OpInfo[OpIdx].RegClass == 0 &&
+ assert(OpInfo[OpIdx].RegClass < 0 &&
!OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()
&& "Pure imm operand expected");
MI.addOperand(MCOperand::CreateImm(UseRt ? getT1Imm8(insn)
@@ -531,7 +531,7 @@ static bool DisassembleThumb1LdPC(MCInst &MI, unsigned Opcode, uint32_t insn,
if (!OpInfo) return false;
assert(NumOps >= 2 && OpInfo[0].RegClass == ARM::tGPRRegClassID &&
- (OpInfo[1].RegClass == 0 &&
+ (OpInfo[1].RegClass < 0 &&
!OpInfo[1].isPredicate() &&
!OpInfo[1].isOptionalDef())
&& "Invalid arguments");
@@ -598,7 +598,7 @@ static bool DisassembleThumb1LdSt(unsigned opA, MCInst &MI, unsigned Opcode,
assert(OpIdx < NumOps && "More operands expected");
- if (OpInfo[OpIdx].RegClass == 0 && !OpInfo[OpIdx].isPredicate() &&
+ if (OpInfo[OpIdx].RegClass < 0 && !OpInfo[OpIdx].isPredicate() &&
!OpInfo[OpIdx].isOptionalDef()) {
MI.addOperand(MCOperand::CreateImm(Imm5 ? getT1Imm5(insn) : 0));
@@ -632,7 +632,7 @@ static bool DisassembleThumb1LdStSP(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(NumOps >= 3 &&
OpInfo[0].RegClass == ARM::tGPRRegClassID &&
OpInfo[1].RegClass == ARM::GPRRegClassID &&
- (OpInfo[2].RegClass == 0 &&
+ (OpInfo[2].RegClass < 0 &&
!OpInfo[2].isPredicate() &&
!OpInfo[2].isOptionalDef())
&& "Invalid arguments");
@@ -658,7 +658,7 @@ static bool DisassembleThumb1AddPCi(MCInst &MI, unsigned Opcode, uint32_t insn,
if (!OpInfo) return false;
assert(NumOps >= 2 && OpInfo[0].RegClass == ARM::tGPRRegClassID &&
- (OpInfo[1].RegClass == 0 &&
+ (OpInfo[1].RegClass < 0 &&
!OpInfo[1].isPredicate() &&
!OpInfo[1].isOptionalDef())
&& "Invalid arguments");
@@ -685,7 +685,7 @@ static bool DisassembleThumb1AddSPi(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(NumOps >= 3 &&
OpInfo[0].RegClass == ARM::tGPRRegClassID &&
OpInfo[1].RegClass == ARM::GPRRegClassID &&
- (OpInfo[2].RegClass == 0 &&
+ (OpInfo[2].RegClass < 0 &&
!OpInfo[2].isPredicate() &&
!OpInfo[2].isOptionalDef())
&& "Invalid arguments");
@@ -761,7 +761,7 @@ static bool DisassembleThumb1Misc(MCInst &MI, unsigned Opcode, uint32_t insn,
// Predicate operands are handled elsewhere.
if (NumOps == 2 &&
OpInfo[0].isPredicate() && OpInfo[1].isPredicate() &&
- OpInfo[0].RegClass == 0 && OpInfo[1].RegClass == ARM::CCRRegClassID) {
+ OpInfo[0].RegClass < 0 && OpInfo[1].RegClass == ARM::CCRRegClassID) {
return true;
}
@@ -808,7 +808,7 @@ static bool DisassembleThumb1Misc(MCInst &MI, unsigned Opcode, uint32_t insn,
}
assert(NumOps >= 2 && OpInfo[0].RegClass == ARM::tGPRRegClassID &&
- (OpInfo[1].RegClass==0 || OpInfo[1].RegClass==ARM::tGPRRegClassID)
+ (OpInfo[1].RegClass < 0 || OpInfo[1].RegClass==ARM::tGPRRegClassID)
&& "Expect >=2 operands");
// Add the destination operand.
@@ -913,7 +913,7 @@ static bool DisassembleThumb1CondBr(MCInst &MI, unsigned Opcode, uint32_t insn,
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
- assert(NumOps == 3 && OpInfo[0].RegClass == 0 &&
+ assert(NumOps == 3 && OpInfo[0].RegClass < 0 &&
OpInfo[1].isPredicate() && OpInfo[2].RegClass == ARM::CCRRegClassID
&& "Exactly 3 operands expected");
@@ -939,7 +939,7 @@ static bool DisassembleThumb1Br(MCInst &MI, unsigned Opcode, uint32_t insn,
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
- assert(NumOps == 1 && OpInfo[0].RegClass == 0 && "1 imm operand expected");
+ assert(NumOps == 1 && OpInfo[0].RegClass < 0 && "1 imm operand expected");
unsigned Imm11 = getT1Imm11(insn);
@@ -1239,7 +1239,7 @@ static bool DisassembleThumb2LdStDual(MCInst &MI, unsigned Opcode,
&& OpInfo[0].RegClass == ARM::GPRRegClassID
&& OpInfo[1].RegClass == ARM::GPRRegClassID
&& OpInfo[2].RegClass == ARM::GPRRegClassID
- && OpInfo[3].RegClass == 0
+ && OpInfo[3].RegClass < 0
&& "Expect >= 4 operands and first 3 as reg operands");
// Add the <Rt> <Rt2> operands.
@@ -1322,8 +1322,8 @@ static bool DisassembleThumb2DPSoReg(MCInst &MI, unsigned Opcode, uint32_t insn,
assert(NumOps == 4
&& OpInfo[0].RegClass == ARM::GPRRegClassID
&& OpInfo[1].RegClass == ARM::GPRRegClassID
- && OpInfo[2].RegClass == 0
- && OpInfo[3].RegClass == 0
+ && OpInfo[2].RegClass < 0
+ && OpInfo[3].RegClass < 0
&& "Exactlt 4 operands expect and first two as reg operands");
// Only need to populate the src reg operand.
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
@@ -1375,7 +1375,7 @@ static bool DisassembleThumb2DPSoReg(MCInst &MI, unsigned Opcode, uint32_t insn,
if (NumOps == OpIdx)
return true;
- if (OpInfo[OpIdx].RegClass == 0 && !OpInfo[OpIdx].isPredicate()
+ if (OpInfo[OpIdx].RegClass < 0 && !OpInfo[OpIdx].isPredicate()
&& !OpInfo[OpIdx].isOptionalDef()) {
if (Thumb2ShiftOpcode(Opcode))
@@ -1440,7 +1440,7 @@ static bool DisassembleThumb2DPModImm(MCInst &MI, unsigned Opcode,
}
// The modified immediate operand should come next.
- assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0 &&
+ assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0 &&
!OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()
&& "Pure imm operand expected");
@@ -1555,7 +1555,7 @@ static bool DisassembleThumb2DPBinImm(MCInst &MI, unsigned Opcode,
++OpIdx;
}
- assert(OpInfo[OpIdx].RegClass == 0 && !OpInfo[OpIdx].isPredicate()
+ assert(OpInfo[OpIdx].RegClass < 0 && !OpInfo[OpIdx].isPredicate()
&& !OpInfo[OpIdx].isOptionalDef()
&& "Pure imm operand expected");
@@ -1772,7 +1772,7 @@ static bool DisassembleThumb2PreLoad(MCInst &MI, unsigned Opcode, uint32_t insn,
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
decodeRm(insn))));
} else {
- assert(OpInfo[OpIdx].RegClass == 0 && !OpInfo[OpIdx].isPredicate()
+ assert(OpInfo[OpIdx].RegClass < 0 && !OpInfo[OpIdx].isPredicate()
&& !OpInfo[OpIdx].isOptionalDef()
&& "Pure imm operand expected");
int Offset = 0;
@@ -1792,7 +1792,7 @@ static bool DisassembleThumb2PreLoad(MCInst &MI, unsigned Opcode, uint32_t insn,
}
++OpIdx;
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0 &&
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0 &&
!OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Fills in the shift amount for t2PLDs, t2PLDWs, t2PLIs.
MI.addOperand(MCOperand::CreateImm(slice(insn, 5, 4)));
@@ -1818,7 +1818,7 @@ static bool DisassembleThumb2Ldpci(MCInst &MI, unsigned Opcode,
assert(NumOps >= 2 &&
OpInfo[0].RegClass == ARM::GPRRegClassID &&
- OpInfo[1].RegClass == 0 &&
+ OpInfo[1].RegClass < 0 &&
"Expect >= 2 operands, first as reg, and second as imm operand");
// Build the register operand, followed by the (+/-)imm12 immediate.
@@ -1930,7 +1930,7 @@ static bool DisassembleThumb2LdSt(bool Load, MCInst &MI, unsigned Opcode,
++OpIdx;
}
- assert(OpInfo[OpIdx].RegClass == 0 && !OpInfo[OpIdx].isPredicate()
+ assert(OpInfo[OpIdx].RegClass < 0 && !OpInfo[OpIdx].isPredicate()
&& !OpInfo[OpIdx].isOptionalDef()
&& "Pure imm operand expected");
@@ -1981,7 +1981,7 @@ static bool DisassembleThumb2DPReg(MCInst &MI, unsigned Opcode, uint32_t insn,
decodeRm(insn))));
++OpIdx;
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Add the rotation amount immediate.
MI.addOperand(MCOperand::CreateImm(decodeRotate(insn)));
diff --git a/contrib/llvm/lib/Target/ARM/NEONMoveFix.cpp b/contrib/llvm/lib/Target/ARM/NEONMoveFix.cpp
index 0a4400c..bbdd3c7 100644
--- a/contrib/llvm/lib/Target/ARM/NEONMoveFix.cpp
+++ b/contrib/llvm/lib/Target/ARM/NEONMoveFix.cpp
@@ -105,8 +105,8 @@ bool NEONMoveFixPass::InsertMoves(MachineBasicBlock &MBB) {
unsigned MOReg = MO.getReg();
Defs[MOReg] = MI;
- // Catch subregs as well.
- for (const unsigned *R = TRI->getSubRegisters(MOReg); *R; ++R)
+ // Catch aliases as well.
+ for (const unsigned *R = TRI->getAliasSet(MOReg); *R; ++R)
Defs[*R] = MI;
}
}
diff --git a/contrib/llvm/lib/Target/ARM/NEONPreAllocPass.cpp b/contrib/llvm/lib/Target/ARM/NEONPreAllocPass.cpp
index a725898..f67717c 100644
--- a/contrib/llvm/lib/Target/ARM/NEONPreAllocPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/NEONPreAllocPass.cpp
@@ -407,7 +407,7 @@ NEONPreAllocPass::FormsRegSequence(MachineInstr *MI,
"expected a virtual register");
// Extracting from a Q or QQ register.
MachineInstr *DefMI = MRI->getVRegDef(VirtReg);
- if (!DefMI || !DefMI->isExtractSubreg())
+ if (!DefMI || !DefMI->isCopy() || !DefMI->getOperand(1).getSubReg())
return false;
VirtReg = DefMI->getOperand(1).getReg();
if (LastSrcReg && LastSrcReg != VirtReg)
@@ -418,7 +418,7 @@ NEONPreAllocPass::FormsRegSequence(MachineInstr *MI,
RC != ARM::QQPRRegisterClass &&
RC != ARM::QQQQPRRegisterClass)
return false;
- unsigned SubIdx = DefMI->getOperand(2).getImm();
+ unsigned SubIdx = DefMI->getOperand(1).getSubReg();
if (LastSubIdx) {
if (LastSubIdx != SubIdx-Stride)
return false;
@@ -434,22 +434,21 @@ NEONPreAllocPass::FormsRegSequence(MachineInstr *MI,
// FIXME: Updating the uses of EXTRACT_SUBREG from REG_SEQUENCE is
// currently required for correctness. e.g.
- // %reg1041;<def> = REG_SEQUENCE %reg1040<kill>, 5, %reg1035<kill>, 6
+ // %reg1041<def> = REG_SEQUENCE %reg1040<kill>, 5, %reg1035<kill>, 6
// %reg1042<def> = EXTRACT_SUBREG %reg1041, 6
// %reg1043<def> = EXTRACT_SUBREG %reg1041, 5
// VST1q16 %reg1025<kill>, 0, %reg1043<kill>, %reg1042<kill>,
- // reg1025 and reg1043 should be replaced with reg1041:6 and reg1041:5
+ // reg1042 and reg1043 should be replaced with reg1041:6 and reg1041:5
// respectively.
// We need to change how we model uses of REG_SEQUENCE.
for (unsigned R = 0; R < NumRegs; ++R) {
MachineOperand &MO = MI->getOperand(FirstOpnd + R);
unsigned OldReg = MO.getReg();
MachineInstr *DefMI = MRI->getVRegDef(OldReg);
- assert(DefMI->isExtractSubreg());
+ assert(DefMI->isCopy());
MO.setReg(LastSrcReg);
MO.setSubReg(SubIds[R]);
- if (R != 0)
- MO.setIsKill(false);
+ MO.setIsKill(false);
// Delete the EXTRACT_SUBREG if its result is now dead.
if (MRI->use_empty(OldReg))
DefMI->eraseFromParent();
@@ -467,43 +466,9 @@ bool NEONPreAllocPass::PreAllocNEONRegisters(MachineBasicBlock &MBB) {
unsigned FirstOpnd, NumRegs, Offset, Stride;
if (!isNEONMultiRegOp(MI->getOpcode(), FirstOpnd, NumRegs, Offset, Stride))
continue;
- if (llvm::ModelWithRegSequence() &&
- FormsRegSequence(MI, FirstOpnd, NumRegs, Offset, Stride))
+ if (FormsRegSequence(MI, FirstOpnd, NumRegs, Offset, Stride))
continue;
-
- MachineBasicBlock::iterator NextI = llvm::next(MBBI);
- for (unsigned R = 0; R < NumRegs; ++R) {
- MachineOperand &MO = MI->getOperand(FirstOpnd + R);
- assert(MO.isReg() && MO.getSubReg() == 0 && "unexpected operand");
- unsigned VirtReg = MO.getReg();
- assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- "expected a virtual register");
-
- // For now, just assign a fixed set of adjacent registers.
- // This leaves plenty of room for future improvements.
- static const unsigned NEONDRegs[] = {
- ARM::D0, ARM::D1, ARM::D2, ARM::D3,
- ARM::D4, ARM::D5, ARM::D6, ARM::D7
- };
- MO.setReg(NEONDRegs[Offset + R * Stride]);
-
- if (MO.isUse()) {
- // Insert a copy from VirtReg.
- TII->copyRegToReg(MBB, MBBI, MO.getReg(), VirtReg,
- ARM::DPRRegisterClass, ARM::DPRRegisterClass,
- DebugLoc());
- if (MO.isKill()) {
- MachineInstr *CopyMI = prior(MBBI);
- CopyMI->findRegisterUseOperand(VirtReg)->setIsKill();
- }
- MO.setIsKill();
- } else if (MO.isDef() && !MO.isDead()) {
- // Add a copy to VirtReg.
- TII->copyRegToReg(MBB, NextI, VirtReg, MO.getReg(),
- ARM::DPRRegisterClass, ARM::DPRRegisterClass,
- DebugLoc());
- }
- }
+ llvm_unreachable("expected a REG_SEQUENCE");
}
return Modified;
diff --git a/contrib/llvm/lib/Target/ARM/README.txt b/contrib/llvm/lib/Target/ARM/README.txt
index 85d5ca0..0cb8ff0 100644
--- a/contrib/llvm/lib/Target/ARM/README.txt
+++ b/contrib/llvm/lib/Target/ARM/README.txt
@@ -590,3 +590,70 @@ than the Z bit, we'll need additional logic to reverse the conditionals
associated with the comparison. Perhaps a pseudo-instruction for the comparison,
with a post-codegen pass to clean up and handle the condition codes?
See PR5694 for testcase.
+
+//===---------------------------------------------------------------------===//
+
+Given the following on armv5:
+int test1(int A, int B) {
+ return (A&-8388481)|(B&8388480);
+}
+
+We currently generate:
+ ldr r2, .LCPI0_0
+ and r0, r0, r2
+ ldr r2, .LCPI0_1
+ and r1, r1, r2
+ orr r0, r1, r0
+ bx lr
+
+We should be able to replace the second ldr+and with a bic (i.e. reuse the
+constant which was already loaded). Not sure what's necessary to do that.
+
+//===---------------------------------------------------------------------===//
+
+Given the following on ARMv7:
+int test1(int A, int B) {
+ return (A&-8388481)|(B&8388480);
+}
+
+We currently generate:
+ bfc r0, #7, #16
+ movw r2, #:lower16:8388480
+ movt r2, #:upper16:8388480
+ and r1, r1, r2
+ orr r0, r1, r0
+ bx lr
+
+The following is much shorter:
+ lsr r1, r1, #7
+ bfi r0, r1, #7, #16
+ bx lr
+
+
+//===---------------------------------------------------------------------===//
+
+The code generated for bswap on armv4/5 (CPUs without rev) is less than ideal:
+
+int a(int x) { return __builtin_bswap32(x); }
+
+a:
+ mov r1, #255, 24
+ mov r2, #255, 16
+ and r1, r1, r0, lsr #8
+ and r2, r2, r0, lsl #8
+ orr r1, r1, r0, lsr #24
+ orr r0, r2, r0, lsl #24
+ orr r0, r0, r1
+ bx lr
+
+Something like the following would be better (fewer instructions/registers):
+ eor r1, r0, r0, ror #16
+ bic r1, r1, #0xff0000
+ mov r1, r1, lsr #8
+ eor r0, r1, r0, ror #8
+ bx lr
+
+A custom Thumb version would also be a slight improvement over the generic
+version.
+
+//===---------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
index fae84d4..af630ac 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -33,64 +33,24 @@ unsigned Thumb1InstrInfo::getUnindexedOpcode(unsigned Opc) const {
return 0;
}
-bool Thumb1InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
- if (DestRC == ARM::GPRRegisterClass) {
- if (SrcRC == ARM::GPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVgpr2gpr), DestReg).addReg(SrcReg);
- return true;
- } else if (SrcRC == ARM::tGPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVtgpr2gpr), DestReg).addReg(SrcReg);
- return true;
- }
- } else if (DestRC == ARM::tGPRRegisterClass) {
- if (SrcRC == ARM::GPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVgpr2tgpr), DestReg).addReg(SrcReg);
- return true;
- } else if (SrcRC == ARM::tGPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg).addReg(SrcReg);
- return true;
- }
- }
-
- return false;
-}
-
-bool Thumb1InstrInfo::
-canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const {
- if (Ops.size() != 1) return false;
-
- unsigned OpNum = Ops[0];
- unsigned Opc = MI->getOpcode();
- switch (Opc) {
- default: break;
- case ARM::tMOVr:
- case ARM::tMOVtgpr2gpr:
- case ARM::tMOVgpr2tgpr:
- case ARM::tMOVgpr2gpr: {
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- !isARMLowRegister(SrcReg))
- // tSpill cannot take a high register operand.
- return false;
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg) &&
- !isARMLowRegister(DstReg))
- // tRestore cannot target a high register operand.
- return false;
- }
- return true;
- }
- }
-
- return false;
+void Thumb1InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ bool tDest = ARM::tGPRRegClass.contains(DestReg);
+ bool tSrc = ARM::tGPRRegClass.contains(SrcReg);
+ unsigned Opc = ARM::tMOVgpr2gpr;
+ if (tDest && tSrc)
+ Opc = ARM::tMOVr;
+ else if (tSrc)
+ Opc = ARM::tMOVtgpr2gpr;
+ else if (tDest)
+ Opc = ARM::tMOVgpr2tgpr;
+
+ BuildMI(MBB, I, DL, get(Opc), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ assert(ARM::GPRRegClass.contains(DestReg, SrcReg) &&
+ "Thumb1 can only copy GPR registers");
}
void Thumb1InstrInfo::
@@ -175,10 +135,10 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
isKill = false;
}
- if (isKill) {
+ if (isKill)
MBB.addLiveIn(Reg);
- MIB.addReg(Reg, RegState::Kill);
- }
+
+ MIB.addReg(Reg, getKillRegState(isKill));
}
return true;
}
@@ -221,46 +181,3 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
return true;
}
-
-MachineInstr *Thumb1InstrInfo::
-foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops, int FI) const {
- if (Ops.size() != 1) return NULL;
-
- unsigned OpNum = Ops[0];
- unsigned Opc = MI->getOpcode();
- MachineInstr *NewMI = NULL;
- switch (Opc) {
- default: break;
- case ARM::tMOVr:
- case ARM::tMOVtgpr2gpr:
- case ARM::tMOVgpr2tgpr:
- case ARM::tMOVgpr2gpr: {
- if (OpNum == 0) { // move -> store
- unsigned SrcReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
- !isARMLowRegister(SrcReg))
- // tSpill cannot take a high register operand.
- break;
- NewMI = AddDefaultPred(BuildMI(MF, MI->getDebugLoc(), get(ARM::tSpill))
- .addReg(SrcReg, getKillRegState(isKill))
- .addFrameIndex(FI).addImm(0));
- } else { // move -> load
- unsigned DstReg = MI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(DstReg) &&
- !isARMLowRegister(DstReg))
- // tRestore cannot target a high register operand.
- break;
- bool isDead = MI->getOperand(0).isDead();
- NewMI = AddDefaultPred(BuildMI(MF, MI->getDebugLoc(), get(ARM::tRestore))
- .addReg(DstReg,
- RegState::Define | getDeadRegState(isDead))
- .addFrameIndex(FI).addImm(0));
- }
- break;
- }
- }
-
- return NewMI;
-}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h
index c937296..555135a 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb1InstrInfo.h
@@ -46,12 +46,10 @@ public:
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const;
- bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
@@ -64,20 +62,6 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const;
-
- MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
-
- MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- return 0;
- }
};
}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
index 2f635fe..39b70b4 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.cpp
@@ -68,21 +68,6 @@ void Thumb1RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
.addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg);
}
-const TargetRegisterClass*
-Thumb1RegisterInfo::getPhysicalRegisterRegClass(unsigned Reg, EVT VT) const {
- if (isARMLowRegister(Reg))
- return ARM::tGPRRegisterClass;
- switch (Reg) {
- default:
- break;
- case ARM::R8: case ARM::R9: case ARM::R10: case ARM::R11:
- case ARM::R12: case ARM::SP: case ARM::LR: case ARM::PC:
- return ARM::GPRRegisterClass;
- }
-
- return TargetRegisterInfo::getPhysicalRegisterRegClass(Reg, VT);
-}
-
bool Thumb1RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
const MachineFrameInfo *FFI = MF.getFrameInfo();
unsigned CFSize = FFI->getMaxCallFrameSize();
@@ -410,6 +395,8 @@ Thumb1RegisterInfo::saveScavengerRegister(MachineBasicBlock &MBB,
// before that instead and adjust the UseMI.
bool done = false;
for (MachineBasicBlock::iterator II = I; !done && II != UseMI ; ++II) {
+ if (II->isDebugValue())
+ continue;
// If this instruction affects R12, adjust our restore point.
for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = II->getOperand(i);
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
index 4eca367..9a0308af 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb1RegisterInfo.h
@@ -38,9 +38,6 @@ public:
unsigned PredReg = 0) const;
/// Code Generation virtual methods...
- const TargetRegisterClass *
- getPhysicalRegisterRegClass(unsigned Reg, EVT VT = MVT::Other) const;
-
bool hasReservedCallFrame(MachineFunction &MF) const;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
@@ -51,7 +48,8 @@ public:
// could not be handled directly in MI.
int rewriteFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int Offset,
- unsigned MOVOpc, unsigned ADDriOpc, unsigned SUBriOpc) const;
+ unsigned MOVOpc, unsigned ADDriOpc,
+ unsigned SUBriOpc) const;
bool saveScavengerRegister(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.cpp b/contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.cpp
new file mode 100644
index 0000000..172908d
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.cpp
@@ -0,0 +1,53 @@
+//===-- Thumb2HazardRecognizer.cpp - Thumb2 postra hazard recognizer ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ARM.h"
+#include "Thumb2HazardRecognizer.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+using namespace llvm;
+
+ScheduleHazardRecognizer::HazardType
+Thumb2HazardRecognizer::getHazardType(SUnit *SU) {
+ if (ITBlockSize) {
+ MachineInstr *MI = SU->getInstr();
+ if (!MI->isDebugValue() && MI != ITBlockMIs[ITBlockSize-1])
+ return Hazard;
+ }
+
+ return PostRAHazardRecognizer::getHazardType(SU);
+}
+
+void Thumb2HazardRecognizer::Reset() {
+ ITBlockSize = 0;
+ PostRAHazardRecognizer::Reset();
+}
+
+void Thumb2HazardRecognizer::EmitInstruction(SUnit *SU) {
+ MachineInstr *MI = SU->getInstr();
+ unsigned Opcode = MI->getOpcode();
+ if (ITBlockSize) {
+ --ITBlockSize;
+ } else if (Opcode == ARM::t2IT) {
+ unsigned Mask = MI->getOperand(1).getImm();
+ unsigned NumTZ = CountTrailingZeros_32(Mask);
+ assert(NumTZ <= 3 && "Invalid IT mask!");
+ ITBlockSize = 4 - NumTZ;
+ MachineBasicBlock::iterator I = MI;
+ for (unsigned i = 0; i < ITBlockSize; ++i) {
+ // Advance to the next instruction, skipping any dbg_value instructions.
+ do {
+ ++I;
+ } while (I->isDebugValue());
+ ITBlockMIs[ITBlockSize-1-i] = &*I;
+ }
+ }
+
+ PostRAHazardRecognizer::EmitInstruction(SU);
+}
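
The EmitInstruction logic above derives the IT block length from the position of the lowest set bit in the t2IT mask operand. A self-contained sketch of that computation (using the compiler builtin in place of CountTrailingZeros_32):

  #include <cassert>

  // Sketch: t2IT's mask terminates the block at its lowest set bit,
  // so 0b1000 means one predicated instruction and a mask with bit 0
  // set means the full four.
  static unsigned itBlockSize(unsigned Mask) {
    unsigned NumTZ = __builtin_ctz(Mask); // CountTrailingZeros_32
    assert(NumTZ <= 3 && "Invalid IT mask!");
    return 4 - NumTZ;
  }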
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.h b/contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.h
new file mode 100644
index 0000000..4726658
--- /dev/null
+++ b/contrib/llvm/lib/Target/ARM/Thumb2HazardRecognizer.h
@@ -0,0 +1,40 @@
+//===-- Thumb2HazardRecognizer.h - Thumb2 Hazard Recognizers ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines hazard recognizers for scheduling Thumb2 functions on
+// ARM processors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef THUMB2HAZARDRECOGNIZER_H
+#define THUMB2HAZARDRECOGNIZER_H
+
+#include "llvm/CodeGen/PostRAHazardRecognizer.h"
+
+namespace llvm {
+
+class MachineInstr;
+
+class Thumb2HazardRecognizer : public PostRAHazardRecognizer {
+ unsigned ITBlockSize; // No. of MIs in current IT block yet to be scheduled.
+ MachineInstr *ITBlockMIs[4];
+
+public:
+ Thumb2HazardRecognizer(const InstrItineraryData &ItinData) :
+ PostRAHazardRecognizer(ItinData) {}
+
+ virtual HazardType getHazardType(SUnit *SU);
+ virtual void Reset();
+ virtual void EmitInstruction(SUnit *SU);
+};
+
+
+} // end namespace llvm
+
+#endif // THUMB2HAZARDRECOGNIZER_H
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp b/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
index f36d4ef..cd15bbe 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -14,17 +14,23 @@
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
-STATISTIC(NumITs, "Number of IT blocks inserted");
+STATISTIC(NumITs, "Number of IT blocks inserted");
+STATISTIC(NumMovedInsts, "Number of predicated instructions moved");
namespace {
- struct Thumb2ITBlockPass : public MachineFunctionPass {
+ class Thumb2ITBlockPass : public MachineFunctionPass {
+ bool PreRegAlloc;
+
+ public:
static char ID;
Thumb2ITBlockPass() : MachineFunctionPass(&ID) {}
const Thumb2InstrInfo *TII;
+ const TargetRegisterInfo *TRI;
ARMFunctionInfo *AFI;
virtual bool runOnMachineFunction(MachineFunction &Fn);
@@ -34,61 +40,167 @@ namespace {
}
private:
- bool InsertITBlocks(MachineBasicBlock &MBB);
+ bool MoveCopyOutOfITBlock(MachineInstr *MI,
+ ARMCC::CondCodes CC, ARMCC::CondCodes OCC,
+ SmallSet<unsigned, 4> &Defs,
+ SmallSet<unsigned, 4> &Uses);
+ bool InsertITInstructions(MachineBasicBlock &MBB);
};
char Thumb2ITBlockPass::ID = 0;
}
-static ARMCC::CondCodes getPredicate(const MachineInstr *MI, unsigned &PredReg){
- unsigned Opc = MI->getOpcode();
- if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
- return ARMCC::AL;
- return llvm::getInstrPredicate(MI, PredReg);
+/// TrackDefUses - Tracking what registers are being defined and used by
+/// instructions in the IT block. This also tracks "dependencies", i.e. uses
+/// in the IT block that are defined before the IT instruction.
+static void TrackDefUses(MachineInstr *MI,
+ SmallSet<unsigned, 4> &Defs,
+ SmallSet<unsigned, 4> &Uses,
+ const TargetRegisterInfo *TRI) {
+ SmallVector<unsigned, 4> LocalDefs;
+ SmallVector<unsigned, 4> LocalUses;
+
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg || Reg == ARM::ITSTATE || Reg == ARM::SP)
+ continue;
+ if (MO.isUse())
+ LocalUses.push_back(Reg);
+ else
+ LocalDefs.push_back(Reg);
+ }
+
+ for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
+ unsigned Reg = LocalUses[i];
+ Uses.insert(Reg);
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg)
+ Uses.insert(*Subreg);
+ }
+
+ for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
+ unsigned Reg = LocalDefs[i];
+ Defs.insert(Reg);
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg)
+ Defs.insert(*Subreg);
+ if (Reg == ARM::CPSR)
+ continue;
+ }
+}
+
+bool
+Thumb2ITBlockPass::MoveCopyOutOfITBlock(MachineInstr *MI,
+ ARMCC::CondCodes CC, ARMCC::CondCodes OCC,
+ SmallSet<unsigned, 4> &Defs,
+ SmallSet<unsigned, 4> &Uses) {
+ unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
+ if (TII->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
+ assert(SrcSubIdx == 0 && DstSubIdx == 0 &&
+ "Sub-register indices still around?");
+ // llvm models selects as two-address instructions. That means a copy
+ // is inserted before a t2MOVccr, etc. If the copy is scheduled in
+ // between selects we would end up creating multiple IT blocks.
+
+ // First check if it's safe to move it.
+ if (Uses.count(DstReg) || Defs.count(SrcReg))
+ return false;
+
+ // Then peek at the next instruction to see if it's predicated on CC or OCC.
+ // If not, then there is nothing to be gained by moving the copy.
+ MachineBasicBlock::iterator I = MI; ++I;
+ MachineBasicBlock::iterator E = MI->getParent()->end();
+ while (I != E && I->isDebugValue())
+ ++I;
+ if (I != E) {
+ unsigned NPredReg = 0;
+ ARMCC::CondCodes NCC = llvm::getITInstrPredicate(I, NPredReg);
+ if (NCC == CC || NCC == OCC)
+ return true;
+ }
+ }
+ return false;
}
-bool Thumb2ITBlockPass::InsertITBlocks(MachineBasicBlock &MBB) {
+bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
bool Modified = false;
+ SmallSet<unsigned, 4> Defs;
+ SmallSet<unsigned, 4> Uses;
MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
while (MBBI != E) {
MachineInstr *MI = &*MBBI;
DebugLoc dl = MI->getDebugLoc();
unsigned PredReg = 0;
- ARMCC::CondCodes CC = getPredicate(MI, PredReg);
-
+ ARMCC::CondCodes CC = llvm::getITInstrPredicate(MI, PredReg);
if (CC == ARMCC::AL) {
++MBBI;
continue;
}
+ Defs.clear();
+ Uses.clear();
+ TrackDefUses(MI, Defs, Uses, TRI);
+
// Insert an IT instruction.
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(ARM::t2IT))
.addImm(CC);
+
+ // Add implicit use of ITSTATE to IT block instructions.
+ MI->addOperand(MachineOperand::CreateReg(ARM::ITSTATE, false/*isDef*/,
+ true/*isImp*/, false/*isKill*/));
+
+ MachineInstr *LastITMI = MI;
+ MachineBasicBlock::iterator InsertPos = MIB;
++MBBI;
- // Finalize IT mask.
+ // Form IT block.
ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);
unsigned Mask = 0, Pos = 3;
// Branches, including tricky ones like LDM_RET, need to end an IT
// block so check the instruction we just put in the block.
- while (MBBI != E && Pos &&
- (!MI->getDesc().isBranch() && !MI->getDesc().isReturn())) {
+ for (; MBBI != E && Pos &&
+ (!MI->getDesc().isBranch() && !MI->getDesc().isReturn()) ; ++MBBI) {
+ if (MBBI->isDebugValue())
+ continue;
+
MachineInstr *NMI = &*MBBI;
MI = NMI;
- DebugLoc ndl = NMI->getDebugLoc();
+
unsigned NPredReg = 0;
- ARMCC::CondCodes NCC = getPredicate(NMI, NPredReg);
- if (NCC == CC || NCC == OCC)
+ ARMCC::CondCodes NCC = llvm::getITInstrPredicate(NMI, NPredReg);
+ if (NCC == CC || NCC == OCC) {
Mask |= (NCC & 1) << Pos;
- else
+ // Add implicit use of ITSTATE.
+ NMI->addOperand(MachineOperand::CreateReg(ARM::ITSTATE, false/*isDef*/,
+ true/*isImp*/, false/*isKill*/));
+ LastITMI = NMI;
+ } else {
+ if (NCC == ARMCC::AL &&
+ MoveCopyOutOfITBlock(NMI, CC, OCC, Defs, Uses)) {
+ --MBBI;
+ MBB.remove(NMI);
+ MBB.insert(InsertPos, NMI);
+ ++NumMovedInsts;
+ continue;
+ }
break;
+ }
+ TrackDefUses(NMI, Defs, Uses, TRI);
--Pos;
- ++MBBI;
}
+
+ // Finalize IT mask.
Mask |= (1 << Pos);
// Tag along (firstcond[0] << 4) with the mask.
Mask |= (CC & 1) << 4;
MIB.addImm(Mask);
+
+ // Last instruction in IT block kills ITSTATE.
+ LastITMI->findRegisterUseOperand(ARM::ITSTATE)->setIsKill();
+
Modified = true;
++NumITs;
}
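
The mask built in this hunk packs then/else parity bits downward from position 3, sets a terminator bit at the last used position, and tags firstcond[0] along in bit 4. A standalone sketch of the same encoding, with slot letters standing in for the NCC == CC / NCC == OCC tests (illustrative only; the pass derives slots from instruction predicates, not a string):

  static unsigned buildITMask(unsigned CC, const char *Extra, unsigned N) {
    unsigned Mask = 0, Pos = 3;
    for (unsigned i = 0; i < N && Pos; ++i, --Pos) {
      unsigned Bit = (Extra[i] == 'T') ? (CC & 1) : ((CC & 1) ^ 1);
      Mask |= Bit << Pos;
    }
    Mask |= 1u << Pos;      // terminator
    Mask |= (CC & 1) << 4;  // tag along firstcond[0]
    return Mask;
  }
  // e.g. ITTE EQ (CC & 1 == 0, extra slots "TE"):
  //   buildITMask(0, "TE", 2) == 0b00110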
@@ -100,17 +212,21 @@ bool Thumb2ITBlockPass::runOnMachineFunction(MachineFunction &Fn) {
const TargetMachine &TM = Fn.getTarget();
AFI = Fn.getInfo<ARMFunctionInfo>();
TII = static_cast<const Thumb2InstrInfo*>(TM.getInstrInfo());
+ TRI = TM.getRegisterInfo();
if (!AFI->isThumbFunction())
return false;
bool Modified = false;
- for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
- ++MFI) {
+ for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E; ) {
MachineBasicBlock &MBB = *MFI;
- Modified |= InsertITBlocks(MBB);
+ ++MFI;
+ Modified |= InsertITInstructions(MBB);
}
+ if (Modified)
+ AFI->setHasITBlocks(true);
+
return Modified;
}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
index 531d5e9..ee51727 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -17,15 +17,27 @@
#include "ARMAddressingModes.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
+#include "Thumb2HazardRecognizer.h"
+#include "Thumb2InstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/ADT/SmallVector.h"
-#include "Thumb2InstrInfo.h"
+#include "llvm/Support/CommandLine.h"
using namespace llvm;
+static cl::opt<unsigned>
+IfCvtLimit("thumb2-ifcvt-limit", cl::Hidden,
+ cl::desc("Thumb2 if-conversion limit (default 3)"),
+ cl::init(3));
+
+static cl::opt<unsigned>
+IfCvtDiamondLimit("thumb2-ifcvt-diamond-limit", cl::Hidden,
+ cl::desc("Thumb2 diamond if-conversion limit (default 3)"),
+ cl::init(3));
+
Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
: ARMBaseInstrInfo(STI), RI(*this, STI) {
}
@@ -35,33 +47,99 @@ unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
return 0;
}
-bool
-Thumb2InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
- if (DestRC == ARM::GPRRegisterClass) {
- if (SrcRC == ARM::GPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVgpr2gpr), DestReg).addReg(SrcReg);
- return true;
- } else if (SrcRC == ARM::tGPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVtgpr2gpr), DestReg).addReg(SrcReg);
- return true;
- }
- } else if (DestRC == ARM::tGPRRegisterClass) {
- if (SrcRC == ARM::GPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVgpr2tgpr), DestReg).addReg(SrcReg);
- return true;
- } else if (SrcRC == ARM::tGPRRegisterClass) {
- BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg).addReg(SrcReg);
- return true;
+void
+Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+ MachineBasicBlock *NewDest) const {
+ MachineBasicBlock *MBB = Tail->getParent();
+ ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
+ if (!AFI->hasITBlocks()) {
+ TargetInstrInfoImpl::ReplaceTailWithBranchTo(Tail, NewDest);
+ return;
+ }
+
+ // If the first instruction of Tail is predicated, we may have to update
+ // the IT instruction.
+ unsigned PredReg = 0;
+ ARMCC::CondCodes CC = llvm::getInstrPredicate(Tail, PredReg);
+ MachineBasicBlock::iterator MBBI = Tail;
+ if (CC != ARMCC::AL)
+ // Expecting at least the t2IT instruction before it.
+ --MBBI;
+
+ // Actually replace the tail.
+ TargetInstrInfoImpl::ReplaceTailWithBranchTo(Tail, NewDest);
+
+ // Fix up IT.
+ if (CC != ARMCC::AL) {
+ MachineBasicBlock::iterator E = MBB->begin();
+ unsigned Count = 4; // At most 4 instructions in an IT block.
+ while (Count && MBBI != E) {
+ if (MBBI->isDebugValue()) {
+ --MBBI;
+ continue;
+ }
+ if (MBBI->getOpcode() == ARM::t2IT) {
+ unsigned Mask = MBBI->getOperand(1).getImm();
+ if (Count == 4)
+ MBBI->eraseFromParent();
+ else {
+ unsigned MaskOn = 1 << Count;
+ unsigned MaskOff = ~(MaskOn - 1);
+ MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
+ }
+ return;
+ }
+ --MBBI;
+ --Count;
}
+
+ // Ctrl flow can reach here if branch folding is run before IT block
+ // formation pass.
}
+}
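
When the replaced tail cuts into an IT block, the fix-up above rewrites the t2IT mask so the terminator moves up to bit Count and the bits for the removed slots are cleared. A worked sketch of the MaskOn/MaskOff arithmetic (helper name illustrative):

  static unsigned shortenITMask(unsigned Mask, unsigned Count) {
    unsigned MaskOn  = 1u << Count;   // new terminator position
    unsigned MaskOff = ~(MaskOn - 1); // clear bits of removed slots
    return (Mask & MaskOff) | MaskOn;
  }
  // e.g. ITTT EQ carries mask 0b00001; with two predicated
  // instructions left (Count == 2), shortenITMask(0b00001, 2)
  // yields 0b00100, i.e. ITT EQ.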
+
+bool
+Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) const {
+ unsigned PredReg = 0;
+ return llvm::getITInstrPredicate(MBBI, PredReg) == ARMCC::AL;
+}
+bool Thumb2InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
+ unsigned NumInstrs) const {
+ return NumInstrs && NumInstrs <= IfCvtLimit;
+}
+
+bool Thumb2InstrInfo::
+isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
+ MachineBasicBlock &FMBB, unsigned NumF) const {
+ // FIXME: Catch optimization such as:
+ // r0 = movne
+ // r0 = moveq
+ return NumT && NumF &&
+ NumT <= (IfCvtDiamondLimit) && NumF <= (IfCvtDiamondLimit);
+}
+
+void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
// Handle SPR, DPR, and QPR copies.
- return ARMBaseInstrInfo::copyRegToReg(MBB, I, DestReg, SrcReg, DestRC, SrcRC, DL);
+ if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
+ return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);
+
+ bool tDest = ARM::tGPRRegClass.contains(DestReg);
+ bool tSrc = ARM::tGPRRegClass.contains(SrcReg);
+ unsigned Opc = ARM::tMOVgpr2gpr;
+ if (tDest && tSrc)
+ Opc = ARM::tMOVr;
+ else if (tSrc)
+ Opc = ARM::tMOVtgpr2gpr;
+ else if (tDest)
+ Opc = ARM::tMOVgpr2tgpr;
+
+ BuildMI(MBB, I, DL, get(Opc), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
}
void Thumb2InstrInfo::
@@ -69,7 +147,8 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass) {
+ if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass ||
+ RC == ARM::tcGPRRegisterClass) {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
@@ -94,7 +173,8 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DestReg, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass) {
+ if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass ||
+ RC == ARM::tcGPRRegisterClass) {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
@@ -113,6 +193,11 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}
+ScheduleHazardRecognizer *Thumb2InstrInfo::
+CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {
+ return (ScheduleHazardRecognizer *)new Thumb2HazardRecognizer(II);
+}
+
void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI, DebugLoc dl,
unsigned DestReg, unsigned BaseReg, int NumBytes,
@@ -131,14 +216,14 @@ void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
// Use a movw to materialize the 16-bit constant.
BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
.addImm(NumBytes)
- .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
+ .addImm((unsigned)Pred).addReg(PredReg);
Fits = true;
} else if ((NumBytes & 0xffff) == 0) {
// Use a movt to materialize the 32-bit constant.
BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
.addReg(DestReg)
.addImm(NumBytes >> 16)
- .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
+ .addImm((unsigned)Pred).addReg(PredReg);
Fits = true;
}
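
The two cases above prefer a single 16-bit-immediate move over a constant-pool load: movw zero-extends a 16-bit immediate, while movt rewrites only the upper half, so it applies when the low half of the constant is zero. A sketch of the selection predicate, with an illustrative helper name:

  // Returns true if NumBytes can be materialized in one t2 move.
  static bool fitsOneMove(unsigned NumBytes, bool &UseMovt) {
    if (NumBytes <= 0xffff)       { UseMovt = false; return true; } // movw
    if ((NumBytes & 0xffff) == 0) { UseMovt = true;  return true; } // movt
    return false; // fall back to the existing expansion
  }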
@@ -502,3 +587,54 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
Offset = (isSub) ? -Offset : Offset;
return Offset == 0;
}
+
+/// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
+/// two-address instruction inserted by the two-address pass.
+void
+Thumb2InstrInfo::scheduleTwoAddrSource(MachineInstr *SrcMI,
+ MachineInstr *UseMI,
+ const TargetRegisterInfo &TRI) const {
+ if (SrcMI->getOpcode() != ARM::tMOVgpr2gpr ||
+ SrcMI->getOperand(1).isKill())
+ return;
+
+ unsigned PredReg = 0;
+ ARMCC::CondCodes CC = llvm::getInstrPredicate(UseMI, PredReg);
+ if (CC == ARMCC::AL || PredReg != ARM::CPSR)
+ return;
+
+ // Schedule the copy so it doesn't come between previous instructions
+ // and UseMI which can form an IT block.
+ unsigned SrcReg = SrcMI->getOperand(1).getReg();
+ ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);
+ MachineBasicBlock *MBB = UseMI->getParent();
+ MachineBasicBlock::iterator MBBI = SrcMI;
+ unsigned NumInsts = 0;
+ while (--MBBI != MBB->begin()) {
+ if (MBBI->isDebugValue())
+ continue;
+
+ MachineInstr *NMI = &*MBBI;
+ ARMCC::CondCodes NCC = llvm::getInstrPredicate(NMI, PredReg);
+ if (!(NCC == CC || NCC == OCC) ||
+ NMI->modifiesRegister(SrcReg, &TRI) ||
+ NMI->definesRegister(ARM::CPSR))
+ break;
+ if (++NumInsts == 4)
+ // Too many in a row!
+ return;
+ }
+
+ if (NumInsts) {
+ MBB->remove(SrcMI);
+ MBB->insert(++MBBI, SrcMI);
+ }
+}
+
+ARMCC::CondCodes
+llvm::getITInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
+ unsigned Opc = MI->getOpcode();
+ if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
+ return ARMCC::AL;
+ return llvm::getInstrPredicate(MI, PredReg);
+}
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
index 2948770..3a9f8b1 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
+++ b/contrib/llvm/lib/Target/ARM/Thumb2InstrInfo.h
@@ -20,7 +20,8 @@
#include "Thumb2RegisterInfo.h"
namespace llvm {
- class ARMSubtarget;
+class ARMSubtarget;
+class ScheduleHazardRecognizer;
class Thumb2InstrInfo : public ARMBaseInstrInfo {
Thumb2RegisterInfo RI;
@@ -31,12 +32,21 @@ public:
// if there is not such an opcode.
unsigned getUnindexedOpcode(unsigned Opc) const;
- bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+ MachineBasicBlock *NewDest) const;
+
+ bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) const;
+
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const;
+
+ bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTInstrs,
+ MachineBasicBlock &FMBB, unsigned NumFInstrs) const;
+
+ void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -50,12 +60,27 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
+ /// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
+ /// two-address instruction inserted by the two-address pass.
+ void scheduleTwoAddrSource(MachineInstr *SrcMI, MachineInstr *UseMI,
+ const TargetRegisterInfo &TRI) const;
+
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
const Thumb2RegisterInfo &getRegisterInfo() const { return RI; }
+
+ ScheduleHazardRecognizer *
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const;
};
+
+/// getITInstrPredicate - Valid only in Thumb2 mode. This function is identical
+/// to llvm::getInstrPredicate except that it returns AL for conditional branch
+/// instructions, which are "predicated" but are not in IT blocks.
+ARMCC::CondCodes getITInstrPredicate(const MachineInstr *MI, unsigned &PredReg);
+
}
#endif // THUMB2INSTRUCTIONINFO_H
diff --git a/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index 8fe2e42..ba392f3 100644
--- a/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/contrib/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -451,11 +451,18 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
return false;
- const TargetInstrDesc &TID = MI->getDesc();
unsigned Reg0 = MI->getOperand(0).getReg();
unsigned Reg1 = MI->getOperand(1).getReg();
- if (Reg0 != Reg1)
- return false;
+ if (Reg0 != Reg1) {
+ // Try to commute the operands to make it a 2-address instruction.
+ unsigned CommOpIdx1, CommOpIdx2;
+ if (!TII->findCommutedOpIndices(MI, CommOpIdx1, CommOpIdx2) ||
+ CommOpIdx1 != 1 || MI->getOperand(CommOpIdx2).getReg() != Reg0)
+ return false;
+ MachineInstr *CommutedMI = TII->commuteInstruction(MI);
+ if (!CommutedMI)
+ return false;
+ }
if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
return false;
if (Entry.Imm2Limit) {
@@ -484,6 +491,7 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
bool HasCC = false;
bool CCDead = false;
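+  // MI may have been commuted above, so only read its descriptor now.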
+ const TargetInstrDesc &TID = MI->getDesc();
if (TID.hasOptionalDef()) {
unsigned NumOps = TID.getNumOperands();
HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
@@ -689,7 +697,7 @@ bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
goto ProcessNext;
}
- // Try to transform ro a 16-bit non-two-address instruction.
+ // Try to transform to a 16-bit non-two-address instruction.
if (Entry.NarrowOpc1 && ReduceToNarrow(MBB, MI, Entry, LiveCPSR)) {
Modified = true;
MachineBasicBlock::iterator I = prior(NextMII);
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp b/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp
index 1d85f12..ea78bf3 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.cpp
@@ -224,6 +224,7 @@ AlphaTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -251,7 +252,7 @@ AlphaTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
// Promote the value if needed.
switch (VA.getLocInfo()) {
@@ -425,7 +426,7 @@ AlphaTargetLowering::LowerFormalArguments(SDValue Chain,
}
} else { //more args
// Create the frame index object for this incoming parameter...
- int FI = MFI->CreateFixedObject(8, 8 * (ArgNo - 6), true, false);
+ int FI = MFI->CreateFixedObject(8, 8 * (ArgNo - 6), true);
// Create the SelectionDAG nodes corresponding to a load
//from this parameter
@@ -444,7 +445,7 @@ AlphaTargetLowering::LowerFormalArguments(SDValue Chain,
if (TargetRegisterInfo::isPhysicalRegister(args_int[i]))
args_int[i] = AddLiveIn(MF, args_int[i], &Alpha::GPRCRegClass);
SDValue argt = DAG.getCopyFromReg(Chain, dl, args_int[i], MVT::i64);
- int FI = MFI->CreateFixedObject(8, -8 * (6 - i), true, false);
+ int FI = MFI->CreateFixedObject(8, -8 * (6 - i), true);
if (i == 0) FuncInfo->setVarArgsBase(FI);
SDValue SDFI = DAG.getFrameIndex(FI, MVT::i64);
LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, NULL, 0,
@@ -453,7 +454,7 @@ AlphaTargetLowering::LowerFormalArguments(SDValue Chain,
if (TargetRegisterInfo::isPhysicalRegister(args_float[i]))
args_float[i] = AddLiveIn(MF, args_float[i], &Alpha::F8RCRegClass);
argt = DAG.getCopyFromReg(Chain, dl, args_float[i], MVT::f64);
- FI = MFI->CreateFixedObject(8, - 8 * (12 - i), true, false);
+ FI = MFI->CreateFixedObject(8, - 8 * (12 - i), true);
SDFI = DAG.getFrameIndex(FI, MVT::i64);
LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, NULL, 0,
false, false, 0));
@@ -470,6 +471,7 @@ SDValue
AlphaTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
SDValue Copy = DAG.getCopyToReg(Chain, dl, Alpha::R26,
@@ -483,7 +485,7 @@ AlphaTargetLowering::LowerReturn(SDValue Chain,
break;
//return SDValue(); // ret void is legal
case 1: {
- EVT ArgVT = Outs[0].Val.getValueType();
+ EVT ArgVT = Outs[0].VT;
unsigned ArgReg;
if (ArgVT.isInteger())
ArgReg = Alpha::R0;
@@ -492,13 +494,13 @@ AlphaTargetLowering::LowerReturn(SDValue Chain,
ArgReg = Alpha::F0;
}
Copy = DAG.getCopyToReg(Copy, dl, ArgReg,
- Outs[0].Val, Copy.getValue(1));
+ OutVals[0], Copy.getValue(1));
if (DAG.getMachineFunction().getRegInfo().liveout_empty())
DAG.getMachineFunction().getRegInfo().addLiveOut(ArgReg);
break;
}
case 2: {
- EVT ArgVT = Outs[0].Val.getValueType();
+ EVT ArgVT = Outs[0].VT;
unsigned ArgReg1, ArgReg2;
if (ArgVT.isInteger()) {
ArgReg1 = Alpha::R0;
@@ -509,13 +511,13 @@ AlphaTargetLowering::LowerReturn(SDValue Chain,
ArgReg2 = Alpha::F1;
}
Copy = DAG.getCopyToReg(Copy, dl, ArgReg1,
- Outs[0].Val, Copy.getValue(1));
+ OutVals[0], Copy.getValue(1));
if (std::find(DAG.getMachineFunction().getRegInfo().liveout_begin(),
DAG.getMachineFunction().getRegInfo().liveout_end(), ArgReg1)
== DAG.getMachineFunction().getRegInfo().liveout_end())
DAG.getMachineFunction().getRegInfo().addLiveOut(ArgReg1);
Copy = DAG.getCopyToReg(Copy, dl, ArgReg2,
- Outs[1].Val, Copy.getValue(1));
+ OutVals[1], Copy.getValue(1));
if (std::find(DAG.getMachineFunction().getRegInfo().liveout_begin(),
DAG.getMachineFunction().getRegInfo().liveout_end(), ArgReg2)
== DAG.getMachineFunction().getRegInfo().liveout_end())
@@ -539,7 +541,7 @@ void AlphaTargetLowering::LowerVAARG(SDNode *N, SDValue &Chain,
false, false, 0);
SDValue Tmp = DAG.getNode(ISD::ADD, dl, MVT::i64, VAListP,
DAG.getConstant(8, MVT::i64));
- SDValue Offset = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Base.getValue(1),
+ SDValue Offset = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, dl, Base.getValue(1),
Tmp, NULL, 0, MVT::i32, false, false, 0);
DataPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Base, Offset);
if (N->getValueType(0).isFloatingPoint())
@@ -643,10 +645,12 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
case ISD::GlobalAddress: {
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GSDN->getGlobal();
- SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i64, GSDN->getOffset());
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i64,
+ GSDN->getOffset());
// FIXME there isn't really any debug info here
- // if (!GV->hasWeakLinkage() && !GV->isDeclaration() && !GV->hasLinkOnceLinkage()) {
+ // if (!GV->hasWeakLinkage() && !GV->isDeclaration()
+ // && !GV->hasLinkOnceLinkage()) {
if (GV->hasLocalLinkage()) {
SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, MVT::i64, GA,
DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
@@ -702,7 +706,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
SDValue Result;
if (Op.getValueType() == MVT::i32)
- Result = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Chain, DataPtr,
+ Result = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, dl, Chain, DataPtr,
NULL, 0, MVT::i32, false, false, 0);
else
Result = DAG.getLoad(Op.getValueType(), dl, Chain, DataPtr, NULL, 0,
@@ -722,7 +726,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
false, false, 0);
SDValue NP = DAG.getNode(ISD::ADD, dl, MVT::i64, SrcP,
DAG.getConstant(8, MVT::i64));
- Val = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Result,
+ Val = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, dl, Result,
NP, NULL,0, MVT::i32, false, false, 0);
SDValue NPD = DAG.getNode(ISD::ADD, dl, MVT::i64, DestP,
DAG.getConstant(8, MVT::i64));
@@ -863,7 +867,10 @@ AlphaTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *llscMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
- sinkMBB->transferSuccessors(thisMBB);
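+  // Move everything after MI into sinkMBB and rewire the successor edges,
+  // updating any PHI nodes that referred to thisMBB.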
+ sinkMBB->splice(sinkMBB->begin(), thisMBB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ thisMBB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
F->insert(It, llscMBB);
F->insert(It, sinkMBB);
@@ -912,7 +919,7 @@ AlphaTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
thisMBB->addSuccessor(llscMBB);
llscMBB->addSuccessor(llscMBB);
llscMBB->addSuccessor(sinkMBB);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return sinkMBB;
}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h b/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h
index 7ee823a..46e0c7d 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h
+++ b/contrib/llvm/lib/Target/Alpha/AlphaISelLowering.h
@@ -121,6 +121,7 @@ namespace llvm {
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -129,6 +130,7 @@ namespace llvm {
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
};
}
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaInstrFormats.td b/contrib/llvm/lib/Target/Alpha/AlphaInstrFormats.td
index d984556..6f4ebf2 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaInstrFormats.td
+++ b/contrib/llvm/lib/Target/Alpha/AlphaInstrFormats.td
@@ -182,7 +182,7 @@ class OForm4<bits<6> opcode, bits<7> fun, string asmstr, list<dag> pattern, Inst
bits<5> Rb;
bits<7> Function = fun;
-// let isTwoAddress = 1;
+// let Constraints = "$RFALSE = $RDEST";
let Inst{25-21} = Ra;
let Inst{20-16} = Rb;
let Inst{15-13} = 0;
@@ -223,7 +223,7 @@ class OForm4L<bits<6> opcode, bits<7> fun, string asmstr, list<dag> pattern, Ins
bits<8> LIT;
bits<7> Function = fun;
-// let isTwoAddress = 1;
+// let Constraints = "$RFALSE = $RDEST";
let Inst{25-21} = Ra;
let Inst{20-13} = LIT;
let Inst{12} = 1;
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.cpp b/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.cpp
index 3aba363..ad625a2 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.cpp
@@ -110,9 +110,8 @@ static bool isAlphaIntCondCode(unsigned Opcode) {
unsigned AlphaInstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc argument
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
"Alpha branch conditions have two components!");
@@ -120,58 +119,47 @@ unsigned AlphaInstrInfo::InsertBranch(MachineBasicBlock &MBB,
// One-way branch.
if (FBB == 0) {
if (Cond.empty()) // Unconditional branch
- BuildMI(&MBB, dl, get(Alpha::BR)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(Alpha::BR)).addMBB(TBB);
else // Conditional branch
if (isAlphaIntCondCode(Cond[0].getImm()))
- BuildMI(&MBB, dl, get(Alpha::COND_BRANCH_I))
+ BuildMI(&MBB, DL, get(Alpha::COND_BRANCH_I))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
else
- BuildMI(&MBB, dl, get(Alpha::COND_BRANCH_F))
+ BuildMI(&MBB, DL, get(Alpha::COND_BRANCH_F))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
return 1;
}
// Two-way Conditional Branch.
if (isAlphaIntCondCode(Cond[0].getImm()))
- BuildMI(&MBB, dl, get(Alpha::COND_BRANCH_I))
+ BuildMI(&MBB, DL, get(Alpha::COND_BRANCH_I))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
else
- BuildMI(&MBB, dl, get(Alpha::COND_BRANCH_F))
+ BuildMI(&MBB, DL, get(Alpha::COND_BRANCH_F))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
- BuildMI(&MBB, dl, get(Alpha::BR)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(Alpha::BR)).addMBB(FBB);
return 2;
}
-bool AlphaInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
- //cerr << "copyRegToReg " << DestReg << " <- " << SrcReg << "\n";
- if (DestRC != SrcRC) {
- // Not yet supported!
- return false;
- }
-
- if (DestRC == Alpha::GPRCRegisterClass) {
+void AlphaInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
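+  // Alpha has no plain register-to-register move: GPR copies use BIS (logical
+  // or) of the source with itself, and FP copies use CPYS (copy sign) likewise.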
+ if (Alpha::GPRCRegClass.contains(DestReg, SrcReg)) {
BuildMI(MBB, MI, DL, get(Alpha::BISr), DestReg)
.addReg(SrcReg)
- .addReg(SrcReg);
- } else if (DestRC == Alpha::F4RCRegisterClass) {
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else if (Alpha::F4RCRegClass.contains(DestReg, SrcReg)) {
BuildMI(MBB, MI, DL, get(Alpha::CPYSS), DestReg)
.addReg(SrcReg)
- .addReg(SrcReg);
- } else if (DestRC == Alpha::F8RCRegisterClass) {
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else if (Alpha::F8RCRegClass.contains(DestReg, SrcReg)) {
BuildMI(MBB, MI, DL, get(Alpha::CPYST), DestReg)
.addReg(SrcReg)
- .addReg(SrcReg);
+ .addReg(SrcReg, getKillRegState(KillSrc));
} else {
- // Attempt to copy register that is not GPR or FPR
- return false;
+ llvm_unreachable("Attempt to copy register that is not GPR or FPR");
}
-
- return true;
}
void
@@ -227,51 +215,6 @@ AlphaInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
llvm_unreachable("Unhandled register class");
}
-MachineInstr *AlphaInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const {
- if (Ops.size() != 1) return NULL;
-
- // Make sure this is a reg-reg copy.
- unsigned Opc = MI->getOpcode();
-
- MachineInstr *NewMI = NULL;
- switch(Opc) {
- default:
- break;
- case Alpha::BISr:
- case Alpha::CPYSS:
- case Alpha::CPYST:
- if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
- if (Ops[0] == 0) { // move -> store
- unsigned InReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- Opc = (Opc == Alpha::BISr) ? Alpha::STQ :
- ((Opc == Alpha::CPYSS) ? Alpha::STS : Alpha::STT);
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addReg(InReg, getKillRegState(isKill) | getUndefRegState(isUndef))
- .addFrameIndex(FrameIndex)
- .addReg(Alpha::F31);
- } else { // load -> move
- unsigned OutReg = MI->getOperand(0).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- Opc = (Opc == Alpha::BISr) ? Alpha::LDQ :
- ((Opc == Alpha::CPYSS) ? Alpha::LDS : Alpha::LDT);
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
- .addReg(OutReg, RegState::Define | getDeadRegState(isDead) |
- getUndefRegState(isUndef))
- .addFrameIndex(FrameIndex)
- .addReg(Alpha::F31);
- }
- }
- break;
- }
- return NewMI;
-}
-
static unsigned AlphaRevCondCode(unsigned Opcode) {
switch (Opcode) {
case Alpha::BEQ: return Alpha::BNE;
@@ -428,11 +371,8 @@ unsigned AlphaInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
GlobalBaseReg = RegInfo.createVirtualRegister(&Alpha::GPRCRegClass);
- bool Ok = TII->copyRegToReg(FirstMBB, MBBI, GlobalBaseReg, Alpha::R29,
- &Alpha::GPRCRegClass, &Alpha::GPRCRegClass,
- DebugLoc());
- assert(Ok && "Couldn't assign to global base register!");
- Ok = Ok; // Silence warning when assertions are turned off.
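+  // A target-independent COPY is always legal, so no success check is needed.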
+ BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),
+ GlobalBaseReg).addReg(Alpha::R29);
RegInfo.addLiveIn(Alpha::R29);
AlphaFI->setGlobalBaseReg(GlobalBaseReg);
@@ -456,11 +396,8 @@ unsigned AlphaInstrInfo::getGlobalRetAddr(MachineFunction *MF) const {
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
GlobalRetAddr = RegInfo.createVirtualRegister(&Alpha::GPRCRegClass);
- bool Ok = TII->copyRegToReg(FirstMBB, MBBI, GlobalRetAddr, Alpha::R26,
- &Alpha::GPRCRegClass, &Alpha::GPRCRegClass,
- DebugLoc());
- assert(Ok && "Couldn't assign to global return address register!");
- Ok = Ok; // Silence warning when assertions are turned off.
+ BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),
+ GlobalRetAddr).addReg(Alpha::R26);
RegInfo.addLiveIn(Alpha::R26);
AlphaFI->setGlobalRetAddr(GlobalRetAddr);
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.h b/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.h
index 7d7365b..e20e832 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.h
+++ b/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.h
@@ -42,14 +42,13 @@ public:
int &FrameIndex) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
@@ -62,18 +61,6 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- return 0;
- }
-
bool AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td b/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td
index a47a29b..92de78a 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td
+++ b/contrib/llvm/lib/Target/Alpha/AlphaInstrInfo.td
@@ -680,18 +680,32 @@ def CPYSNSt : FPForm<0x17, 0x021, "cpysn $RA,$RB,$RC",
}
//conditional moves, floats
-let OutOperandList = (outs F4RC:$RDEST), InOperandList = (ins F4RC:$RFALSE, F4RC:$RTRUE, F8RC:$RCOND),
- isTwoAddress = 1 in {
-def FCMOVEQS : FPForm<0x17, 0x02A, "fcmoveq $RCOND,$RTRUE,$RDEST",[], s_fcmov>; //FCMOVE if = zero
-def FCMOVGES : FPForm<0x17, 0x02D, "fcmovge $RCOND,$RTRUE,$RDEST",[], s_fcmov>; //FCMOVE if >= zero
-def FCMOVGTS : FPForm<0x17, 0x02F, "fcmovgt $RCOND,$RTRUE,$RDEST",[], s_fcmov>; //FCMOVE if > zero
-def FCMOVLES : FPForm<0x17, 0x02E, "fcmovle $RCOND,$RTRUE,$RDEST",[], s_fcmov>; //FCMOVE if <= zero
-def FCMOVLTS : FPForm<0x17, 0x02C, "fcmovlt $RCOND,$RTRUE,$RDEST",[], s_fcmov>; // FCMOVE if < zero
-def FCMOVNES : FPForm<0x17, 0x02B, "fcmovne $RCOND,$RTRUE,$RDEST",[], s_fcmov>; //FCMOVE if != zero
+let OutOperandList = (outs F4RC:$RDEST),
+ InOperandList = (ins F4RC:$RFALSE, F4RC:$RTRUE, F8RC:$RCOND),
+ Constraints = "$RTRUE = $RDEST" in {
+def FCMOVEQS : FPForm<0x17, 0x02A,
+ "fcmoveq $RCOND,$RTRUE,$RDEST",
+ [], s_fcmov>; //FCMOVE if = zero
+def FCMOVGES : FPForm<0x17, 0x02D,
+ "fcmovge $RCOND,$RTRUE,$RDEST",
+ [], s_fcmov>; //FCMOVE if >= zero
+def FCMOVGTS : FPForm<0x17, 0x02F,
+ "fcmovgt $RCOND,$RTRUE,$RDEST",
+ [], s_fcmov>; //FCMOVE if > zero
+def FCMOVLES : FPForm<0x17, 0x02E,
+ "fcmovle $RCOND,$RTRUE,$RDEST",
+ [], s_fcmov>; //FCMOVE if <= zero
+def FCMOVLTS : FPForm<0x17, 0x02C,
+ "fcmovlt $RCOND,$RTRUE,$RDEST",
+ [], s_fcmov>; // FCMOVE if < zero
+def FCMOVNES : FPForm<0x17, 0x02B,
+ "fcmovne $RCOND,$RTRUE,$RDEST",
+ [], s_fcmov>; //FCMOVE if != zero
}
//conditional moves, doubles
-let OutOperandList = (outs F8RC:$RDEST), InOperandList = (ins F8RC:$RFALSE, F8RC:$RTRUE, F8RC:$RCOND),
- isTwoAddress = 1 in {
+let OutOperandList = (outs F8RC:$RDEST),
+ InOperandList = (ins F8RC:$RFALSE, F8RC:$RTRUE, F8RC:$RCOND),
+ Constraints = "$RTRUE = $RDEST" in {
def FCMOVEQT : FPForm<0x17, 0x02A, "fcmoveq $RCOND,$RTRUE,$RDEST", [], s_fcmov>;
def FCMOVGET : FPForm<0x17, 0x02D, "fcmovge $RCOND,$RTRUE,$RDEST", [], s_fcmov>;
def FCMOVGTT : FPForm<0x17, 0x02F, "fcmovgt $RCOND,$RTRUE,$RDEST", [], s_fcmov>;
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaMCAsmInfo.cpp b/contrib/llvm/lib/Target/Alpha/AlphaMCAsmInfo.cpp
index c67c6a2..a35e884 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/Alpha/AlphaMCAsmInfo.cpp
@@ -14,7 +14,7 @@
#include "AlphaMCAsmInfo.h"
using namespace llvm;
-AlphaMCAsmInfo::AlphaMCAsmInfo(const Target &T, const StringRef &TT) {
+AlphaMCAsmInfo::AlphaMCAsmInfo(const Target &T, StringRef TT) {
AlignmentIsInBytes = false;
PrivateGlobalPrefix = "$";
GPRel32Directive = ".gprel32";
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaMCAsmInfo.h b/contrib/llvm/lib/Target/Alpha/AlphaMCAsmInfo.h
index c27065d..837844b 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/Alpha/AlphaMCAsmInfo.h
@@ -14,14 +14,14 @@
#ifndef ALPHATARGETASMINFO_H
#define ALPHATARGETASMINFO_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
class Target;
- class StringRef;
struct AlphaMCAsmInfo : public MCAsmInfo {
- explicit AlphaMCAsmInfo(const Target &T, const StringRef &TT);
+ explicit AlphaMCAsmInfo(const Target &T, StringRef TT);
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp b/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp
index c083d8c..dc9d935 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.cpp
@@ -74,20 +74,6 @@ const unsigned* AlphaRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
return CalleeSavedRegs;
}
-const TargetRegisterClass* const*
-AlphaRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- static const TargetRegisterClass * const CalleeSavedRegClasses[] = {
- &Alpha::GPRCRegClass, &Alpha::GPRCRegClass,
- &Alpha::GPRCRegClass, &Alpha::GPRCRegClass,
- &Alpha::GPRCRegClass, &Alpha::GPRCRegClass,
- &Alpha::F8RCRegClass, &Alpha::F8RCRegClass,
- &Alpha::F8RCRegClass, &Alpha::F8RCRegClass,
- &Alpha::F8RCRegClass, &Alpha::F8RCRegClass,
- &Alpha::F8RCRegClass, &Alpha::F8RCRegClass, 0
- };
- return CalleeSavedRegClasses;
-}
-
BitVector AlphaRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
Reserved.set(Alpha::R15);
diff --git a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h b/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h
index 720367a..f9fd87a 100644
--- a/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Alpha/AlphaRegisterInfo.h
@@ -30,9 +30,6 @@ struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
- const TargetRegisterClass* const* getCalleeSavedRegClasses(
- const MachineFunction *MF = 0) const;
-
BitVector getReservedRegs(const MachineFunction &MF) const;
bool hasFP(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
index b4da96c..80ee107 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
@@ -132,8 +132,8 @@ static void UpdateNodeOperand(SelectionDAG &DAG,
SDValue Val) {
SmallVector<SDValue, 8> ops(N->op_begin(), N->op_end());
ops[Num] = Val;
- SDValue New = DAG.UpdateNodeOperands(SDValue(N, 0), ops.data(), ops.size());
- DAG.ReplaceAllUsesWith(N, New.getNode());
+ SDNode *New = DAG.UpdateNodeOperands(N, ops.data(), ops.size());
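+  // UpdateNodeOperands may return an existing CSE'd node instead of N.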
+ DAG.ReplaceAllUsesWith(N, New);
}
// After instruction selection, insert COPY_TO_REGCLASS nodes to help in
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp
index adf2118..6e828e1 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.cpp
@@ -143,7 +143,7 @@ SDValue BlackfinTargetLowering::LowerGlobalAddress(SDValue Op,
DebugLoc DL = Op.getDebugLoc();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- Op = DAG.getTargetGlobalAddress(GV, MVT::i32);
+ Op = DAG.getTargetGlobalAddress(GV, DL, MVT::i32);
return DAG.getNode(BFISD::Wrapper, DL, MVT::i32, Op);
}
@@ -205,8 +205,7 @@ BlackfinTargetLowering::LowerFormalArguments(SDValue Chain,
} else {
assert(VA.isMemLoc() && "CCValAssign must be RegLoc or MemLoc");
unsigned ObjSize = VA.getLocVT().getStoreSize();
- int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset(),
- true, false);
+ int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset(), true);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0,
false, false, 0));
@@ -220,6 +219,7 @@ SDValue
BlackfinTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to locations.
@@ -245,7 +245,7 @@ BlackfinTargetLowering::LowerReturn(SDValue Chain,
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- SDValue Opi = Outs[i].Val;
+ SDValue Opi = OutVals[i];
// Expand to i32 if necessary
switch (VA.getLocInfo()) {
@@ -278,6 +278,7 @@ BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -301,7 +302,7 @@ BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
// Promote the value if needed.
switch (VA.getLocInfo()) {
@@ -357,7 +358,7 @@ BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h b/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h
index a784248..6bebcc3 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinISelLowering.h
@@ -63,6 +63,7 @@ namespace llvm {
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -71,6 +72,7 @@ namespace llvm {
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.cpp
index 73924b7..a74d42d 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.cpp
@@ -104,10 +104,8 @@ unsigned BlackfinInstrInfo::
InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc operand
- DebugLoc DL;
-
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
@@ -124,69 +122,73 @@ InsertBranch(MachineBasicBlock &MBB,
llvm_unreachable("Implement conditional branches!");
}
-static bool inClass(const TargetRegisterClass &Test,
- unsigned Reg,
- const TargetRegisterClass *RC) {
- if (TargetRegisterInfo::isPhysicalRegister(Reg))
- return Test.contains(Reg);
- else
- return &Test==RC || Test.hasSubClass(RC);
-}
-
-bool BlackfinInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg,
- unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
- if (inClass(BF::ALLRegClass, DestReg, DestRC) &&
- inClass(BF::ALLRegClass, SrcReg, SrcRC)) {
- BuildMI(MBB, I, DL, get(BF::MOVE), DestReg).addReg(SrcReg);
- return true;
+void BlackfinInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
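+  // Full-register copies are a single MOVE; copies involving the CC/NCC flag
+  // registers need the dedicated move forms handled below.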
+ if (BF::ALLRegClass.contains(DestReg, SrcReg)) {
+ BuildMI(MBB, I, DL, get(BF::MOVE), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
}
- if (inClass(BF::D16RegClass, DestReg, DestRC) &&
- inClass(BF::D16RegClass, SrcReg, SrcRC)) {
- BuildMI(MBB, I, DL, get(BF::SLL16i), DestReg).addReg(SrcReg).addImm(0);
- return true;
+ if (BF::D16RegClass.contains(DestReg, SrcReg)) {
+ BuildMI(MBB, I, DL, get(BF::SLL16i), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addImm(0);
+ return;
}
- if (inClass(BF::AnyCCRegClass, SrcReg, SrcRC) &&
- inClass(BF::DRegClass, DestReg, DestRC)) {
- if (inClass(BF::NotCCRegClass, SrcReg, SrcRC)) {
- BuildMI(MBB, I, DL, get(BF::MOVENCC_z), DestReg).addReg(SrcReg);
+ if (BF::DRegClass.contains(DestReg)) {
+ if (SrcReg == BF::NCC) {
+ BuildMI(MBB, I, DL, get(BF::MOVENCC_z), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
BuildMI(MBB, I, DL, get(BF::BITTGL), DestReg).addReg(DestReg).addImm(0);
- } else {
- BuildMI(MBB, I, DL, get(BF::MOVECC_zext), DestReg).addReg(SrcReg);
+ return;
+ }
+ if (SrcReg == BF::CC) {
+ BuildMI(MBB, I, DL, get(BF::MOVECC_zext), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
}
- return true;
}
- if (inClass(BF::AnyCCRegClass, DestReg, DestRC) &&
- inClass(BF::DRegClass, SrcReg, SrcRC)) {
- if (inClass(BF::NotCCRegClass, DestReg, DestRC))
- BuildMI(MBB, I, DL, get(BF::SETEQri_not), DestReg).addReg(SrcReg);
- else
- BuildMI(MBB, I, DL, get(BF::MOVECC_nz), DestReg).addReg(SrcReg);
- return true;
+ if (BF::DRegClass.contains(SrcReg)) {
+ if (DestReg == BF::NCC) {
+ BuildMI(MBB, I, DL, get(BF::SETEQri_not), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc)).addImm(0);
+ return;
+ }
+ if (DestReg == BF::CC) {
+ BuildMI(MBB, I, DL, get(BF::MOVECC_nz), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
}
- if (inClass(BF::NotCCRegClass, DestReg, DestRC) &&
- inClass(BF::JustCCRegClass, SrcReg, SrcRC)) {
- BuildMI(MBB, I, DL, get(BF::MOVE_ncccc), DestReg).addReg(SrcReg);
- return true;
+
+ if (DestReg == BF::NCC && SrcReg == BF::CC) {
+ BuildMI(MBB, I, DL, get(BF::MOVE_ncccc), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
}
- if (inClass(BF::JustCCRegClass, DestReg, DestRC) &&
- inClass(BF::NotCCRegClass, SrcReg, SrcRC)) {
- BuildMI(MBB, I, DL, get(BF::MOVE_ccncc), DestReg).addReg(SrcReg);
- return true;
+ if (DestReg == BF::CC && SrcReg == BF::NCC) {
+ BuildMI(MBB, I, DL, get(BF::MOVE_ccncc), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
}
- llvm_unreachable((std::string("Bad regclasses for reg-to-reg copy: ")+
- SrcRC->getName() + " -> " + DestRC->getName()).c_str());
- return false;
+ llvm_unreachable("Bad reg-to-reg copy");
+}
+
+static bool inClass(const TargetRegisterClass &Test,
+ unsigned Reg,
+ const TargetRegisterClass *RC) {
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ return Test.contains(Reg);
+ else
+ return &Test==RC || Test.hasSubClass(RC);
}
void
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.h b/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.h
index c1dcd58..6c35917 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.h
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.h
@@ -44,14 +44,13 @@ namespace llvm {
InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
-
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
+
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td b/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td
index 5cf350a..8034a7f 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinInstrInfo.td
@@ -488,7 +488,7 @@ def MOVE: F1<(outs ALL:$dst), (ins ALL:$src),
"$dst = $src;",
[]>;
-let isTwoAddress = 1 in
+let Constraints = "$src1 = $dst" in
def MOVEcc: F1<(outs DP:$dst), (ins DP:$src1, DP:$src2, AnyCC:$cc),
"if $cc $dst = $src2;",
[(set DP:$dst, (select AnyCC:$cc, DP:$src2, DP:$src1))]>;
@@ -645,7 +645,7 @@ def XOR: F1<(outs D:$dst), (ins D:$src1, D:$src2),
// Table C-15. Bit Operations Instructions
//===----------------------------------------------------------------------===//
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
def BITCLR: F1<(outs D:$dst), (ins D:$src1, uimm5imask:$src2),
"bitclr($dst, $src2);",
[(set D:$dst, (and D:$src1, uimm5imask:$src2))]>;
@@ -691,7 +691,7 @@ multiclass SHIFT32<SDNode opnode, string ops> {
}
let Defs = [AZ, AN, V, VS],
- isTwoAddress = 1 in {
+ Constraints = "$src = $dst" in {
defm SRA : SHIFT32<sra, ">>>">;
defm SRL : SHIFT32<srl, ">>">;
defm SLL : SHIFT32<shl, "<<">;
@@ -748,7 +748,7 @@ def ADD16: F2<(outs D16:$dst), (ins D16:$src1, D16:$src2),
"$dst = $src1 + $src2;",
[(set D16:$dst, (add D16:$src1, D16:$src2))]>;
-let isTwoAddress = 1 in
+let Constraints = "$src1 = $dst" in
def ADDimm7: F1<(outs D:$dst), (ins D:$src1, i32imm:$src2),
"$dst += $src2;",
[(set D:$dst, (add D:$src1, imm7:$src2))]>;
@@ -775,7 +775,7 @@ def NEG: F1<(outs D:$dst), (ins D:$src),
def ADDpp: F1<(outs P:$dst), (ins P:$src1, P:$src2),
"$dst = $src1 + $src2;", []>;
-let isTwoAddress = 1 in
+let Constraints = "$src1 = $dst" in
def ADDpp_imm7: F1<(outs P:$dst), (ins P:$src1, i32imm:$src2),
"$dst += $src2;", []>;
@@ -802,7 +802,7 @@ def MULhh32u: F2<(outs D:$dst), (ins D16:$src1, D16:$src2),
}
-let isTwoAddress = 1 in
+let Constraints = "$src1 = $dst" in
def MUL32: F1<(outs D:$dst), (ins D:$src1, D:$src2),
"$dst *= $src2;",
[(set D:$dst, (mul D:$src1, D:$src2))]>;
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinMCAsmInfo.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinMCAsmInfo.cpp
index 31470fb..5b9d4a2 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinMCAsmInfo.cpp
@@ -15,7 +15,7 @@
using namespace llvm;
-BlackfinMCAsmInfo::BlackfinMCAsmInfo(const Target &T, const StringRef &TT) {
+BlackfinMCAsmInfo::BlackfinMCAsmInfo(const Target &T, StringRef TT) {
GlobalPrefix = "_";
CommentString = "//";
HasSetDirective = false;
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinMCAsmInfo.h b/contrib/llvm/lib/Target/Blackfin/BlackfinMCAsmInfo.h
index 0efc295..c372aa2 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinMCAsmInfo.h
@@ -14,14 +14,14 @@
#ifndef BLACKFINTARGETASMINFO_H
#define BLACKFINTARGETASMINFO_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
class Target;
- class StringRef;
struct BlackfinMCAsmInfo : public MCAsmInfo {
- explicit BlackfinMCAsmInfo(const Target &T, const StringRef &TT);
+ explicit BlackfinMCAsmInfo(const Target &T, StringRef TT);
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp
index 5153ace..06e95de 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.cpp
@@ -48,17 +48,6 @@ BlackfinRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CalleeSavedRegs;
}
-const TargetRegisterClass* const *BlackfinRegisterInfo::
-getCalleeSavedRegClasses(const MachineFunction *MF) const {
- using namespace BF;
- static const TargetRegisterClass * const CalleeSavedRegClasses[] = {
- &PRegClass,
- &DRegClass, &DRegClass, &DRegClass, &DRegClass,
- &PRegClass, &PRegClass, &PRegClass,
- 0 };
- return CalleeSavedRegClasses;
-}
-
BitVector
BlackfinRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
using namespace BF;
@@ -86,25 +75,6 @@ BlackfinRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
-const TargetRegisterClass*
-BlackfinRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, EVT VT) const {
- assert(isPhysicalRegister(reg) && "reg must be a physical register");
-
- // Pick the smallest register class of the right type that contains
- // this physreg.
- const TargetRegisterClass* BestRC = 0;
- for (regclass_iterator I = regclass_begin(), E = regclass_end();
- I != E; ++I) {
- const TargetRegisterClass* RC = *I;
- if ((VT == MVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
- (!BestRC || RC->getNumRegs() < BestRC->getNumRegs()))
- BestRC = RC;
- }
-
- assert(BestRC && "Couldn't find the register class");
- return BestRC;
-}
-
// hasFP - Return true if the specified function should have a dedicated frame
// pointer register. This is true if the function has variable sized allocas or
// if frame pointer elimination is disabled.
diff --git a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h
index 03c5450..ead0b4a 100644
--- a/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Blackfin/BlackfinRegisterInfo.h
@@ -33,9 +33,6 @@ namespace llvm {
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
- const TargetRegisterClass* const*
- getCalleeSavedRegClasses(const MachineFunction *MF = 0) const;
-
BitVector getReservedRegs(const MachineFunction &MF) const;
// getSubReg implemented by tablegen
@@ -44,9 +41,6 @@ namespace llvm {
return &BF::PRegClass;
}
- const TargetRegisterClass *getPhysicalRegisterRegClass(unsigned reg,
- EVT VT) const;
-
bool hasFP(const MachineFunction &MF) const;
// bool hasReservedCallFrame(MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/CBackend/CBackend.cpp b/contrib/llvm/lib/Target/CBackend/CBackend.cpp
index 55b8aaa..e8d8474 100644
--- a/contrib/llvm/lib/Target/CBackend/CBackend.cpp
+++ b/contrib/llvm/lib/Target/CBackend/CBackend.cpp
@@ -264,7 +264,7 @@ namespace {
//
static const AllocaInst *isDirectAlloca(const Value *V) {
const AllocaInst *AI = dyn_cast<AllocaInst>(V);
- if (!AI) return false;
+ if (!AI) return 0;
if (AI->isArrayAllocation())
return 0; // FIXME: we can also inline fixed size array allocas!
if (AI->getParent() != &AI->getParent()->getParent()->getEntryBlock())
@@ -2889,7 +2889,7 @@ void CWriter::visitCallInst(CallInst &I) {
bool hasByVal = I.hasByValArgument();
bool isStructRet = I.hasStructRetAttr();
if (isStructRet) {
- writeOperandDeref(I.getOperand(1));
+ writeOperandDeref(I.getArgOperand(0));
Out << " = ";
}
@@ -2944,8 +2944,8 @@ void CWriter::visitCallInst(CallInst &I) {
}
unsigned NumDeclaredParams = FTy->getNumParams();
-
- CallSite::arg_iterator AI = I.op_begin()+1, AE = I.op_end();
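+  // CallSite hides the callee operand, so arguments are indexed from zero.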
+ CallSite CS(&I);
+ CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
unsigned ArgNo = 0;
if (isStructRet) { // Skip struct return argument.
++AI;
@@ -2999,7 +2999,7 @@ bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
Out << "0; ";
Out << "va_start(*(va_list*)";
- writeOperand(I.getOperand(1));
+ writeOperand(I.getArgOperand(0));
Out << ", ";
// Output the last argument to the enclosing function.
if (I.getParent()->getParent()->arg_empty())
@@ -3009,9 +3009,9 @@ bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
Out << ')';
return true;
case Intrinsic::vaend:
- if (!isa<ConstantPointerNull>(I.getOperand(1))) {
+ if (!isa<ConstantPointerNull>(I.getArgOperand(0))) {
Out << "0; va_end(*(va_list*)";
- writeOperand(I.getOperand(1));
+ writeOperand(I.getArgOperand(0));
Out << ')';
} else {
Out << "va_end(*(va_list*)0)";
@@ -3020,47 +3020,47 @@ bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
case Intrinsic::vacopy:
Out << "0; ";
Out << "va_copy(*(va_list*)";
- writeOperand(I.getOperand(1));
+ writeOperand(I.getArgOperand(0));
Out << ", *(va_list*)";
- writeOperand(I.getOperand(2));
+ writeOperand(I.getArgOperand(1));
Out << ')';
return true;
case Intrinsic::returnaddress:
Out << "__builtin_return_address(";
- writeOperand(I.getOperand(1));
+ writeOperand(I.getArgOperand(0));
Out << ')';
return true;
case Intrinsic::frameaddress:
Out << "__builtin_frame_address(";
- writeOperand(I.getOperand(1));
+ writeOperand(I.getArgOperand(0));
Out << ')';
return true;
case Intrinsic::powi:
Out << "__builtin_powi(";
- writeOperand(I.getOperand(1));
+ writeOperand(I.getArgOperand(0));
Out << ", ";
- writeOperand(I.getOperand(2));
+ writeOperand(I.getArgOperand(1));
Out << ')';
return true;
case Intrinsic::setjmp:
Out << "setjmp(*(jmp_buf*)";
- writeOperand(I.getOperand(1));
+ writeOperand(I.getArgOperand(0));
Out << ')';
return true;
case Intrinsic::longjmp:
Out << "longjmp(*(jmp_buf*)";
- writeOperand(I.getOperand(1));
+ writeOperand(I.getArgOperand(0));
Out << ", ";
- writeOperand(I.getOperand(2));
+ writeOperand(I.getArgOperand(1));
Out << ')';
return true;
case Intrinsic::prefetch:
Out << "LLVM_PREFETCH((const void *)";
- writeOperand(I.getOperand(1));
+ writeOperand(I.getArgOperand(0));
Out << ", ";
- writeOperand(I.getOperand(2));
+ writeOperand(I.getArgOperand(1));
Out << ", ";
- writeOperand(I.getOperand(3));
+ writeOperand(I.getArgOperand(2));
Out << ")";
return true;
case Intrinsic::stacksave:
@@ -3077,7 +3077,7 @@ bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
printType(Out, I.getType());
Out << ')';
// Multiple GCC builtins multiplex onto this intrinsic.
- switch (cast<ConstantInt>(I.getOperand(3))->getZExtValue()) {
+ switch (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue()) {
default: llvm_unreachable("Invalid llvm.x86.sse.cmp!");
case 0: Out << "__builtin_ia32_cmpeq"; break;
case 1: Out << "__builtin_ia32_cmplt"; break;
@@ -3098,9 +3098,9 @@ bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
Out << 'd';
Out << "(";
- writeOperand(I.getOperand(1));
+ writeOperand(I.getArgOperand(0));
Out << ", ";
- writeOperand(I.getOperand(2));
+ writeOperand(I.getArgOperand(1));
Out << ")";
return true;
case Intrinsic::ppc_altivec_lvsl:
@@ -3108,7 +3108,7 @@ bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
printType(Out, I.getType());
Out << ')';
Out << "__builtin_altivec_lvsl(0, (void*)";
- writeOperand(I.getOperand(1));
+ writeOperand(I.getArgOperand(0));
Out << ")";
return true;
}
@@ -3221,7 +3221,7 @@ void CWriter::visitInlineAsm(CallInst &CI) {
DestVal = ResultVals[ValueCount].first;
DestValNo = ResultVals[ValueCount].second;
} else
- DestVal = CI.getOperand(ValueCount-ResultVals.size()+1);
+ DestVal = CI.getArgOperand(ValueCount-ResultVals.size());
if (I->isEarlyClobber)
C = "&"+C;
@@ -3255,7 +3255,7 @@ void CWriter::visitInlineAsm(CallInst &CI) {
}
assert(ValueCount >= ResultVals.size() && "Input can't refer to result");
- Value *SrcVal = CI.getOperand(ValueCount-ResultVals.size()+1);
+ Value *SrcVal = CI.getArgOperand(ValueCount-ResultVals.size());
Out << "\"" << C << "\"(";
if (!I->isIndirect)
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUCallingConv.td b/contrib/llvm/lib/Target/CellSPU/SPUCallingConv.td
index 10dc837..ec2f663 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUCallingConv.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUCallingConv.td
@@ -34,76 +34,19 @@ def RetCC_SPU : CallingConv<[
//===----------------------------------------------------------------------===//
// CellSPU Argument Calling Conventions
-// (note: this isn't used, but presumably should be at some point when other
-// targets do.)
//===----------------------------------------------------------------------===//
-/*
-def CC_SPU : CallingConv<[
- CCIfType<[i8], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10, R11,
- R12, R13, R14, R15, R16, R17, R18, R19, R20,
- R21, R22, R23, R24, R25, R26, R27, R28, R29,
- R30, R31, R32, R33, R34, R35, R36, R37, R38,
- R39, R40, R41, R42, R43, R44, R45, R46, R47,
- R48, R49, R50, R51, R52, R53, R54, R55, R56,
- R57, R58, R59, R60, R61, R62, R63, R64, R65,
- R66, R67, R68, R69, R70, R71, R72, R73, R74,
- R75, R76, R77, R78, R79]>>,
- CCIfType<[i16], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10, R11,
- R12, R13, R14, R15, R16, R17, R18, R19, R20,
- R21, R22, R23, R24, R25, R26, R27, R28, R29,
- R30, R31, R32, R33, R34, R35, R36, R37, R38,
- R39, R40, R41, R42, R43, R44, R45, R46, R47,
- R48, R49, R50, R51, R52, R53, R54, R55, R56,
- R57, R58, R59, R60, R61, R62, R63, R64, R65,
- R66, R67, R68, R69, R70, R71, R72, R73, R74,
- R75, R76, R77, R78, R79]>>,
- CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10, R11,
- R12, R13, R14, R15, R16, R17, R18, R19, R20,
- R21, R22, R23, R24, R25, R26, R27, R28, R29,
- R30, R31, R32, R33, R34, R35, R36, R37, R38,
- R39, R40, R41, R42, R43, R44, R45, R46, R47,
- R48, R49, R50, R51, R52, R53, R54, R55, R56,
- R57, R58, R59, R60, R61, R62, R63, R64, R65,
- R66, R67, R68, R69, R70, R71, R72, R73, R74,
- R75, R76, R77, R78, R79]>>,
- CCIfType<[f32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10, R11,
- R12, R13, R14, R15, R16, R17, R18, R19, R20,
- R21, R22, R23, R24, R25, R26, R27, R28, R29,
- R30, R31, R32, R33, R34, R35, R36, R37, R38,
- R39, R40, R41, R42, R43, R44, R45, R46, R47,
- R48, R49, R50, R51, R52, R53, R54, R55, R56,
- R57, R58, R59, R60, R61, R62, R63, R64, R65,
- R66, R67, R68, R69, R70, R71, R72, R73, R74,
- R75, R76, R77, R78, R79]>>,
- CCIfType<[i64], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10, R11,
- R12, R13, R14, R15, R16, R17, R18, R19, R20,
- R21, R22, R23, R24, R25, R26, R27, R28, R29,
- R30, R31, R32, R33, R34, R35, R36, R37, R38,
- R39, R40, R41, R42, R43, R44, R45, R46, R47,
- R48, R49, R50, R51, R52, R53, R54, R55, R56,
- R57, R58, R59, R60, R61, R62, R63, R64, R65,
- R66, R67, R68, R69, R70, R71, R72, R73, R74,
- R75, R76, R77, R78, R79]>>,
- CCIfType<[f64], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10, R11,
- R12, R13, R14, R15, R16, R17, R18, R19, R20,
- R21, R22, R23, R24, R25, R26, R27, R28, R29,
- R30, R31, R32, R33, R34, R35, R36, R37, R38,
- R39, R40, R41, R42, R43, R44, R45, R46, R47,
- R48, R49, R50, R51, R52, R53, R54, R55, R56,
- R57, R58, R59, R60, R61, R62, R63, R64, R65,
- R66, R67, R68, R69, R70, R71, R72, R73, R74,
- R75, R76, R77, R78, R79]>>,
- CCIfType<[v16i8, v8i16, v4i32, v4f32, v2i64, v2f64],
- CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10, R11,
- R12, R13, R14, R15, R16, R17, R18, R19, R20,
- R21, R22, R23, R24, R25, R26, R27, R28, R29,
- R30, R31, R32, R33, R34, R35, R36, R37, R38,
- R39, R40, R41, R42, R43, R44, R45, R46, R47,
- R48, R49, R50, R51, R52, R53, R54, R55, R56,
- R57, R58, R59, R60, R61, R62, R63, R64, R65,
- R66, R67, R68, R69, R70, R71, R72, R73, R74,
- R75, R76, R77, R78, R79]>>,
-
+def CCC_SPU : CallingConv<[
+ CCIfType<[i8, i16, i32, i64, i128, f32, f64,
+ v16i8, v8i16, v4i32, v4f32, v2i64, v2f64],
+ CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10, R11,
+ R12, R13, R14, R15, R16, R17, R18, R19, R20,
+ R21, R22, R23, R24, R25, R26, R27, R28, R29,
+ R30, R31, R32, R33, R34, R35, R36, R37, R38,
+ R39, R40, R41, R42, R43, R44, R45, R46, R47,
+ R48, R49, R50, R51, R52, R53, R54, R55, R56,
+ R57, R58, R59, R60, R61, R62, R63, R64, R65,
+ R66, R67, R68, R69, R70, R71, R72, R73, R74,
+ R75, R76, R77, R78, R79]>>,
// Integer/FP values get stored in stack slots that are 8 bytes in size and
// 8-byte aligned if there are no more registers to hold them.
CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,
@@ -112,4 +55,3 @@ def CC_SPU : CallingConv<[
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
CCAssignToStack<16, 16>>
]>;
-*/
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUFrameInfo.h b/contrib/llvm/lib/Target/CellSPU/SPUFrameInfo.h
index e8ca333..f511acd 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUFrameInfo.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUFrameInfo.h
@@ -53,10 +53,6 @@ namespace llvm {
static int minStackSize() {
return (2 * stackSlotSize());
}
- //! Frame size required to spill all registers plus frame info
- static int fullSpillSize() {
- return (SPURegisterInfo::getNumArgRegs() * stackSlotSize());
- }
//! Convert frame index to stack offset
static int FItoStackOffset(int frame_index) {
return frame_index * stackSlotSize();
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
index 9afdb2b..9b8c2dd 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
@@ -275,7 +275,6 @@ namespace {
SDNode *emitBuildVector(SDNode *bvNode) {
EVT vecVT = bvNode->getValueType(0);
- EVT eltVT = vecVT.getVectorElementType();
DebugLoc dl = bvNode->getDebugLoc();
// Check to see if this vector can be represented as a CellSPU immediate
@@ -606,18 +605,14 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDNode *Op, SDValue N, SDValue &Base,
Base = CurDAG->getTargetConstant(0, N.getValueType());
Index = N;
return true;
- } else if (Opc == ISD::Register || Opc == ISD::CopyFromReg) {
+  } else if (Opc == ISD::Register ||
+             Opc == ISD::CopyFromReg ||
+             Opc == ISD::UNDEF) {
unsigned OpOpc = Op->getOpcode();
if (OpOpc == ISD::STORE || OpOpc == ISD::LOAD) {
// Direct load/store without getelementptr
- SDValue Addr, Offs;
-
- // Get the register from CopyFromReg
- if (Opc == ISD::CopyFromReg)
- Addr = N.getOperand(1);
- else
- Addr = N; // Register
+ SDValue Offs;
Offs = ((OpOpc == ISD::STORE) ? Op->getOperand(3) : Op->getOperand(2));
@@ -626,7 +621,7 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDNode *Op, SDValue N, SDValue &Base,
Offs = CurDAG->getTargetConstant(0, Offs.getValueType());
Base = Offs;
- Index = Addr;
+ Index = N;
return true;
}
} else {
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp
index 081e8d0..ece19b9 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -953,7 +953,8 @@ LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
EVT PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GSDN->getGlobal();
- SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
+ SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
+ PtrVT, GSDN->getOffset());
const TargetMachine &TM = DAG.getTarget();
SDValue Zero = DAG.getConstant(0, PtrVT);
// FIXME there is no actual debug info here
@@ -1013,22 +1014,26 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
MachineRegisterInfo &RegInfo = MF.getRegInfo();
SPUFunctionInfo *FuncInfo = MF.getInfo<SPUFunctionInfo>();
- const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
- const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();
-
unsigned ArgOffset = SPUFrameInfo::minStackSize();
unsigned ArgRegIdx = 0;
unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
+ *DAG.getContext());
+ // FIXME: allow for other calling conventions
+ CCInfo.AnalyzeFormalArguments(Ins, CCC_SPU);
+
// Add DAG nodes to load the arguments or copy them out of registers.
for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
EVT ObjectVT = Ins[ArgNo].VT;
unsigned ObjSize = ObjectVT.getSizeInBits()/8;
SDValue ArgVal;
+ CCValAssign &VA = ArgLocs[ArgNo];
- if (ArgRegIdx < NumArgRegs) {
+ if (VA.isRegLoc()) {
const TargetRegisterClass *ArgRegClass;
switch (ObjectVT.getSimpleVT().SimpleTy) {
@@ -1067,14 +1072,14 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
}
unsigned VReg = RegInfo.createVirtualRegister(ArgRegClass);
- RegInfo.addLiveIn(ArgRegs[ArgRegIdx], VReg);
+ RegInfo.addLiveIn(VA.getLocReg(), VReg);
ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
++ArgRegIdx;
} else {
// We need to load the argument to a virtual register if we determined
// above that we ran out of physical registers of the appropriate type
// or we're forced to do vararg
- int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true, false);
+ int FI = MFI->CreateFixedObject(ObjSize, ArgOffset, true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0, false, false, 0);
ArgOffset += StackSlotSize;
@@ -1087,16 +1092,31 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
// vararg handling:
if (isVarArg) {
- // unsigned int ptr_size = PtrVT.getSizeInBits() / 8;
+ // FIXME: we should be able to query the argument registers from
+ // tablegen generated code.
+ static const unsigned ArgRegs[] = {
+ SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
+ SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
+ SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
+ SPU::R24, SPU::R25, SPU::R26, SPU::R27, SPU::R28, SPU::R29, SPU::R30,
+ SPU::R31, SPU::R32, SPU::R33, SPU::R34, SPU::R35, SPU::R36, SPU::R37,
+ SPU::R38, SPU::R39, SPU::R40, SPU::R41, SPU::R42, SPU::R43, SPU::R44,
+ SPU::R45, SPU::R46, SPU::R47, SPU::R48, SPU::R49, SPU::R50, SPU::R51,
+ SPU::R52, SPU::R53, SPU::R54, SPU::R55, SPU::R56, SPU::R57, SPU::R58,
+ SPU::R59, SPU::R60, SPU::R61, SPU::R62, SPU::R63, SPU::R64, SPU::R65,
+ SPU::R66, SPU::R67, SPU::R68, SPU::R69, SPU::R70, SPU::R71, SPU::R72,
+ SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
+ };
+  // Number of entries in the ArgRegs array above (R3..R79 = 77 registers).
+  unsigned NumArgRegs = 77;
+
// We will spill (79-3)+1 registers to the stack
SmallVector<SDValue, 79-3+1> MemOps;
// Create the frame slot
-
for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
FuncInfo->setVarArgsFrameIndex(
- MFI->CreateFixedObject(StackSlotSize, ArgOffset,
- true, false));
+ MFI->CreateFixedObject(StackSlotSize, ArgOffset, true));
SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
unsigned VReg = MF.addLiveIn(ArgRegs[ArgRegIdx], &SPU::R32CRegClass);
SDValue ArgVal = DAG.getRegister(VReg, MVT::v16i8);
@@ -1135,6 +1155,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -1144,8 +1165,15 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
unsigned NumOps = Outs.size();
unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
- const unsigned *ArgRegs = SPURegisterInfo::getArgRegs();
- const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();
+
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
+ *DAG.getContext());
+ // FIXME: allow for other calling conventions
+ CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
+
+ const unsigned NumArgRegs = ArgLocs.size();
+
// Handy pointer type
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
@@ -1165,8 +1193,9 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// And the arguments passed on the stack
SmallVector<SDValue, 8> MemOpChains;
- for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = Outs[i].Val;
+ for (; ArgRegIdx != NumOps; ++ArgRegIdx) {
+ SDValue Arg = OutVals[ArgRegIdx];
+ CCValAssign &VA = ArgLocs[ArgRegIdx];
// PtrOff will be used to store the current argument to the stack if a
// register cannot be found for it.
@@ -1180,24 +1209,8 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
case MVT::i32:
case MVT::i64:
case MVT::i128:
- if (ArgRegIdx != NumArgRegs) {
- RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
- } else {
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
- false, false, 0));
- ArgOffset += StackSlotSize;
- }
- break;
case MVT::f32:
case MVT::f64:
- if (ArgRegIdx != NumArgRegs) {
- RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
- } else {
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
- false, false, 0));
- ArgOffset += StackSlotSize;
- }
- break;
case MVT::v2i64:
case MVT::v2f64:
case MVT::v4f32:
@@ -1205,7 +1218,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
case MVT::v8i16:
case MVT::v16i8:
if (ArgRegIdx != NumArgRegs) {
- RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
} else {
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0,
false, false, 0));
@@ -1249,7 +1262,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
const GlobalValue *GV = G->getGlobal();
EVT CalleeVT = Callee.getValueType();
SDValue Zero = DAG.getConstant(0, PtrVT);
- SDValue GA = DAG.getTargetGlobalAddress(GV, CalleeVT);
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, CalleeVT);
if (!ST->usingLargeMem()) {
// Turn calls to targets that are defined (i.e., have bodies) into BRSL
@@ -1355,6 +1368,7 @@ SDValue
SPUTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
SmallVector<CCValAssign, 16> RVLocs;
@@ -1376,7 +1390,7 @@ SPUTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- Outs[i].Val, Flag);
+ OutVals[i], Flag);
Flag = Chain.getValue(1);
}
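The OutVals parameter threaded through LowerCall (and LowerReturn above) reflects an upstream interface split: ISD::OutputArg no longer carries the SDValue itself, previously Outs[i].Val, only the type and flag metadata; the values now travel in a parallel vector. A sketch of the resulting access pattern, with the two vectors kept in lockstep:

    // Sketch: Outs[i] describes the i-th outgoing argument, OutVals[i] is
    // the actual value; both vectors always have the same length.
    for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
      ISD::ArgFlagsTy Flags = Outs[i].Flags; // byval, sext, zext, ...
      SDValue Arg = OutVals[i];              // was Outs[i].Val before this change
      // ... lower Arg according to Flags and its CCValAssign ...
    }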
@@ -1746,15 +1760,20 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
unsigned V0Elt = 0;
bool monotonic = true;
bool rotate = true;
+ EVT maskVT; // which of the c?d instructions to use
if (EltVT == MVT::i8) {
V2EltIdx0 = 16;
+ maskVT = MVT::v16i8;
} else if (EltVT == MVT::i16) {
V2EltIdx0 = 8;
+ maskVT = MVT::v8i16;
} else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
V2EltIdx0 = 4;
+ maskVT = MVT::v4i32;
} else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
V2EltIdx0 = 2;
+ maskVT = MVT::v2i64;
} else
llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
@@ -1786,7 +1805,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
} else {
rotate = false;
}
- } else if (PrevElt == 0) {
+ } else if (i == 0) {
// First time through, need to keep track of previous element
PrevElt = SrcElt;
} else {
@@ -1798,18 +1817,16 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
if (EltsFromV2 == 1 && monotonic) {
// Compute mask and shuffle
- MachineFunction &MF = DAG.getMachineFunction();
- MachineRegisterInfo &RegInfo = MF.getRegInfo();
- unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
- // Initialize temporary register to 0
- SDValue InitTempReg =
- DAG.getCopyToReg(DAG.getEntryNode(), dl, VReg, DAG.getConstant(0, PtrVT));
- // Copy register's contents as index in SHUFFLE_MASK:
- SDValue ShufMaskOp =
- DAG.getNode(SPUISD::SHUFFLE_MASK, dl, MVT::v4i32,
- DAG.getTargetConstant(V2Elt, MVT::i32),
- DAG.getCopyFromReg(InitTempReg, dl, VReg, PtrVT));
+
+ // SHUFFLE_MASK is selected to one of the c?d instructions, so feed it an
+ // address. R1 ($sp) is used only because its low bits are known to be zero.
+ SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
+ DAG.getRegister(SPU::R1, PtrVT),
+ DAG.getConstant(V2Elt, MVT::i32));
+ SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
+ maskVT, Pointer);
+
// Use shuffle mask in SHUFB synthetic instruction:
return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
ShufMaskOp);
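SHUFFLE_MASK is ultimately selected to one of SPU's "generate controls for insertion" instructions (cbd, chd, cwd, cdd for byte, halfword, word and doubleword), which derive a shuffle mask from the low bits of an address operand. The rewrite therefore drops the zero-initialized temporary register and instead feeds the node an address built from $sp, whose 16-byte alignment guarantees zero low bits, plus the element offset; the new maskVT steers instruction selection to the right variant. The mapping established earlier in the function, restated as a sketch:

    // Sketch of the element-type to mask-type mapping; each mask type makes
    // instruction selection emit the corresponding c?d form.
    EVT maskVT;
    switch (EltVT.getSimpleVT().SimpleTy) {
    case MVT::i8:                 maskVT = MVT::v16i8; break; // cbd
    case MVT::i16:                maskVT = MVT::v8i16; break; // chd
    case MVT::i32: case MVT::f32: maskVT = MVT::v4i32; break; // cwd
    case MVT::i64: case MVT::f64: maskVT = MVT::v2i64; break; // cdd
    default: llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
    }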
@@ -2056,14 +2073,19 @@ static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
EVT VT = Op.getValueType();
- ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
- assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
+ // Use lane 0 when the index to insert at is 'undef'.
+ int64_t Idx=0;
+ if (IdxOp.getOpcode() != ISD::UNDEF) {
+ ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
+ assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
+ Idx = (CN->getSExtValue());
+ }
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
// Use $sp ($1) because it's always 16-byte aligned and it's available:
SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
DAG.getRegister(SPU::R1, PtrVT),
- DAG.getConstant(CN->getSExtValue(), PtrVT));
+ DAG.getConstant(Idx, PtrVT));
SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, VT, Pointer);
SDValue result =
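The INSERT_VECTOR_ELT hunk makes an undef insertion index legal instead of crashing on the cast. Any lane is acceptable when the index is undef, so lane 0 is chosen; note the surviving assert is redundant, since cast<ConstantSDNode> already aborts on a mismatch. Condensed sketch:

    // Sketch: default to lane 0 when the insertion index is undef.
    int64_t Idx = 0;
    if (IdxOp.getOpcode() != ISD::UNDEF)
      Idx = cast<ConstantSDNode>(IdxOp)->getSExtValue();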
@@ -2862,7 +2884,7 @@ SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
case SPUISD::IndirectAddr: {
if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
- if (CN != 0 && CN->getZExtValue() == 0) {
+ if (CN != 0 && CN->isNullValue()) {
// (SPUindirect (SPUaform <addr>, 0), 0) ->
// (SPUaform <addr>, 0)
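The switch from CN->getZExtValue() == 0 to CN->isNullValue() is more than style: getZExtValue() asserts when the constant's active bits exceed 64, which can matter on a target with native i128 values, while isNullValue() tests the full-width APInt. Sketch of the safe pattern:

    // Sketch: isNullValue() is safe for constants wider than 64 bits,
    // where getZExtValue() would assert.
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
      if (CN->isNullValue()) {
        // fold (SPUindirect (SPUaform <addr>, 0), 0) -> (SPUaform <addr>, 0)
      }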
@@ -3056,12 +3078,10 @@ SPUTargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op,
void
SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
char ConstraintLetter,
- bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const {
// Default, for the time being, to the base class handler
- TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, hasMemory,
- Ops, DAG);
+ TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, Ops, DAG);
}
/// isLegalAddressImmediate - Return true if the integer value can be used
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h
index 9ebd442..6d3c90b 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUISelLowering.h
@@ -134,7 +134,6 @@ namespace llvm {
EVT VT) const;
void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter,
- bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
@@ -160,6 +159,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -168,6 +168,7 @@ namespace llvm {
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
};
}
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp
index 4c53c98..69aa088 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.cpp
@@ -164,11 +164,9 @@ SPUInstrInfo::isMoveInstr(const MachineInstr& MI,
MI.getOperand(0).isReg() &&
MI.getOperand(1).isReg() &&
"invalid SPU OR<type>_<vec> or LR instruction!");
- if (MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
sourceReg = MI.getOperand(1).getReg();
destReg = MI.getOperand(0).getReg();
return true;
- }
break;
}
case SPU::ORv16i8:
@@ -251,40 +249,18 @@ SPUInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
return 0;
}
-bool SPUInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const
+void SPUInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const
{
// We support cross register class moves for our aliases, such as R3 in any
// reg class to any other reg class containing R3. This is required because
// we instruction select bitconvert i64 -> f64 as a noop for example, so our
// types have no specific meaning.
- if (DestRC == SPU::R8CRegisterClass) {
- BuildMI(MBB, MI, DL, get(SPU::LRr8), DestReg).addReg(SrcReg);
- } else if (DestRC == SPU::R16CRegisterClass) {
- BuildMI(MBB, MI, DL, get(SPU::LRr16), DestReg).addReg(SrcReg);
- } else if (DestRC == SPU::R32CRegisterClass) {
- BuildMI(MBB, MI, DL, get(SPU::LRr32), DestReg).addReg(SrcReg);
- } else if (DestRC == SPU::R32FPRegisterClass) {
- BuildMI(MBB, MI, DL, get(SPU::LRf32), DestReg).addReg(SrcReg);
- } else if (DestRC == SPU::R64CRegisterClass) {
- BuildMI(MBB, MI, DL, get(SPU::LRr64), DestReg).addReg(SrcReg);
- } else if (DestRC == SPU::R64FPRegisterClass) {
- BuildMI(MBB, MI, DL, get(SPU::LRf64), DestReg).addReg(SrcReg);
- } else if (DestRC == SPU::GPRCRegisterClass) {
- BuildMI(MBB, MI, DL, get(SPU::LRr128), DestReg).addReg(SrcReg);
- } else if (DestRC == SPU::VECREGRegisterClass) {
- BuildMI(MBB, MI, DL, get(SPU::LRv16i8), DestReg).addReg(SrcReg);
- } else {
- // Attempt to copy unknown/unsupported register class!
- return false;
- }
-
- return true;
+ BuildMI(MBB, I, DL, get(SPU::LRr128), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
}
void
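copyPhysReg replaces copyRegToReg throughout this import: the new hook operates on physical registers only, cannot fail, and takes a kill flag instead of source and destination register classes. Since every SPU register is a 128-bit slot and the classes differ only in how the bits are typed, one LRr128 (logical-or register move) covers what the old eight-way class dispatch did. An illustrative caller-side sketch (not a call site from this patch; InsertPt and DL are assumed):

    // Sketch: copy insertion now goes through the infallible hook.
    // KillSrc marks the source operand killed on the emitted move.
    TII->copyPhysReg(MBB, InsertPt, DL, SPU::R3, SPU::R4, /*KillSrc=*/true);
    // On SPU this expands to: LRr128 R3, R4<kill>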
@@ -356,88 +332,6 @@ SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
addFrameReference(BuildMI(MBB, MI, DL, get(opc), DestReg), FrameIdx);
}
-//! Return true if the specified load or store can be folded
-bool
-SPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const {
- if (Ops.size() != 1) return false;
-
- // Make sure this is a reg-reg copy.
- unsigned Opc = MI->getOpcode();
-
- switch (Opc) {
- case SPU::ORv16i8:
- case SPU::ORv8i16:
- case SPU::ORv4i32:
- case SPU::ORv2i64:
- case SPU::ORr8:
- case SPU::ORr16:
- case SPU::ORr32:
- case SPU::ORr64:
- case SPU::ORf32:
- case SPU::ORf64:
- if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg())
- return true;
- break;
- }
-
- return false;
-}
-
-/// foldMemoryOperand - SPU, like PPC, can only fold spills into
-/// copy instructions, turning them into load/store instructions.
-MachineInstr *
-SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const
-{
- if (Ops.size() != 1) return 0;
-
- unsigned OpNum = Ops[0];
- unsigned Opc = MI->getOpcode();
- MachineInstr *NewMI = 0;
-
- switch (Opc) {
- case SPU::ORv16i8:
- case SPU::ORv8i16:
- case SPU::ORv4i32:
- case SPU::ORv2i64:
- case SPU::ORr8:
- case SPU::ORr16:
- case SPU::ORr32:
- case SPU::ORr64:
- case SPU::ORf32:
- case SPU::ORf64:
- if (OpNum == 0) { // move -> store
- unsigned InReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(),
- get(SPU::STQDr32));
-
- MIB.addReg(InReg, getKillRegState(isKill) | getUndefRegState(isUndef));
- NewMI = addFrameReference(MIB, FrameIndex);
- }
- } else { // move -> load
- unsigned OutReg = MI->getOperand(0).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc));
-
- MIB.addReg(OutReg, RegState::Define | getDeadRegState(isDead) |
- getUndefRegState(isUndef));
- Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset())
- ? SPU::STQDr32 : SPU::STQXr32;
- NewMI = addFrameReference(MIB, FrameIndex);
- break;
- }
- }
-
- return NewMI;
-}
-
//! Branch analysis
/*!
\note This code was kiped from PPC. There may be more branch analysis for
@@ -554,9 +448,8 @@ SPUInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
unsigned
SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc argument
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
@@ -566,14 +459,14 @@ SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
if (FBB == 0) {
if (Cond.empty()) {
// Unconditional branch
- MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(SPU::BR));
+ MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(SPU::BR));
MIB.addMBB(TBB);
DEBUG(errs() << "Inserted one-way uncond branch: ");
DEBUG((*MIB).dump());
} else {
// Conditional branch
- MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(Cond[0].getImm()));
+ MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
MIB.addReg(Cond[1].getReg()).addMBB(TBB);
DEBUG(errs() << "Inserted one-way cond branch: ");
@@ -581,8 +474,8 @@ SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
}
return 1;
} else {
- MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(Cond[0].getImm()));
- MachineInstrBuilder MIB2 = BuildMI(&MBB, dl, get(SPU::BR));
+ MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
+ MachineInstrBuilder MIB2 = BuildMI(&MBB, DL, get(SPU::BR));
// Two-way Conditional Branch.
MIB.addReg(Cond[1].getReg()).addMBB(TBB);
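InsertBranch now receives its DebugLoc from the caller, resolving the FIXME that previously made the target fabricate an empty location. A hedged sketch of the updated call shape (variable names illustrative):

    // Sketch: the caller supplies the source location for the new branch.
    SmallVector<MachineOperand, 2> Cond;   // empty => unconditional branch
    TII->InsertBranch(MBB, TargetMBB, /*FBB=*/0, Cond, BranchMI->getDebugLoc());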
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h
index 6dabd7c..fbb1733 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUInstrInfo.h
@@ -23,19 +23,6 @@ namespace llvm {
class SPUInstrInfo : public TargetInstrInfoImpl {
SPUTargetMachine &TM;
const SPURegisterInfo RI;
- protected:
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- return 0;
- }
-
public:
explicit SPUInstrInfo(SPUTargetMachine &tm);
@@ -56,12 +43,10 @@ namespace llvm {
unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const;
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
//! Store a register to a stack slot, based on its register class.
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -77,11 +62,6 @@ namespace llvm {
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- //! Return true if the specified load or store can be folded
- virtual
- bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const;
-
//! Reverses a branch's condition, returning false on success.
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
@@ -94,8 +74,9 @@ namespace llvm {
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
};
}
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.cpp b/contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.cpp
index 68445cf..25ba88a 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.cpp
@@ -14,7 +14,7 @@
#include "SPUMCAsmInfo.h"
using namespace llvm;
-SPULinuxMCAsmInfo::SPULinuxMCAsmInfo(const Target &T, const StringRef &TT) {
+SPULinuxMCAsmInfo::SPULinuxMCAsmInfo(const Target &T, StringRef TT) {
ZeroDirective = "\t.space\t";
Data64bitsDirective = "\t.quad\t";
AlignmentIsInBytes = false;
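StringRef is a two-word (pointer, length) value type, so passing it as const StringRef& merely adds an indirection; the convention adopted across this import is to pass it by value. Sketch:

    // Sketch: prefer by-value StringRef parameters.
    void useTriple(StringRef TT) {          // cheap: two words copied
      if (TT.startswith("cellspu-")) { /* ... */ }
    }
    // void useTriple(const StringRef &TT); // indirection for no benefit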
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.h b/contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.h
index 8d75ea8..7f850d3 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPUMCAsmInfo.h
@@ -14,14 +14,14 @@
#ifndef SPUTARGETASMINFO_H
#define SPUTARGETASMINFO_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
class Target;
- class StringRef;
struct SPULinuxMCAsmInfo : public MCAsmInfo {
- explicit SPULinuxMCAsmInfo(const Target &T, const StringRef &TT);
+ explicit SPULinuxMCAsmInfo(const Target &T, StringRef TT);
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/CellSPU/SPUNodes.td b/contrib/llvm/lib/Target/CellSPU/SPUNodes.td
index 846c7ed..647da30 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPUNodes.td
+++ b/contrib/llvm/lib/Target/CellSPU/SPUNodes.td
@@ -21,7 +21,7 @@ def SPUshufmask : SDNode<"SPUISD::SHUFFLE_MASK", SPU_GenControl, []>;
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_SPUCallSeq,
[SDNPHasChain, SDNPOutFlag]>;
def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPUCallSeq,
- [SDNPHasChain, SDNPOutFlag]>;
+ [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
//===----------------------------------------------------------------------===//
// Operand constraints:
//===----------------------------------------------------------------------===//
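Giving callseq_end the SDNPInFlag property lets the node consume the glue value produced by the call it closes, so the stack-adjustment node cannot be scheduled away from the call and its result copies. On the C++ lowering side the node is built roughly as follows (sketch against the 2.8-era SelectionDAG API; NumBytes and InFlag are assumed from the surrounding LowerCall):

    // Sketch: CALLSEQ_END consuming the call's glue (flag) result.
    Chain = DAG.getCALLSEQ_END(Chain,
                               DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(0, true),
                               InFlag);    // glue from the call node
    InFlag = Chain.getValue(1);            // glue out, for the result copies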
diff --git a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp
index d8937ec..f7cfa42 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.cpp
@@ -191,33 +191,6 @@ SPURegisterInfo::SPURegisterInfo(const SPUSubtarget &subtarget,
{
}
-// SPU's 128-bit registers used for argument passing:
-static const unsigned SPU_ArgRegs[] = {
- SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
- SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
- SPU::R17, SPU::R18, SPU::R19, SPU::R20, SPU::R21, SPU::R22, SPU::R23,
- SPU::R24, SPU::R25, SPU::R26, SPU::R27, SPU::R28, SPU::R29, SPU::R30,
- SPU::R31, SPU::R32, SPU::R33, SPU::R34, SPU::R35, SPU::R36, SPU::R37,
- SPU::R38, SPU::R39, SPU::R40, SPU::R41, SPU::R42, SPU::R43, SPU::R44,
- SPU::R45, SPU::R46, SPU::R47, SPU::R48, SPU::R49, SPU::R50, SPU::R51,
- SPU::R52, SPU::R53, SPU::R54, SPU::R55, SPU::R56, SPU::R57, SPU::R58,
- SPU::R59, SPU::R60, SPU::R61, SPU::R62, SPU::R63, SPU::R64, SPU::R65,
- SPU::R66, SPU::R67, SPU::R68, SPU::R69, SPU::R70, SPU::R71, SPU::R72,
- SPU::R73, SPU::R74, SPU::R75, SPU::R76, SPU::R77, SPU::R78, SPU::R79
-};
-
-const unsigned *
-SPURegisterInfo::getArgRegs()
-{
- return SPU_ArgRegs;
-}
-
-unsigned
-SPURegisterInfo::getNumArgRegs()
-{
- return sizeof(SPU_ArgRegs) / sizeof(SPU_ArgRegs[0]);
-}
-
/// getPointerRegClass - Return the register class to use to hold pointers.
/// This is used for addressing modes.
const TargetRegisterClass *
@@ -251,36 +224,6 @@ SPURegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const
return SPU_CalleeSaveRegs;
}
-const TargetRegisterClass* const*
-SPURegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const
-{
- // Cell ABI Calling Convention
- static const TargetRegisterClass * const SPU_CalleeSaveRegClasses[] = {
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, &SPU::GPRCRegClass, &SPU::GPRCRegClass,
- &SPU::GPRCRegClass, /* environment pointer */
- &SPU::GPRCRegClass, /* stack pointer */
- &SPU::GPRCRegClass, /* link register */
- 0 /* end */
- };
-
- return SPU_CalleeSaveRegClasses;
-}
-
/*!
R0 (link register), R1 (stack pointer) and R2 (environment pointer -- this is
generally unused) are the Cell's reserved registers
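Both removals here follow from interface changes elsewhere: the argument-register table is superseded by the CCState analysis added in SPUISelLowering.cpp, and getCalleeSavedRegClasses was dropped from TargetRegisterInfo, with clients expected to recover a class from the register itself. A sketch of the replacement pattern for the latter (getMinimalPhysRegClass is assumed available in this LLVM revision):

    // Sketch: derive a spill class per callee-saved register now that the
    // parallel class array is gone.
    const unsigned *CSRegs = TRI->getCalleeSavedRegs(&MF);
    for (unsigned i = 0; CSRegs[i]; ++i) {
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(CSRegs[i]);
      // ... spill/restore CSRegs[i] using RC ...
    }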
diff --git a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h
index 0a70318..7a6ae6d 100644
--- a/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h
+++ b/contrib/llvm/lib/Target/CellSPU/SPURegisterInfo.h
@@ -49,10 +49,6 @@ namespace llvm {
//! Return the array of callee-saved registers
virtual const unsigned* getCalleeSavedRegs(const MachineFunction *MF) const;
- //! Return the register class array of the callee-saved registers
- virtual const TargetRegisterClass* const *
- getCalleeSavedRegClasses(const MachineFunction *MF) const;
-
//! Allow for scavenging, so we can get scratch registers when needed.
virtual bool requiresRegisterScavenging(const MachineFunction &MF) const
{ return true; }
@@ -90,15 +86,6 @@ namespace llvm {
// New methods added:
//------------------------------------------------------------------------
- //! Return the array of argument passing registers
- /*!
- \note The size of this array is returned by getArgRegsSize().
- */
- static const unsigned *getArgRegs();
-
- //! Return the size of the argument passing register array
- static unsigned getNumArgRegs();
-
//! Get DWARF debugging register number
int getDwarfRegNum(unsigned RegNum, bool isEH) const;
diff --git a/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp b/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
index 45a0c84..145568a 100644
--- a/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
+++ b/contrib/llvm/lib/Target/CppBackend/CPPBackend.cpp
@@ -99,11 +99,12 @@ namespace {
ValueSet DefinedValues;
ForwardRefMap ForwardRefs;
bool is_inline;
+ unsigned indent_level;
public:
static char ID;
explicit CppWriter(formatted_raw_ostream &o) :
- ModulePass(&ID), Out(o), uniqueNum(0), is_inline(false) {}
+ ModulePass(&ID), Out(o), uniqueNum(0), is_inline(false), indent_level(0){}
virtual const char *getPassName() const { return "C++ backend"; }
@@ -120,6 +121,11 @@ namespace {
void error(const std::string& msg);
+
+ formatted_raw_ostream& nl(formatted_raw_ostream &Out, int delta = 0);
+ inline void in() { indent_level++; }
+ inline void out() { if (indent_level >0) indent_level--; }
+
private:
void printLinkageType(GlobalValue::LinkageTypes LT);
void printVisibilityType(GlobalValue::VisibilityTypes VisTypes);
@@ -153,1857 +159,1856 @@ namespace {
void printModuleBody();
};
+} // end anonymous namespace.
+
+formatted_raw_ostream &CppWriter::nl(formatted_raw_ostream &Out, int delta) {
+ Out << '\n';
+ if (delta >= 0 || indent_level >= unsigned(-delta))
+ indent_level += delta;
+ Out.indent(indent_level);
+ return Out;
+}
+
+static inline void sanitize(std::string &str) {
+ for (size_t i = 0; i < str.length(); ++i)
+ if (!isalnum(str[i]) && str[i] != '_')
+ str[i] = '_';
+}
- static unsigned indent_level = 0;
- inline formatted_raw_ostream& nl(formatted_raw_ostream& Out, int delta = 0) {
- Out << "\n";
- if (delta >= 0 || indent_level >= unsigned(-delta))
- indent_level += delta;
- for (unsigned i = 0; i < indent_level; ++i)
- Out << " ";
- return Out;
+static std::string getTypePrefix(const Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: return "void_";
+ case Type::IntegerTyID:
+ return "int" + utostr(cast<IntegerType>(Ty)->getBitWidth()) + "_";
+ case Type::FloatTyID: return "float_";
+ case Type::DoubleTyID: return "double_";
+ case Type::LabelTyID: return "label_";
+ case Type::FunctionTyID: return "func_";
+ case Type::StructTyID: return "struct_";
+ case Type::ArrayTyID: return "array_";
+ case Type::PointerTyID: return "ptr_";
+ case Type::VectorTyID: return "packed_";
+ case Type::OpaqueTyID: return "opaque_";
+ default: return "other_";
}
+ return "unknown_";
+}
- inline void in() { indent_level++; }
- inline void out() { if (indent_level >0) indent_level--; }
+// Looks up the type in the symbol table and returns a pointer to its name or
+// a null pointer if it wasn't found. Note that this isn't the same as the
+// Module::getTypeName function which will return an empty string, not a null
+// pointer if the name is not found.
+static const std::string *
+findTypeName(const TypeSymbolTable& ST, const Type* Ty) {
+ TypeSymbolTable::const_iterator TI = ST.begin();
+ TypeSymbolTable::const_iterator TE = ST.end();
+ for (;TI != TE; ++TI)
+ if (TI->second == Ty)
+ return &(TI->first);
+ return 0;
+}
- inline void
- sanitize(std::string& str) {
- for (size_t i = 0; i < str.length(); ++i)
- if (!isalnum(str[i]) && str[i] != '_')
- str[i] = '_';
- }
+void CppWriter::error(const std::string& msg) {
+ report_fatal_error(msg);
+}
- inline std::string
- getTypePrefix(const Type* Ty ) {
- switch (Ty->getTypeID()) {
- case Type::VoidTyID: return "void_";
- case Type::IntegerTyID:
- return std::string("int") + utostr(cast<IntegerType>(Ty)->getBitWidth()) +
- "_";
- case Type::FloatTyID: return "float_";
- case Type::DoubleTyID: return "double_";
- case Type::LabelTyID: return "label_";
- case Type::FunctionTyID: return "func_";
- case Type::StructTyID: return "struct_";
- case Type::ArrayTyID: return "array_";
- case Type::PointerTyID: return "ptr_";
- case Type::VectorTyID: return "packed_";
- case Type::OpaqueTyID: return "opaque_";
- default: return "other_";
- }
- return "unknown_";
- }
-
- // Looks up the type in the symbol table and returns a pointer to its name or
- // a null pointer if it wasn't found. Note that this isn't the same as the
- // Mode::getTypeName function which will return an empty string, not a null
- // pointer if the name is not found.
- inline const std::string*
- findTypeName(const TypeSymbolTable& ST, const Type* Ty) {
- TypeSymbolTable::const_iterator TI = ST.begin();
- TypeSymbolTable::const_iterator TE = ST.end();
- for (;TI != TE; ++TI)
- if (TI->second == Ty)
- return &(TI->first);
- return 0;
- }
-
- void CppWriter::error(const std::string& msg) {
- report_fatal_error(msg);
- }
-
- // printCFP - Print a floating point constant .. very carefully :)
- // This makes sure that conversion to/from floating yields the same binary
- // result so that we don't lose precision.
- void CppWriter::printCFP(const ConstantFP *CFP) {
- bool ignored;
- APFloat APF = APFloat(CFP->getValueAPF()); // copy
- if (CFP->getType() == Type::getFloatTy(CFP->getContext()))
- APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &ignored);
- Out << "ConstantFP::get(mod->getContext(), ";
- Out << "APFloat(";
+// printCFP - Print a floating point constant .. very carefully :)
+// This makes sure that conversion to/from floating yields the same binary
+// result so that we don't lose precision.
+void CppWriter::printCFP(const ConstantFP *CFP) {
+ bool ignored;
+ APFloat APF = APFloat(CFP->getValueAPF()); // copy
+ if (CFP->getType() == Type::getFloatTy(CFP->getContext()))
+ APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &ignored);
+ Out << "ConstantFP::get(mod->getContext(), ";
+ Out << "APFloat(";
#if HAVE_PRINTF_A
- char Buffer[100];
- sprintf(Buffer, "%A", APF.convertToDouble());
- if ((!strncmp(Buffer, "0x", 2) ||
- !strncmp(Buffer, "-0x", 3) ||
- !strncmp(Buffer, "+0x", 3)) &&
- APF.bitwiseIsEqual(APFloat(atof(Buffer)))) {
- if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
- Out << "BitsToDouble(" << Buffer << ")";
- else
- Out << "BitsToFloat((float)" << Buffer << ")";
- Out << ")";
- } else {
+ char Buffer[100];
+ sprintf(Buffer, "%A", APF.convertToDouble());
+ if ((!strncmp(Buffer, "0x", 2) ||
+ !strncmp(Buffer, "-0x", 3) ||
+ !strncmp(Buffer, "+0x", 3)) &&
+ APF.bitwiseIsEqual(APFloat(atof(Buffer)))) {
+ if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
+ Out << "BitsToDouble(" << Buffer << ")";
+ else
+ Out << "BitsToFloat((float)" << Buffer << ")";
+ Out << ")";
+ } else {
#endif
- std::string StrVal = ftostr(CFP->getValueAPF());
-
- while (StrVal[0] == ' ')
- StrVal.erase(StrVal.begin());
-
- // Check to make sure that the stringized number is not some string like
- // "Inf" or NaN. Check that the string matches the "[-+]?[0-9]" regex.
- if (((StrVal[0] >= '0' && StrVal[0] <= '9') ||
- ((StrVal[0] == '-' || StrVal[0] == '+') &&
- (StrVal[1] >= '0' && StrVal[1] <= '9'))) &&
- (CFP->isExactlyValue(atof(StrVal.c_str())))) {
- if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
- Out << StrVal;
- else
- Out << StrVal << "f";
- } else if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
- Out << "BitsToDouble(0x"
- << utohexstr(CFP->getValueAPF().bitcastToAPInt().getZExtValue())
- << "ULL) /* " << StrVal << " */";
+ std::string StrVal = ftostr(CFP->getValueAPF());
+
+ while (StrVal[0] == ' ')
+ StrVal.erase(StrVal.begin());
+
+ // Check to make sure that the stringized number is not some string like
+ // "Inf" or NaN. Check that the string matches the "[-+]?[0-9]" regex.
+ if (((StrVal[0] >= '0' && StrVal[0] <= '9') ||
+ ((StrVal[0] == '-' || StrVal[0] == '+') &&
+ (StrVal[1] >= '0' && StrVal[1] <= '9'))) &&
+ (CFP->isExactlyValue(atof(StrVal.c_str())))) {
+ if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
+ Out << StrVal;
else
- Out << "BitsToFloat(0x"
- << utohexstr((uint32_t)CFP->getValueAPF().
- bitcastToAPInt().getZExtValue())
- << "U) /* " << StrVal << " */";
- Out << ")";
+ Out << StrVal << "f";
+ } else if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
+ Out << "BitsToDouble(0x"
+ << utohexstr(CFP->getValueAPF().bitcastToAPInt().getZExtValue())
+ << "ULL) /* " << StrVal << " */";
+ else
+ Out << "BitsToFloat(0x"
+ << utohexstr((uint32_t)CFP->getValueAPF().
+ bitcastToAPInt().getZExtValue())
+ << "U) /* " << StrVal << " */";
+ Out << ")";
#if HAVE_PRINTF_A
- }
+ }
#endif
- Out << ")";
+ Out << ")";
+}
+
+void CppWriter::printCallingConv(CallingConv::ID cc){
+ // Print the calling convention.
+ switch (cc) {
+ case CallingConv::C: Out << "CallingConv::C"; break;
+ case CallingConv::Fast: Out << "CallingConv::Fast"; break;
+ case CallingConv::Cold: Out << "CallingConv::Cold"; break;
+ case CallingConv::FirstTargetCC: Out << "CallingConv::FirstTargetCC"; break;
+ default: Out << cc; break;
}
+}
- void CppWriter::printCallingConv(CallingConv::ID cc){
- // Print the calling convention.
- switch (cc) {
- case CallingConv::C: Out << "CallingConv::C"; break;
- case CallingConv::Fast: Out << "CallingConv::Fast"; break;
- case CallingConv::Cold: Out << "CallingConv::Cold"; break;
- case CallingConv::FirstTargetCC: Out << "CallingConv::FirstTargetCC"; break;
- default: Out << cc; break;
- }
+void CppWriter::printLinkageType(GlobalValue::LinkageTypes LT) {
+ switch (LT) {
+ case GlobalValue::InternalLinkage:
+ Out << "GlobalValue::InternalLinkage"; break;
+ case GlobalValue::PrivateLinkage:
+ Out << "GlobalValue::PrivateLinkage"; break;
+ case GlobalValue::LinkerPrivateLinkage:
+ Out << "GlobalValue::LinkerPrivateLinkage"; break;
+ case GlobalValue::LinkerPrivateWeakLinkage:
+ Out << "GlobalValue::LinkerPrivateWeakLinkage"; break;
+ case GlobalValue::AvailableExternallyLinkage:
+ Out << "GlobalValue::AvailableExternallyLinkage "; break;
+ case GlobalValue::LinkOnceAnyLinkage:
+ Out << "GlobalValue::LinkOnceAnyLinkage "; break;
+ case GlobalValue::LinkOnceODRLinkage:
+ Out << "GlobalValue::LinkOnceODRLinkage "; break;
+ case GlobalValue::WeakAnyLinkage:
+ Out << "GlobalValue::WeakAnyLinkage"; break;
+ case GlobalValue::WeakODRLinkage:
+ Out << "GlobalValue::WeakODRLinkage"; break;
+ case GlobalValue::AppendingLinkage:
+ Out << "GlobalValue::AppendingLinkage"; break;
+ case GlobalValue::ExternalLinkage:
+ Out << "GlobalValue::ExternalLinkage"; break;
+ case GlobalValue::DLLImportLinkage:
+ Out << "GlobalValue::DLLImportLinkage"; break;
+ case GlobalValue::DLLExportLinkage:
+ Out << "GlobalValue::DLLExportLinkage"; break;
+ case GlobalValue::ExternalWeakLinkage:
+ Out << "GlobalValue::ExternalWeakLinkage"; break;
+ case GlobalValue::CommonLinkage:
+ Out << "GlobalValue::CommonLinkage"; break;
}
+}
- void CppWriter::printLinkageType(GlobalValue::LinkageTypes LT) {
- switch (LT) {
- case GlobalValue::InternalLinkage:
- Out << "GlobalValue::InternalLinkage"; break;
- case GlobalValue::PrivateLinkage:
- Out << "GlobalValue::PrivateLinkage"; break;
- case GlobalValue::LinkerPrivateLinkage:
- Out << "GlobalValue::LinkerPrivateLinkage"; break;
- case GlobalValue::AvailableExternallyLinkage:
- Out << "GlobalValue::AvailableExternallyLinkage "; break;
- case GlobalValue::LinkOnceAnyLinkage:
- Out << "GlobalValue::LinkOnceAnyLinkage "; break;
- case GlobalValue::LinkOnceODRLinkage:
- Out << "GlobalValue::LinkOnceODRLinkage "; break;
- case GlobalValue::WeakAnyLinkage:
- Out << "GlobalValue::WeakAnyLinkage"; break;
- case GlobalValue::WeakODRLinkage:
- Out << "GlobalValue::WeakODRLinkage"; break;
- case GlobalValue::AppendingLinkage:
- Out << "GlobalValue::AppendingLinkage"; break;
- case GlobalValue::ExternalLinkage:
- Out << "GlobalValue::ExternalLinkage"; break;
- case GlobalValue::DLLImportLinkage:
- Out << "GlobalValue::DLLImportLinkage"; break;
- case GlobalValue::DLLExportLinkage:
- Out << "GlobalValue::DLLExportLinkage"; break;
- case GlobalValue::ExternalWeakLinkage:
- Out << "GlobalValue::ExternalWeakLinkage"; break;
- case GlobalValue::CommonLinkage:
- Out << "GlobalValue::CommonLinkage"; break;
- }
+void CppWriter::printVisibilityType(GlobalValue::VisibilityTypes VisType) {
+ switch (VisType) {
+ default: llvm_unreachable("Unknown GVar visibility");
+ case GlobalValue::DefaultVisibility:
+ Out << "GlobalValue::DefaultVisibility";
+ break;
+ case GlobalValue::HiddenVisibility:
+ Out << "GlobalValue::HiddenVisibility";
+ break;
+ case GlobalValue::ProtectedVisibility:
+ Out << "GlobalValue::ProtectedVisibility";
+ break;
}
+}
- void CppWriter::printVisibilityType(GlobalValue::VisibilityTypes VisType) {
- switch (VisType) {
- default: llvm_unreachable("Unknown GVar visibility");
- case GlobalValue::DefaultVisibility:
- Out << "GlobalValue::DefaultVisibility";
- break;
- case GlobalValue::HiddenVisibility:
- Out << "GlobalValue::HiddenVisibility";
- break;
- case GlobalValue::ProtectedVisibility:
- Out << "GlobalValue::ProtectedVisibility";
- break;
+// printEscapedString - Print each character of the specified string, escaping
+// it if it is not printable or if it is an escape char.
+void CppWriter::printEscapedString(const std::string &Str) {
+ for (unsigned i = 0, e = Str.size(); i != e; ++i) {
+ unsigned char C = Str[i];
+ if (isprint(C) && C != '"' && C != '\\') {
+ Out << C;
+ } else {
+ Out << "\\x"
+ << (char) ((C/16 < 10) ? ( C/16 +'0') : ( C/16 -10+'A'))
+ << (char)(((C&15) < 10) ? ((C&15)+'0') : ((C&15)-10+'A'));
}
}
+}
- // printEscapedString - Print each character of the specified string, escaping
- // it if it is not printable or if it is an escape char.
- void CppWriter::printEscapedString(const std::string &Str) {
- for (unsigned i = 0, e = Str.size(); i != e; ++i) {
- unsigned char C = Str[i];
- if (isprint(C) && C != '"' && C != '\\') {
- Out << C;
- } else {
- Out << "\\x"
- << (char) ((C/16 < 10) ? ( C/16 +'0') : ( C/16 -10+'A'))
- << (char)(((C&15) < 10) ? ((C&15)+'0') : ((C&15)-10+'A'));
- }
+std::string CppWriter::getCppName(const Type* Ty) {
+ // First, handle the primitive types .. easy
+ if (Ty->isPrimitiveType() || Ty->isIntegerTy()) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: return "Type::getVoidTy(mod->getContext())";
+ case Type::IntegerTyID: {
+ unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
+ return "IntegerType::get(mod->getContext(), " + utostr(BitWidth) + ")";
+ }
+ case Type::X86_FP80TyID: return "Type::getX86_FP80Ty(mod->getContext())";
+ case Type::FloatTyID: return "Type::getFloatTy(mod->getContext())";
+ case Type::DoubleTyID: return "Type::getDoubleTy(mod->getContext())";
+ case Type::LabelTyID: return "Type::getLabelTy(mod->getContext())";
+ default:
+ error("Invalid primitive type");
+ break;
}
+ // shouldn't be returned, but make it sensible
+ return "Type::getVoidTy(mod->getContext())";
}
- std::string CppWriter::getCppName(const Type* Ty) {
- // First, handle the primitive types .. easy
- if (Ty->isPrimitiveType() || Ty->isIntegerTy()) {
- switch (Ty->getTypeID()) {
- case Type::VoidTyID: return "Type::getVoidTy(mod->getContext())";
- case Type::IntegerTyID: {
- unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
- return "IntegerType::get(mod->getContext(), " + utostr(BitWidth) + ")";
- }
- case Type::X86_FP80TyID: return "Type::getX86_FP80Ty(mod->getContext())";
- case Type::FloatTyID: return "Type::getFloatTy(mod->getContext())";
- case Type::DoubleTyID: return "Type::getDoubleTy(mod->getContext())";
- case Type::LabelTyID: return "Type::getLabelTy(mod->getContext())";
- default:
- error("Invalid primitive type");
- break;
- }
- // shouldn't be returned, but make it sensible
- return "Type::getVoidTy(mod->getContext())";
- }
+ // Now, see if we've seen the type before and return that
+ TypeMap::iterator I = TypeNames.find(Ty);
+ if (I != TypeNames.end())
+ return I->second;
+
+ // Okay, let's build a new name for this type. Start with a prefix
+ const char* prefix = 0;
+ switch (Ty->getTypeID()) {
+ case Type::FunctionTyID: prefix = "FuncTy_"; break;
+ case Type::StructTyID: prefix = "StructTy_"; break;
+ case Type::ArrayTyID: prefix = "ArrayTy_"; break;
+ case Type::PointerTyID: prefix = "PointerTy_"; break;
+ case Type::OpaqueTyID: prefix = "OpaqueTy_"; break;
+ case Type::VectorTyID: prefix = "VectorTy_"; break;
+ default: prefix = "OtherTy_"; break; // prevent breakage
+ }
- // Now, see if we've seen the type before and return that
- TypeMap::iterator I = TypeNames.find(Ty);
- if (I != TypeNames.end())
- return I->second;
+ // See if the type has a name in the symboltable and build accordingly
+ const std::string* tName = findTypeName(TheModule->getTypeSymbolTable(), Ty);
+ std::string name;
+ if (tName)
+ name = std::string(prefix) + *tName;
+ else
+ name = std::string(prefix) + utostr(uniqueNum++);
+ sanitize(name);
+
+ // Save the name
+ return TypeNames[Ty] = name;
+}
- // Okay, let's build a new name for this type. Start with a prefix
- const char* prefix = 0;
- switch (Ty->getTypeID()) {
- case Type::FunctionTyID: prefix = "FuncTy_"; break;
- case Type::StructTyID: prefix = "StructTy_"; break;
- case Type::ArrayTyID: prefix = "ArrayTy_"; break;
- case Type::PointerTyID: prefix = "PointerTy_"; break;
- case Type::OpaqueTyID: prefix = "OpaqueTy_"; break;
- case Type::VectorTyID: prefix = "VectorTy_"; break;
- default: prefix = "OtherTy_"; break; // prevent breakage
- }
+void CppWriter::printCppName(const Type* Ty) {
+ printEscapedString(getCppName(Ty));
+}
- // See if the type has a name in the symboltable and build accordingly
- const std::string* tName = findTypeName(TheModule->getTypeSymbolTable(), Ty);
- std::string name;
- if (tName)
- name = std::string(prefix) + *tName;
- else
- name = std::string(prefix) + utostr(uniqueNum++);
- sanitize(name);
-
- // Save the name
- return TypeNames[Ty] = name;
- }
-
- void CppWriter::printCppName(const Type* Ty) {
- printEscapedString(getCppName(Ty));
- }
-
- std::string CppWriter::getCppName(const Value* val) {
- std::string name;
- ValueMap::iterator I = ValueNames.find(val);
- if (I != ValueNames.end() && I->first == val)
- return I->second;
-
- if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(val)) {
- name = std::string("gvar_") +
- getTypePrefix(GV->getType()->getElementType());
- } else if (isa<Function>(val)) {
- name = std::string("func_");
- } else if (const Constant* C = dyn_cast<Constant>(val)) {
- name = std::string("const_") + getTypePrefix(C->getType());
- } else if (const Argument* Arg = dyn_cast<Argument>(val)) {
- if (is_inline) {
- unsigned argNum = std::distance(Arg->getParent()->arg_begin(),
- Function::const_arg_iterator(Arg)) + 1;
- name = std::string("arg_") + utostr(argNum);
- NameSet::iterator NI = UsedNames.find(name);
- if (NI != UsedNames.end())
- name += std::string("_") + utostr(uniqueNum++);
- UsedNames.insert(name);
- return ValueNames[val] = name;
- } else {
- name = getTypePrefix(val->getType());
- }
+std::string CppWriter::getCppName(const Value* val) {
+ std::string name;
+ ValueMap::iterator I = ValueNames.find(val);
+ if (I != ValueNames.end() && I->first == val)
+ return I->second;
+
+ if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(val)) {
+ name = std::string("gvar_") +
+ getTypePrefix(GV->getType()->getElementType());
+ } else if (isa<Function>(val)) {
+ name = std::string("func_");
+ } else if (const Constant* C = dyn_cast<Constant>(val)) {
+ name = std::string("const_") + getTypePrefix(C->getType());
+ } else if (const Argument* Arg = dyn_cast<Argument>(val)) {
+ if (is_inline) {
+ unsigned argNum = std::distance(Arg->getParent()->arg_begin(),
+ Function::const_arg_iterator(Arg)) + 1;
+ name = std::string("arg_") + utostr(argNum);
+ NameSet::iterator NI = UsedNames.find(name);
+ if (NI != UsedNames.end())
+ name += std::string("_") + utostr(uniqueNum++);
+ UsedNames.insert(name);
+ return ValueNames[val] = name;
} else {
name = getTypePrefix(val->getType());
}
- if (val->hasName())
- name += val->getName();
- else
- name += utostr(uniqueNum++);
- sanitize(name);
- NameSet::iterator NI = UsedNames.find(name);
- if (NI != UsedNames.end())
- name += std::string("_") + utostr(uniqueNum++);
- UsedNames.insert(name);
- return ValueNames[val] = name;
+ } else {
+ name = getTypePrefix(val->getType());
}
+ if (val->hasName())
+ name += val->getName();
+ else
+ name += utostr(uniqueNum++);
+ sanitize(name);
+ NameSet::iterator NI = UsedNames.find(name);
+ if (NI != UsedNames.end())
+ name += std::string("_") + utostr(uniqueNum++);
+ UsedNames.insert(name);
+ return ValueNames[val] = name;
+}
- void CppWriter::printCppName(const Value* val) {
- printEscapedString(getCppName(val));
- }
+void CppWriter::printCppName(const Value* val) {
+ printEscapedString(getCppName(val));
+}
- void CppWriter::printAttributes(const AttrListPtr &PAL,
- const std::string &name) {
- Out << "AttrListPtr " << name << "_PAL;";
- nl(Out);
- if (!PAL.isEmpty()) {
- Out << '{'; in(); nl(Out);
- Out << "SmallVector<AttributeWithIndex, 4> Attrs;"; nl(Out);
- Out << "AttributeWithIndex PAWI;"; nl(Out);
- for (unsigned i = 0; i < PAL.getNumSlots(); ++i) {
- unsigned index = PAL.getSlot(i).Index;
- Attributes attrs = PAL.getSlot(i).Attrs;
- Out << "PAWI.Index = " << index << "U; PAWI.Attrs = 0 ";
+void CppWriter::printAttributes(const AttrListPtr &PAL,
+ const std::string &name) {
+ Out << "AttrListPtr " << name << "_PAL;";
+ nl(Out);
+ if (!PAL.isEmpty()) {
+ Out << '{'; in(); nl(Out);
+ Out << "SmallVector<AttributeWithIndex, 4> Attrs;"; nl(Out);
+ Out << "AttributeWithIndex PAWI;"; nl(Out);
+ for (unsigned i = 0; i < PAL.getNumSlots(); ++i) {
+ unsigned index = PAL.getSlot(i).Index;
+ Attributes attrs = PAL.getSlot(i).Attrs;
+ Out << "PAWI.Index = " << index << "U; PAWI.Attrs = 0 ";
#define HANDLE_ATTR(X) \
- if (attrs & Attribute::X) \
- Out << " | Attribute::" #X; \
- attrs &= ~Attribute::X;
-
- HANDLE_ATTR(SExt);
- HANDLE_ATTR(ZExt);
- HANDLE_ATTR(NoReturn);
- HANDLE_ATTR(InReg);
- HANDLE_ATTR(StructRet);
- HANDLE_ATTR(NoUnwind);
- HANDLE_ATTR(NoAlias);
- HANDLE_ATTR(ByVal);
- HANDLE_ATTR(Nest);
- HANDLE_ATTR(ReadNone);
- HANDLE_ATTR(ReadOnly);
- HANDLE_ATTR(InlineHint);
- HANDLE_ATTR(NoInline);
- HANDLE_ATTR(AlwaysInline);
- HANDLE_ATTR(OptimizeForSize);
- HANDLE_ATTR(StackProtect);
- HANDLE_ATTR(StackProtectReq);
- HANDLE_ATTR(NoCapture);
+ if (attrs & Attribute::X) \
+ Out << " | Attribute::" #X; \
+ attrs &= ~Attribute::X;
+
+ HANDLE_ATTR(SExt);
+ HANDLE_ATTR(ZExt);
+ HANDLE_ATTR(NoReturn);
+ HANDLE_ATTR(InReg);
+ HANDLE_ATTR(StructRet);
+ HANDLE_ATTR(NoUnwind);
+ HANDLE_ATTR(NoAlias);
+ HANDLE_ATTR(ByVal);
+ HANDLE_ATTR(Nest);
+ HANDLE_ATTR(ReadNone);
+ HANDLE_ATTR(ReadOnly);
+ HANDLE_ATTR(InlineHint);
+ HANDLE_ATTR(NoInline);
+ HANDLE_ATTR(AlwaysInline);
+ HANDLE_ATTR(OptimizeForSize);
+ HANDLE_ATTR(StackProtect);
+ HANDLE_ATTR(StackProtectReq);
+ HANDLE_ATTR(NoCapture);
#undef HANDLE_ATTR
- assert(attrs == 0 && "Unhandled attribute!");
- Out << ";";
- nl(Out);
- Out << "Attrs.push_back(PAWI);";
- nl(Out);
- }
- Out << name << "_PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());";
+ assert(attrs == 0 && "Unhandled attribute!");
+ Out << ";";
+ nl(Out);
+ Out << "Attrs.push_back(PAWI);";
nl(Out);
- out(); nl(Out);
- Out << '}'; nl(Out);
}
+ Out << name << "_PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());";
+ nl(Out);
+ out(); nl(Out);
+ Out << '}'; nl(Out);
}
+}
- bool CppWriter::printTypeInternal(const Type* Ty) {
- // We don't print definitions for primitive types
- if (Ty->isPrimitiveType() || Ty->isIntegerTy())
- return false;
-
- // If we already defined this type, we don't need to define it again.
- if (DefinedTypes.find(Ty) != DefinedTypes.end())
- return false;
-
- // Everything below needs the name for the type so get it now.
- std::string typeName(getCppName(Ty));
-
- // Search the type stack for recursion. If we find it, then generate this
- // as an OpaqueType, but make sure not to do this multiple times because
- // the type could appear in multiple places on the stack. Once the opaque
- // definition is issued, it must not be re-issued. Consequently we have to
- // check the UnresolvedTypes list as well.
- TypeList::const_iterator TI = std::find(TypeStack.begin(), TypeStack.end(),
- Ty);
- if (TI != TypeStack.end()) {
- TypeMap::const_iterator I = UnresolvedTypes.find(Ty);
- if (I == UnresolvedTypes.end()) {
- Out << "PATypeHolder " << typeName;
- Out << "_fwd = OpaqueType::get(mod->getContext());";
- nl(Out);
- UnresolvedTypes[Ty] = typeName;
- }
- return true;
- }
+bool CppWriter::printTypeInternal(const Type* Ty) {
+ // We don't print definitions for primitive types
+ if (Ty->isPrimitiveType() || Ty->isIntegerTy())
+ return false;
- // We're going to print a derived type which, by definition, contains other
- // types. So, push this one we're printing onto the type stack to assist with
- // recursive definitions.
- TypeStack.push_back(Ty);
+ // If we already defined this type, we don't need to define it again.
+ if (DefinedTypes.find(Ty) != DefinedTypes.end())
+ return false;
- // Print the type definition
- switch (Ty->getTypeID()) {
- case Type::FunctionTyID: {
- const FunctionType* FT = cast<FunctionType>(Ty);
- Out << "std::vector<const Type*>" << typeName << "_args;";
+ // Everything below needs the name for the type so get it now.
+ std::string typeName(getCppName(Ty));
+
+ // Search the type stack for recursion. If we find it, then generate this
+ // as an OpaqueType, but make sure not to do this multiple times because
+ // the type could appear in multiple places on the stack. Once the opaque
+ // definition is issued, it must not be re-issued. Consequently we have to
+ // check the UnresolvedTypes list as well.
+ TypeList::const_iterator TI = std::find(TypeStack.begin(), TypeStack.end(),
+ Ty);
+ if (TI != TypeStack.end()) {
+ TypeMap::const_iterator I = UnresolvedTypes.find(Ty);
+ if (I == UnresolvedTypes.end()) {
+ Out << "PATypeHolder " << typeName;
+ Out << "_fwd = OpaqueType::get(mod->getContext());";
nl(Out);
- FunctionType::param_iterator PI = FT->param_begin();
- FunctionType::param_iterator PE = FT->param_end();
- for (; PI != PE; ++PI) {
- const Type* argTy = static_cast<const Type*>(*PI);
- bool isForward = printTypeInternal(argTy);
- std::string argName(getCppName(argTy));
- Out << typeName << "_args.push_back(" << argName;
- if (isForward)
- Out << "_fwd";
- Out << ");";
- nl(Out);
- }
- bool isForward = printTypeInternal(FT->getReturnType());
- std::string retTypeName(getCppName(FT->getReturnType()));
- Out << "FunctionType* " << typeName << " = FunctionType::get(";
- in(); nl(Out) << "/*Result=*/" << retTypeName;
+ UnresolvedTypes[Ty] = typeName;
+ }
+ return true;
+ }
+
+ // We're going to print a derived type which, by definition, contains other
+ // types. So, push this one we're printing onto the type stack to assist with
+ // recursive definitions.
+ TypeStack.push_back(Ty);
+
+ // Print the type definition
+ switch (Ty->getTypeID()) {
+ case Type::FunctionTyID: {
+ const FunctionType* FT = cast<FunctionType>(Ty);
+ Out << "std::vector<const Type*>" << typeName << "_args;";
+ nl(Out);
+ FunctionType::param_iterator PI = FT->param_begin();
+ FunctionType::param_iterator PE = FT->param_end();
+ for (; PI != PE; ++PI) {
+ const Type* argTy = static_cast<const Type*>(*PI);
+ bool isForward = printTypeInternal(argTy);
+ std::string argName(getCppName(argTy));
+ Out << typeName << "_args.push_back(" << argName;
if (isForward)
Out << "_fwd";
- Out << ",";
- nl(Out) << "/*Params=*/" << typeName << "_args,";
- nl(Out) << "/*isVarArg=*/" << (FT->isVarArg() ? "true" : "false") << ");";
- out();
- nl(Out);
- break;
- }
- case Type::StructTyID: {
- const StructType* ST = cast<StructType>(Ty);
- Out << "std::vector<const Type*>" << typeName << "_fields;";
- nl(Out);
- StructType::element_iterator EI = ST->element_begin();
- StructType::element_iterator EE = ST->element_end();
- for (; EI != EE; ++EI) {
- const Type* fieldTy = static_cast<const Type*>(*EI);
- bool isForward = printTypeInternal(fieldTy);
- std::string fieldName(getCppName(fieldTy));
- Out << typeName << "_fields.push_back(" << fieldName;
- if (isForward)
- Out << "_fwd";
- Out << ");";
- nl(Out);
- }
- Out << "StructType* " << typeName << " = StructType::get("
- << "mod->getContext(), "
- << typeName << "_fields, /*isPacked=*/"
- << (ST->isPacked() ? "true" : "false") << ");";
- nl(Out);
- break;
- }
- case Type::ArrayTyID: {
- const ArrayType* AT = cast<ArrayType>(Ty);
- const Type* ET = AT->getElementType();
- bool isForward = printTypeInternal(ET);
- std::string elemName(getCppName(ET));
- Out << "ArrayType* " << typeName << " = ArrayType::get("
- << elemName << (isForward ? "_fwd" : "")
- << ", " << utostr(AT->getNumElements()) << ");";
- nl(Out);
- break;
- }
- case Type::PointerTyID: {
- const PointerType* PT = cast<PointerType>(Ty);
- const Type* ET = PT->getElementType();
- bool isForward = printTypeInternal(ET);
- std::string elemName(getCppName(ET));
- Out << "PointerType* " << typeName << " = PointerType::get("
- << elemName << (isForward ? "_fwd" : "")
- << ", " << utostr(PT->getAddressSpace()) << ");";
- nl(Out);
- break;
- }
- case Type::VectorTyID: {
- const VectorType* PT = cast<VectorType>(Ty);
- const Type* ET = PT->getElementType();
- bool isForward = printTypeInternal(ET);
- std::string elemName(getCppName(ET));
- Out << "VectorType* " << typeName << " = VectorType::get("
- << elemName << (isForward ? "_fwd" : "")
- << ", " << utostr(PT->getNumElements()) << ");";
- nl(Out);
- break;
- }
- case Type::OpaqueTyID: {
- Out << "OpaqueType* " << typeName;
- Out << " = OpaqueType::get(mod->getContext());";
+ Out << ");";
nl(Out);
- break;
- }
- default:
- error("Invalid TypeID");
}
-
- // If the type had a name, make sure we recreate it.
- const std::string* progTypeName =
- findTypeName(TheModule->getTypeSymbolTable(),Ty);
- if (progTypeName) {
- Out << "mod->addTypeName(\"" << *progTypeName << "\", "
- << typeName << ");";
+ bool isForward = printTypeInternal(FT->getReturnType());
+ std::string retTypeName(getCppName(FT->getReturnType()));
+ Out << "FunctionType* " << typeName << " = FunctionType::get(";
+ in(); nl(Out) << "/*Result=*/" << retTypeName;
+ if (isForward)
+ Out << "_fwd";
+ Out << ",";
+ nl(Out) << "/*Params=*/" << typeName << "_args,";
+ nl(Out) << "/*isVarArg=*/" << (FT->isVarArg() ? "true" : "false") << ");";
+ out();
+ nl(Out);
+ break;
+ }
+ case Type::StructTyID: {
+ const StructType* ST = cast<StructType>(Ty);
+ Out << "std::vector<const Type*>" << typeName << "_fields;";
+ nl(Out);
+ StructType::element_iterator EI = ST->element_begin();
+ StructType::element_iterator EE = ST->element_end();
+ for (; EI != EE; ++EI) {
+ const Type* fieldTy = static_cast<const Type*>(*EI);
+ bool isForward = printTypeInternal(fieldTy);
+ std::string fieldName(getCppName(fieldTy));
+ Out << typeName << "_fields.push_back(" << fieldName;
+ if (isForward)
+ Out << "_fwd";
+ Out << ");";
nl(Out);
}
+ Out << "StructType* " << typeName << " = StructType::get("
+ << "mod->getContext(), "
+ << typeName << "_fields, /*isPacked=*/"
+ << (ST->isPacked() ? "true" : "false") << ");";
+ nl(Out);
+ break;
+ }
+ case Type::ArrayTyID: {
+ const ArrayType* AT = cast<ArrayType>(Ty);
+ const Type* ET = AT->getElementType();
+ bool isForward = printTypeInternal(ET);
+ std::string elemName(getCppName(ET));
+ Out << "ArrayType* " << typeName << " = ArrayType::get("
+ << elemName << (isForward ? "_fwd" : "")
+ << ", " << utostr(AT->getNumElements()) << ");";
+ nl(Out);
+ break;
+ }
+ case Type::PointerTyID: {
+ const PointerType* PT = cast<PointerType>(Ty);
+ const Type* ET = PT->getElementType();
+ bool isForward = printTypeInternal(ET);
+ std::string elemName(getCppName(ET));
+ Out << "PointerType* " << typeName << " = PointerType::get("
+ << elemName << (isForward ? "_fwd" : "")
+ << ", " << utostr(PT->getAddressSpace()) << ");";
+ nl(Out);
+ break;
+ }
+ case Type::VectorTyID: {
+ const VectorType* PT = cast<VectorType>(Ty);
+ const Type* ET = PT->getElementType();
+ bool isForward = printTypeInternal(ET);
+ std::string elemName(getCppName(ET));
+ Out << "VectorType* " << typeName << " = VectorType::get("
+ << elemName << (isForward ? "_fwd" : "")
+ << ", " << utostr(PT->getNumElements()) << ");";
+ nl(Out);
+ break;
+ }
+ case Type::OpaqueTyID: {
+ Out << "OpaqueType* " << typeName;
+ Out << " = OpaqueType::get(mod->getContext());";
+ nl(Out);
+ break;
+ }
+ default:
+ error("Invalid TypeID");
+ }
- // Pop us off the type stack
- TypeStack.pop_back();
+ // If the type had a name, make sure we recreate it.
+ const std::string* progTypeName =
+ findTypeName(TheModule->getTypeSymbolTable(),Ty);
+ if (progTypeName) {
+ Out << "mod->addTypeName(\"" << *progTypeName << "\", "
+ << typeName << ");";
+ nl(Out);
+ }
- // Indicate that this type is now defined.
- DefinedTypes.insert(Ty);
+ // Pop us off the type stack
+ TypeStack.pop_back();
- // Early resolve as many unresolved types as possible. Search the unresolved
- // types map for the type we just printed. Now that its definition is complete
- // we can resolve any previous references to it. This prevents a cascade of
- // unresolved types.
- TypeMap::iterator I = UnresolvedTypes.find(Ty);
- if (I != UnresolvedTypes.end()) {
- Out << "cast<OpaqueType>(" << I->second
- << "_fwd.get())->refineAbstractTypeTo(" << I->second << ");";
- nl(Out);
- Out << I->second << " = cast<";
- switch (Ty->getTypeID()) {
- case Type::FunctionTyID: Out << "FunctionType"; break;
- case Type::ArrayTyID: Out << "ArrayType"; break;
- case Type::StructTyID: Out << "StructType"; break;
- case Type::VectorTyID: Out << "VectorType"; break;
- case Type::PointerTyID: Out << "PointerType"; break;
- case Type::OpaqueTyID: Out << "OpaqueType"; break;
- default: Out << "NoSuchDerivedType"; break;
- }
- Out << ">(" << I->second << "_fwd.get());";
- nl(Out); nl(Out);
- UnresolvedTypes.erase(I);
- }
+ // Indicate that this type is now defined.
+ DefinedTypes.insert(Ty);
- // Finally, separate the type definition from other with a newline.
+ // Early resolve as many unresolved types as possible. Search the unresolved
+ // types map for the type we just printed. Now that its definition is complete
+ // we can resolve any previous references to it. This prevents a cascade of
+ // unresolved types.
+ TypeMap::iterator I = UnresolvedTypes.find(Ty);
+ if (I != UnresolvedTypes.end()) {
+ Out << "cast<OpaqueType>(" << I->second
+ << "_fwd.get())->refineAbstractTypeTo(" << I->second << ");";
nl(Out);
-
- // We weren't a recursive type
- return false;
+ Out << I->second << " = cast<";
+ switch (Ty->getTypeID()) {
+ case Type::FunctionTyID: Out << "FunctionType"; break;
+ case Type::ArrayTyID: Out << "ArrayType"; break;
+ case Type::StructTyID: Out << "StructType"; break;
+ case Type::VectorTyID: Out << "VectorType"; break;
+ case Type::PointerTyID: Out << "PointerType"; break;
+ case Type::OpaqueTyID: Out << "OpaqueType"; break;
+ default: Out << "NoSuchDerivedType"; break;
+ }
+ Out << ">(" << I->second << "_fwd.get());";
+ nl(Out); nl(Out);
+ UnresolvedTypes.erase(I);
}
- // Prints a type definition. Returns true if it could not resolve all the
- // types in the definition but had to use a forward reference.
- void CppWriter::printType(const Type* Ty) {
- assert(TypeStack.empty());
- TypeStack.clear();
- printTypeInternal(Ty);
- assert(TypeStack.empty());
- }
-
- void CppWriter::printTypes(const Module* M) {
- // Walk the symbol table and print out all its types
- const TypeSymbolTable& symtab = M->getTypeSymbolTable();
- for (TypeSymbolTable::const_iterator TI = symtab.begin(), TE = symtab.end();
- TI != TE; ++TI) {
-
- // For primitive types and types already defined, just add a name
- TypeMap::const_iterator TNI = TypeNames.find(TI->second);
- if (TI->second->isIntegerTy() || TI->second->isPrimitiveType() ||
- TNI != TypeNames.end()) {
- Out << "mod->addTypeName(\"";
- printEscapedString(TI->first);
- Out << "\", " << getCppName(TI->second) << ");";
- nl(Out);
- // For everything else, define the type
- } else {
- printType(TI->second);
- }
- }
+ // Finally, separate the type definition from others with a newline.
+ nl(Out);
- // Add all of the global variables to the value table...
- for (Module::const_global_iterator I = TheModule->global_begin(),
- E = TheModule->global_end(); I != E; ++I) {
- if (I->hasInitializer())
- printType(I->getInitializer()->getType());
- printType(I->getType());
+ // We weren't a recursive type
+ return false;
+}
+
+// Prints a type definition. printTypeInternal returns true if it could not
+// resolve all the types in the definition and had to use a forward reference.
+void CppWriter::printType(const Type* Ty) {
+ assert(TypeStack.empty());
+ TypeStack.clear();
+ printTypeInternal(Ty);
+ assert(TypeStack.empty());
+}
+
+void CppWriter::printTypes(const Module* M) {
+ // Walk the symbol table and print out all its types
+ const TypeSymbolTable& symtab = M->getTypeSymbolTable();
+ for (TypeSymbolTable::const_iterator TI = symtab.begin(), TE = symtab.end();
+ TI != TE; ++TI) {
+
+ // For primitive types and types already defined, just add a name
+ TypeMap::const_iterator TNI = TypeNames.find(TI->second);
+ if (TI->second->isIntegerTy() || TI->second->isPrimitiveType() ||
+ TNI != TypeNames.end()) {
+ Out << "mod->addTypeName(\"";
+ printEscapedString(TI->first);
+ Out << "\", " << getCppName(TI->second) << ");";
+ nl(Out);
+ // For everything else, define the type
+ } else {
+ printType(TI->second);
}
+ }
- // Add all the functions to the table
- for (Module::const_iterator FI = TheModule->begin(), FE = TheModule->end();
- FI != FE; ++FI) {
- printType(FI->getReturnType());
- printType(FI->getFunctionType());
- // Add all the function arguments
- for (Function::const_arg_iterator AI = FI->arg_begin(),
- AE = FI->arg_end(); AI != AE; ++AI) {
- printType(AI->getType());
- }
+ // Add all of the global variables to the value table...
+ for (Module::const_global_iterator I = TheModule->global_begin(),
+ E = TheModule->global_end(); I != E; ++I) {
+ if (I->hasInitializer())
+ printType(I->getInitializer()->getType());
+ printType(I->getType());
+ }
- // Add all of the basic blocks and instructions
- for (Function::const_iterator BB = FI->begin(),
- E = FI->end(); BB != E; ++BB) {
- printType(BB->getType());
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;
- ++I) {
- printType(I->getType());
- for (unsigned i = 0; i < I->getNumOperands(); ++i)
- printType(I->getOperand(i)->getType());
- }
+ // Add all the functions to the table
+ for (Module::const_iterator FI = TheModule->begin(), FE = TheModule->end();
+ FI != FE; ++FI) {
+ printType(FI->getReturnType());
+ printType(FI->getFunctionType());
+ // Add all the function arguments
+ for (Function::const_arg_iterator AI = FI->arg_begin(),
+ AE = FI->arg_end(); AI != AE; ++AI) {
+ printType(AI->getType());
+ }
+
+ // Add all of the basic blocks and instructions
+ for (Function::const_iterator BB = FI->begin(),
+ E = FI->end(); BB != E; ++BB) {
+ printType(BB->getType());
+ for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;
+ ++I) {
+ printType(I->getType());
+ for (unsigned i = 0; i < I->getNumOperands(); ++i)
+ printType(I->getOperand(i)->getType());
}
}
}
+}
- // printConstant - Print out a constant pool entry...
- void CppWriter::printConstant(const Constant *CV) {
- // First, if the constant is actually a GlobalValue (variable or function)
- // or its already in the constant list then we've printed it already and we
- // can just return.
- if (isa<GlobalValue>(CV) || ValueNames.find(CV) != ValueNames.end())
- return;
+// printConstant - Print out a constant pool entry...
+void CppWriter::printConstant(const Constant *CV) {
+ // First, if the constant is actually a GlobalValue (variable or function)
+ // or it's already in the constant list, then we've printed it already and we
+ // can just return.
+ if (isa<GlobalValue>(CV) || ValueNames.find(CV) != ValueNames.end())
+ return;
- std::string constName(getCppName(CV));
- std::string typeName(getCppName(CV->getType()));
+ std::string constName(getCppName(CV));
+ std::string typeName(getCppName(CV->getType()));
- if (isa<GlobalValue>(CV)) {
- // Skip variables and functions, we emit them elsewhere
- return;
- }
+ if (isa<GlobalValue>(CV)) {
+ // Skip variables and functions, we emit them elsewhere
+ return;
+ }
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
- std::string constValue = CI->getValue().toString(10, true);
- Out << "ConstantInt* " << constName
- << " = ConstantInt::get(mod->getContext(), APInt("
- << cast<IntegerType>(CI->getType())->getBitWidth()
- << ", StringRef(\"" << constValue << "\"), 10));";
- } else if (isa<ConstantAggregateZero>(CV)) {
- Out << "ConstantAggregateZero* " << constName
- << " = ConstantAggregateZero::get(" << typeName << ");";
- } else if (isa<ConstantPointerNull>(CV)) {
- Out << "ConstantPointerNull* " << constName
- << " = ConstantPointerNull::get(" << typeName << ");";
- } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
- Out << "ConstantFP* " << constName << " = ";
- printCFP(CFP);
- Out << ";";
- } else if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
- if (CA->isString() &&
- CA->getType()->getElementType() ==
- Type::getInt8Ty(CA->getContext())) {
- Out << "Constant* " << constName <<
- " = ConstantArray::get(mod->getContext(), \"";
- std::string tmp = CA->getAsString();
- bool nullTerminate = false;
- if (tmp[tmp.length()-1] == 0) {
- tmp.erase(tmp.length()-1);
- nullTerminate = true;
- }
- printEscapedString(tmp);
- // Determine if we want null termination or not.
- if (nullTerminate)
- Out << "\", true"; // Indicate that the null terminator should be
- // added.
- else
- Out << "\", false";// No null terminator
- Out << ");";
- } else {
- Out << "std::vector<Constant*> " << constName << "_elems;";
- nl(Out);
- unsigned N = CA->getNumOperands();
- for (unsigned i = 0; i < N; ++i) {
- printConstant(CA->getOperand(i)); // recurse to print operands
- Out << constName << "_elems.push_back("
- << getCppName(CA->getOperand(i)) << ");";
- nl(Out);
- }
- Out << "Constant* " << constName << " = ConstantArray::get("
- << typeName << ", " << constName << "_elems);";
- }
- } else if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
- Out << "std::vector<Constant*> " << constName << "_fields;";
- nl(Out);
- unsigned N = CS->getNumOperands();
- for (unsigned i = 0; i < N; i++) {
- printConstant(CS->getOperand(i));
- Out << constName << "_fields.push_back("
- << getCppName(CS->getOperand(i)) << ");";
- nl(Out);
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
+ std::string constValue = CI->getValue().toString(10, true);
+ Out << "ConstantInt* " << constName
+ << " = ConstantInt::get(mod->getContext(), APInt("
+ << cast<IntegerType>(CI->getType())->getBitWidth()
+ << ", StringRef(\"" << constValue << "\"), 10));";
+ } else if (isa<ConstantAggregateZero>(CV)) {
+ Out << "ConstantAggregateZero* " << constName
+ << " = ConstantAggregateZero::get(" << typeName << ");";
+ } else if (isa<ConstantPointerNull>(CV)) {
+ Out << "ConstantPointerNull* " << constName
+ << " = ConstantPointerNull::get(" << typeName << ");";
+ } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
+ Out << "ConstantFP* " << constName << " = ";
+ printCFP(CFP);
+ Out << ";";
+ } else if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
+ if (CA->isString() &&
+ CA->getType()->getElementType() ==
+ Type::getInt8Ty(CA->getContext())) {
+ Out << "Constant* " << constName <<
+ " = ConstantArray::get(mod->getContext(), \"";
+ std::string tmp = CA->getAsString();
+ bool nullTerminate = false;
+ if (tmp[tmp.length()-1] == 0) {
+ tmp.erase(tmp.length()-1);
+ nullTerminate = true;
}
- Out << "Constant* " << constName << " = ConstantStruct::get("
- << typeName << ", " << constName << "_fields);";
- } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(CV)) {
+ printEscapedString(tmp);
+ // Determine if we want null termination or not.
+ if (nullTerminate)
+ Out << "\", true"; // Indicate that the null terminator should be
+ // added.
+ else
+ Out << "\", false";// No null terminator
+ Out << ");";
+ } else {
Out << "std::vector<Constant*> " << constName << "_elems;";
nl(Out);
- unsigned N = CP->getNumOperands();
+ unsigned N = CA->getNumOperands();
for (unsigned i = 0; i < N; ++i) {
- printConstant(CP->getOperand(i));
+ printConstant(CA->getOperand(i)); // recurse to print operands
Out << constName << "_elems.push_back("
- << getCppName(CP->getOperand(i)) << ");";
+ << getCppName(CA->getOperand(i)) << ");";
nl(Out);
}
- Out << "Constant* " << constName << " = ConstantVector::get("
+ Out << "Constant* " << constName << " = ConstantArray::get("
<< typeName << ", " << constName << "_elems);";
- } else if (isa<UndefValue>(CV)) {
- Out << "UndefValue* " << constName << " = UndefValue::get("
- << typeName << ");";
- } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
- if (CE->getOpcode() == Instruction::GetElementPtr) {
- Out << "std::vector<Constant*> " << constName << "_indices;";
+ }
+ } else if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
+ Out << "std::vector<Constant*> " << constName << "_fields;";
+ nl(Out);
+ unsigned N = CS->getNumOperands();
+ for (unsigned i = 0; i < N; i++) {
+ printConstant(CS->getOperand(i));
+ Out << constName << "_fields.push_back("
+ << getCppName(CS->getOperand(i)) << ");";
+ nl(Out);
+ }
+ Out << "Constant* " << constName << " = ConstantStruct::get("
+ << typeName << ", " << constName << "_fields);";
+ } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(CV)) {
+ Out << "std::vector<Constant*> " << constName << "_elems;";
+ nl(Out);
+ unsigned N = CP->getNumOperands();
+ for (unsigned i = 0; i < N; ++i) {
+ printConstant(CP->getOperand(i));
+ Out << constName << "_elems.push_back("
+ << getCppName(CP->getOperand(i)) << ");";
+ nl(Out);
+ }
+ Out << "Constant* " << constName << " = ConstantVector::get("
+ << typeName << ", " << constName << "_elems);";
+ } else if (isa<UndefValue>(CV)) {
+ Out << "UndefValue* " << constName << " = UndefValue::get("
+ << typeName << ");";
+ } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
+ if (CE->getOpcode() == Instruction::GetElementPtr) {
+ Out << "std::vector<Constant*> " << constName << "_indices;";
+ nl(Out);
+ printConstant(CE->getOperand(0));
+ for (unsigned i = 1; i < CE->getNumOperands(); ++i ) {
+ printConstant(CE->getOperand(i));
+ Out << constName << "_indices.push_back("
+ << getCppName(CE->getOperand(i)) << ");";
nl(Out);
- printConstant(CE->getOperand(0));
- for (unsigned i = 1; i < CE->getNumOperands(); ++i ) {
- printConstant(CE->getOperand(i));
- Out << constName << "_indices.push_back("
- << getCppName(CE->getOperand(i)) << ");";
- nl(Out);
- }
- Out << "Constant* " << constName
- << " = ConstantExpr::getGetElementPtr("
- << getCppName(CE->getOperand(0)) << ", "
- << "&" << constName << "_indices[0], "
- << constName << "_indices.size()"
- << ");";
- } else if (CE->isCast()) {
- printConstant(CE->getOperand(0));
- Out << "Constant* " << constName << " = ConstantExpr::getCast(";
- switch (CE->getOpcode()) {
- default: llvm_unreachable("Invalid cast opcode");
- case Instruction::Trunc: Out << "Instruction::Trunc"; break;
- case Instruction::ZExt: Out << "Instruction::ZExt"; break;
- case Instruction::SExt: Out << "Instruction::SExt"; break;
- case Instruction::FPTrunc: Out << "Instruction::FPTrunc"; break;
- case Instruction::FPExt: Out << "Instruction::FPExt"; break;
- case Instruction::FPToUI: Out << "Instruction::FPToUI"; break;
- case Instruction::FPToSI: Out << "Instruction::FPToSI"; break;
- case Instruction::UIToFP: Out << "Instruction::UIToFP"; break;
- case Instruction::SIToFP: Out << "Instruction::SIToFP"; break;
- case Instruction::PtrToInt: Out << "Instruction::PtrToInt"; break;
- case Instruction::IntToPtr: Out << "Instruction::IntToPtr"; break;
- case Instruction::BitCast: Out << "Instruction::BitCast"; break;
- }
- Out << ", " << getCppName(CE->getOperand(0)) << ", "
- << getCppName(CE->getType()) << ");";
- } else {
- unsigned N = CE->getNumOperands();
- for (unsigned i = 0; i < N; ++i ) {
- printConstant(CE->getOperand(i));
+ }
+ Out << "Constant* " << constName
+ << " = ConstantExpr::getGetElementPtr("
+ << getCppName(CE->getOperand(0)) << ", "
+ << "&" << constName << "_indices[0], "
+ << constName << "_indices.size()"
+ << ");";
+ } else if (CE->isCast()) {
+ printConstant(CE->getOperand(0));
+ Out << "Constant* " << constName << " = ConstantExpr::getCast(";
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid cast opcode");
+ case Instruction::Trunc: Out << "Instruction::Trunc"; break;
+ case Instruction::ZExt: Out << "Instruction::ZExt"; break;
+ case Instruction::SExt: Out << "Instruction::SExt"; break;
+ case Instruction::FPTrunc: Out << "Instruction::FPTrunc"; break;
+ case Instruction::FPExt: Out << "Instruction::FPExt"; break;
+ case Instruction::FPToUI: Out << "Instruction::FPToUI"; break;
+ case Instruction::FPToSI: Out << "Instruction::FPToSI"; break;
+ case Instruction::UIToFP: Out << "Instruction::UIToFP"; break;
+ case Instruction::SIToFP: Out << "Instruction::SIToFP"; break;
+ case Instruction::PtrToInt: Out << "Instruction::PtrToInt"; break;
+ case Instruction::IntToPtr: Out << "Instruction::IntToPtr"; break;
+ case Instruction::BitCast: Out << "Instruction::BitCast"; break;
+ }
+ Out << ", " << getCppName(CE->getOperand(0)) << ", "
+ << getCppName(CE->getType()) << ");";
+ } else {
+ unsigned N = CE->getNumOperands();
+ for (unsigned i = 0; i < N; ++i ) {
+ printConstant(CE->getOperand(i));
+ }
+ Out << "Constant* " << constName << " = ConstantExpr::";
+ switch (CE->getOpcode()) {
+ case Instruction::Add: Out << "getAdd("; break;
+ case Instruction::FAdd: Out << "getFAdd("; break;
+ case Instruction::Sub: Out << "getSub("; break;
+ case Instruction::FSub: Out << "getFSub("; break;
+ case Instruction::Mul: Out << "getMul("; break;
+ case Instruction::FMul: Out << "getFMul("; break;
+ case Instruction::UDiv: Out << "getUDiv("; break;
+ case Instruction::SDiv: Out << "getSDiv("; break;
+ case Instruction::FDiv: Out << "getFDiv("; break;
+ case Instruction::URem: Out << "getURem("; break;
+ case Instruction::SRem: Out << "getSRem("; break;
+ case Instruction::FRem: Out << "getFRem("; break;
+ case Instruction::And: Out << "getAnd("; break;
+ case Instruction::Or: Out << "getOr("; break;
+ case Instruction::Xor: Out << "getXor("; break;
+ case Instruction::ICmp:
+ Out << "getICmp(ICmpInst::ICMP_";
+ switch (CE->getPredicate()) {
+ case ICmpInst::ICMP_EQ: Out << "EQ"; break;
+ case ICmpInst::ICMP_NE: Out << "NE"; break;
+ case ICmpInst::ICMP_SLT: Out << "SLT"; break;
+ case ICmpInst::ICMP_ULT: Out << "ULT"; break;
+ case ICmpInst::ICMP_SGT: Out << "SGT"; break;
+ case ICmpInst::ICMP_UGT: Out << "UGT"; break;
+ case ICmpInst::ICMP_SLE: Out << "SLE"; break;
+ case ICmpInst::ICMP_ULE: Out << "ULE"; break;
+ case ICmpInst::ICMP_SGE: Out << "SGE"; break;
+ case ICmpInst::ICMP_UGE: Out << "UGE"; break;
+ default: error("Invalid ICmp Predicate");
}
- Out << "Constant* " << constName << " = ConstantExpr::";
- switch (CE->getOpcode()) {
- case Instruction::Add: Out << "getAdd("; break;
- case Instruction::FAdd: Out << "getFAdd("; break;
- case Instruction::Sub: Out << "getSub("; break;
- case Instruction::FSub: Out << "getFSub("; break;
- case Instruction::Mul: Out << "getMul("; break;
- case Instruction::FMul: Out << "getFMul("; break;
- case Instruction::UDiv: Out << "getUDiv("; break;
- case Instruction::SDiv: Out << "getSDiv("; break;
- case Instruction::FDiv: Out << "getFDiv("; break;
- case Instruction::URem: Out << "getURem("; break;
- case Instruction::SRem: Out << "getSRem("; break;
- case Instruction::FRem: Out << "getFRem("; break;
- case Instruction::And: Out << "getAnd("; break;
- case Instruction::Or: Out << "getOr("; break;
- case Instruction::Xor: Out << "getXor("; break;
- case Instruction::ICmp:
- Out << "getICmp(ICmpInst::ICMP_";
- switch (CE->getPredicate()) {
- case ICmpInst::ICMP_EQ: Out << "EQ"; break;
- case ICmpInst::ICMP_NE: Out << "NE"; break;
- case ICmpInst::ICMP_SLT: Out << "SLT"; break;
- case ICmpInst::ICMP_ULT: Out << "ULT"; break;
- case ICmpInst::ICMP_SGT: Out << "SGT"; break;
- case ICmpInst::ICMP_UGT: Out << "UGT"; break;
- case ICmpInst::ICMP_SLE: Out << "SLE"; break;
- case ICmpInst::ICMP_ULE: Out << "ULE"; break;
- case ICmpInst::ICMP_SGE: Out << "SGE"; break;
- case ICmpInst::ICMP_UGE: Out << "UGE"; break;
- default: error("Invalid ICmp Predicate");
- }
- break;
- case Instruction::FCmp:
- Out << "getFCmp(FCmpInst::FCMP_";
- switch (CE->getPredicate()) {
- case FCmpInst::FCMP_FALSE: Out << "FALSE"; break;
- case FCmpInst::FCMP_ORD: Out << "ORD"; break;
- case FCmpInst::FCMP_UNO: Out << "UNO"; break;
- case FCmpInst::FCMP_OEQ: Out << "OEQ"; break;
- case FCmpInst::FCMP_UEQ: Out << "UEQ"; break;
- case FCmpInst::FCMP_ONE: Out << "ONE"; break;
- case FCmpInst::FCMP_UNE: Out << "UNE"; break;
- case FCmpInst::FCMP_OLT: Out << "OLT"; break;
- case FCmpInst::FCMP_ULT: Out << "ULT"; break;
- case FCmpInst::FCMP_OGT: Out << "OGT"; break;
- case FCmpInst::FCMP_UGT: Out << "UGT"; break;
- case FCmpInst::FCMP_OLE: Out << "OLE"; break;
- case FCmpInst::FCMP_ULE: Out << "ULE"; break;
- case FCmpInst::FCMP_OGE: Out << "OGE"; break;
- case FCmpInst::FCMP_UGE: Out << "UGE"; break;
- case FCmpInst::FCMP_TRUE: Out << "TRUE"; break;
- default: error("Invalid FCmp Predicate");
- }
- break;
- case Instruction::Shl: Out << "getShl("; break;
- case Instruction::LShr: Out << "getLShr("; break;
- case Instruction::AShr: Out << "getAShr("; break;
- case Instruction::Select: Out << "getSelect("; break;
- case Instruction::ExtractElement: Out << "getExtractElement("; break;
- case Instruction::InsertElement: Out << "getInsertElement("; break;
- case Instruction::ShuffleVector: Out << "getShuffleVector("; break;
- default:
- error("Invalid constant expression");
- break;
+ break;
+ case Instruction::FCmp:
+ Out << "getFCmp(FCmpInst::FCMP_";
+ switch (CE->getPredicate()) {
+ case FCmpInst::FCMP_FALSE: Out << "FALSE"; break;
+ case FCmpInst::FCMP_ORD: Out << "ORD"; break;
+ case FCmpInst::FCMP_UNO: Out << "UNO"; break;
+ case FCmpInst::FCMP_OEQ: Out << "OEQ"; break;
+ case FCmpInst::FCMP_UEQ: Out << "UEQ"; break;
+ case FCmpInst::FCMP_ONE: Out << "ONE"; break;
+ case FCmpInst::FCMP_UNE: Out << "UNE"; break;
+ case FCmpInst::FCMP_OLT: Out << "OLT"; break;
+ case FCmpInst::FCMP_ULT: Out << "ULT"; break;
+ case FCmpInst::FCMP_OGT: Out << "OGT"; break;
+ case FCmpInst::FCMP_UGT: Out << "UGT"; break;
+ case FCmpInst::FCMP_OLE: Out << "OLE"; break;
+ case FCmpInst::FCMP_ULE: Out << "ULE"; break;
+ case FCmpInst::FCMP_OGE: Out << "OGE"; break;
+ case FCmpInst::FCMP_UGE: Out << "UGE"; break;
+ case FCmpInst::FCMP_TRUE: Out << "TRUE"; break;
+ default: error("Invalid FCmp Predicate");
}
- Out << getCppName(CE->getOperand(0));
- for (unsigned i = 1; i < CE->getNumOperands(); ++i)
- Out << ", " << getCppName(CE->getOperand(i));
- Out << ");";
+ break;
+ case Instruction::Shl: Out << "getShl("; break;
+ case Instruction::LShr: Out << "getLShr("; break;
+ case Instruction::AShr: Out << "getAShr("; break;
+ case Instruction::Select: Out << "getSelect("; break;
+ case Instruction::ExtractElement: Out << "getExtractElement("; break;
+ case Instruction::InsertElement: Out << "getInsertElement("; break;
+ case Instruction::ShuffleVector: Out << "getShuffleVector("; break;
+ default:
+ error("Invalid constant expression");
+ break;
}
- } else {
- error("Bad Constant");
- Out << "Constant* " << constName << " = 0; ";
+ Out << getCppName(CE->getOperand(0));
+ for (unsigned i = 1; i < CE->getNumOperands(); ++i)
+ Out << ", " << getCppName(CE->getOperand(i));
+ Out << ");";
}
- nl(Out);
+ } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV)) {
+ Out << "Constant* " << constName << " = ";
+ Out << "BlockAddress::get(" << getOpName(BA->getBasicBlock()) << ");";
+ } else {
+ error("Bad Constant");
+ Out << "Constant* " << constName << " = 0; ";
}
+ nl(Out);
+}
- void CppWriter::printConstants(const Module* M) {
- // Traverse all the global variables looking for constant initializers
- for (Module::const_global_iterator I = TheModule->global_begin(),
- E = TheModule->global_end(); I != E; ++I)
- if (I->hasInitializer())
- printConstant(I->getInitializer());
-
- // Traverse the LLVM functions looking for constants
- for (Module::const_iterator FI = TheModule->begin(), FE = TheModule->end();
- FI != FE; ++FI) {
- // Add all of the basic blocks and instructions
- for (Function::const_iterator BB = FI->begin(),
- E = FI->end(); BB != E; ++BB) {
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;
- ++I) {
- for (unsigned i = 0; i < I->getNumOperands(); ++i) {
- if (Constant* C = dyn_cast<Constant>(I->getOperand(i))) {
- printConstant(C);
- }
+void CppWriter::printConstants(const Module* M) {
+ // Traverse all the global variables looking for constant initializers
+ for (Module::const_global_iterator I = TheModule->global_begin(),
+ E = TheModule->global_end(); I != E; ++I)
+ if (I->hasInitializer())
+ printConstant(I->getInitializer());
+
+ // Traverse the LLVM functions looking for constants
+ for (Module::const_iterator FI = TheModule->begin(), FE = TheModule->end();
+ FI != FE; ++FI) {
+ // Add all of the basic blocks and instructions
+ for (Function::const_iterator BB = FI->begin(),
+ E = FI->end(); BB != E; ++BB) {
+ for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;
+ ++I) {
+ for (unsigned i = 0; i < I->getNumOperands(); ++i) {
+ if (Constant* C = dyn_cast<Constant>(I->getOperand(i))) {
+ printConstant(C);
}
}
}
}
}
+}
- void CppWriter::printVariableUses(const GlobalVariable *GV) {
- nl(Out) << "// Type Definitions";
- nl(Out);
- printType(GV->getType());
- if (GV->hasInitializer()) {
- Constant *Init = GV->getInitializer();
- printType(Init->getType());
- if (Function *F = dyn_cast<Function>(Init)) {
- nl(Out)<< "/ Function Declarations"; nl(Out);
- printFunctionHead(F);
- } else if (GlobalVariable* gv = dyn_cast<GlobalVariable>(Init)) {
- nl(Out) << "// Global Variable Declarations"; nl(Out);
- printVariableHead(gv);
-
- nl(Out) << "// Global Variable Definitions"; nl(Out);
- printVariableBody(gv);
- } else {
- nl(Out) << "// Constant Definitions"; nl(Out);
- printConstant(Init);
- }
+void CppWriter::printVariableUses(const GlobalVariable *GV) {
+ nl(Out) << "// Type Definitions";
+ nl(Out);
+ printType(GV->getType());
+ if (GV->hasInitializer()) {
+ Constant *Init = GV->getInitializer();
+ printType(Init->getType());
+ if (Function *F = dyn_cast<Function>(Init)) {
+ nl(Out)<< "/ Function Declarations"; nl(Out);
+ printFunctionHead(F);
+ } else if (GlobalVariable* gv = dyn_cast<GlobalVariable>(Init)) {
+ nl(Out) << "// Global Variable Declarations"; nl(Out);
+ printVariableHead(gv);
+
+ nl(Out) << "// Global Variable Definitions"; nl(Out);
+ printVariableBody(gv);
+ } else {
+ nl(Out) << "// Constant Definitions"; nl(Out);
+ printConstant(Init);
}
}
+}
- void CppWriter::printVariableHead(const GlobalVariable *GV) {
- nl(Out) << "GlobalVariable* " << getCppName(GV);
- if (is_inline) {
- Out << " = mod->getGlobalVariable(mod->getContext(), ";
- printEscapedString(GV->getName());
- Out << ", " << getCppName(GV->getType()->getElementType()) << ",true)";
- nl(Out) << "if (!" << getCppName(GV) << ") {";
- in(); nl(Out) << getCppName(GV);
- }
- Out << " = new GlobalVariable(/*Module=*/*mod, ";
- nl(Out) << "/*Type=*/";
- printCppName(GV->getType()->getElementType());
- Out << ",";
- nl(Out) << "/*isConstant=*/" << (GV->isConstant()?"true":"false");
- Out << ",";
- nl(Out) << "/*Linkage=*/";
- printLinkageType(GV->getLinkage());
- Out << ",";
- nl(Out) << "/*Initializer=*/0, ";
- if (GV->hasInitializer()) {
- Out << "// has initializer, specified below";
- }
- nl(Out) << "/*Name=*/\"";
+void CppWriter::printVariableHead(const GlobalVariable *GV) {
+ nl(Out) << "GlobalVariable* " << getCppName(GV);
+ if (is_inline) {
+ Out << " = mod->getGlobalVariable(mod->getContext(), ";
printEscapedString(GV->getName());
+ Out << ", " << getCppName(GV->getType()->getElementType()) << ",true)";
+ nl(Out) << "if (!" << getCppName(GV) << ") {";
+ in(); nl(Out) << getCppName(GV);
+ }
+ Out << " = new GlobalVariable(/*Module=*/*mod, ";
+ nl(Out) << "/*Type=*/";
+ printCppName(GV->getType()->getElementType());
+ Out << ",";
+ nl(Out) << "/*isConstant=*/" << (GV->isConstant()?"true":"false");
+ Out << ",";
+ nl(Out) << "/*Linkage=*/";
+ printLinkageType(GV->getLinkage());
+ Out << ",";
+ nl(Out) << "/*Initializer=*/0, ";
+ if (GV->hasInitializer()) {
+ Out << "// has initializer, specified below";
+ }
+ nl(Out) << "/*Name=*/\"";
+ printEscapedString(GV->getName());
+ Out << "\");";
+ nl(Out);
+
+ if (GV->hasSection()) {
+ printCppName(GV);
+ Out << "->setSection(\"";
+ printEscapedString(GV->getSection());
Out << "\");";
nl(Out);
-
- if (GV->hasSection()) {
- printCppName(GV);
- Out << "->setSection(\"";
- printEscapedString(GV->getSection());
- Out << "\");";
- nl(Out);
- }
- if (GV->getAlignment()) {
- printCppName(GV);
- Out << "->setAlignment(" << utostr(GV->getAlignment()) << ");";
- nl(Out);
- }
- if (GV->getVisibility() != GlobalValue::DefaultVisibility) {
- printCppName(GV);
- Out << "->setVisibility(";
- printVisibilityType(GV->getVisibility());
- Out << ");";
- nl(Out);
- }
- if (GV->isThreadLocal()) {
- printCppName(GV);
- Out << "->setThreadLocal(true);";
- nl(Out);
- }
- if (is_inline) {
- out(); Out << "}"; nl(Out);
- }
}
-
- void CppWriter::printVariableBody(const GlobalVariable *GV) {
- if (GV->hasInitializer()) {
- printCppName(GV);
- Out << "->setInitializer(";
- Out << getCppName(GV->getInitializer()) << ");";
- nl(Out);
- }
+ if (GV->getAlignment()) {
+ printCppName(GV);
+ Out << "->setAlignment(" << utostr(GV->getAlignment()) << ");";
+ nl(Out);
}
+ if (GV->getVisibility() != GlobalValue::DefaultVisibility) {
+ printCppName(GV);
+ Out << "->setVisibility(";
+ printVisibilityType(GV->getVisibility());
+ Out << ");";
+ nl(Out);
+ }
+ if (GV->isThreadLocal()) {
+ printCppName(GV);
+ Out << "->setThreadLocal(true);";
+ nl(Out);
+ }
+ if (is_inline) {
+ out(); Out << "}"; nl(Out);
+ }
+}
- std::string CppWriter::getOpName(Value* V) {
- if (!isa<Instruction>(V) || DefinedValues.find(V) != DefinedValues.end())
- return getCppName(V);
-
- // See if its alread in the map of forward references, if so just return the
- // name we already set up for it
- ForwardRefMap::const_iterator I = ForwardRefs.find(V);
- if (I != ForwardRefs.end())
- return I->second;
-
- // This is a new forward reference. Generate a unique name for it
- std::string result(std::string("fwdref_") + utostr(uniqueNum++));
-
- // Yes, this is a hack. An Argument is the smallest instantiable value that
- // we can make as a placeholder for the real value. We'll replace these
- // Argument instances later.
- Out << "Argument* " << result << " = new Argument("
- << getCppName(V->getType()) << ");";
+void CppWriter::printVariableBody(const GlobalVariable *GV) {
+ if (GV->hasInitializer()) {
+ printCppName(GV);
+ Out << "->setInitializer(";
+ Out << getCppName(GV->getInitializer()) << ");";
nl(Out);
- ForwardRefs[V] = result;
- return result;
}
+}
- // printInstruction - This member is called for each Instruction in a function.
- void CppWriter::printInstruction(const Instruction *I,
- const std::string& bbname) {
- std::string iName(getCppName(I));
+std::string CppWriter::getOpName(Value* V) {
+ if (!isa<Instruction>(V) || DefinedValues.find(V) != DefinedValues.end())
+ return getCppName(V);
- // Before we emit this instruction, we need to take care of generating any
- // forward references. So, we get the names of all the operands in advance
- const unsigned Ops(I->getNumOperands());
- std::string* opNames = new std::string[Ops];
- for (unsigned i = 0; i < Ops; i++) {
- opNames[i] = getOpName(I->getOperand(i));
- }
+ // See if it's already in the map of forward references; if so, just return
+ // the name we already set up for it
+ ForwardRefMap::const_iterator I = ForwardRefs.find(V);
+ if (I != ForwardRefs.end())
+ return I->second;
- switch (I->getOpcode()) {
- default:
- error("Invalid instruction");
- break;
+ // This is a new forward reference. Generate a unique name for it
+ std::string result(std::string("fwdref_") + utostr(uniqueNum++));
- case Instruction::Ret: {
- const ReturnInst* ret = cast<ReturnInst>(I);
- Out << "ReturnInst::Create(mod->getContext(), "
- << (ret->getReturnValue() ? opNames[0] + ", " : "") << bbname << ");";
- break;
+ // Yes, this is a hack. An Argument is the smallest instantiable value that
+ // we can make as a placeholder for the real value. We'll replace these
+ // Argument instances later.
+ Out << "Argument* " << result << " = new Argument("
+ << getCppName(V->getType()) << ");";
+ nl(Out);
+ ForwardRefs[V] = result;
+ return result;
+}
+
+// printInstruction - This member is called for each Instruction in a function.
+void CppWriter::printInstruction(const Instruction *I,
+ const std::string& bbname) {
+ std::string iName(getCppName(I));
+
+ // Before we emit this instruction, we need to take care of generating any
+ // forward references. So, we get the names of all the operands in advance
+ const unsigned Ops(I->getNumOperands());
+ std::string* opNames = new std::string[Ops];
+ for (unsigned i = 0; i < Ops; i++)
+ opNames[i] = getOpName(I->getOperand(i));
+
+ switch (I->getOpcode()) {
+ default:
+ error("Invalid instruction");
+ break;
+
+ case Instruction::Ret: {
+ const ReturnInst* ret = cast<ReturnInst>(I);
+ Out << "ReturnInst::Create(mod->getContext(), "
+ << (ret->getReturnValue() ? opNames[0] + ", " : "") << bbname << ");";
+ break;
+ }
+ case Instruction::Br: {
+ const BranchInst* br = cast<BranchInst>(I);
+ Out << "BranchInst::Create(" ;
+ if (br->getNumOperands() == 3) {
+ Out << opNames[2] << ", "
+ << opNames[1] << ", "
+ << opNames[0] << ", ";
+
+ } else if (br->getNumOperands() == 1) {
+ Out << opNames[0] << ", ";
+ } else {
+ error("Branch with 2 operands?");
}
- case Instruction::Br: {
- const BranchInst* br = cast<BranchInst>(I);
- Out << "BranchInst::Create(" ;
- if (br->getNumOperands() == 3 ) {
- Out << opNames[2] << ", "
- << opNames[1] << ", "
- << opNames[0] << ", ";
-
- } else if (br->getNumOperands() == 1) {
- Out << opNames[0] << ", ";
- } else {
- error("Branch with 2 operands?");
- }
- Out << bbname << ");";
- break;
+ Out << bbname << ");";
+ break;
+ }
+ case Instruction::Switch: {
+ const SwitchInst *SI = cast<SwitchInst>(I);
+ Out << "SwitchInst* " << iName << " = SwitchInst::Create("
+ << opNames[0] << ", "
+ << opNames[1] << ", "
+ << SI->getNumCases() << ", " << bbname << ");";
+ nl(Out);
+ for (unsigned i = 2; i != SI->getNumOperands(); i += 2) {
+ Out << iName << "->addCase("
+ << opNames[i] << ", "
+ << opNames[i+1] << ");";
+ nl(Out);
}
- case Instruction::Switch: {
- const SwitchInst *SI = cast<SwitchInst>(I);
- Out << "SwitchInst* " << iName << " = SwitchInst::Create("
- << opNames[0] << ", "
- << opNames[1] << ", "
- << SI->getNumCases() << ", " << bbname << ");";
+ break;
+ }
+ case Instruction::IndirectBr: {
+ const IndirectBrInst *IBI = cast<IndirectBrInst>(I);
+ Out << "IndirectBrInst *" << iName << " = IndirectBrInst::Create("
+ << opNames[0] << ", " << IBI->getNumDestinations() << ");";
+ nl(Out);
+ for (unsigned i = 1; i != IBI->getNumOperands(); ++i) {
+ Out << iName << "->addDestination(" << opNames[i] << ");";
nl(Out);
- for (unsigned i = 2; i != SI->getNumOperands(); i += 2) {
- Out << iName << "->addCase("
- << opNames[i] << ", "
- << opNames[i+1] << ");";
- nl(Out);
- }
- break;
}
- case Instruction::IndirectBr: {
- const IndirectBrInst *IBI = cast<IndirectBrInst>(I);
- Out << "IndirectBrInst *" << iName << " = IndirectBrInst::Create("
- << opNames[0] << ", " << IBI->getNumDestinations() << ");";
+ break;
+ }
+ case Instruction::Invoke: {
+ const InvokeInst* inv = cast<InvokeInst>(I);
+ Out << "std::vector<Value*> " << iName << "_params;";
+ nl(Out);
+ for (unsigned i = 0; i < inv->getNumArgOperands(); ++i) {
+ Out << iName << "_params.push_back("
+ << getOpName(inv->getArgOperand(i)) << ");";
nl(Out);
- for (unsigned i = 1; i != IBI->getNumOperands(); ++i) {
- Out << iName << "->addDestination(" << opNames[i] << ");";
- nl(Out);
- }
- break;
}
- case Instruction::Invoke: {
- const InvokeInst* inv = cast<InvokeInst>(I);
- Out << "std::vector<Value*> " << iName << "_params;";
+ // FIXME: This shouldn't use magic numbers -3, -2, and -1.
+ Out << "InvokeInst *" << iName << " = InvokeInst::Create("
+ << getOpName(inv->getCalledFunction()) << ", "
+ << getOpName(inv->getNormalDest()) << ", "
+ << getOpName(inv->getUnwindDest()) << ", "
+ << iName << "_params.begin(), "
+ << iName << "_params.end(), \"";
+ printEscapedString(inv->getName());
+ Out << "\", " << bbname << ");";
+ nl(Out) << iName << "->setCallingConv(";
+ printCallingConv(inv->getCallingConv());
+ Out << ");";
+ printAttributes(inv->getAttributes(), iName);
+ Out << iName << "->setAttributes(" << iName << "_PAL);";
+ nl(Out);
+ break;
+ }
+ case Instruction::Unwind: {
+ Out << "new UnwindInst("
+ << bbname << ");";
+ break;
+ }
+ case Instruction::Unreachable: {
+ Out << "new UnreachableInst("
+ << "mod->getContext(), "
+ << bbname << ");";
+ break;
+ }
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:{
+ Out << "BinaryOperator* " << iName << " = BinaryOperator::Create(";
+ switch (I->getOpcode()) {
+ case Instruction::Add: Out << "Instruction::Add"; break;
+ case Instruction::FAdd: Out << "Instruction::FAdd"; break;
+ case Instruction::Sub: Out << "Instruction::Sub"; break;
+ case Instruction::FSub: Out << "Instruction::FSub"; break;
+ case Instruction::Mul: Out << "Instruction::Mul"; break;
+ case Instruction::FMul: Out << "Instruction::FMul"; break;
+ case Instruction::UDiv:Out << "Instruction::UDiv"; break;
+ case Instruction::SDiv:Out << "Instruction::SDiv"; break;
+ case Instruction::FDiv:Out << "Instruction::FDiv"; break;
+ case Instruction::URem:Out << "Instruction::URem"; break;
+ case Instruction::SRem:Out << "Instruction::SRem"; break;
+ case Instruction::FRem:Out << "Instruction::FRem"; break;
+ case Instruction::And: Out << "Instruction::And"; break;
+ case Instruction::Or: Out << "Instruction::Or"; break;
+ case Instruction::Xor: Out << "Instruction::Xor"; break;
+ case Instruction::Shl: Out << "Instruction::Shl"; break;
+ case Instruction::LShr:Out << "Instruction::LShr"; break;
+ case Instruction::AShr:Out << "Instruction::AShr"; break;
+ default: Out << "Instruction::BadOpCode"; break;
+ }
+ Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
+ printEscapedString(I->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::FCmp: {
+ Out << "FCmpInst* " << iName << " = new FCmpInst(*" << bbname << ", ";
+ switch (cast<FCmpInst>(I)->getPredicate()) {
+ case FCmpInst::FCMP_FALSE: Out << "FCmpInst::FCMP_FALSE"; break;
+ case FCmpInst::FCMP_OEQ : Out << "FCmpInst::FCMP_OEQ"; break;
+ case FCmpInst::FCMP_OGT : Out << "FCmpInst::FCMP_OGT"; break;
+ case FCmpInst::FCMP_OGE : Out << "FCmpInst::FCMP_OGE"; break;
+ case FCmpInst::FCMP_OLT : Out << "FCmpInst::FCMP_OLT"; break;
+ case FCmpInst::FCMP_OLE : Out << "FCmpInst::FCMP_OLE"; break;
+ case FCmpInst::FCMP_ONE : Out << "FCmpInst::FCMP_ONE"; break;
+ case FCmpInst::FCMP_ORD : Out << "FCmpInst::FCMP_ORD"; break;
+ case FCmpInst::FCMP_UNO : Out << "FCmpInst::FCMP_UNO"; break;
+ case FCmpInst::FCMP_UEQ : Out << "FCmpInst::FCMP_UEQ"; break;
+ case FCmpInst::FCMP_UGT : Out << "FCmpInst::FCMP_UGT"; break;
+ case FCmpInst::FCMP_UGE : Out << "FCmpInst::FCMP_UGE"; break;
+ case FCmpInst::FCMP_ULT : Out << "FCmpInst::FCMP_ULT"; break;
+ case FCmpInst::FCMP_ULE : Out << "FCmpInst::FCMP_ULE"; break;
+ case FCmpInst::FCMP_UNE : Out << "FCmpInst::FCMP_UNE"; break;
+ case FCmpInst::FCMP_TRUE : Out << "FCmpInst::FCMP_TRUE"; break;
+ default: Out << "FCmpInst::BAD_FCMP_PREDICATE"; break;
+ }
+ Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
+ printEscapedString(I->getName());
+ Out << "\");";
+ break;
+ }
+ case Instruction::ICmp: {
+ Out << "ICmpInst* " << iName << " = new ICmpInst(*" << bbname << ", ";
+ switch (cast<ICmpInst>(I)->getPredicate()) {
+ case ICmpInst::ICMP_EQ: Out << "ICmpInst::ICMP_EQ"; break;
+ case ICmpInst::ICMP_NE: Out << "ICmpInst::ICMP_NE"; break;
+ case ICmpInst::ICMP_ULE: Out << "ICmpInst::ICMP_ULE"; break;
+ case ICmpInst::ICMP_SLE: Out << "ICmpInst::ICMP_SLE"; break;
+ case ICmpInst::ICMP_UGE: Out << "ICmpInst::ICMP_UGE"; break;
+ case ICmpInst::ICMP_SGE: Out << "ICmpInst::ICMP_SGE"; break;
+ case ICmpInst::ICMP_ULT: Out << "ICmpInst::ICMP_ULT"; break;
+ case ICmpInst::ICMP_SLT: Out << "ICmpInst::ICMP_SLT"; break;
+ case ICmpInst::ICMP_UGT: Out << "ICmpInst::ICMP_UGT"; break;
+ case ICmpInst::ICMP_SGT: Out << "ICmpInst::ICMP_SGT"; break;
+ default: Out << "ICmpInst::BAD_ICMP_PREDICATE"; break;
+ }
+ Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
+ printEscapedString(I->getName());
+ Out << "\");";
+ break;
+ }
+ case Instruction::Alloca: {
+ const AllocaInst* allocaI = cast<AllocaInst>(I);
+ Out << "AllocaInst* " << iName << " = new AllocaInst("
+ << getCppName(allocaI->getAllocatedType()) << ", ";
+ if (allocaI->isArrayAllocation())
+ Out << opNames[0] << ", ";
+ Out << "\"";
+ printEscapedString(allocaI->getName());
+ Out << "\", " << bbname << ");";
+ if (allocaI->getAlignment())
+ nl(Out) << iName << "->setAlignment("
+ << allocaI->getAlignment() << ");";
+ break;
+ }
+ case Instruction::Load: {
+ const LoadInst* load = cast<LoadInst>(I);
+ Out << "LoadInst* " << iName << " = new LoadInst("
+ << opNames[0] << ", \"";
+ printEscapedString(load->getName());
+ Out << "\", " << (load->isVolatile() ? "true" : "false" )
+ << ", " << bbname << ");";
+ break;
+ }
+ case Instruction::Store: {
+ const StoreInst* store = cast<StoreInst>(I);
+ Out << " new StoreInst("
+ << opNames[0] << ", "
+ << opNames[1] << ", "
+ << (store->isVolatile() ? "true" : "false")
+ << ", " << bbname << ");";
+ break;
+ }
+ case Instruction::GetElementPtr: {
+ const GetElementPtrInst* gep = cast<GetElementPtrInst>(I);
+ if (gep->getNumOperands() <= 2) {
+ Out << "GetElementPtrInst* " << iName << " = GetElementPtrInst::Create("
+ << opNames[0];
+ if (gep->getNumOperands() == 2)
+ Out << ", " << opNames[1];
+ } else {
+ Out << "std::vector<Value*> " << iName << "_indices;";
nl(Out);
- for (unsigned i = 0; i < inv->getNumOperands() - 3; ++i) {
- Out << iName << "_params.push_back("
+ for (unsigned i = 1; i < gep->getNumOperands(); ++i ) {
+ Out << iName << "_indices.push_back("
<< opNames[i] << ");";
nl(Out);
}
- Out << "InvokeInst *" << iName << " = InvokeInst::Create("
- << opNames[Ops - 3] << ", "
- << opNames[Ops - 2] << ", "
- << opNames[Ops - 1] << ", "
- << iName << "_params.begin(), " << iName << "_params.end(), \"";
- printEscapedString(inv->getName());
- Out << "\", " << bbname << ");";
- nl(Out) << iName << "->setCallingConv(";
- printCallingConv(inv->getCallingConv());
- Out << ");";
- printAttributes(inv->getAttributes(), iName);
- Out << iName << "->setAttributes(" << iName << "_PAL);";
+ Out << "Instruction* " << iName << " = GetElementPtrInst::Create("
+ << opNames[0] << ", " << iName << "_indices.begin(), "
+ << iName << "_indices.end()";
+ }
+ Out << ", \"";
+ printEscapedString(gep->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::PHI: {
+ const PHINode* phi = cast<PHINode>(I);
+
+ Out << "PHINode* " << iName << " = PHINode::Create("
+ << getCppName(phi->getType()) << ", \"";
+ printEscapedString(phi->getName());
+ Out << "\", " << bbname << ");";
+ nl(Out) << iName << "->reserveOperandSpace("
+ << phi->getNumIncomingValues()
+ << ");";
+ nl(Out);
+ for (unsigned i = 0; i < phi->getNumOperands(); i+=2) {
+ Out << iName << "->addIncoming("
+ << opNames[i] << ", " << opNames[i+1] << ");";
nl(Out);
- break;
- }
- case Instruction::Unwind: {
- Out << "new UnwindInst("
- << bbname << ");";
- break;
- }
- case Instruction::Unreachable: {
- Out << "new UnreachableInst("
- << "mod->getContext(), "
- << bbname << ");";
- break;
- }
- case Instruction::Add:
- case Instruction::FAdd:
- case Instruction::Sub:
- case Instruction::FSub:
- case Instruction::Mul:
- case Instruction::FMul:
- case Instruction::UDiv:
- case Instruction::SDiv:
- case Instruction::FDiv:
- case Instruction::URem:
- case Instruction::SRem:
- case Instruction::FRem:
- case Instruction::And:
- case Instruction::Or:
- case Instruction::Xor:
- case Instruction::Shl:
- case Instruction::LShr:
- case Instruction::AShr:{
- Out << "BinaryOperator* " << iName << " = BinaryOperator::Create(";
- switch (I->getOpcode()) {
- case Instruction::Add: Out << "Instruction::Add"; break;
- case Instruction::FAdd: Out << "Instruction::FAdd"; break;
- case Instruction::Sub: Out << "Instruction::Sub"; break;
- case Instruction::FSub: Out << "Instruction::FSub"; break;
- case Instruction::Mul: Out << "Instruction::Mul"; break;
- case Instruction::FMul: Out << "Instruction::FMul"; break;
- case Instruction::UDiv:Out << "Instruction::UDiv"; break;
- case Instruction::SDiv:Out << "Instruction::SDiv"; break;
- case Instruction::FDiv:Out << "Instruction::FDiv"; break;
- case Instruction::URem:Out << "Instruction::URem"; break;
- case Instruction::SRem:Out << "Instruction::SRem"; break;
- case Instruction::FRem:Out << "Instruction::FRem"; break;
- case Instruction::And: Out << "Instruction::And"; break;
- case Instruction::Or: Out << "Instruction::Or"; break;
- case Instruction::Xor: Out << "Instruction::Xor"; break;
- case Instruction::Shl: Out << "Instruction::Shl"; break;
- case Instruction::LShr:Out << "Instruction::LShr"; break;
- case Instruction::AShr:Out << "Instruction::AShr"; break;
- default: Out << "Instruction::BadOpCode"; break;
- }
- Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
- printEscapedString(I->getName());
- Out << "\", " << bbname << ");";
- break;
}
- case Instruction::FCmp: {
- Out << "FCmpInst* " << iName << " = new FCmpInst(*" << bbname << ", ";
- switch (cast<FCmpInst>(I)->getPredicate()) {
- case FCmpInst::FCMP_FALSE: Out << "FCmpInst::FCMP_FALSE"; break;
- case FCmpInst::FCMP_OEQ : Out << "FCmpInst::FCMP_OEQ"; break;
- case FCmpInst::FCMP_OGT : Out << "FCmpInst::FCMP_OGT"; break;
- case FCmpInst::FCMP_OGE : Out << "FCmpInst::FCMP_OGE"; break;
- case FCmpInst::FCMP_OLT : Out << "FCmpInst::FCMP_OLT"; break;
- case FCmpInst::FCMP_OLE : Out << "FCmpInst::FCMP_OLE"; break;
- case FCmpInst::FCMP_ONE : Out << "FCmpInst::FCMP_ONE"; break;
- case FCmpInst::FCMP_ORD : Out << "FCmpInst::FCMP_ORD"; break;
- case FCmpInst::FCMP_UNO : Out << "FCmpInst::FCMP_UNO"; break;
- case FCmpInst::FCMP_UEQ : Out << "FCmpInst::FCMP_UEQ"; break;
- case FCmpInst::FCMP_UGT : Out << "FCmpInst::FCMP_UGT"; break;
- case FCmpInst::FCMP_UGE : Out << "FCmpInst::FCMP_UGE"; break;
- case FCmpInst::FCMP_ULT : Out << "FCmpInst::FCMP_ULT"; break;
- case FCmpInst::FCMP_ULE : Out << "FCmpInst::FCMP_ULE"; break;
- case FCmpInst::FCMP_UNE : Out << "FCmpInst::FCMP_UNE"; break;
- case FCmpInst::FCMP_TRUE : Out << "FCmpInst::FCMP_TRUE"; break;
- default: Out << "FCmpInst::BAD_ICMP_PREDICATE"; break;
- }
- Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
- printEscapedString(I->getName());
- Out << "\");";
- break;
- }
- case Instruction::ICmp: {
- Out << "ICmpInst* " << iName << " = new ICmpInst(*" << bbname << ", ";
- switch (cast<ICmpInst>(I)->getPredicate()) {
- case ICmpInst::ICMP_EQ: Out << "ICmpInst::ICMP_EQ"; break;
- case ICmpInst::ICMP_NE: Out << "ICmpInst::ICMP_NE"; break;
- case ICmpInst::ICMP_ULE: Out << "ICmpInst::ICMP_ULE"; break;
- case ICmpInst::ICMP_SLE: Out << "ICmpInst::ICMP_SLE"; break;
- case ICmpInst::ICMP_UGE: Out << "ICmpInst::ICMP_UGE"; break;
- case ICmpInst::ICMP_SGE: Out << "ICmpInst::ICMP_SGE"; break;
- case ICmpInst::ICMP_ULT: Out << "ICmpInst::ICMP_ULT"; break;
- case ICmpInst::ICMP_SLT: Out << "ICmpInst::ICMP_SLT"; break;
- case ICmpInst::ICMP_UGT: Out << "ICmpInst::ICMP_UGT"; break;
- case ICmpInst::ICMP_SGT: Out << "ICmpInst::ICMP_SGT"; break;
- default: Out << "ICmpInst::BAD_ICMP_PREDICATE"; break;
- }
- Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
- printEscapedString(I->getName());
- Out << "\");";
- break;
- }
- case Instruction::Alloca: {
- const AllocaInst* allocaI = cast<AllocaInst>(I);
- Out << "AllocaInst* " << iName << " = new AllocaInst("
- << getCppName(allocaI->getAllocatedType()) << ", ";
- if (allocaI->isArrayAllocation())
- Out << opNames[0] << ", ";
- Out << "\"";
- printEscapedString(allocaI->getName());
- Out << "\", " << bbname << ");";
- if (allocaI->getAlignment())
- nl(Out) << iName << "->setAlignment("
- << allocaI->getAlignment() << ");";
- break;
- }
- case Instruction::Load:{
- const LoadInst* load = cast<LoadInst>(I);
- Out << "LoadInst* " << iName << " = new LoadInst("
- << opNames[0] << ", \"";
- printEscapedString(load->getName());
- Out << "\", " << (load->isVolatile() ? "true" : "false" )
- << ", " << bbname << ");";
- break;
- }
- case Instruction::Store: {
- const StoreInst* store = cast<StoreInst>(I);
- Out << " new StoreInst("
- << opNames[0] << ", "
- << opNames[1] << ", "
- << (store->isVolatile() ? "true" : "false")
- << ", " << bbname << ");";
- break;
- }
- case Instruction::GetElementPtr: {
- const GetElementPtrInst* gep = cast<GetElementPtrInst>(I);
- if (gep->getNumOperands() <= 2) {
- Out << "GetElementPtrInst* " << iName << " = GetElementPtrInst::Create("
- << opNames[0];
- if (gep->getNumOperands() == 2)
- Out << ", " << opNames[1];
- } else {
- Out << "std::vector<Value*> " << iName << "_indices;";
- nl(Out);
- for (unsigned i = 1; i < gep->getNumOperands(); ++i ) {
- Out << iName << "_indices.push_back("
- << opNames[i] << ");";
- nl(Out);
- }
- Out << "Instruction* " << iName << " = GetElementPtrInst::Create("
- << opNames[0] << ", " << iName << "_indices.begin(), "
- << iName << "_indices.end()";
- }
- Out << ", \"";
- printEscapedString(gep->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::PHI: {
- const PHINode* phi = cast<PHINode>(I);
-
- Out << "PHINode* " << iName << " = PHINode::Create("
- << getCppName(phi->getType()) << ", \"";
- printEscapedString(phi->getName());
- Out << "\", " << bbname << ");";
- nl(Out) << iName << "->reserveOperandSpace("
- << phi->getNumIncomingValues()
- << ");";
+ break;
+ }
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::BitCast: {
+ const CastInst* cst = cast<CastInst>(I);
+ Out << "CastInst* " << iName << " = new ";
+ switch (I->getOpcode()) {
+ case Instruction::Trunc: Out << "TruncInst"; break;
+ case Instruction::ZExt: Out << "ZExtInst"; break;
+ case Instruction::SExt: Out << "SExtInst"; break;
+ case Instruction::FPTrunc: Out << "FPTruncInst"; break;
+ case Instruction::FPExt: Out << "FPExtInst"; break;
+ case Instruction::FPToUI: Out << "FPToUIInst"; break;
+ case Instruction::FPToSI: Out << "FPToSIInst"; break;
+ case Instruction::UIToFP: Out << "UIToFPInst"; break;
+ case Instruction::SIToFP: Out << "SIToFPInst"; break;
+ case Instruction::PtrToInt: Out << "PtrToIntInst"; break;
+ case Instruction::IntToPtr: Out << "IntToPtrInst"; break;
+ case Instruction::BitCast: Out << "BitCastInst"; break;
+ default: assert(!"Unreachable"); break;
+ }
+ Out << "(" << opNames[0] << ", "
+ << getCppName(cst->getType()) << ", \"";
+ printEscapedString(cst->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::Call: {
+ const CallInst* call = cast<CallInst>(I);
+ if (const InlineAsm* ila = dyn_cast<InlineAsm>(call->getCalledValue())) {
+ Out << "InlineAsm* " << getCppName(ila) << " = InlineAsm::get("
+ << getCppName(ila->getFunctionType()) << ", \""
+ << ila->getAsmString() << "\", \""
+ << ila->getConstraintString() << "\","
+ << (ila->hasSideEffects() ? "true" : "false") << ");";
nl(Out);
- for (unsigned i = 0; i < phi->getNumOperands(); i+=2) {
- Out << iName << "->addIncoming("
- << opNames[i] << ", " << opNames[i+1] << ");";
- nl(Out);
- }
- break;
- }
- case Instruction::Trunc:
- case Instruction::ZExt:
- case Instruction::SExt:
- case Instruction::FPTrunc:
- case Instruction::FPExt:
- case Instruction::FPToUI:
- case Instruction::FPToSI:
- case Instruction::UIToFP:
- case Instruction::SIToFP:
- case Instruction::PtrToInt:
- case Instruction::IntToPtr:
- case Instruction::BitCast: {
- const CastInst* cst = cast<CastInst>(I);
- Out << "CastInst* " << iName << " = new ";
- switch (I->getOpcode()) {
- case Instruction::Trunc: Out << "TruncInst"; break;
- case Instruction::ZExt: Out << "ZExtInst"; break;
- case Instruction::SExt: Out << "SExtInst"; break;
- case Instruction::FPTrunc: Out << "FPTruncInst"; break;
- case Instruction::FPExt: Out << "FPExtInst"; break;
- case Instruction::FPToUI: Out << "FPToUIInst"; break;
- case Instruction::FPToSI: Out << "FPToSIInst"; break;
- case Instruction::UIToFP: Out << "UIToFPInst"; break;
- case Instruction::SIToFP: Out << "SIToFPInst"; break;
- case Instruction::PtrToInt: Out << "PtrToIntInst"; break;
- case Instruction::IntToPtr: Out << "IntToPtrInst"; break;
- case Instruction::BitCast: Out << "BitCastInst"; break;
- default: assert(!"Unreachable"); break;
- }
- Out << "(" << opNames[0] << ", "
- << getCppName(cst->getType()) << ", \"";
- printEscapedString(cst->getName());
- Out << "\", " << bbname << ");";
- break;
}
- case Instruction::Call:{
- const CallInst* call = cast<CallInst>(I);
- if (const InlineAsm* ila = dyn_cast<InlineAsm>(call->getCalledValue())) {
- Out << "InlineAsm* " << getCppName(ila) << " = InlineAsm::get("
- << getCppName(ila->getFunctionType()) << ", \""
- << ila->getAsmString() << "\", \""
- << ila->getConstraintString() << "\","
- << (ila->hasSideEffects() ? "true" : "false") << ");";
- nl(Out);
- }
- if (call->getNumOperands() > 2) {
- Out << "std::vector<Value*> " << iName << "_params;";
+ if (call->getNumArgOperands() > 1) {
+ Out << "std::vector<Value*> " << iName << "_params;";
+ nl(Out);
+ for (unsigned i = 0; i < call->getNumArgOperands(); ++i) {
+ Out << iName << "_params.push_back(" << opNames[i] << ");";
nl(Out);
- for (unsigned i = 1; i < call->getNumOperands(); ++i) {
- Out << iName << "_params.push_back(" << opNames[i] << ");";
- nl(Out);
- }
- Out << "CallInst* " << iName << " = CallInst::Create("
- << opNames[0] << ", " << iName << "_params.begin(), "
- << iName << "_params.end(), \"";
- } else if (call->getNumOperands() == 2) {
- Out << "CallInst* " << iName << " = CallInst::Create("
- << opNames[0] << ", " << opNames[1] << ", \"";
- } else {
- Out << "CallInst* " << iName << " = CallInst::Create(" << opNames[0]
- << ", \"";
}
- printEscapedString(call->getName());
- Out << "\", " << bbname << ");";
- nl(Out) << iName << "->setCallingConv(";
- printCallingConv(call->getCallingConv());
- Out << ");";
- nl(Out) << iName << "->setTailCall("
- << (call->isTailCall() ? "true":"false");
- Out << ");";
- printAttributes(call->getAttributes(), iName);
- Out << iName << "->setAttributes(" << iName << "_PAL);";
- nl(Out);
- break;
- }
- case Instruction::Select: {
- const SelectInst* sel = cast<SelectInst>(I);
- Out << "SelectInst* " << getCppName(sel) << " = SelectInst::Create(";
- Out << opNames[0] << ", " << opNames[1] << ", " << opNames[2] << ", \"";
- printEscapedString(sel->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::UserOp1:
- /// FALL THROUGH
- case Instruction::UserOp2: {
- /// FIXME: What should be done here?
- break;
- }
- case Instruction::VAArg: {
- const VAArgInst* va = cast<VAArgInst>(I);
- Out << "VAArgInst* " << getCppName(va) << " = new VAArgInst("
- << opNames[0] << ", " << getCppName(va->getType()) << ", \"";
- printEscapedString(va->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::ExtractElement: {
- const ExtractElementInst* eei = cast<ExtractElementInst>(I);
- Out << "ExtractElementInst* " << getCppName(eei)
- << " = new ExtractElementInst(" << opNames[0]
- << ", " << opNames[1] << ", \"";
- printEscapedString(eei->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::InsertElement: {
- const InsertElementInst* iei = cast<InsertElementInst>(I);
- Out << "InsertElementInst* " << getCppName(iei)
- << " = InsertElementInst::Create(" << opNames[0]
- << ", " << opNames[1] << ", " << opNames[2] << ", \"";
- printEscapedString(iei->getName());
- Out << "\", " << bbname << ");";
- break;
- }
- case Instruction::ShuffleVector: {
- const ShuffleVectorInst* svi = cast<ShuffleVectorInst>(I);
- Out << "ShuffleVectorInst* " << getCppName(svi)
- << " = new ShuffleVectorInst(" << opNames[0]
- << ", " << opNames[1] << ", " << opNames[2] << ", \"";
- printEscapedString(svi->getName());
- Out << "\", " << bbname << ");";
- break;
+ Out << "CallInst* " << iName << " = CallInst::Create("
+ << opNames[call->getNumArgOperands()] << ", " << iName << "_params.begin(), "
+ << iName << "_params.end(), \"";
+ } else if (call->getNumArgOperands() == 1) {
+ Out << "CallInst* " << iName << " = CallInst::Create("
+ << opNames[call->getNumArgOperands()] << ", " << opNames[0] << ", \"";
+ } else {
+ Out << "CallInst* " << iName << " = CallInst::Create("
+ << opNames[call->getNumArgOperands()] << ", \"";
}
- case Instruction::ExtractValue: {
- const ExtractValueInst *evi = cast<ExtractValueInst>(I);
- Out << "std::vector<unsigned> " << iName << "_indices;";
+ printEscapedString(call->getName());
+ Out << "\", " << bbname << ");";
+ nl(Out) << iName << "->setCallingConv(";
+ printCallingConv(call->getCallingConv());
+ Out << ");";
+ nl(Out) << iName << "->setTailCall("
+ << (call->isTailCall() ? "true" : "false");
+ Out << ");";
+ nl(Out);
+ printAttributes(call->getAttributes(), iName);
+ Out << iName << "->setAttributes(" << iName << "_PAL);";
+ nl(Out);
+ break;
+ }
+ case Instruction::Select: {
+ const SelectInst* sel = cast<SelectInst>(I);
+ Out << "SelectInst* " << getCppName(sel) << " = SelectInst::Create(";
+ Out << opNames[0] << ", " << opNames[1] << ", " << opNames[2] << ", \"";
+ printEscapedString(sel->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::UserOp1:
+ /// FALL THROUGH
+ case Instruction::UserOp2: {
+ /// FIXME: What should be done here?
+ break;
+ }
+ case Instruction::VAArg: {
+ const VAArgInst* va = cast<VAArgInst>(I);
+ Out << "VAArgInst* " << getCppName(va) << " = new VAArgInst("
+ << opNames[0] << ", " << getCppName(va->getType()) << ", \"";
+ printEscapedString(va->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::ExtractElement: {
+ const ExtractElementInst* eei = cast<ExtractElementInst>(I);
+ Out << "ExtractElementInst* " << getCppName(eei)
+ << " = new ExtractElementInst(" << opNames[0]
+ << ", " << opNames[1] << ", \"";
+ printEscapedString(eei->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::InsertElement: {
+ const InsertElementInst* iei = cast<InsertElementInst>(I);
+ Out << "InsertElementInst* " << getCppName(iei)
+ << " = InsertElementInst::Create(" << opNames[0]
+ << ", " << opNames[1] << ", " << opNames[2] << ", \"";
+ printEscapedString(iei->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::ShuffleVector: {
+ const ShuffleVectorInst* svi = cast<ShuffleVectorInst>(I);
+ Out << "ShuffleVectorInst* " << getCppName(svi)
+ << " = new ShuffleVectorInst(" << opNames[0]
+ << ", " << opNames[1] << ", " << opNames[2] << ", \"";
+ printEscapedString(svi->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::ExtractValue: {
+ const ExtractValueInst *evi = cast<ExtractValueInst>(I);
+ Out << "std::vector<unsigned> " << iName << "_indices;";
+ nl(Out);
+ for (unsigned i = 0; i < evi->getNumIndices(); ++i) {
+ Out << iName << "_indices.push_back("
+ << evi->idx_begin()[i] << ");";
nl(Out);
- for (unsigned i = 0; i < evi->getNumIndices(); ++i) {
- Out << iName << "_indices.push_back("
- << evi->idx_begin()[i] << ");";
- nl(Out);
- }
- Out << "ExtractValueInst* " << getCppName(evi)
- << " = ExtractValueInst::Create(" << opNames[0]
- << ", "
- << iName << "_indices.begin(), " << iName << "_indices.end(), \"";
- printEscapedString(evi->getName());
- Out << "\", " << bbname << ");";
- break;
}
- case Instruction::InsertValue: {
- const InsertValueInst *ivi = cast<InsertValueInst>(I);
- Out << "std::vector<unsigned> " << iName << "_indices;";
+ Out << "ExtractValueInst* " << getCppName(evi)
+ << " = ExtractValueInst::Create(" << opNames[0]
+ << ", "
+ << iName << "_indices.begin(), " << iName << "_indices.end(), \"";
+ printEscapedString(evi->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
+ case Instruction::InsertValue: {
+ const InsertValueInst *ivi = cast<InsertValueInst>(I);
+ Out << "std::vector<unsigned> " << iName << "_indices;";
+ nl(Out);
+ for (unsigned i = 0; i < ivi->getNumIndices(); ++i) {
+ Out << iName << "_indices.push_back("
+ << ivi->idx_begin()[i] << ");";
nl(Out);
- for (unsigned i = 0; i < ivi->getNumIndices(); ++i) {
- Out << iName << "_indices.push_back("
- << ivi->idx_begin()[i] << ");";
- nl(Out);
- }
- Out << "InsertValueInst* " << getCppName(ivi)
- << " = InsertValueInst::Create(" << opNames[0]
- << ", " << opNames[1] << ", "
- << iName << "_indices.begin(), " << iName << "_indices.end(), \"";
- printEscapedString(ivi->getName());
- Out << "\", " << bbname << ");";
- break;
}
+ Out << "InsertValueInst* " << getCppName(ivi)
+ << " = InsertValueInst::Create(" << opNames[0]
+ << ", " << opNames[1] << ", "
+ << iName << "_indices.begin(), " << iName << "_indices.end(), \"";
+ printEscapedString(ivi->getName());
+ Out << "\", " << bbname << ");";
+ break;
+ }
}
DefinedValues.insert(I);
nl(Out);
delete [] opNames;
}
- // Print out the types, constants and declarations needed by one function
- void CppWriter::printFunctionUses(const Function* F) {
- nl(Out) << "// Type Definitions"; nl(Out);
- if (!is_inline) {
- // Print the function's return type
- printType(F->getReturnType());
+// Print out the types, constants and declarations needed by one function
+void CppWriter::printFunctionUses(const Function* F) {
+ nl(Out) << "// Type Definitions"; nl(Out);
+ if (!is_inline) {
+ // Print the function's return type
+ printType(F->getReturnType());
- // Print the function's function type
- printType(F->getFunctionType());
+ // Print the function's function type
+ printType(F->getFunctionType());
- // Print the types of each of the function's arguments
- for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
- AI != AE; ++AI) {
- printType(AI->getType());
- }
+ // Print the types of each of the function's arguments
+ for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ AI != AE; ++AI) {
+ printType(AI->getType());
}
+ }
- // Print type definitions for every type referenced by an instruction and
- // make a note of any global values or constants that are referenced
- SmallPtrSet<GlobalValue*,64> gvs;
- SmallPtrSet<Constant*,64> consts;
- for (Function::const_iterator BB = F->begin(), BE = F->end();
- BB != BE; ++BB){
- for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
- I != E; ++I) {
- // Print the type of the instruction itself
- printType(I->getType());
+ // Print type definitions for every type referenced by an instruction and
+ // make a note of any global values or constants that are referenced
+ SmallPtrSet<GlobalValue*,64> gvs;
+ SmallPtrSet<Constant*,64> consts;
+ for (Function::const_iterator BB = F->begin(), BE = F->end();
+ BB != BE; ++BB){
+ for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
+ I != E; ++I) {
+ // Print the type of the instruction itself
+ printType(I->getType());
- // Print the type of each of the instruction's operands
- for (unsigned i = 0; i < I->getNumOperands(); ++i) {
- Value* operand = I->getOperand(i);
- printType(operand->getType());
-
- // If the operand references a GVal or Constant, make a note of it
- if (GlobalValue* GV = dyn_cast<GlobalValue>(operand)) {
- gvs.insert(GV);
- if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
- if (GVar->hasInitializer())
- consts.insert(GVar->getInitializer());
- } else if (Constant* C = dyn_cast<Constant>(operand))
- consts.insert(C);
- }
+ // Print the type of each of the instruction's operands
+ for (unsigned i = 0; i < I->getNumOperands(); ++i) {
+ Value* operand = I->getOperand(i);
+ printType(operand->getType());
+
+ // If the operand references a GVal or Constant, make a note of it
+ if (GlobalValue* GV = dyn_cast<GlobalValue>(operand)) {
+ gvs.insert(GV);
+ if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+ if (GVar->hasInitializer())
+ consts.insert(GVar->getInitializer());
+ } else if (Constant* C = dyn_cast<Constant>(operand))
+ consts.insert(C);
}
}
+ }
- // Print the function declarations for any functions encountered
- nl(Out) << "// Function Declarations"; nl(Out);
- for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
- I != E; ++I) {
- if (Function* Fun = dyn_cast<Function>(*I)) {
- if (!is_inline || Fun != F)
- printFunctionHead(Fun);
- }
+ // Print the function declarations for any functions encountered
+ nl(Out) << "// Function Declarations"; nl(Out);
+ for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
+ I != E; ++I) {
+ if (Function* Fun = dyn_cast<Function>(*I)) {
+ if (!is_inline || Fun != F)
+ printFunctionHead(Fun);
}
+ }
- // Print the global variable declarations for any variables encountered
- nl(Out) << "// Global Variable Declarations"; nl(Out);
- for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
- I != E; ++I) {
- if (GlobalVariable* F = dyn_cast<GlobalVariable>(*I))
- printVariableHead(F);
- }
+ // Print the global variable declarations for any variables encountered
+ nl(Out) << "// Global Variable Declarations"; nl(Out);
+ for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
+ I != E; ++I) {
+ if (GlobalVariable* F = dyn_cast<GlobalVariable>(*I))
+ printVariableHead(F);
+ }
- // Print the constants found
- nl(Out) << "// Constant Definitions"; nl(Out);
- for (SmallPtrSet<Constant*,64>::iterator I = consts.begin(),
- E = consts.end(); I != E; ++I) {
- printConstant(*I);
- }
+ // Print the constants found
+ nl(Out) << "// Constant Definitions"; nl(Out);
+ for (SmallPtrSet<Constant*,64>::iterator I = consts.begin(),
+ E = consts.end(); I != E; ++I) {
+ printConstant(*I);
+ }
- // Process the global variables definitions now that all the constants have
- // been emitted. These definitions just couple the gvars with their constant
- // initializers.
- nl(Out) << "// Global Variable Definitions"; nl(Out);
- for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
- I != E; ++I) {
- if (GlobalVariable* GV = dyn_cast<GlobalVariable>(*I))
- printVariableBody(GV);
- }
+ // Process the global variables definitions now that all the constants have
+ // been emitted. These definitions just couple the gvars with their constant
+ // initializers.
+ nl(Out) << "// Global Variable Definitions"; nl(Out);
+ for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
+ I != E; ++I) {
+ if (GlobalVariable* GV = dyn_cast<GlobalVariable>(*I))
+ printVariableBody(GV);
}
+}
- void CppWriter::printFunctionHead(const Function* F) {
- nl(Out) << "Function* " << getCppName(F);
- if (is_inline) {
- Out << " = mod->getFunction(\"";
- printEscapedString(F->getName());
- Out << "\", " << getCppName(F->getFunctionType()) << ");";
- nl(Out) << "if (!" << getCppName(F) << ") {";
- nl(Out) << getCppName(F);
- }
- Out<< " = Function::Create(";
- nl(Out,1) << "/*Type=*/" << getCppName(F->getFunctionType()) << ",";
- nl(Out) << "/*Linkage=*/";
- printLinkageType(F->getLinkage());
- Out << ",";
- nl(Out) << "/*Name=*/\"";
+void CppWriter::printFunctionHead(const Function* F) {
+ nl(Out) << "Function* " << getCppName(F);
+ if (is_inline) {
+ Out << " = mod->getFunction(\"";
printEscapedString(F->getName());
- Out << "\", mod); " << (F->isDeclaration()? "// (external, no body)" : "");
- nl(Out,-1);
+ Out << "\", " << getCppName(F->getFunctionType()) << ");";
+ nl(Out) << "if (!" << getCppName(F) << ") {";
+ nl(Out) << getCppName(F);
+ }
+ Out<< " = Function::Create(";
+ nl(Out,1) << "/*Type=*/" << getCppName(F->getFunctionType()) << ",";
+ nl(Out) << "/*Linkage=*/";
+ printLinkageType(F->getLinkage());
+ Out << ",";
+ nl(Out) << "/*Name=*/\"";
+ printEscapedString(F->getName());
+ Out << "\", mod); " << (F->isDeclaration()? "// (external, no body)" : "");
+ nl(Out,-1);
+ printCppName(F);
+ Out << "->setCallingConv(";
+ printCallingConv(F->getCallingConv());
+ Out << ");";
+ nl(Out);
+ if (F->hasSection()) {
+ printCppName(F);
+ Out << "->setSection(\"" << F->getSection() << "\");";
+ nl(Out);
+ }
+ if (F->getAlignment()) {
+ printCppName(F);
+ Out << "->setAlignment(" << F->getAlignment() << ");";
+ nl(Out);
+ }
+ if (F->getVisibility() != GlobalValue::DefaultVisibility) {
printCppName(F);
- Out << "->setCallingConv(";
- printCallingConv(F->getCallingConv());
+ Out << "->setVisibility(";
+ printVisibilityType(F->getVisibility());
Out << ");";
nl(Out);
- if (F->hasSection()) {
- printCppName(F);
- Out << "->setSection(\"" << F->getSection() << "\");";
- nl(Out);
- }
- if (F->getAlignment()) {
- printCppName(F);
- Out << "->setAlignment(" << F->getAlignment() << ");";
- nl(Out);
- }
- if (F->getVisibility() != GlobalValue::DefaultVisibility) {
- printCppName(F);
- Out << "->setVisibility(";
- printVisibilityType(F->getVisibility());
- Out << ");";
- nl(Out);
- }
- if (F->hasGC()) {
- printCppName(F);
- Out << "->setGC(\"" << F->getGC() << "\");";
- nl(Out);
- }
- if (is_inline) {
- Out << "}";
- nl(Out);
- }
- printAttributes(F->getAttributes(), getCppName(F));
+ }
+ if (F->hasGC()) {
printCppName(F);
- Out << "->setAttributes(" << getCppName(F) << "_PAL);";
+ Out << "->setGC(\"" << F->getGC() << "\");";
nl(Out);
}
+ if (is_inline) {
+ Out << "}";
+ nl(Out);
+ }
+ printAttributes(F->getAttributes(), getCppName(F));
+ printCppName(F);
+ Out << "->setAttributes(" << getCppName(F) << "_PAL);";
+ nl(Out);
+}
- void CppWriter::printFunctionBody(const Function *F) {
- if (F->isDeclaration())
- return; // external functions have no bodies.
-
- // Clear the DefinedValues and ForwardRefs maps because we can't have
- // cross-function forward refs
- ForwardRefs.clear();
- DefinedValues.clear();
+void CppWriter::printFunctionBody(const Function *F) {
+ if (F->isDeclaration())
+ return; // external functions have no bodies.
- // Create all the argument values
- if (!is_inline) {
- if (!F->arg_empty()) {
- Out << "Function::arg_iterator args = " << getCppName(F)
- << "->arg_begin();";
- nl(Out);
- }
- for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
- AI != AE; ++AI) {
- Out << "Value* " << getCppName(AI) << " = args++;";
- nl(Out);
- if (AI->hasName()) {
- Out << getCppName(AI) << "->setName(\"" << AI->getName() << "\");";
- nl(Out);
- }
- }
- }
+ // Clear the DefinedValues and ForwardRefs maps because we can't have
+ // cross-function forward refs
+ ForwardRefs.clear();
+ DefinedValues.clear();
- // Create all the basic blocks
- nl(Out);
- for (Function::const_iterator BI = F->begin(), BE = F->end();
- BI != BE; ++BI) {
- std::string bbname(getCppName(BI));
- Out << "BasicBlock* " << bbname <<
- " = BasicBlock::Create(mod->getContext(), \"";
- if (BI->hasName())
- printEscapedString(BI->getName());
- Out << "\"," << getCppName(BI->getParent()) << ",0);";
+ // Create all the argument values
+ if (!is_inline) {
+ if (!F->arg_empty()) {
+ Out << "Function::arg_iterator args = " << getCppName(F)
+ << "->arg_begin();";
nl(Out);
}
-
- // Output all of its basic blocks... for the function
- for (Function::const_iterator BI = F->begin(), BE = F->end();
- BI != BE; ++BI) {
- std::string bbname(getCppName(BI));
- nl(Out) << "// Block " << BI->getName() << " (" << bbname << ")";
+ for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ AI != AE; ++AI) {
+ Out << "Value* " << getCppName(AI) << " = args++;";
nl(Out);
-
- // Output all of the instructions in the basic block...
- for (BasicBlock::const_iterator I = BI->begin(), E = BI->end();
- I != E; ++I) {
- printInstruction(I,bbname);
+ if (AI->hasName()) {
+ Out << getCppName(AI) << "->setName(\"" << AI->getName() << "\");";
+ nl(Out);
}
}
+ }
- // Loop over the ForwardRefs and resolve them now that all instructions
- // are generated.
- if (!ForwardRefs.empty()) {
- nl(Out) << "// Resolve Forward References";
- nl(Out);
- }
+ // Create all the basic blocks
+ nl(Out);
+ for (Function::const_iterator BI = F->begin(), BE = F->end();
+ BI != BE; ++BI) {
+ std::string bbname(getCppName(BI));
+ Out << "BasicBlock* " << bbname <<
+ " = BasicBlock::Create(mod->getContext(), \"";
+ if (BI->hasName())
+ printEscapedString(BI->getName());
+ Out << "\"," << getCppName(BI->getParent()) << ",0);";
+ nl(Out);
+ }
- while (!ForwardRefs.empty()) {
- ForwardRefMap::iterator I = ForwardRefs.begin();
- Out << I->second << "->replaceAllUsesWith("
- << getCppName(I->first) << "); delete " << I->second << ";";
- nl(Out);
- ForwardRefs.erase(I);
+ // Output all of the function's basic blocks.
+ for (Function::const_iterator BI = F->begin(), BE = F->end();
+ BI != BE; ++BI) {
+ std::string bbname(getCppName(BI));
+ nl(Out) << "// Block " << BI->getName() << " (" << bbname << ")";
+ nl(Out);
+
+ // Output all of the instructions in the basic block...
+ for (BasicBlock::const_iterator I = BI->begin(), E = BI->end();
+ I != E; ++I) {
+ printInstruction(I,bbname);
}
}
- void CppWriter::printInline(const std::string& fname,
- const std::string& func) {
- const Function* F = TheModule->getFunction(func);
- if (!F) {
- error(std::string("Function '") + func + "' not found in input module");
- return;
- }
- if (F->isDeclaration()) {
- error(std::string("Function '") + func + "' is external!");
- return;
- }
- nl(Out) << "BasicBlock* " << fname << "(Module* mod, Function *"
- << getCppName(F);
- unsigned arg_count = 1;
- for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
- AI != AE; ++AI) {
- Out << ", Value* arg_" << arg_count;
- }
- Out << ") {";
+ // Loop over the ForwardRefs and resolve them now that all instructions
+ // are generated.
+ if (!ForwardRefs.empty()) {
+ nl(Out) << "// Resolve Forward References";
nl(Out);
- is_inline = true;
- printFunctionUses(F);
- printFunctionBody(F);
- is_inline = false;
- Out << "return " << getCppName(F->begin()) << ";";
- nl(Out) << "}";
+ }
+
+ while (!ForwardRefs.empty()) {
+ ForwardRefMap::iterator I = ForwardRefs.begin();
+ Out << I->second << "->replaceAllUsesWith("
+ << getCppName(I->first) << "); delete " << I->second << ";";
nl(Out);
+ ForwardRefs.erase(I);
}
+}
- void CppWriter::printModuleBody() {
- // Print out all the type definitions
- nl(Out) << "// Type Definitions"; nl(Out);
- printTypes(TheModule);
-
- // Functions can call each other and global variables can reference them so
- // define all the functions first before emitting their function bodies.
- nl(Out) << "// Function Declarations"; nl(Out);
- for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
- I != E; ++I)
- printFunctionHead(I);
-
- // Process the global variables declarations. We can't initialze them until
- // after the constants are printed so just print a header for each global
- nl(Out) << "// Global Variable Declarations\n"; nl(Out);
- for (Module::const_global_iterator I = TheModule->global_begin(),
- E = TheModule->global_end(); I != E; ++I) {
- printVariableHead(I);
- }
+void CppWriter::printInline(const std::string& fname,
+ const std::string& func) {
+ const Function* F = TheModule->getFunction(func);
+ if (!F) {
+ error(std::string("Function '") + func + "' not found in input module");
+ return;
+ }
+ if (F->isDeclaration()) {
+ error(std::string("Function '") + func + "' is external!");
+ return;
+ }
+ nl(Out) << "BasicBlock* " << fname << "(Module* mod, Function *"
+ << getCppName(F);
+ unsigned arg_count = 1;
+ for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+ AI != AE; ++AI) {
+ Out << ", Value* arg_" << arg_count++;
+ }
+ Out << ") {";
+ nl(Out);
+ is_inline = true;
+ printFunctionUses(F);
+ printFunctionBody(F);
+ is_inline = false;
+ Out << "return " << getCppName(F->begin()) << ";";
+ nl(Out) << "}";
+ nl(Out);
+}
- // Print out all the constants definitions. Constants don't recurse except
- // through GlobalValues. All GlobalValues have been declared at this point
- // so we can proceed to generate the constants.
- nl(Out) << "// Constant Definitions"; nl(Out);
- printConstants(TheModule);
-
- // Process the global variables definitions now that all the constants have
- // been emitted. These definitions just couple the gvars with their constant
- // initializers.
- nl(Out) << "// Global Variable Definitions"; nl(Out);
- for (Module::const_global_iterator I = TheModule->global_begin(),
- E = TheModule->global_end(); I != E; ++I) {
- printVariableBody(I);
- }
+void CppWriter::printModuleBody() {
+ // Print out all the type definitions
+ nl(Out) << "// Type Definitions"; nl(Out);
+ printTypes(TheModule);
+
+ // Functions can call each other and global variables can reference them so
+ // define all the functions first before emitting their function bodies.
+ nl(Out) << "// Function Declarations"; nl(Out);
+ for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
+ I != E; ++I)
+ printFunctionHead(I);
+
+ // Process the global variable declarations. We can't initialize them until
+ // after the constants are printed, so just print a header for each global.
+ nl(Out) << "// Global Variable Declarations\n"; nl(Out);
+ for (Module::const_global_iterator I = TheModule->global_begin(),
+ E = TheModule->global_end(); I != E; ++I) {
+ printVariableHead(I);
+ }
- // Finally, we can safely put out all of the function bodies.
- nl(Out) << "// Function Definitions"; nl(Out);
- for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
- I != E; ++I) {
- if (!I->isDeclaration()) {
- nl(Out) << "// Function: " << I->getName() << " (" << getCppName(I)
- << ")";
- nl(Out) << "{";
- nl(Out,1);
- printFunctionBody(I);
- nl(Out,-1) << "}";
- nl(Out);
- }
- }
+ // Print out all the constants definitions. Constants don't recurse except
+ // through GlobalValues. All GlobalValues have been declared at this point
+ // so we can proceed to generate the constants.
+ nl(Out) << "// Constant Definitions"; nl(Out);
+ printConstants(TheModule);
+
+ // Process the global variables definitions now that all the constants have
+ // been emitted. These definitions just couple the gvars with their constant
+ // initializers.
+ nl(Out) << "// Global Variable Definitions"; nl(Out);
+ for (Module::const_global_iterator I = TheModule->global_begin(),
+ E = TheModule->global_end(); I != E; ++I) {
+ printVariableBody(I);
}
- void CppWriter::printProgram(const std::string& fname,
- const std::string& mName) {
- Out << "#include <llvm/LLVMContext.h>\n";
- Out << "#include <llvm/Module.h>\n";
- Out << "#include <llvm/DerivedTypes.h>\n";
- Out << "#include <llvm/Constants.h>\n";
- Out << "#include <llvm/GlobalVariable.h>\n";
- Out << "#include <llvm/Function.h>\n";
- Out << "#include <llvm/CallingConv.h>\n";
- Out << "#include <llvm/BasicBlock.h>\n";
- Out << "#include <llvm/Instructions.h>\n";
- Out << "#include <llvm/InlineAsm.h>\n";
- Out << "#include <llvm/Support/FormattedStream.h>\n";
- Out << "#include <llvm/Support/MathExtras.h>\n";
- Out << "#include <llvm/Pass.h>\n";
- Out << "#include <llvm/PassManager.h>\n";
- Out << "#include <llvm/ADT/SmallVector.h>\n";
- Out << "#include <llvm/Analysis/Verifier.h>\n";
- Out << "#include <llvm/Assembly/PrintModulePass.h>\n";
- Out << "#include <algorithm>\n";
- Out << "using namespace llvm;\n\n";
- Out << "Module* " << fname << "();\n\n";
- Out << "int main(int argc, char**argv) {\n";
- Out << " Module* Mod = " << fname << "();\n";
- Out << " verifyModule(*Mod, PrintMessageAction);\n";
- Out << " PassManager PM;\n";
- Out << " PM.add(createPrintModulePass(&outs()));\n";
- Out << " PM.run(*Mod);\n";
- Out << " return 0;\n";
- Out << "}\n\n";
- printModule(fname,mName);
- }
-
- void CppWriter::printModule(const std::string& fname,
- const std::string& mName) {
- nl(Out) << "Module* " << fname << "() {";
- nl(Out,1) << "// Module Construction";
- nl(Out) << "Module* mod = new Module(\"";
- printEscapedString(mName);
- Out << "\", getGlobalContext());";
- if (!TheModule->getTargetTriple().empty()) {
- nl(Out) << "mod->setDataLayout(\"" << TheModule->getDataLayout() << "\");";
- }
- if (!TheModule->getTargetTriple().empty()) {
- nl(Out) << "mod->setTargetTriple(\"" << TheModule->getTargetTriple()
- << "\");";
+ // Finally, we can safely put out all of the function bodies.
+ nl(Out) << "// Function Definitions"; nl(Out);
+ for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
+ I != E; ++I) {
+ if (!I->isDeclaration()) {
+ nl(Out) << "// Function: " << I->getName() << " (" << getCppName(I)
+ << ")";
+ nl(Out) << "{";
+ nl(Out,1);
+ printFunctionBody(I);
+ nl(Out,-1) << "}";
+ nl(Out);
}
+ }
+}
- if (!TheModule->getModuleInlineAsm().empty()) {
- nl(Out) << "mod->setModuleInlineAsm(\"";
- printEscapedString(TheModule->getModuleInlineAsm());
- Out << "\");";
- }
- nl(Out);
+void CppWriter::printProgram(const std::string& fname,
+ const std::string& mName) {
+ Out << "#include <llvm/LLVMContext.h>\n";
+ Out << "#include <llvm/Module.h>\n";
+ Out << "#include <llvm/DerivedTypes.h>\n";
+ Out << "#include <llvm/Constants.h>\n";
+ Out << "#include <llvm/GlobalVariable.h>\n";
+ Out << "#include <llvm/Function.h>\n";
+ Out << "#include <llvm/CallingConv.h>\n";
+ Out << "#include <llvm/BasicBlock.h>\n";
+ Out << "#include <llvm/Instructions.h>\n";
+ Out << "#include <llvm/InlineAsm.h>\n";
+ Out << "#include <llvm/Support/FormattedStream.h>\n";
+ Out << "#include <llvm/Support/MathExtras.h>\n";
+ Out << "#include <llvm/Pass.h>\n";
+ Out << "#include <llvm/PassManager.h>\n";
+ Out << "#include <llvm/ADT/SmallVector.h>\n";
+ Out << "#include <llvm/Analysis/Verifier.h>\n";
+ Out << "#include <llvm/Assembly/PrintModulePass.h>\n";
+ Out << "#include <algorithm>\n";
+ Out << "using namespace llvm;\n\n";
+ Out << "Module* " << fname << "();\n\n";
+ Out << "int main(int argc, char**argv) {\n";
+ Out << " Module* Mod = " << fname << "();\n";
+ Out << " verifyModule(*Mod, PrintMessageAction);\n";
+ Out << " PassManager PM;\n";
+ Out << " PM.add(createPrintModulePass(&outs()));\n";
+ Out << " PM.run(*Mod);\n";
+ Out << " return 0;\n";
+ Out << "}\n\n";
+ printModule(fname,mName);
+}
- // Loop over the dependent libraries and emit them.
- Module::lib_iterator LI = TheModule->lib_begin();
- Module::lib_iterator LE = TheModule->lib_end();
- while (LI != LE) {
- Out << "mod->addLibrary(\"" << *LI << "\");";
- nl(Out);
- ++LI;
- }
- printModuleBody();
- nl(Out) << "return mod;";
- nl(Out,-1) << "}";
+void CppWriter::printModule(const std::string& fname,
+ const std::string& mName) {
+ nl(Out) << "Module* " << fname << "() {";
+ nl(Out,1) << "// Module Construction";
+ nl(Out) << "Module* mod = new Module(\"";
+ printEscapedString(mName);
+ Out << "\", getGlobalContext());";
+ if (!TheModule->getDataLayout().empty()) {
+ nl(Out) << "mod->setDataLayout(\"" << TheModule->getDataLayout() << "\");";
+ }
+ if (!TheModule->getTargetTriple().empty()) {
+ nl(Out) << "mod->setTargetTriple(\"" << TheModule->getTargetTriple()
+ << "\");";
+ }
+
+ if (!TheModule->getModuleInlineAsm().empty()) {
+ nl(Out) << "mod->setModuleInlineAsm(\"";
+ printEscapedString(TheModule->getModuleInlineAsm());
+ Out << "\");";
+ }
+ nl(Out);
+
+ // Loop over the dependent libraries and emit them.
+ Module::lib_iterator LI = TheModule->lib_begin();
+ Module::lib_iterator LE = TheModule->lib_end();
+ while (LI != LE) {
+ Out << "mod->addLibrary(\"" << *LI << "\");";
nl(Out);
+ ++LI;
}
+ printModuleBody();
+ nl(Out) << "return mod;";
+ nl(Out,-1) << "}";
+ nl(Out);
+}
+
+void CppWriter::printContents(const std::string& fname,
+ const std::string& mName) {
+ Out << "\nModule* " << fname << "(Module *mod) {\n";
+ Out << "\nmod->setModuleIdentifier(\"";
+ printEscapedString(mName);
+ Out << "\");\n";
+ printModuleBody();
+ Out << "\nreturn mod;\n";
+ Out << "\n}\n";
+}
- void CppWriter::printContents(const std::string& fname,
- const std::string& mName) {
- Out << "\nModule* " << fname << "(Module *mod) {\n";
- Out << "\nmod->setModuleIdentifier(\"";
- printEscapedString(mName);
- Out << "\");\n";
- printModuleBody();
- Out << "\nreturn mod;\n";
- Out << "\n}\n";
+void CppWriter::printFunction(const std::string& fname,
+ const std::string& funcName) {
+ const Function* F = TheModule->getFunction(funcName);
+ if (!F) {
+ error(std::string("Function '") + funcName + "' not found in input module");
+ return;
}
+ Out << "\nFunction* " << fname << "(Module *mod) {\n";
+ printFunctionUses(F);
+ printFunctionHead(F);
+ printFunctionBody(F);
+ Out << "return " << getCppName(F) << ";\n";
+ Out << "}\n";
+}
- void CppWriter::printFunction(const std::string& fname,
- const std::string& funcName) {
- const Function* F = TheModule->getFunction(funcName);
- if (!F) {
- error(std::string("Function '") + funcName + "' not found in input module");
- return;
- }
- Out << "\nFunction* " << fname << "(Module *mod) {\n";
- printFunctionUses(F);
- printFunctionHead(F);
- printFunctionBody(F);
- Out << "return " << getCppName(F) << ";\n";
- Out << "}\n";
- }
-
- void CppWriter::printFunctions() {
- const Module::FunctionListType &funcs = TheModule->getFunctionList();
- Module::const_iterator I = funcs.begin();
- Module::const_iterator IE = funcs.end();
-
- for (; I != IE; ++I) {
- const Function &func = *I;
- if (!func.isDeclaration()) {
- std::string name("define_");
- name += func.getName();
- printFunction(name, func.getName());
- }
+void CppWriter::printFunctions() {
+ const Module::FunctionListType &funcs = TheModule->getFunctionList();
+ Module::const_iterator I = funcs.begin();
+ Module::const_iterator IE = funcs.end();
+
+ for (; I != IE; ++I) {
+ const Function &func = *I;
+ if (!func.isDeclaration()) {
+ std::string name("define_");
+ name += func.getName();
+ printFunction(name, func.getName());
}
}
+}
- void CppWriter::printVariable(const std::string& fname,
- const std::string& varName) {
- const GlobalVariable* GV = TheModule->getNamedGlobal(varName);
+void CppWriter::printVariable(const std::string& fname,
+ const std::string& varName) {
+ const GlobalVariable* GV = TheModule->getNamedGlobal(varName);
- if (!GV) {
- error(std::string("Variable '") + varName + "' not found in input module");
- return;
- }
- Out << "\nGlobalVariable* " << fname << "(Module *mod) {\n";
- printVariableUses(GV);
- printVariableHead(GV);
- printVariableBody(GV);
- Out << "return " << getCppName(GV) << ";\n";
- Out << "}\n";
- }
-
- void CppWriter::printType(const std::string& fname,
- const std::string& typeName) {
- const Type* Ty = TheModule->getTypeByName(typeName);
- if (!Ty) {
- error(std::string("Type '") + typeName + "' not found in input module");
- return;
- }
- Out << "\nType* " << fname << "(Module *mod) {\n";
- printType(Ty);
- Out << "return " << getCppName(Ty) << ";\n";
- Out << "}\n";
- }
-
- bool CppWriter::runOnModule(Module &M) {
- TheModule = &M;
-
- // Emit a header
- Out << "// Generated by llvm2cpp - DO NOT MODIFY!\n\n";
-
- // Get the name of the function we're supposed to generate
- std::string fname = FuncName.getValue();
-
- // Get the name of the thing we are to generate
- std::string tgtname = NameToGenerate.getValue();
- if (GenerationType == GenModule ||
- GenerationType == GenContents ||
- GenerationType == GenProgram ||
- GenerationType == GenFunctions) {
- if (tgtname == "!bad!") {
- if (M.getModuleIdentifier() == "-")
- tgtname = "<stdin>";
- else
- tgtname = M.getModuleIdentifier();
- }
- } else if (tgtname == "!bad!")
- error("You must use the -for option with -gen-{function,variable,type}");
-
- switch (WhatToGenerate(GenerationType)) {
- case GenProgram:
- if (fname.empty())
- fname = "makeLLVMModule";
- printProgram(fname,tgtname);
- break;
- case GenModule:
- if (fname.empty())
- fname = "makeLLVMModule";
- printModule(fname,tgtname);
- break;
- case GenContents:
- if (fname.empty())
- fname = "makeLLVMModuleContents";
- printContents(fname,tgtname);
- break;
- case GenFunction:
- if (fname.empty())
- fname = "makeLLVMFunction";
- printFunction(fname,tgtname);
- break;
- case GenFunctions:
- printFunctions();
- break;
- case GenInline:
- if (fname.empty())
- fname = "makeLLVMInline";
- printInline(fname,tgtname);
- break;
- case GenVariable:
- if (fname.empty())
- fname = "makeLLVMVariable";
- printVariable(fname,tgtname);
- break;
- case GenType:
- if (fname.empty())
- fname = "makeLLVMType";
- printType(fname,tgtname);
- break;
- default:
- error("Invalid generation option");
- }
+ if (!GV) {
+ error(std::string("Variable '") + varName + "' not found in input module");
+ return;
+ }
+ Out << "\nGlobalVariable* " << fname << "(Module *mod) {\n";
+ printVariableUses(GV);
+ printVariableHead(GV);
+ printVariableBody(GV);
+ Out << "return " << getCppName(GV) << ";\n";
+ Out << "}\n";
+}
- return false;
+void CppWriter::printType(const std::string& fname,
+ const std::string& typeName) {
+ const Type* Ty = TheModule->getTypeByName(typeName);
+ if (!Ty) {
+ error(std::string("Type '") + typeName + "' not found in input module");
+ return;
}
+ Out << "\nType* " << fname << "(Module *mod) {\n";
+ printType(Ty);
+ Out << "return " << getCppName(Ty) << ";\n";
+ Out << "}\n";
+}
+
+bool CppWriter::runOnModule(Module &M) {
+ TheModule = &M;
+
+ // Emit a header
+ Out << "// Generated by llvm2cpp - DO NOT MODIFY!\n\n";
+
+ // Get the name of the function we're supposed to generate
+ std::string fname = FuncName.getValue();
+
+ // Get the name of the thing we are to generate
+ std::string tgtname = NameToGenerate.getValue();
+ if (GenerationType == GenModule ||
+ GenerationType == GenContents ||
+ GenerationType == GenProgram ||
+ GenerationType == GenFunctions) {
+ if (tgtname == "!bad!") {
+ if (M.getModuleIdentifier() == "-")
+ tgtname = "<stdin>";
+ else
+ tgtname = M.getModuleIdentifier();
+ }
+ } else if (tgtname == "!bad!")
+ error("You must use the -for option with -gen-{function,variable,type}");
+
+ switch (WhatToGenerate(GenerationType)) {
+ case GenProgram:
+ if (fname.empty())
+ fname = "makeLLVMModule";
+ printProgram(fname,tgtname);
+ break;
+ case GenModule:
+ if (fname.empty())
+ fname = "makeLLVMModule";
+ printModule(fname,tgtname);
+ break;
+ case GenContents:
+ if (fname.empty())
+ fname = "makeLLVMModuleContents";
+ printContents(fname,tgtname);
+ break;
+ case GenFunction:
+ if (fname.empty())
+ fname = "makeLLVMFunction";
+ printFunction(fname,tgtname);
+ break;
+ case GenFunctions:
+ printFunctions();
+ break;
+ case GenInline:
+ if (fname.empty())
+ fname = "makeLLVMInline";
+ printInline(fname,tgtname);
+ break;
+ case GenVariable:
+ if (fname.empty())
+ fname = "makeLLVMVariable";
+ printVariable(fname,tgtname);
+ break;
+ case GenType:
+ if (fname.empty())
+ fname = "makeLLVMType";
+ printType(fname,tgtname);
+ break;
+ default:
+ error("Invalid generation option");
+ }
+
+ return false;
}
char CppWriter::ID = 0;
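For reference, the module-level output that printModule() and printFunctionHead()
produce has roughly the following shape. This is a sketch reconstructed from the
Out << strings above, not captured tool output; FuncTy_0 and the identifiers are
placeholders:

    Module* makeLLVMModule() {
      // Module Construction
      Module* mod = new Module("example", getGlobalContext());

      // Function Declarations
      Function* func_foo = Function::Create(
        /*Type=*/FuncTy_0,   // a FunctionType emitted earlier by printType()
        /*Linkage=*/GlobalValue::ExternalLinkage,
        /*Name=*/"foo", mod); // (external, no body)
      func_foo->setCallingConv(CallingConv::C);

      return mod;
    }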
diff --git a/contrib/llvm/lib/Target/MBlaze/AsmPrinter/MBlazeAsmPrinter.cpp b/contrib/llvm/lib/Target/MBlaze/AsmPrinter/MBlazeAsmPrinter.cpp
index e42e9b3..b6e4d65 100644
--- a/contrib/llvm/lib/Target/MBlaze/AsmPrinter/MBlazeAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/AsmPrinter/MBlazeAsmPrinter.cpp
@@ -145,8 +145,9 @@ void MBlazeAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
const MachineFrameInfo *MFI = MF->getFrameInfo();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned RegNum = MBlazeRegisterInfo::getRegisterNumbering(CSI[i].getReg());
- if (CSI[i].getRegClass() == MBlaze::CPURegsRegisterClass)
+ unsigned Reg = CSI[i].getReg();
+ unsigned RegNum = MBlazeRegisterInfo::getRegisterNumbering(Reg);
+ if (MBlaze::CPURegsRegisterClass->contains(Reg))
CPUBitmask |= (1 << RegNum);
}
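The hunk above tracks an API change: CalleeSavedInfo no longer carries a register
class, so the target asks the class itself whether it contains the register. The
membership test in isolation (a sketch; CSI, CPUBitmask and the class pointer come
from the surrounding function):

    unsigned Reg = CSI[i].getReg();
    if (MBlaze::CPURegsRegisterClass->contains(Reg))  // class query replaces
      CPUBitmask |= 1 << MBlazeRegisterInfo::getRegisterNumbering(Reg);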
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
index 23889b1..1730b68 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.cpp
@@ -234,6 +234,24 @@ MBlazeTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineRegisterInfo &R = F->getRegInfo();
MachineBasicBlock *loop = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *finish = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(It, loop);
+ F->insert(It, finish);
+
+ // Update machine-CFG edges by transferring all successors and remaining
+ // instructions from the current block to the new block, which will
+ // contain the Phi node for the select.
+ finish->splice(finish->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ finish->transferSuccessorsAndUpdatePHIs(BB);
+
+ // Add the true and fallthrough blocks as its successors.
+ BB->addSuccessor(loop);
+ BB->addSuccessor(finish);
+
+ // Next, add the finish block as a successor of the loop block
+ loop->addSuccessor(finish);
+ loop->addSuccessor(loop);
unsigned IAMT = R.createVirtualRegister(MBlaze::CPURegsRegisterClass);
BuildMI(BB, dl, TII->get(MBlaze::ANDI), IAMT)
@@ -249,26 +267,6 @@ MBlazeTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
.addReg(IAMT)
.addMBB(finish);
- F->insert(It, loop);
- F->insert(It, finish);
-
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
- e = BB->succ_end(); i != e; ++i)
- finish->addSuccessor(*i);
-
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while(!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
- BB->addSuccessor(loop);
- BB->addSuccessor(finish);
-
- // Next, add the finish block as a successor of the loop block
- loop->addSuccessor(finish);
- loop->addSuccessor(loop);
-
unsigned DST = R.createVirtualRegister(MBlaze::CPURegsRegisterClass);
unsigned NDST = R.createVirtualRegister(MBlaze::CPURegsRegisterClass);
BuildMI(loop, dl, TII->get(MBlaze::PHI), DST)
@@ -298,12 +296,13 @@ MBlazeTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
.addReg(NAMT)
.addMBB(loop);
- BuildMI(finish, dl, TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
+ BuildMI(*finish, finish->begin(), dl,
+ TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
.addReg(IVAL).addMBB(BB)
.addReg(NDST).addMBB(loop);
// The pseudo instruction is no longer needed so remove it
- F->DeleteMachineInstr(MI);
+ MI->eraseFromParent();
return finish;
}
@@ -338,27 +337,23 @@ MBlazeTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case MBlazeCC::LE: Opc = MBlaze::BGTID; break;
}
- BuildMI(BB, dl, TII->get(Opc))
- .addReg(MI->getOperand(3).getReg())
- .addMBB(dneBB);
-
F->insert(It, flsBB);
F->insert(It, dneBB);
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
- e = BB->succ_end(); i != e; ++i)
- dneBB->addSuccessor(*i);
+ // Transfer the remainder of BB and its successor edges to dneBB.
+ dneBB->splice(dneBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ dneBB->transferSuccessorsAndUpdatePHIs(BB);
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while(!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
BB->addSuccessor(flsBB);
BB->addSuccessor(dneBB);
flsBB->addSuccessor(dneBB);
+ BuildMI(BB, dl, TII->get(Opc))
+ .addReg(MI->getOperand(3).getReg())
+ .addMBB(dneBB);
+
// sinkMBB:
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
@@ -366,11 +361,12 @@ MBlazeTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// .addReg(MI->getOperand(1).getReg()).addMBB(flsBB)
// .addReg(MI->getOperand(2).getReg()).addMBB(BB);
- BuildMI(dneBB, dl, TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
+ BuildMI(*dneBB, dneBB->begin(), dl,
+ TII->get(MBlaze::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(2).getReg()).addMBB(flsBB)
.addReg(MI->getOperand(1).getReg()).addMBB(BB);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return dneBB;
}
}
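Both custom-inserter rewrites in this file switch to the same splice-based recipe.
Condensed to its essentials (a sketch; MI is the pseudo being expanded, BB its
block, F the enclosing MachineFunction, It the insertion point):

    MachineBasicBlock *tailBB = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, tailBB);

    // Move everything after MI into the new block, then move BB's
    // successor edges onto it, rewriting PHIs in those successors.
    tailBB->splice(tailBB->begin(), BB,
                   llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
    tailBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(tailBB);     // wire up the new CFG edge
    MI->eraseFromParent();        // the pseudo is no longer needed

Note that PHIs added to the new block must now go at its head (hence the
BuildMI(*finish, finish->begin(), ...) form), since the spliced-in tail
instructions already occupy the block.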
@@ -408,7 +404,7 @@ LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32);
return DAG.getNode(MBlazeISD::Wrap, dl, MVT::i32, GA);
}
@@ -439,10 +435,8 @@ LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
SDValue MBlazeTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
SDValue ResNode;
- EVT PtrVT = Op.getValueType();
ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
const Constant *C = N->getConstVal();
- SDValue Zero = DAG.getConstant(0, PtrVT);
DebugLoc dl = Op.getDebugLoc();
SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
@@ -531,6 +525,7 @@ SDValue MBlazeTargetLowering::
LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -562,7 +557,7 @@ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
EVT RegVT = VA.getLocVT();
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
// Promote the value if needed.
switch (VA.getLocInfo()) {
@@ -590,7 +585,7 @@ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
// Create the frame index object for this incoming parameter
LastArgStackLoc = (FirstStackArgLoc + VA.getLocMemOffset());
int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
- LastArgStackLoc, true, false);
+ LastArgStackLoc, true);
SDValue PtrOff = DAG.getFrameIndex(FI,getPointerTy());
@@ -623,7 +618,7 @@ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv,
// node so that legalize doesn't hack it.
unsigned char OpFlag = MBlazeII::MO_NO_FLAG;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(),
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
getPointerTy(), 0, OpFlag);
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
@@ -779,7 +774,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
// offset on PEI::calculateFrameObjectOffsets.
// Arguments are always 32-bit.
unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
- int FI = MFI->CreateFixedObject(ArgSize, 0, true, false);
+ int FI = MFI->CreateFixedObject(ArgSize, 0, true);
MBlazeFI->recordLoadArgsFI(FI, -(ArgSize+
(FirstStackArgLoc + VA.getLocMemOffset())));
@@ -810,7 +805,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
unsigned LiveReg = MF.addLiveIn(Reg, RC);
SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, LiveReg, MVT::i32);
- int FI = MFI->CreateFixedObject(4, 0, true, false);
+ int FI = MFI->CreateFixedObject(4, 0, true);
MBlazeFI->recordStoreVarArgsFI(FI, -(4+(StackLoc*4)));
SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy());
OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff, NULL, 0,
@@ -841,6 +836,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
SDValue MBlazeTargetLowering::
LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of
// the return value to a location
@@ -869,7 +865,7 @@ LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- Outs[i].Val, Flag);
+ OutVals[i], Flag);
// guarantee that all emitted copies are
// stuck together, avoiding something bad
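The recurring change in the calling-convention hooks is mechanical: ISD::OutputArg
used to carry the outgoing SDValue in its Val field, and the value now travels in
a parallel OutVals array, so Outs[i].Val becomes OutVals[i]. The return path in
isolation (sketch; RVLocs, Chain, Flag as in LowerReturn above):

    for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
      Flag = Chain.getValue(1);   // keep the emitted copies glued together
    }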
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h
index 9f9ac89..5ec2563 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeISelLowering.h
@@ -109,6 +109,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -117,6 +118,7 @@ namespace llvm {
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
virtual MachineBasicBlock *
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp
index 4c4d86b..6ff5825 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.cpp
@@ -110,15 +110,13 @@ insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const {
BuildMI(MBB, MI, DL, get(MBlaze::NOP));
}
-bool MBlazeInstrInfo::
-copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
+void MBlazeInstrInfo::
+copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
llvm::BuildMI(MBB, I, DL, get(MBlaze::ADD), DestReg)
- .addReg(SrcReg).addReg(MBlaze::R0);
- return true;
+ .addReg(SrcReg, getKillRegState(KillSrc)).addReg(MBlaze::R0);
}
void MBlazeInstrInfo::
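copyRegToReg returned a bool and took source and destination register classes;
its replacement copyPhysReg must always succeed, takes a kill flag, and leaves
class checks to the caller. From the caller's side the new hook looks like this
(sketch; the operand names are placeholders):

    TII->copyPhysReg(MBB, InsertPos, DL, DestReg, SrcReg, /*KillSrc=*/true);

For MBlaze the implementation above lowers every copy to ADD DestReg, SrcReg, R0,
propagating the kill flag onto the source operand.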
@@ -141,54 +139,17 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
.addImm(0).addFrameIndex(FI);
}
-MachineInstr *MBlazeInstrInfo::
-foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops, int FI) const {
- if (Ops.size() != 1) return NULL;
-
- MachineInstr *NewMI = NULL;
-
- switch (MI->getOpcode()) {
- case MBlaze::OR:
- case MBlaze::ADD:
- if ((MI->getOperand(0).isReg()) &&
- (MI->getOperand(2).isReg()) &&
- (MI->getOperand(2).getReg() == MBlaze::R0) &&
- (MI->getOperand(1).isReg())) {
- if (Ops[0] == 0) { // COPY -> STORE
- unsigned SrcReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(MBlaze::SW))
- .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
- .addImm(0).addFrameIndex(FI);
- } else { // COPY -> LOAD
- unsigned DstReg = MI->getOperand(0).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(MBlaze::LW))
- .addReg(DstReg, RegState::Define | getDeadRegState(isDead) |
- getUndefRegState(isUndef))
- .addImm(0).addFrameIndex(FI);
- }
- }
- break;
- }
-
- return NewMI;
-}
-
//===----------------------------------------------------------------------===//
// Branch Analysis
//===----------------------------------------------------------------------===//
unsigned MBlazeInstrInfo::
InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Can only insert uncond branches so far.
assert(Cond.empty() && !FBB && TBB && "Can only handle uncond branches!");
- BuildMI(&MBB, DebugLoc(), get(MBlaze::BRI)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(MBlaze::BRI)).addMBB(TBB);
return 1;
}
@@ -209,12 +170,8 @@ unsigned MBlazeInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
GlobalBaseReg = RegInfo.createVirtualRegister(MBlaze::CPURegsRegisterClass);
- bool Ok = TII->copyRegToReg(FirstMBB, MBBI, GlobalBaseReg, MBlaze::R20,
- MBlaze::CPURegsRegisterClass,
- MBlaze::CPURegsRegisterClass,
- DebugLoc());
- assert(Ok && "Couldn't assign to global base register!");
- Ok = Ok; // Silence warning when assertions are turned off.
+ BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),
+ GlobalBaseReg).addReg(MBlaze::R20);
RegInfo.addLiveIn(MBlaze::R20);
MBlazeFI->setGlobalBaseReg(GlobalBaseReg);
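Rather than asking the target to synthesize a copy up front, the code now emits a
TargetOpcode::COPY pseudo; a later lowering pass expands it through the target's
copyPhysReg hook (the one redefined earlier in this file), so the bool-returning
copyRegToReg call and its assert go away. The emission in isolation (sketch):

    BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),
            GlobalBaseReg).addReg(MBlaze::R20);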
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h
index c9fdc88..f074370 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeInstrInfo.h
@@ -198,13 +198,12 @@ public:
/// Branch Analysis
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
@@ -217,18 +216,6 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- return 0;
- }
-
/// Insert nop instruction when hazard condition is found
virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.cpp
index 7ae465d..4abeb2e 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.cpp
@@ -14,7 +14,7 @@
#include "MBlazeMCAsmInfo.h"
using namespace llvm;
-MBlazeMCAsmInfo::MBlazeMCAsmInfo(const Target &T, const StringRef &TT) {
+MBlazeMCAsmInfo::MBlazeMCAsmInfo(const Target &T, StringRef TT) {
AlignmentIsInBytes = false;
Data16bitsDirective = "\t.half\t";
Data32bitsDirective = "\t.word\t";
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.h b/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.h
index bccb418..9d6ff3a 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeMCAsmInfo.h
@@ -14,15 +14,15 @@
#ifndef MBLAZETARGETASMINFO_H
#define MBLAZETARGETASMINFO_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
class Target;
- class StringRef;
class MBlazeMCAsmInfo : public MCAsmInfo {
public:
- explicit MBlazeMCAsmInfo(const Target &T, const StringRef &TT);
+ explicit MBlazeMCAsmInfo(const Target &T, StringRef TT);
};
} // namespace llvm
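StringRef is just a pointer-and-length pair, so passing const StringRef& adds an
indirection without saving anything; the convention throughout these diffs is to
pass it by value. A minimal illustration of the convention (not code from this
patch; the function is hypothetical):

    #include "llvm/ADT/StringRef.h"
    using llvm::StringRef;

    static bool isMBlazeTriple(StringRef TT) {  // by value: two words copied
      return TT.startswith("mblaze");
    }

Hence the header swaps its forward declaration of StringRef for the real #include.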
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
index f15eea9..8cafa8c 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
@@ -148,22 +148,6 @@ getCalleeSavedRegs(const MachineFunction *MF) const {
return CalleeSavedRegs;
}
-/// MBlaze Callee Saved Register Classes
-const TargetRegisterClass* const* MBlazeRegisterInfo::
-getCalleeSavedRegClasses(const MachineFunction *MF) const {
- static const TargetRegisterClass * const CalleeSavedRC[] = {
- &MBlaze::CPURegsRegClass, &MBlaze::CPURegsRegClass,
- &MBlaze::CPURegsRegClass, &MBlaze::CPURegsRegClass,
- &MBlaze::CPURegsRegClass, &MBlaze::CPURegsRegClass,
- &MBlaze::CPURegsRegClass, &MBlaze::CPURegsRegClass,
- &MBlaze::CPURegsRegClass, &MBlaze::CPURegsRegClass,
- &MBlaze::CPURegsRegClass, &MBlaze::CPURegsRegClass,
- 0
- };
-
- return CalleeSavedRC;
-}
-
BitVector MBlazeRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
diff --git a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h
index b618bf4..af97b0e 100644
--- a/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h
+++ b/contrib/llvm/lib/Target/MBlaze/MBlazeRegisterInfo.h
@@ -54,9 +54,6 @@ struct MBlazeRegisterInfo : public MBlazeGenRegisterInfo {
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
- const TargetRegisterClass* const*
- getCalleeSavedRegClasses(const MachineFunction* MF = 0) const;
-
BitVector getReservedRegs(const MachineFunction &MF) const;
bool hasFP(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/MSIL/MSILWriter.cpp b/contrib/llvm/lib/Target/MSIL/MSILWriter.cpp
index 3de173c..cc350e8 100644
--- a/contrib/llvm/lib/Target/MSIL/MSILWriter.cpp
+++ b/contrib/llvm/lib/Target/MSIL/MSILWriter.cpp
@@ -808,7 +808,7 @@ void MSILWriter::printIntrinsicCall(const IntrinsicInst* Inst) {
std::string Name;
switch (Inst->getIntrinsicID()) {
case Intrinsic::vastart:
- Name = getValueName(Inst->getOperand(1));
+ Name = getValueName(Inst->getArgOperand(0));
Name.insert(Name.length()-1,"$valist");
// Obtain the argument handle.
printSimpleInstruction("ldloca",Name.c_str());
@@ -817,20 +817,20 @@ void MSILWriter::printIntrinsicCall(const IntrinsicInst* Inst) {
"instance void [mscorlib]System.ArgIterator::.ctor"
"(valuetype [mscorlib]System.RuntimeArgumentHandle)");
// Save as pointer type "void*"
- printValueLoad(Inst->getOperand(1));
+ printValueLoad(Inst->getArgOperand(0));
printSimpleInstruction("ldloca",Name.c_str());
printIndirectSave(PointerType::getUnqual(
IntegerType::get(Inst->getContext(), 8)));
break;
case Intrinsic::vaend:
// Close argument list handle.
- printIndirectLoad(Inst->getOperand(1));
+ printIndirectLoad(Inst->getArgOperand(0));
printSimpleInstruction("call","instance void [mscorlib]System.ArgIterator::End()");
break;
case Intrinsic::vacopy:
// Copy "ArgIterator" valuetype.
- printIndirectLoad(Inst->getOperand(1));
- printIndirectLoad(Inst->getOperand(2));
+ printIndirectLoad(Inst->getArgOperand(0));
+ printIndirectLoad(Inst->getArgOperand(1));
printSimpleInstruction("cpobj","[mscorlib]System.ArgIterator");
break;
default:
@@ -845,10 +845,11 @@ void MSILWriter::printCallInstruction(const Instruction* Inst) {
// Handle intrinsic function.
printIntrinsicCall(cast<IntrinsicInst>(Inst));
} else {
+ const CallInst *CI = cast<CallInst>(Inst);
// Load arguments to stack and call function.
- for (int I = 1, E = Inst->getNumOperands(); I!=E; ++I)
- printValueLoad(Inst->getOperand(I));
- printFunctionCall(Inst->getOperand(0),Inst);
+ for (int I = 0, E = CI->getNumArgOperands(); I!=E; ++I)
+ printValueLoad(CI->getArgOperand(I));
+ printFunctionCall(CI->getCalledFunction(), Inst);
}
}
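The getOperand to getArgOperand changes through this file account for CallInst's
operand layout: the callee used to sit at operand 0 with arguments starting at 1,
and getArgOperand(i)/getNumArgOperands() hide that offset (and survive later
reshufflings of the callee's slot). In client code (sketch; visit() is a
placeholder):

    const CallInst *CI = cast<CallInst>(Inst);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
      visit(CI->getArgOperand(i));            // was getOperand(i + 1)
    Function *F = CI->getCalledFunction();    // null for indirect calls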
@@ -1002,8 +1003,8 @@ void MSILWriter::printInvokeInstruction(const InvokeInst* Inst) {
std::string Label = "leave$normal_"+utostr(getUniqID());
Out << ".try {\n";
// Load arguments
- for (int I = 3, E = Inst->getNumOperands(); I!=E; ++I)
- printValueLoad(Inst->getOperand(I));
+ for (int I = 0, E = Inst->getNumArgOperands(); I!=E; ++I)
+ printValueLoad(Inst->getArgOperand(I));
// Print call instruction
printFunctionCall(Inst->getOperand(0),Inst);
// Save function result and leave "try" block
@@ -1280,7 +1281,7 @@ void MSILWriter::printLocalVariables(const Function& F) {
case Intrinsic::vaend:
case Intrinsic::vacopy:
isVaList = true;
- VaList = Inst->getOperand(1);
+ VaList = Inst->getArgOperand(0);
break;
default:
isVaList = false;
@@ -1620,8 +1621,7 @@ const char* MSILWriter::getLibraryName(const GlobalVariable* GV) {
}
-const char* MSILWriter::getLibraryForSymbol(const StringRef &Name,
- bool isFunction,
+const char* MSILWriter::getLibraryForSymbol(StringRef Name, bool isFunction,
CallingConv::ID CallingConv) {
// TODO: Read *.def file with function and libraries definitions.
return "MSVCRT.DLL";
diff --git a/contrib/llvm/lib/Target/MSIL/MSILWriter.h b/contrib/llvm/lib/Target/MSIL/MSILWriter.h
index a95ae23..92a3abe 100644
--- a/contrib/llvm/lib/Target/MSIL/MSILWriter.h
+++ b/contrib/llvm/lib/Target/MSIL/MSILWriter.h
@@ -246,7 +246,7 @@ namespace llvm {
const char* getLibraryName(const GlobalVariable* GV);
- const char* getLibraryForSymbol(const StringRef &Name, bool isFunction,
+ const char* getLibraryForSymbol(StringRef Name, bool isFunction,
CallingConv::ID CallingConv);
void printExternals();
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 7b328bb..3395e9f 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -272,7 +272,8 @@ bool MSP430DAGToDAGISel::SelectAddr(SDNode *Op, SDValue N,
AM.Base.Reg;
if (AM.GV)
- Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i16, AM.Disp,
+ Disp = CurDAG->getTargetGlobalAddress(AM.GV, Op->getDebugLoc(),
+ MVT::i16, AM.Disp,
0/*AM.SymbolFlags*/);
else if (AM.CP)
Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i16,
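getTargetGlobalAddress now takes a DebugLoc so target address nodes carry a source
location; each call site simply threads through the location of the node being
lowered. The new signature in isolation (sketch):

    SDValue GA = CurDAG->getTargetGlobalAddress(GV, Op->getDebugLoc(),
                                                MVT::i16, AM.Disp,
                                                0 /*TargetFlags*/);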
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
index 403400e..a1703a3 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -278,6 +278,7 @@ MSP430TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -290,7 +291,7 @@ MSP430TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
case CallingConv::Fast:
case CallingConv::C:
return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
- Outs, Ins, dl, DAG, InVals);
+ Outs, OutVals, Ins, dl, DAG, InVals);
case CallingConv::MSP430_INTR:
report_fatal_error("ISRs cannot be called directly");
return SDValue();
@@ -369,7 +370,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
<< "\n";
}
// Create the frame index object for this incoming parameter...
- int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset(), true, false);
+ int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset(), true);
// Create the SelectionDAG nodes corresponding to a load
// from this parameter
@@ -387,6 +388,7 @@ SDValue
MSP430TargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to a location
@@ -421,7 +423,7 @@ MSP430TargetLowering::LowerReturn(SDValue Chain,
assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- Outs[i].Val, Flag);
+ OutVals[i], Flag);
// Guarantee that all emitted copies are stuck together,
// avoiding anything else being scheduled between them.
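Note: across this import the calling-convention hooks gain a separate OutVals array: ISD::OutputArg now carries only flags and type information, while the SDValues being passed or returned travel in a parallel SmallVector. A sketch of the resulting return loop, assuming the usual RVLocs/Flag locals these functions set up:

    // Values come from OutVals[i]; Outs[i] keeps only the flags and type.
    for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
      CCValAssign &VA = RVLocs[i];
      assert(VA.isRegLoc() && "Can only return in registers!");
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
      Flag = Chain.getValue(1); // glue the copies so they stay adjacent
    }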
@@ -447,6 +449,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg>
&Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -471,7 +474,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
// Promote the value if needed.
switch (VA.getLocInfo()) {
@@ -529,7 +532,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i16);
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i16);
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i16);
@@ -642,7 +645,8 @@ SDValue MSP430TargetLowering::LowerGlobalAddress(SDValue Op,
int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
// Create the TargetGlobalAddress node, folding in the constant offset.
- SDValue Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
+ SDValue Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
+ getPointerTy(), Offset);
return DAG.getNode(MSP430ISD::Wrapper, Op.getDebugLoc(),
getPointerTy(), Result);
}
@@ -888,7 +892,7 @@ MSP430TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
// Set up a frame object for the return address.
uint64_t SlotSize = TD->getPointerSize();
ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
- true, false);
+ true);
FuncInfo->setRAIndex(ReturnAddrIndex);
}
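Note: MachineFrameInfo::CreateFixedObject also lost its trailing boolean in this revision, which is why the MSP430 and Mips call sites shrink from four arguments to three; only size, SP offset, and the immutability flag remain (my reading of the 2.8-era headers, so treat the parameter naming as an assumption):

    // Fixed stack object for an incoming argument: size, offset, immutable.
    int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset(),
                                    /*Immutable=*/true);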
@@ -1070,7 +1074,10 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI,
// Update machine-CFG edges by transferring all successors of the current
// block to the block containing instructions after shift.
- RemBB->transferSuccessors(BB);
+ RemBB->splice(RemBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ RemBB->transferSuccessorsAndUpdatePHIs(BB);
// Add edges BB => LoopBB => RemBB, BB => RemBB, LoopBB => LoopBB
BB->addSuccessor(LoopBB);
@@ -1116,11 +1123,11 @@ MSP430TargetLowering::EmitShiftInstr(MachineInstr *MI,
// RemBB:
// DestReg = phi [%SrcReg, BB], [%ShiftReg, LoopBB]
- BuildMI(RemBB, dl, TII.get(MSP430::PHI), DstReg)
+ BuildMI(*RemBB, RemBB->begin(), dl, TII.get(MSP430::PHI), DstReg)
.addReg(SrcReg).addMBB(BB)
.addReg(ShiftReg2).addMBB(LoopBB);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return RemBB;
}
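Note: the custom inserters are rewritten for the newer MachineBasicBlock surgery API: instead of transferSuccessors plus implicit assumptions about where the pseudo sits, the tail of the block is spliced into the continuation block and PHIs in the successors are rewritten in one call. A generic sketch of the pattern, assuming MI is the pseudo being expanded inside BB (ContBB is a placeholder name):

    // Split BB after MI: the instructions below the pseudo move to ContBB,
    // and BB's successor edges (plus PHI operands naming BB) follow them.
    MachineFunction *F = BB->getParent();
    MachineBasicBlock *ContBB =
        F->CreateMachineBasicBlock(BB->getBasicBlock());
    F->insert(llvm::next(MachineFunction::iterator(BB)), ContBB);
    ContBB->splice(ContBB->begin(), BB,
                   llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
    ContBB->transferSuccessorsAndUpdatePHIs(BB);
    MI->eraseFromParent(); // the pseudo is gone once expansion is done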
@@ -1158,18 +1165,22 @@ MSP430TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineFunction *F = BB->getParent();
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);
- BuildMI(BB, dl, TII.get(MSP430::JCC))
- .addMBB(copy1MBB)
- .addImm(MI->getOperand(3).getImm());
F->insert(I, copy0MBB);
F->insert(I, copy1MBB);
// Update machine-CFG edges by transferring all successors of the current
// block to the new block which will contain the Phi node for the select.
- copy1MBB->transferSuccessors(BB);
+ copy1MBB->splice(copy1MBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
// Next, add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(copy1MBB);
+ BuildMI(BB, dl, TII.get(MSP430::JCC))
+ .addMBB(copy1MBB)
+ .addImm(MI->getOperand(3).getImm());
+
// copy0MBB:
// %FalseValue = ...
// # fallthrough to copy1MBB
@@ -1182,11 +1193,11 @@ MSP430TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = copy1MBB;
- BuildMI(BB, dl, TII.get(MSP430::PHI),
+ BuildMI(*BB, BB->begin(), dl, TII.get(MSP430::PHI),
MI->getOperand(0).getReg())
.addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
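Note: two smaller points in the same hunk: the conditional branch is now built only after the successor edges are in place, and the PHI is created with the BuildMI overload that takes an explicit insertion point, since machine PHIs must sit at the top of their block. Sketch with placeholder register and block names:

    // PHIs must lead the block: insert at begin(), never append.
    BuildMI(*SinkBB, SinkBB->begin(), dl, TII.get(MSP430::PHI), DstReg)
        .addReg(FalseReg).addMBB(FalseBB)
        .addReg(TrueReg).addMBB(ThisBB);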
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
index 01c5071..673c543 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430ISelLowering.h
@@ -127,6 +127,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -155,6 +156,7 @@ namespace llvm {
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -163,6 +165,7 @@ namespace llvm {
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
index 18226ab..df28d07 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -83,27 +83,20 @@ void MSP430InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
llvm_unreachable("Cannot store this register to stack slot!");
}
-bool MSP430InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
- if (DestRC == SrcRC) {
- unsigned Opc;
- if (DestRC == &MSP430::GR16RegClass) {
- Opc = MSP430::MOV16rr;
- } else if (DestRC == &MSP430::GR8RegClass) {
- Opc = MSP430::MOV8rr;
- } else {
- return false;
- }
-
- BuildMI(MBB, I, DL, get(Opc), DestReg).addReg(SrcReg);
- return true;
- }
+void MSP430InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ unsigned Opc;
+ if (MSP430::GR16RegClass.contains(DestReg, SrcReg))
+ Opc = MSP430::MOV16rr;
+ else if (MSP430::GR8RegClass.contains(DestReg, SrcReg))
+ Opc = MSP430::MOV8rr;
+ else
+ llvm_unreachable("Impossible reg-to-reg copy");
- return false;
+ BuildMI(MBB, I, DL, get(Opc), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
}
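Note: copyRegToReg (register-class based and allowed to fail) becomes copyPhysReg, which dispatches on the concrete physical registers and must handle every copy the allocator can request, hence llvm_unreachable instead of return false. The new hook also threads the source's kill flag, which the old interface dropped. A hypothetical call site (the register choice is illustrative):

    // Emit a 16-bit copy before MI and mark the source register dead.
    TII.copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(),
                    MSP430::R12W, MSP430::R15W, /*KillSrc=*/true);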
bool
@@ -330,10 +323,8 @@ bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
unsigned
MSP430InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc operand
- DebugLoc DL;
-
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
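Note: InsertBranch now receives its DebugLoc from the caller instead of fabricating an empty one, which retires the FIXME deleted above. A hedged example of the updated call, with Cond left empty for an unconditional branch and TargetMBB as a placeholder:

    SmallVector<MachineOperand, 1> Cond; // empty => unconditional branch
    TII.InsertBranch(MBB, TargetMBB, /*FBB=*/0, Cond, MI->getDebugLoc());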
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
index 842b4cb..ebbda1a 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.h
@@ -49,11 +49,10 @@ public:
///
virtual const TargetRegisterInfo &getRegisterInfo() const { return RI; }
- bool copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
bool isMoveInstr(const MachineInstr& MI,
unsigned &SrcReg, unsigned &DstReg,
@@ -93,7 +92,8 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
};
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td
index 6b9a2f2..8792b22 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td
+++ b/contrib/llvm/lib/Target/MSP430/MSP430InstrInfo.td
@@ -25,13 +25,16 @@ class SDTCisI16<int OpNum> : SDTCisVT<OpNum, i16>;
def SDT_MSP430Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
def SDT_MSP430CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i16>]>;
def SDT_MSP430CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i16>, SDTCisVT<1, i16>]>;
-def SDT_MSP430Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
+def SDT_MSP430Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
+ SDTCisPtrTy<0>]>;
def SDT_MSP430Cmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
def SDT_MSP430BrCC : SDTypeProfile<0, 2, [SDTCisVT<0, OtherVT>,
SDTCisVT<1, i8>]>;
-def SDT_MSP430SelectCC : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
+def SDT_MSP430SelectCC : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<1, 2>,
SDTCisVT<3, i8>]>;
-def SDT_MSP430Shift : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, SDTCisI8<2>]>;
+def SDT_MSP430Shift : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>,
+ SDTCisI8<2>]>;
//===----------------------------------------------------------------------===//
// MSP430 Specific Node Definitions.
@@ -46,7 +49,7 @@ def MSP430rla : SDNode<"MSP430ISD::RLA", SDTIntUnaryOp, []>;
def MSP430rrc : SDNode<"MSP430ISD::RRC", SDTIntUnaryOp, []>;
def MSP430call : SDNode<"MSP430ISD::CALL", SDT_MSP430Call,
- [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
+ [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag, SDNPVariadic]>;
def MSP430callseq_start :
SDNode<"ISD::CALLSEQ_START", SDT_MSP430CallSeqStart,
[SDNPHasChain, SDNPOutFlag]>;
@@ -55,8 +58,10 @@ def MSP430callseq_end :
[SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
def MSP430Wrapper : SDNode<"MSP430ISD::Wrapper", SDT_MSP430Wrapper>;
def MSP430cmp : SDNode<"MSP430ISD::CMP", SDT_MSP430Cmp, [SDNPOutFlag]>;
-def MSP430brcc : SDNode<"MSP430ISD::BR_CC", SDT_MSP430BrCC, [SDNPHasChain, SDNPInFlag]>;
-def MSP430selectcc: SDNode<"MSP430ISD::SELECT_CC", SDT_MSP430SelectCC, [SDNPInFlag]>;
+def MSP430brcc : SDNode<"MSP430ISD::BR_CC", SDT_MSP430BrCC,
+ [SDNPHasChain, SDNPInFlag]>;
+def MSP430selectcc: SDNode<"MSP430ISD::SELECT_CC", SDT_MSP430SelectCC,
+ [SDNPInFlag]>;
def MSP430shl : SDNode<"MSP430ISD::SHL", SDT_MSP430Shift, []>;
def MSP430sra : SDNode<"MSP430ISD::SRA", SDT_MSP430Shift, []>;
def MSP430srl : SDNode<"MSP430ISD::SRL", SDT_MSP430Shift, []>;
@@ -117,14 +122,14 @@ def ADJCALLSTACKUP : Pseudo<(outs), (ins i16imm:$amt1, i16imm:$amt2),
}
let usesCustomInserter = 1 in {
- def Select8 : Pseudo<(outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cc),
+ def Select8 : Pseudo<(outs GR8:$dst), (ins GR8:$src, GR8:$src2, i8imm:$cc),
"# Select8 PSEUDO",
[(set GR8:$dst,
- (MSP430selectcc GR8:$src1, GR8:$src2, imm:$cc))]>;
- def Select16 : Pseudo<(outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cc),
+ (MSP430selectcc GR8:$src, GR8:$src2, imm:$cc))]>;
+ def Select16 : Pseudo<(outs GR16:$dst), (ins GR16:$src, GR16:$src2, i8imm:$cc),
"# Select16 PSEUDO",
[(set GR16:$dst,
- (MSP430selectcc GR16:$src1, GR16:$src2, imm:$cc))]>;
+ (MSP430selectcc GR16:$src, GR16:$src2, imm:$cc))]>;
let Defs = [SRW] in {
def Shl8 : Pseudo<(outs GR8:$dst), (ins GR8:$src, GR8:$cnt),
"# Shl8 PSEUDO",
@@ -330,60 +335,60 @@ def MOV16mm : I16mm<0x0,
//===----------------------------------------------------------------------===//
// Arithmetic Instructions
-let isTwoAddress = 1 in {
+let Constraints = "$src = $dst" in {
let Defs = [SRW] in {
let isCommutable = 1 in { // X = ADD Y, Z == X = ADD Z, Y
def ADD8rr : I8rr<0x0,
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+ (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
"add.b\t{$src2, $dst}",
- [(set GR8:$dst, (add GR8:$src1, GR8:$src2)),
+ [(set GR8:$dst, (add GR8:$src, GR8:$src2)),
(implicit SRW)]>;
def ADD16rr : I16rr<0x0,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
"add.w\t{$src2, $dst}",
- [(set GR16:$dst, (add GR16:$src1, GR16:$src2)),
+ [(set GR16:$dst, (add GR16:$src, GR16:$src2)),
(implicit SRW)]>;
}
def ADD8rm : I8rm<0x0,
- (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+ (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
"add.b\t{$src2, $dst}",
- [(set GR8:$dst, (add GR8:$src1, (load addr:$src2))),
+ [(set GR8:$dst, (add GR8:$src, (load addr:$src2))),
(implicit SRW)]>;
def ADD16rm : I16rm<0x0,
- (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+ (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
"add.w\t{$src2, $dst}",
- [(set GR16:$dst, (add GR16:$src1, (load addr:$src2))),
+ [(set GR16:$dst, (add GR16:$src, (load addr:$src2))),
(implicit SRW)]>;
let mayLoad = 1, hasExtraDefRegAllocReq = 1,
-Constraints = "$base = $base_wb, $src1 = $dst" in {
+Constraints = "$base = $base_wb, $src = $dst" in {
def ADD8rm_POST : IForm8<0x0, DstReg, SrcPostInc, Size2Bytes,
(outs GR8:$dst, GR16:$base_wb),
- (ins GR8:$src1, GR16:$base),
+ (ins GR8:$src, GR16:$base),
"add.b\t{@$base+, $dst}", []>;
def ADD16rm_POST : IForm16<0x0, DstReg, SrcPostInc, Size2Bytes,
(outs GR16:$dst, GR16:$base_wb),
- (ins GR16:$src1, GR16:$base),
+ (ins GR16:$src, GR16:$base),
"add.w\t{@$base+, $dst}", []>;
}
def ADD8ri : I8ri<0x0,
- (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+ (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
"add.b\t{$src2, $dst}",
- [(set GR8:$dst, (add GR8:$src1, imm:$src2)),
+ [(set GR8:$dst, (add GR8:$src, imm:$src2)),
(implicit SRW)]>;
def ADD16ri : I16ri<0x0,
- (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+ (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
"add.w\t{$src2, $dst}",
- [(set GR16:$dst, (add GR16:$src1, imm:$src2)),
+ [(set GR16:$dst, (add GR16:$src, imm:$src2)),
(implicit SRW)]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def ADD8mr : I8mr<0x0,
(outs), (ins memdst:$dst, GR8:$src),
"add.b\t{$src, $dst}",
@@ -424,40 +429,40 @@ let Uses = [SRW] in {
let isCommutable = 1 in { // X = ADDC Y, Z == X = ADDC Z, Y
def ADC8rr : I8rr<0x0,
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+ (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
"addc.b\t{$src2, $dst}",
- [(set GR8:$dst, (adde GR8:$src1, GR8:$src2)),
+ [(set GR8:$dst, (adde GR8:$src, GR8:$src2)),
(implicit SRW)]>;
def ADC16rr : I16rr<0x0,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
"addc.w\t{$src2, $dst}",
- [(set GR16:$dst, (adde GR16:$src1, GR16:$src2)),
+ [(set GR16:$dst, (adde GR16:$src, GR16:$src2)),
(implicit SRW)]>;
} // isCommutable
def ADC8ri : I8ri<0x0,
- (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+ (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
"addc.b\t{$src2, $dst}",
- [(set GR8:$dst, (adde GR8:$src1, imm:$src2)),
+ [(set GR8:$dst, (adde GR8:$src, imm:$src2)),
(implicit SRW)]>;
def ADC16ri : I16ri<0x0,
- (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+ (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
"addc.w\t{$src2, $dst}",
- [(set GR16:$dst, (adde GR16:$src1, imm:$src2)),
+ [(set GR16:$dst, (adde GR16:$src, imm:$src2)),
(implicit SRW)]>;
def ADC8rm : I8rm<0x0,
- (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+ (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
"addc.b\t{$src2, $dst}",
- [(set GR8:$dst, (adde GR8:$src1, (load addr:$src2))),
+ [(set GR8:$dst, (adde GR8:$src, (load addr:$src2))),
(implicit SRW)]>;
def ADC16rm : I16rm<0x0,
- (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+ (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
"addc.w\t{$src2, $dst}",
- [(set GR16:$dst, (adde GR16:$src1, (load addr:$src2))),
+ [(set GR16:$dst, (adde GR16:$src, (load addr:$src2))),
(implicit SRW)]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def ADC8mr : I8mr<0x0,
(outs), (ins memdst:$dst, GR8:$src),
"addc.b\t{$src, $dst}",
@@ -498,52 +503,52 @@ def ADC16mm : I8mm<0x0,
let isCommutable = 1 in { // X = AND Y, Z == X = AND Z, Y
def AND8rr : I8rr<0x0,
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+ (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
"and.b\t{$src2, $dst}",
- [(set GR8:$dst, (and GR8:$src1, GR8:$src2)),
+ [(set GR8:$dst, (and GR8:$src, GR8:$src2)),
(implicit SRW)]>;
def AND16rr : I16rr<0x0,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
"and.w\t{$src2, $dst}",
- [(set GR16:$dst, (and GR16:$src1, GR16:$src2)),
+ [(set GR16:$dst, (and GR16:$src, GR16:$src2)),
(implicit SRW)]>;
}
def AND8ri : I8ri<0x0,
- (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+ (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
"and.b\t{$src2, $dst}",
- [(set GR8:$dst, (and GR8:$src1, imm:$src2)),
+ [(set GR8:$dst, (and GR8:$src, imm:$src2)),
(implicit SRW)]>;
def AND16ri : I16ri<0x0,
- (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+ (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
"and.w\t{$src2, $dst}",
- [(set GR16:$dst, (and GR16:$src1, imm:$src2)),
+ [(set GR16:$dst, (and GR16:$src, imm:$src2)),
(implicit SRW)]>;
def AND8rm : I8rm<0x0,
- (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+ (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
"and.b\t{$src2, $dst}",
- [(set GR8:$dst, (and GR8:$src1, (load addr:$src2))),
+ [(set GR8:$dst, (and GR8:$src, (load addr:$src2))),
(implicit SRW)]>;
def AND16rm : I16rm<0x0,
- (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+ (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
"and.w\t{$src2, $dst}",
- [(set GR16:$dst, (and GR16:$src1, (load addr:$src2))),
+ [(set GR16:$dst, (and GR16:$src, (load addr:$src2))),
(implicit SRW)]>;
let mayLoad = 1, hasExtraDefRegAllocReq = 1,
-Constraints = "$base = $base_wb, $src1 = $dst" in {
+Constraints = "$base = $base_wb, $src = $dst" in {
def AND8rm_POST : IForm8<0x0, DstReg, SrcPostInc, Size2Bytes,
(outs GR8:$dst, GR16:$base_wb),
- (ins GR8:$src1, GR16:$base),
+ (ins GR8:$src, GR16:$base),
"and.b\t{@$base+, $dst}", []>;
def AND16rm_POST : IForm16<0x0, DstReg, SrcPostInc, Size2Bytes,
(outs GR16:$dst, GR16:$base_wb),
- (ins GR16:$src1, GR16:$base),
+ (ins GR16:$src, GR16:$base),
"and.w\t{@$base+, $dst}", []>;
}
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def AND8mr : I8mr<0x0,
(outs), (ins memdst:$dst, GR8:$src),
"and.b\t{$src, $dst}",
@@ -582,46 +587,46 @@ def AND16mm : I16mm<0x0,
let isCommutable = 1 in { // X = OR Y, Z == X = OR Z, Y
def OR8rr : I8rr<0x0,
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+ (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
"bis.b\t{$src2, $dst}",
- [(set GR8:$dst, (or GR8:$src1, GR8:$src2))]>;
+ [(set GR8:$dst, (or GR8:$src, GR8:$src2))]>;
def OR16rr : I16rr<0x0,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
"bis.w\t{$src2, $dst}",
- [(set GR16:$dst, (or GR16:$src1, GR16:$src2))]>;
+ [(set GR16:$dst, (or GR16:$src, GR16:$src2))]>;
}
def OR8ri : I8ri<0x0,
- (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+ (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
"bis.b\t{$src2, $dst}",
- [(set GR8:$dst, (or GR8:$src1, imm:$src2))]>;
+ [(set GR8:$dst, (or GR8:$src, imm:$src2))]>;
def OR16ri : I16ri<0x0,
- (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+ (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
"bis.w\t{$src2, $dst}",
- [(set GR16:$dst, (or GR16:$src1, imm:$src2))]>;
+ [(set GR16:$dst, (or GR16:$src, imm:$src2))]>;
def OR8rm : I8rm<0x0,
- (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+ (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
"bis.b\t{$src2, $dst}",
- [(set GR8:$dst, (or GR8:$src1, (load addr:$src2)))]>;
+ [(set GR8:$dst, (or GR8:$src, (load addr:$src2)))]>;
def OR16rm : I16rm<0x0,
- (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+ (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
"bis.w\t{$src2, $dst}",
- [(set GR16:$dst, (or GR16:$src1, (load addr:$src2)))]>;
+ [(set GR16:$dst, (or GR16:$src, (load addr:$src2)))]>;
let mayLoad = 1, hasExtraDefRegAllocReq = 1,
-Constraints = "$base = $base_wb, $src1 = $dst" in {
+Constraints = "$base = $base_wb, $src = $dst" in {
def OR8rm_POST : IForm8<0x0, DstReg, SrcPostInc, Size2Bytes,
(outs GR8:$dst, GR16:$base_wb),
- (ins GR8:$src1, GR16:$base),
+ (ins GR8:$src, GR16:$base),
"bis.b\t{@$base+, $dst}", []>;
def OR16rm_POST : IForm16<0x0, DstReg, SrcPostInc, Size2Bytes,
(outs GR16:$dst, GR16:$base_wb),
- (ins GR16:$src1, GR16:$base),
+ (ins GR16:$src, GR16:$base),
"bis.w\t{@$base+, $dst}", []>;
}
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def OR8mr : I8mr<0x0,
(outs), (ins memdst:$dst, GR8:$src),
"bis.b\t{$src, $dst}",
@@ -654,24 +659,24 @@ def OR16mm : I16mm<0x0,
// bic does not modify condition codes
def BIC8rr : I8rr<0x0,
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+ (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
"bic.b\t{$src2, $dst}",
- [(set GR8:$dst, (and GR8:$src1, (not GR8:$src2)))]>;
+ [(set GR8:$dst, (and GR8:$src, (not GR8:$src2)))]>;
def BIC16rr : I16rr<0x0,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
"bic.w\t{$src2, $dst}",
- [(set GR16:$dst, (and GR16:$src1, (not GR16:$src2)))]>;
+ [(set GR16:$dst, (and GR16:$src, (not GR16:$src2)))]>;
def BIC8rm : I8rm<0x0,
- (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+ (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
"bic.b\t{$src2, $dst}",
- [(set GR8:$dst, (and GR8:$src1, (not (i8 (load addr:$src2)))))]>;
+ [(set GR8:$dst, (and GR8:$src, (not (i8 (load addr:$src2)))))]>;
def BIC16rm : I16rm<0x0,
- (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+ (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
"bic.w\t{$src2, $dst}",
- [(set GR16:$dst, (and GR16:$src1, (not (i16 (load addr:$src2)))))]>;
+ [(set GR16:$dst, (and GR16:$src, (not (i16 (load addr:$src2)))))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def BIC8mr : I8mr<0x0,
(outs), (ins memdst:$dst, GR8:$src),
"bic.b\t{$src, $dst}",
@@ -695,52 +700,52 @@ def BIC16mm : I16mm<0x0,
let isCommutable = 1 in { // X = XOR Y, Z == X = XOR Z, Y
def XOR8rr : I8rr<0x0,
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+ (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
"xor.b\t{$src2, $dst}",
- [(set GR8:$dst, (xor GR8:$src1, GR8:$src2)),
+ [(set GR8:$dst, (xor GR8:$src, GR8:$src2)),
(implicit SRW)]>;
def XOR16rr : I16rr<0x0,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
"xor.w\t{$src2, $dst}",
- [(set GR16:$dst, (xor GR16:$src1, GR16:$src2)),
+ [(set GR16:$dst, (xor GR16:$src, GR16:$src2)),
(implicit SRW)]>;
}
def XOR8ri : I8ri<0x0,
- (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+ (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
"xor.b\t{$src2, $dst}",
- [(set GR8:$dst, (xor GR8:$src1, imm:$src2)),
+ [(set GR8:$dst, (xor GR8:$src, imm:$src2)),
(implicit SRW)]>;
def XOR16ri : I16ri<0x0,
- (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+ (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
"xor.w\t{$src2, $dst}",
- [(set GR16:$dst, (xor GR16:$src1, imm:$src2)),
+ [(set GR16:$dst, (xor GR16:$src, imm:$src2)),
(implicit SRW)]>;
def XOR8rm : I8rm<0x0,
- (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+ (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
"xor.b\t{$src2, $dst}",
- [(set GR8:$dst, (xor GR8:$src1, (load addr:$src2))),
+ [(set GR8:$dst, (xor GR8:$src, (load addr:$src2))),
(implicit SRW)]>;
def XOR16rm : I16rm<0x0,
- (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+ (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
"xor.w\t{$src2, $dst}",
- [(set GR16:$dst, (xor GR16:$src1, (load addr:$src2))),
+ [(set GR16:$dst, (xor GR16:$src, (load addr:$src2))),
(implicit SRW)]>;
let mayLoad = 1, hasExtraDefRegAllocReq = 1,
-Constraints = "$base = $base_wb, $src1 = $dst" in {
+Constraints = "$base = $base_wb, $src = $dst" in {
def XOR8rm_POST : IForm8<0x0, DstReg, SrcPostInc, Size2Bytes,
(outs GR8:$dst, GR16:$base_wb),
- (ins GR8:$src1, GR16:$base),
+ (ins GR8:$src, GR16:$base),
"xor.b\t{@$base+, $dst}", []>;
def XOR16rm_POST : IForm16<0x0, DstReg, SrcPostInc, Size2Bytes,
(outs GR16:$dst, GR16:$base_wb),
- (ins GR16:$src1, GR16:$base),
+ (ins GR16:$src, GR16:$base),
"xor.w\t{@$base+, $dst}", []>;
}
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def XOR8mr : I8mr<0x0,
(outs), (ins memdst:$dst, GR8:$src),
"xor.b\t{$src, $dst}",
@@ -777,51 +782,51 @@ def XOR16mm : I16mm<0x0,
def SUB8rr : I8rr<0x0,
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+ (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
"sub.b\t{$src2, $dst}",
- [(set GR8:$dst, (sub GR8:$src1, GR8:$src2)),
+ [(set GR8:$dst, (sub GR8:$src, GR8:$src2)),
(implicit SRW)]>;
def SUB16rr : I16rr<0x0,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
"sub.w\t{$src2, $dst}",
- [(set GR16:$dst, (sub GR16:$src1, GR16:$src2)),
+ [(set GR16:$dst, (sub GR16:$src, GR16:$src2)),
(implicit SRW)]>;
def SUB8ri : I8ri<0x0,
- (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+ (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
"sub.b\t{$src2, $dst}",
- [(set GR8:$dst, (sub GR8:$src1, imm:$src2)),
+ [(set GR8:$dst, (sub GR8:$src, imm:$src2)),
(implicit SRW)]>;
def SUB16ri : I16ri<0x0,
- (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+ (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
"sub.w\t{$src2, $dst}",
- [(set GR16:$dst, (sub GR16:$src1, imm:$src2)),
+ [(set GR16:$dst, (sub GR16:$src, imm:$src2)),
(implicit SRW)]>;
def SUB8rm : I8rm<0x0,
- (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+ (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
"sub.b\t{$src2, $dst}",
- [(set GR8:$dst, (sub GR8:$src1, (load addr:$src2))),
+ [(set GR8:$dst, (sub GR8:$src, (load addr:$src2))),
(implicit SRW)]>;
def SUB16rm : I16rm<0x0,
- (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+ (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
"sub.w\t{$src2, $dst}",
- [(set GR16:$dst, (sub GR16:$src1, (load addr:$src2))),
+ [(set GR16:$dst, (sub GR16:$src, (load addr:$src2))),
(implicit SRW)]>;
let mayLoad = 1, hasExtraDefRegAllocReq = 1,
-Constraints = "$base = $base_wb, $src1 = $dst" in {
+Constraints = "$base = $base_wb, $src = $dst" in {
def SUB8rm_POST : IForm8<0x0, DstReg, SrcPostInc, Size2Bytes,
(outs GR8:$dst, GR16:$base_wb),
- (ins GR8:$src1, GR16:$base),
+ (ins GR8:$src, GR16:$base),
"sub.b\t{@$base+, $dst}", []>;
def SUB16rm_POST : IForm16<0x0, DstReg, SrcPostInc, Size2Bytes,
(outs GR16:$dst, GR16:$base_wb),
- (ins GR16:$src1, GR16:$base),
+ (ins GR16:$src, GR16:$base),
"sub.w\t{@$base+, $dst}", []>;
}
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def SUB8mr : I8mr<0x0,
(outs), (ins memdst:$dst, GR8:$src),
"sub.b\t{$src, $dst}",
@@ -860,39 +865,39 @@ def SUB16mm : I16mm<0x0,
let Uses = [SRW] in {
def SBC8rr : I8rr<0x0,
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+ (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
"subc.b\t{$src2, $dst}",
- [(set GR8:$dst, (sube GR8:$src1, GR8:$src2)),
+ [(set GR8:$dst, (sube GR8:$src, GR8:$src2)),
(implicit SRW)]>;
def SBC16rr : I16rr<0x0,
- (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+ (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
"subc.w\t{$src2, $dst}",
- [(set GR16:$dst, (sube GR16:$src1, GR16:$src2)),
+ [(set GR16:$dst, (sube GR16:$src, GR16:$src2)),
(implicit SRW)]>;
def SBC8ri : I8ri<0x0,
- (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+ (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
"subc.b\t{$src2, $dst}",
- [(set GR8:$dst, (sube GR8:$src1, imm:$src2)),
+ [(set GR8:$dst, (sube GR8:$src, imm:$src2)),
(implicit SRW)]>;
def SBC16ri : I16ri<0x0,
- (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+ (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
"subc.w\t{$src2, $dst}",
- [(set GR16:$dst, (sube GR16:$src1, imm:$src2)),
+ [(set GR16:$dst, (sube GR16:$src, imm:$src2)),
(implicit SRW)]>;
def SBC8rm : I8rm<0x0,
- (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+ (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
"subc.b\t{$src2, $dst}",
- [(set GR8:$dst, (sube GR8:$src1, (load addr:$src2))),
+ [(set GR8:$dst, (sube GR8:$src, (load addr:$src2))),
(implicit SRW)]>;
def SBC16rm : I16rm<0x0,
- (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+ (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
"subc.w\t{$src2, $dst}",
- [(set GR16:$dst, (sube GR16:$src1, (load addr:$src2))),
+ [(set GR16:$dst, (sube GR16:$src, (load addr:$src2))),
(implicit SRW)]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def SBC8mr : I8mr<0x0,
(outs), (ins memdst:$dst, GR8:$src),
"subc.b\t{$src, $dst}",
@@ -985,59 +990,59 @@ def SWPB16r : II16r<0x0,
"swpb\t$dst",
[(set GR16:$dst, (bswap GR16:$src))]>;
-} // isTwoAddress = 1
+} // Constraints = "$src = $dst"
// Integer comparisons
let Defs = [SRW] in {
def CMP8rr : I8rr<0x0,
- (outs), (ins GR8:$src1, GR8:$src2),
- "cmp.b\t{$src2, $src1}",
- [(MSP430cmp GR8:$src1, GR8:$src2), (implicit SRW)]>;
+ (outs), (ins GR8:$src, GR8:$src2),
+ "cmp.b\t{$src2, $src}",
+ [(MSP430cmp GR8:$src, GR8:$src2), (implicit SRW)]>;
def CMP16rr : I16rr<0x0,
- (outs), (ins GR16:$src1, GR16:$src2),
- "cmp.w\t{$src2, $src1}",
- [(MSP430cmp GR16:$src1, GR16:$src2), (implicit SRW)]>;
+ (outs), (ins GR16:$src, GR16:$src2),
+ "cmp.w\t{$src2, $src}",
+ [(MSP430cmp GR16:$src, GR16:$src2), (implicit SRW)]>;
def CMP8ri : I8ri<0x0,
- (outs), (ins GR8:$src1, i8imm:$src2),
- "cmp.b\t{$src2, $src1}",
- [(MSP430cmp GR8:$src1, imm:$src2), (implicit SRW)]>;
+ (outs), (ins GR8:$src, i8imm:$src2),
+ "cmp.b\t{$src2, $src}",
+ [(MSP430cmp GR8:$src, imm:$src2), (implicit SRW)]>;
def CMP16ri : I16ri<0x0,
- (outs), (ins GR16:$src1, i16imm:$src2),
- "cmp.w\t{$src2, $src1}",
- [(MSP430cmp GR16:$src1, imm:$src2), (implicit SRW)]>;
+ (outs), (ins GR16:$src, i16imm:$src2),
+ "cmp.w\t{$src2, $src}",
+ [(MSP430cmp GR16:$src, imm:$src2), (implicit SRW)]>;
def CMP8mi : I8mi<0x0,
- (outs), (ins memsrc:$src1, i8imm:$src2),
- "cmp.b\t{$src2, $src1}",
- [(MSP430cmp (load addr:$src1),
+ (outs), (ins memsrc:$src, i8imm:$src2),
+ "cmp.b\t{$src2, $src}",
+ [(MSP430cmp (load addr:$src),
(i8 imm:$src2)), (implicit SRW)]>;
def CMP16mi : I16mi<0x0,
- (outs), (ins memsrc:$src1, i16imm:$src2),
- "cmp.w\t{$src2, $src1}",
- [(MSP430cmp (load addr:$src1),
+ (outs), (ins memsrc:$src, i16imm:$src2),
+ "cmp.w\t{$src2, $src}",
+ [(MSP430cmp (load addr:$src),
(i16 imm:$src2)), (implicit SRW)]>;
def CMP8rm : I8rm<0x0,
- (outs), (ins GR8:$src1, memsrc:$src2),
- "cmp.b\t{$src2, $src1}",
- [(MSP430cmp GR8:$src1, (load addr:$src2)),
+ (outs), (ins GR8:$src, memsrc:$src2),
+ "cmp.b\t{$src2, $src}",
+ [(MSP430cmp GR8:$src, (load addr:$src2)),
(implicit SRW)]>;
def CMP16rm : I16rm<0x0,
- (outs), (ins GR16:$src1, memsrc:$src2),
- "cmp.w\t{$src2, $src1}",
- [(MSP430cmp GR16:$src1, (load addr:$src2)),
+ (outs), (ins GR16:$src, memsrc:$src2),
+ "cmp.w\t{$src2, $src}",
+ [(MSP430cmp GR16:$src, (load addr:$src2)),
(implicit SRW)]>;
def CMP8mr : I8mr<0x0,
- (outs), (ins memsrc:$src1, GR8:$src2),
- "cmp.b\t{$src2, $src1}",
- [(MSP430cmp (load addr:$src1), GR8:$src2),
+ (outs), (ins memsrc:$src, GR8:$src2),
+ "cmp.b\t{$src2, $src}",
+ [(MSP430cmp (load addr:$src), GR8:$src2),
(implicit SRW)]>;
def CMP16mr : I16mr<0x0,
- (outs), (ins memsrc:$src1, GR16:$src2),
- "cmp.w\t{$src2, $src1}",
- [(MSP430cmp (load addr:$src1), GR16:$src2),
+ (outs), (ins memsrc:$src, GR16:$src2),
+ "cmp.w\t{$src2, $src}",
+ [(MSP430cmp (load addr:$src), GR16:$src2),
(implicit SRW)]>;
@@ -1045,71 +1050,71 @@ def CMP16mr : I16mr<0x0,
// Note that the C condition is set differently than when using CMP.
let isCommutable = 1 in {
def BIT8rr : I8rr<0x0,
- (outs), (ins GR8:$src1, GR8:$src2),
- "bit.b\t{$src2, $src1}",
- [(MSP430cmp (and_su GR8:$src1, GR8:$src2), 0),
+ (outs), (ins GR8:$src, GR8:$src2),
+ "bit.b\t{$src2, $src}",
+ [(MSP430cmp (and_su GR8:$src, GR8:$src2), 0),
(implicit SRW)]>;
def BIT16rr : I16rr<0x0,
- (outs), (ins GR16:$src1, GR16:$src2),
- "bit.w\t{$src2, $src1}",
- [(MSP430cmp (and_su GR16:$src1, GR16:$src2), 0),
+ (outs), (ins GR16:$src, GR16:$src2),
+ "bit.w\t{$src2, $src}",
+ [(MSP430cmp (and_su GR16:$src, GR16:$src2), 0),
(implicit SRW)]>;
}
def BIT8ri : I8ri<0x0,
- (outs), (ins GR8:$src1, i8imm:$src2),
- "bit.b\t{$src2, $src1}",
- [(MSP430cmp (and_su GR8:$src1, imm:$src2), 0),
+ (outs), (ins GR8:$src, i8imm:$src2),
+ "bit.b\t{$src2, $src}",
+ [(MSP430cmp (and_su GR8:$src, imm:$src2), 0),
(implicit SRW)]>;
def BIT16ri : I16ri<0x0,
- (outs), (ins GR16:$src1, i16imm:$src2),
- "bit.w\t{$src2, $src1}",
- [(MSP430cmp (and_su GR16:$src1, imm:$src2), 0),
+ (outs), (ins GR16:$src, i16imm:$src2),
+ "bit.w\t{$src2, $src}",
+ [(MSP430cmp (and_su GR16:$src, imm:$src2), 0),
(implicit SRW)]>;
def BIT8rm : I8rm<0x0,
- (outs), (ins GR8:$src1, memdst:$src2),
- "bit.b\t{$src2, $src1}",
- [(MSP430cmp (and_su GR8:$src1, (load addr:$src2)), 0),
+ (outs), (ins GR8:$src, memdst:$src2),
+ "bit.b\t{$src2, $src}",
+ [(MSP430cmp (and_su GR8:$src, (load addr:$src2)), 0),
(implicit SRW)]>;
def BIT16rm : I16rm<0x0,
- (outs), (ins GR16:$src1, memdst:$src2),
- "bit.w\t{$src2, $src1}",
- [(MSP430cmp (and_su GR16:$src1, (load addr:$src2)), 0),
+ (outs), (ins GR16:$src, memdst:$src2),
+ "bit.w\t{$src2, $src}",
+ [(MSP430cmp (and_su GR16:$src, (load addr:$src2)), 0),
(implicit SRW)]>;
def BIT8mr : I8mr<0x0,
- (outs), (ins memsrc:$src1, GR8:$src2),
- "bit.b\t{$src2, $src1}",
- [(MSP430cmp (and_su (load addr:$src1), GR8:$src2), 0),
+ (outs), (ins memsrc:$src, GR8:$src2),
+ "bit.b\t{$src2, $src}",
+ [(MSP430cmp (and_su (load addr:$src), GR8:$src2), 0),
(implicit SRW)]>;
def BIT16mr : I16mr<0x0,
- (outs), (ins memsrc:$src1, GR16:$src2),
- "bit.w\t{$src2, $src1}",
- [(MSP430cmp (and_su (load addr:$src1), GR16:$src2), 0),
+ (outs), (ins memsrc:$src, GR16:$src2),
+ "bit.w\t{$src2, $src}",
+ [(MSP430cmp (and_su (load addr:$src), GR16:$src2), 0),
(implicit SRW)]>;
def BIT8mi : I8mi<0x0,
- (outs), (ins memsrc:$src1, i8imm:$src2),
- "bit.b\t{$src2, $src1}",
- [(MSP430cmp (and_su (load addr:$src1), (i8 imm:$src2)), 0),
+ (outs), (ins memsrc:$src, i8imm:$src2),
+ "bit.b\t{$src2, $src}",
+ [(MSP430cmp (and_su (load addr:$src), (i8 imm:$src2)), 0),
(implicit SRW)]>;
def BIT16mi : I16mi<0x0,
- (outs), (ins memsrc:$src1, i16imm:$src2),
- "bit.w\t{$src2, $src1}",
- [(MSP430cmp (and_su (load addr:$src1), (i16 imm:$src2)), 0),
+ (outs), (ins memsrc:$src, i16imm:$src2),
+ "bit.w\t{$src2, $src}",
+ [(MSP430cmp (and_su (load addr:$src), (i16 imm:$src2)), 0),
(implicit SRW)]>;
def BIT8mm : I8mm<0x0,
- (outs), (ins memsrc:$src1, memsrc:$src2),
- "bit.b\t{$src2, $src1}",
- [(MSP430cmp (and_su (i8 (load addr:$src1)),
+ (outs), (ins memsrc:$src, memsrc:$src2),
+ "bit.b\t{$src2, $src}",
+ [(MSP430cmp (and_su (i8 (load addr:$src)),
(load addr:$src2)),
0),
(implicit SRW)]>;
def BIT16mm : I16mm<0x0,
- (outs), (ins memsrc:$src1, memsrc:$src2),
- "bit.w\t{$src2, $src1}",
- [(MSP430cmp (and_su (i16 (load addr:$src1)),
+ (outs), (ins memsrc:$src, memsrc:$src2),
+ "bit.w\t{$src2, $src}",
+ [(MSP430cmp (and_su (i16 (load addr:$src)),
(load addr:$src2)),
0),
(implicit SRW)]>;
@@ -1134,12 +1139,12 @@ def : Pat<(i16 (MSP430Wrapper tglobaladdr:$dst)), (MOV16ri tglobaladdr:$dst)>;
def : Pat<(i16 (MSP430Wrapper texternalsym:$dst)), (MOV16ri texternalsym:$dst)>;
def : Pat<(i16 (MSP430Wrapper tblockaddress:$dst)), (MOV16ri tblockaddress:$dst)>;
-def : Pat<(add GR16:$src1, (MSP430Wrapper tglobaladdr :$src2)),
- (ADD16ri GR16:$src1, tglobaladdr:$src2)>;
-def : Pat<(add GR16:$src1, (MSP430Wrapper texternalsym:$src2)),
- (ADD16ri GR16:$src1, texternalsym:$src2)>;
-def : Pat<(add GR16:$src1, (MSP430Wrapper tblockaddress:$src2)),
- (ADD16ri GR16:$src1, tblockaddress:$src2)>;
+def : Pat<(add GR16:$src, (MSP430Wrapper tglobaladdr :$src2)),
+ (ADD16ri GR16:$src, tglobaladdr:$src2)>;
+def : Pat<(add GR16:$src, (MSP430Wrapper texternalsym:$src2)),
+ (ADD16ri GR16:$src, texternalsym:$src2)>;
+def : Pat<(add GR16:$src, (MSP430Wrapper tblockaddress:$src2)),
+ (ADD16ri GR16:$src, tblockaddress:$src2)>;
def : Pat<(store (i16 (MSP430Wrapper tglobaladdr:$src)), addr:$dst),
(MOV16mi addr:$dst, tglobaladdr:$src)>;
@@ -1155,45 +1160,45 @@ def : Pat<(MSP430call (i16 texternalsym:$dst)),
(CALLi texternalsym:$dst)>;
// add and sub always produce carry
-def : Pat<(addc GR16:$src1, GR16:$src2),
- (ADD16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(addc GR16:$src1, (load addr:$src2)),
- (ADD16rm GR16:$src1, addr:$src2)>;
-def : Pat<(addc GR16:$src1, imm:$src2),
- (ADD16ri GR16:$src1, imm:$src2)>;
+def : Pat<(addc GR16:$src, GR16:$src2),
+ (ADD16rr GR16:$src, GR16:$src2)>;
+def : Pat<(addc GR16:$src, (load addr:$src2)),
+ (ADD16rm GR16:$src, addr:$src2)>;
+def : Pat<(addc GR16:$src, imm:$src2),
+ (ADD16ri GR16:$src, imm:$src2)>;
def : Pat<(store (addc (load addr:$dst), GR16:$src), addr:$dst),
(ADD16mr addr:$dst, GR16:$src)>;
def : Pat<(store (addc (load addr:$dst), (i16 (load addr:$src))), addr:$dst),
(ADD16mm addr:$dst, addr:$src)>;
-def : Pat<(addc GR8:$src1, GR8:$src2),
- (ADD8rr GR8:$src1, GR8:$src2)>;
-def : Pat<(addc GR8:$src1, (load addr:$src2)),
- (ADD8rm GR8:$src1, addr:$src2)>;
-def : Pat<(addc GR8:$src1, imm:$src2),
- (ADD8ri GR8:$src1, imm:$src2)>;
+def : Pat<(addc GR8:$src, GR8:$src2),
+ (ADD8rr GR8:$src, GR8:$src2)>;
+def : Pat<(addc GR8:$src, (load addr:$src2)),
+ (ADD8rm GR8:$src, addr:$src2)>;
+def : Pat<(addc GR8:$src, imm:$src2),
+ (ADD8ri GR8:$src, imm:$src2)>;
def : Pat<(store (addc (load addr:$dst), GR8:$src), addr:$dst),
(ADD8mr addr:$dst, GR8:$src)>;
def : Pat<(store (addc (load addr:$dst), (i8 (load addr:$src))), addr:$dst),
(ADD8mm addr:$dst, addr:$src)>;
-def : Pat<(subc GR16:$src1, GR16:$src2),
- (SUB16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(subc GR16:$src1, (load addr:$src2)),
- (SUB16rm GR16:$src1, addr:$src2)>;
-def : Pat<(subc GR16:$src1, imm:$src2),
- (SUB16ri GR16:$src1, imm:$src2)>;
+def : Pat<(subc GR16:$src, GR16:$src2),
+ (SUB16rr GR16:$src, GR16:$src2)>;
+def : Pat<(subc GR16:$src, (load addr:$src2)),
+ (SUB16rm GR16:$src, addr:$src2)>;
+def : Pat<(subc GR16:$src, imm:$src2),
+ (SUB16ri GR16:$src, imm:$src2)>;
def : Pat<(store (subc (load addr:$dst), GR16:$src), addr:$dst),
(SUB16mr addr:$dst, GR16:$src)>;
def : Pat<(store (subc (load addr:$dst), (i16 (load addr:$src))), addr:$dst),
(SUB16mm addr:$dst, addr:$src)>;
-def : Pat<(subc GR8:$src1, GR8:$src2),
- (SUB8rr GR8:$src1, GR8:$src2)>;
-def : Pat<(subc GR8:$src1, (load addr:$src2)),
- (SUB8rm GR8:$src1, addr:$src2)>;
-def : Pat<(subc GR8:$src1, imm:$src2),
- (SUB8ri GR8:$src1, imm:$src2)>;
+def : Pat<(subc GR8:$src, GR8:$src2),
+ (SUB8rr GR8:$src, GR8:$src2)>;
+def : Pat<(subc GR8:$src, (load addr:$src2)),
+ (SUB8rm GR8:$src, addr:$src2)>;
+def : Pat<(subc GR8:$src, imm:$src2),
+ (SUB8ri GR8:$src, imm:$src2)>;
def : Pat<(store (subc (load addr:$dst), GR8:$src), addr:$dst),
(SUB8mr addr:$dst, GR8:$src)>;
def : Pat<(store (subc (load addr:$dst), (i8 (load addr:$src))), addr:$dst),
@@ -1201,6 +1206,6 @@ def : Pat<(store (subc (load addr:$dst), (i8 (load addr:$src))), addr:$dst),
// peephole patterns
def : Pat<(and GR16:$src, 255), (ZEXT16r GR16:$src)>;
-def : Pat<(MSP430cmp (trunc (and_su GR16:$src1, GR16:$src2)), 0),
- (BIT8rr (EXTRACT_SUBREG GR16:$src1, subreg_8bit),
+def : Pat<(MSP430cmp (trunc (and_su GR16:$src, GR16:$src2)), 0),
+ (BIT8rr (EXTRACT_SUBREG GR16:$src, subreg_8bit),
(EXTRACT_SUBREG GR16:$src2, subreg_8bit))>;
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430MCAsmInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430MCAsmInfo.cpp
index cfb499d..3f44944 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430MCAsmInfo.cpp
@@ -14,7 +14,7 @@
#include "MSP430MCAsmInfo.h"
using namespace llvm;
-MSP430MCAsmInfo::MSP430MCAsmInfo(const Target &T, const StringRef &TT) {
+MSP430MCAsmInfo::MSP430MCAsmInfo(const Target &T, StringRef TT) {
PrivateGlobalPrefix = ".L";
WeakRefDirective ="\t.weak\t";
PCSymbol=".";
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430MCAsmInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430MCAsmInfo.h
index 8318029..f3138a2 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430MCAsmInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430MCAsmInfo.h
@@ -14,13 +14,14 @@
#ifndef MSP430TARGETASMINFO_H
#define MSP430TARGETASMINFO_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
class Target;
- class StringRef;
+
struct MSP430MCAsmInfo : public MCAsmInfo {
- explicit MSP430MCAsmInfo(const Target &T, const StringRef &TT);
+ explicit MSP430MCAsmInfo(const Target &T, StringRef TT);
};
} // namespace llvm
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
index 0cae267..608ca49 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp
@@ -71,48 +71,6 @@ MSP430RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
}
-const TargetRegisterClass *const *
-MSP430RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- const Function* F = MF->getFunction();
- static const TargetRegisterClass * const CalleeSavedRegClasses[] = {
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- 0
- };
- static const TargetRegisterClass * const CalleeSavedRegClassesFP[] = {
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, 0
- };
- static const TargetRegisterClass * const CalleeSavedRegClassesIntr[] = {
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- 0
- };
- static const TargetRegisterClass * const CalleeSavedRegClassesIntrFP[] = {
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, &MSP430::GR16RegClass,
- &MSP430::GR16RegClass, 0
- };
-
- if (hasFP(*MF))
- return (F->getCallingConv() == CallingConv::MSP430_INTR ?
- CalleeSavedRegClassesIntrFP : CalleeSavedRegClassesFP);
- else
- return (F->getCallingConv() == CallingConv::MSP430_INTR ?
- CalleeSavedRegClassesIntr : CalleeSavedRegClasses);
-}
-
BitVector MSP430RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
@@ -270,8 +228,8 @@ MSP430RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
const {
// Create a frame entry for the FPW register that must be saved.
if (hasFP(MF)) {
- int ATTRIBUTE_UNUSED FrameIdx =
- MF.getFrameInfo()->CreateFixedObject(2, -4, true, false);
+ int FrameIdx = MF.getFrameInfo()->CreateFixedObject(2, -4, true);
+ (void)FrameIdx;
assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
"Slot for FPW register must be last in order to be found!");
}
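Note: the ATTRIBUTE_UNUSED macro gives way to a portable (void) cast here: FrameIdx exists only to feed the assert, so NDEBUG builds would otherwise warn about an unused variable. The idiom in isolation (assert message condensed):

    int FrameIdx = MFI->CreateFixedObject(2, -4, true);
    (void)FrameIdx; // silence -Wunused-variable when asserts compile out
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for FPW register must be last!");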
diff --git a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
index c8684df..6e58d31 100644
--- a/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
+++ b/contrib/llvm/lib/Target/MSP430/MSP430RegisterInfo.h
@@ -36,9 +36,6 @@ public:
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
- const TargetRegisterClass* const*
- getCalleeSavedRegClasses(const MachineFunction *MF = 0) const;
-
BitVector getReservedRegs(const MachineFunction &MF) const;
const TargetRegisterClass* getPointerRegClass(unsigned Kind = 0) const;
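Note: getCalleeSavedRegClasses is retired framework-wide; CalleeSavedInfo can derive what it needs from the register itself, which is why the parallel class arrays above could simply be deleted. Code that still wants a class for a saved register can ask the register info for one (getMinimalPhysRegClass is my recollection of the 2.8-era API, so verify before relying on it):

    // TRI is the target's TargetRegisterInfo; Reg a callee-saved register.
    const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);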
diff --git a/contrib/llvm/lib/Target/Mangler.cpp b/contrib/llvm/lib/Target/Mangler.cpp
index 4ef017a..2037a91 100644
--- a/contrib/llvm/lib/Target/Mangler.cpp
+++ b/contrib/llvm/lib/Target/Mangler.cpp
@@ -180,7 +180,7 @@ void Mangler::getNameWithPrefix(SmallVectorImpl<char> &OutName,
ManglerPrefixTy PrefixTy = Mangler::Default;
if (GV->hasPrivateLinkage() || isImplicitlyPrivate)
PrefixTy = Mangler::Private;
- else if (GV->hasLinkerPrivateLinkage())
+ else if (GV->hasLinkerPrivateLinkage() || GV->hasLinkerPrivateWeakLinkage())
PrefixTy = Mangler::LinkerPrivate;
// If this global has a name, handle it simply.
diff --git a/contrib/llvm/lib/Target/Mips/AsmPrinter/MipsAsmPrinter.cpp b/contrib/llvm/lib/Target/Mips/AsmPrinter/MipsAsmPrinter.cpp
index 4d7fe4c..8ae05b7 100644
--- a/contrib/llvm/lib/Target/Mips/AsmPrinter/MipsAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/Mips/AsmPrinter/MipsAsmPrinter.cpp
@@ -133,8 +133,9 @@ void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) {
const MachineFrameInfo *MFI = MF->getFrameInfo();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- unsigned RegNum = MipsRegisterInfo::getRegisterNumbering(CSI[i].getReg());
- if (CSI[i].getRegClass() == Mips::CPURegsRegisterClass)
+ unsigned Reg = CSI[i].getReg();
+ unsigned RegNum = MipsRegisterInfo::getRegisterNumbering(Reg);
+ if (Mips::CPURegsRegisterClass->contains(Reg))
CPUBitmask |= (1 << RegNum);
else
FPUBitmask |= (1 << RegNum);
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
index e979c3f..b6ff2c3 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -284,6 +284,18 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineFunction *F = BB->getParent();
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(It, copy0MBB);
+ F->insert(It, sinkMBB);
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
+ // Next, add the true and fallthrough blocks as its successors.
+ BB->addSuccessor(copy0MBB);
+ BB->addSuccessor(sinkMBB);
// Emit the right instruction according to the type of the operands compared
if (isFPCmp) {
@@ -296,20 +308,6 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
BuildMI(BB, dl, TII->get(Mips::BNE)).addReg(MI->getOperand(1).getReg())
.addReg(Mips::ZERO).addMBB(sinkMBB);
- F->insert(It, copy0MBB);
- F->insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- for(MachineBasicBlock::succ_iterator i = BB->succ_begin(),
- e = BB->succ_end(); i != e; ++i)
- sinkMBB->addSuccessor(*i);
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while(!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
- BB->addSuccessor(copy0MBB);
- BB->addSuccessor(sinkMBB);
-
// copy0MBB:
// %FalseValue = ...
// # fallthrough to sinkMBB
@@ -322,11 +320,12 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(BB, dl, TII->get(Mips::PHI), MI->getOperand(0).getReg())
+ BuildMI(*BB, BB->begin(), dl,
+ TII->get(Mips::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(3).getReg()).addMBB(thisMBB);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
}
@@ -490,21 +489,21 @@ SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op,
// %gp_rel relocation
if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
- SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32, 0,
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
MipsII::MO_GPREL);
SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, dl, VTs, &GA, 1);
SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
return DAG.getNode(ISD::ADD, dl, MVT::i32, GOT, GPRelNode);
}
// %hi/%lo relocation
- SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32, 0,
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
MipsII::MO_ABS_HILO);
SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, VTs, &GA, 1);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, GA);
return DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
} else {
- SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32, 0,
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
MipsII::MO_GOT);
SDValue ResNode = DAG.getLoad(MVT::i32, dl,
DAG.getEntryNode(), GA, NULL, 0,
@@ -768,6 +767,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -787,7 +787,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// the stack (even if less than 4 are used as arguments)
if (Subtarget->isABI_O32()) {
int VTsize = EVT(MVT::i32).getSizeInBits()/8;
- MFI->CreateFixedObject(VTsize, (VTsize*3), true, false);
+ MFI->CreateFixedObject(VTsize, (VTsize*3), true);
CCInfo.AnalyzeCallOperands(Outs,
isVarArg ? CC_MipsO32_VarArgs : CC_MipsO32);
} else
@@ -808,7 +808,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
CCValAssign &VA = ArgLocs[i];
// Promote the value if needed.
@@ -857,7 +857,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// if O32 ABI is used. For EABI the first address is zero.
LastArgStackLoc = (FirstStackArgLoc + VA.getLocMemOffset());
int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
- LastArgStackLoc, true, false);
+ LastArgStackLoc, true);
SDValue PtrOff = DAG.getFrameIndex(FI,getPointerTy());
@@ -889,7 +889,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// node so that legalize doesn't hack it.
unsigned char OpFlag = IsPIC ? MipsII::MO_GOT_CALL : MipsII::MO_NO_FLAG;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(),
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
getPointerTy(), 0, OpFlag);
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
@@ -929,7 +929,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Create the frame index only once. SPOffset here can be anything
// (this will be fixed on processFunctionBeforeFrameFinalized)
if (MipsFI->getGPStackOffset() == -1) {
- FI = MFI->CreateFixedObject(4, 0, true, false);
+ FI = MFI->CreateFixedObject(4, 0, true);
MipsFI->setGPFI(FI);
}
MipsFI->setGPStackOffset(LastArgStackLoc);
@@ -1098,7 +1098,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// offset on PEI::calculateFrameObjectOffsets.
// Arguments are always 32-bit.
unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
- int FI = MFI->CreateFixedObject(ArgSize, 0, true, false);
+ int FI = MFI->CreateFixedObject(ArgSize, 0, true);
MipsFI->recordLoadArgsFI(FI, -(ArgSize+
(FirstStackArgLoc + VA.getLocMemOffset())));
@@ -1137,7 +1137,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgRegEnd, RC);
SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, MVT::i32);
- int FI = MFI->CreateFixedObject(4, 0, true, false);
+ int FI = MFI->CreateFixedObject(4, 0, true);
MipsFI->recordStoreVarArgsFI(FI, -(4+(StackLoc*4)));
SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy());
OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff, NULL, 0,
@@ -1169,6 +1169,7 @@ SDValue
MipsTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of
@@ -1198,7 +1199,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- Outs[i].Val, Flag);
+ OutVals[i], Flag);
// guarantee that all emitted copies are
// stuck together, avoiding anything scheduled between them
diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
index f2de489..460747b 100644
--- a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -120,6 +120,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -128,6 +129,7 @@ namespace llvm {
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
virtual MachineBasicBlock *
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index 4005e35..6c09a3e 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -127,61 +127,75 @@ insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
BuildMI(MBB, MI, DL, get(Mips::NOP));
}
-bool MipsInstrInfo::
-copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
+void MipsInstrInfo::
+copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ bool DestCPU = Mips::CPURegsRegClass.contains(DestReg);
+ bool SrcCPU = Mips::CPURegsRegClass.contains(SrcReg);
+
+ // CPU-CPU is the most common.
+ if (DestCPU && SrcCPU) {
+ BuildMI(MBB, I, DL, get(Mips::ADDu), DestReg).addReg(Mips::ZERO)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
- if (DestRC != SrcRC) {
-
- // Copy to/from FCR31 condition register
- if ((DestRC == Mips::CPURegsRegisterClass) &&
- (SrcRC == Mips::CCRRegisterClass))
- BuildMI(MBB, I, DL, get(Mips::CFC1), DestReg).addReg(SrcReg);
- else if ((DestRC == Mips::CCRRegisterClass) &&
- (SrcRC == Mips::CPURegsRegisterClass))
- BuildMI(MBB, I, DL, get(Mips::CTC1), DestReg).addReg(SrcReg);
-
- // Moves between coprocessors and cpu
- else if ((DestRC == Mips::CPURegsRegisterClass) &&
- (SrcRC == Mips::FGR32RegisterClass))
- BuildMI(MBB, I, DL, get(Mips::MFC1), DestReg).addReg(SrcReg);
- else if ((DestRC == Mips::FGR32RegisterClass) &&
- (SrcRC == Mips::CPURegsRegisterClass))
- BuildMI(MBB, I, DL, get(Mips::MTC1), DestReg).addReg(SrcReg);
-
- // Move from/to Hi/Lo registers
- else if ((DestRC == Mips::HILORegisterClass) &&
- (SrcRC == Mips::CPURegsRegisterClass)) {
- unsigned Opc = (DestReg == Mips::HI) ? Mips::MTHI : Mips::MTLO;
- BuildMI(MBB, I, DL, get(Opc), DestReg);
- } else if ((SrcRC == Mips::HILORegisterClass) &&
- (DestRC == Mips::CPURegsRegisterClass)) {
- unsigned Opc = (SrcReg == Mips::HI) ? Mips::MFHI : Mips::MFLO;
- BuildMI(MBB, I, DL, get(Opc), DestReg);
- } else
- // Can't copy this register
- return false;
+ // Copy to CPU from other registers.
+ if (DestCPU) {
+ if (Mips::CCRRegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(Mips::CFC1), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (Mips::FGR32RegClass.contains(SrcReg))
+ BuildMI(MBB, I, DL, get(Mips::MFC1), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (SrcReg == Mips::HI)
+ BuildMI(MBB, I, DL, get(Mips::MFHI), DestReg);
+ else if (SrcReg == Mips::LO)
+ BuildMI(MBB, I, DL, get(Mips::MFLO), DestReg);
+ else
+ llvm_unreachable("Copy to CPU from invalid register");
+ return;
+ }
- return true;
+ // Copy to other registers from CPU.
+ if (SrcCPU) {
+ if (Mips::CCRRegClass.contains(DestReg))
+ BuildMI(MBB, I, DL, get(Mips::CTC1), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (Mips::FGR32RegClass.contains(DestReg))
+ BuildMI(MBB, I, DL, get(Mips::MTC1), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (DestReg == Mips::HI)
+ BuildMI(MBB, I, DL, get(Mips::MTHI))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (DestReg == Mips::LO)
+ BuildMI(MBB, I, DL, get(Mips::MTLO))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else
+ llvm_unreachable("Copy from CPU to invalid register");
+ return;
}
- if (DestRC == Mips::CPURegsRegisterClass)
- BuildMI(MBB, I, DL, get(Mips::ADDu), DestReg).addReg(Mips::ZERO)
- .addReg(SrcReg);
- else if (DestRC == Mips::FGR32RegisterClass)
- BuildMI(MBB, I, DL, get(Mips::FMOV_S32), DestReg).addReg(SrcReg);
- else if (DestRC == Mips::AFGR64RegisterClass)
- BuildMI(MBB, I, DL, get(Mips::FMOV_D32), DestReg).addReg(SrcReg);
- else if (DestRC == Mips::CCRRegisterClass)
- BuildMI(MBB, I, DL, get(Mips::MOVCCRToCCR), DestReg).addReg(SrcReg);
- else
- // Can't copy this register
- return false;
+ if (Mips::FGR32RegClass.contains(DestReg, SrcReg)) {
+ BuildMI(MBB, I, DL, get(Mips::FMOV_S32), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
- return true;
+ if (Mips::AFGR64RegClass.contains(DestReg, SrcReg)) {
+ BuildMI(MBB, I, DL, get(Mips::FMOV_D32), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
+
+ if (Mips::CCRRegClass.contains(DestReg, SrcReg)) {
+ BuildMI(MBB, I, DL, get(Mips::MOVCCRToCCR), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
+ llvm_unreachable("Cannot copy registers");
}
void MipsInstrInfo::
@@ -247,80 +261,6 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
llvm_unreachable("Register class not handled!");
}
-MachineInstr *MipsInstrInfo::
-foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops, int FI) const
-{
- if (Ops.size() != 1) return NULL;
-
- MachineInstr *NewMI = NULL;
-
- switch (MI->getOpcode()) {
- case Mips::ADDu:
- if ((MI->getOperand(0).isReg()) &&
- (MI->getOperand(1).isReg()) &&
- (MI->getOperand(1).getReg() == Mips::ZERO) &&
- (MI->getOperand(2).isReg())) {
- if (Ops[0] == 0) { // COPY -> STORE
- unsigned SrcReg = MI->getOperand(2).getReg();
- bool isKill = MI->getOperand(2).isKill();
- bool isUndef = MI->getOperand(2).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(Mips::SW))
- .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
- .addImm(0).addFrameIndex(FI);
- } else { // COPY -> LOAD
- unsigned DstReg = MI->getOperand(0).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(Mips::LW))
- .addReg(DstReg, RegState::Define | getDeadRegState(isDead) |
- getUndefRegState(isUndef))
- .addImm(0).addFrameIndex(FI);
- }
- }
- break;
- case Mips::FMOV_S32:
- case Mips::FMOV_D32:
- if ((MI->getOperand(0).isReg()) &&
- (MI->getOperand(1).isReg())) {
- const TargetRegisterClass
- *RC = RI.getRegClass(MI->getOperand(0).getReg());
- unsigned StoreOpc, LoadOpc;
- bool IsMips1 = TM.getSubtarget<MipsSubtarget>().isMips1();
-
- if (RC == Mips::FGR32RegisterClass) {
- LoadOpc = Mips::LWC1; StoreOpc = Mips::SWC1;
- } else {
- assert(RC == Mips::AFGR64RegisterClass);
- // Mips1 doesn't have ldc/sdc instructions.
- if (IsMips1) break;
- LoadOpc = Mips::LDC1; StoreOpc = Mips::SDC1;
- }
-
- if (Ops[0] == 0) { // COPY -> STORE
- unsigned SrcReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(2).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(StoreOpc))
- .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
- .addImm(0).addFrameIndex(FI) ;
- } else { // COPY -> LOAD
- unsigned DstReg = MI->getOperand(0).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(LoadOpc))
- .addReg(DstReg, RegState::Define | getDeadRegState(isDead) |
- getUndefRegState(isUndef))
- .addImm(0).addFrameIndex(FI);
- }
- }
- break;
- }
-
- return NewMI;
-}
-
//===----------------------------------------------------------------------===//
// Branch Analysis
//===----------------------------------------------------------------------===//
@@ -520,9 +460,8 @@ bool MipsInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
unsigned MipsInstrInfo::
InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc argument
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 3 || Cond.size() == 2 || Cond.size() == 0) &&
@@ -531,18 +470,18 @@ InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
if (FBB == 0) { // One way branch.
if (Cond.empty()) {
// Unconditional branch?
- BuildMI(&MBB, dl, get(Mips::J)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(Mips::J)).addMBB(TBB);
} else {
// Conditional branch.
unsigned Opc = GetCondBranchFromCond((Mips::CondCode)Cond[0].getImm());
const TargetInstrDesc &TID = get(Opc);
if (TID.getNumOperands() == 3)
- BuildMI(&MBB, dl, TID).addReg(Cond[1].getReg())
+ BuildMI(&MBB, DL, TID).addReg(Cond[1].getReg())
.addReg(Cond[2].getReg())
.addMBB(TBB);
else
- BuildMI(&MBB, dl, TID).addReg(Cond[1].getReg())
+ BuildMI(&MBB, DL, TID).addReg(Cond[1].getReg())
.addMBB(TBB);
}
@@ -554,12 +493,12 @@ InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
const TargetInstrDesc &TID = get(Opc);
if (TID.getNumOperands() == 3)
- BuildMI(&MBB, dl, TID).addReg(Cond[1].getReg()).addReg(Cond[2].getReg())
+ BuildMI(&MBB, DL, TID).addReg(Cond[1].getReg()).addReg(Cond[2].getReg())
.addMBB(TBB);
else
- BuildMI(&MBB, dl, TID).addReg(Cond[1].getReg()).addMBB(TBB);
+ BuildMI(&MBB, DL, TID).addReg(Cond[1].getReg()).addMBB(TBB);
- BuildMI(&MBB, dl, get(Mips::J)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(Mips::J)).addMBB(FBB);
return 2;
}
@@ -621,12 +560,8 @@ unsigned MipsInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
GlobalBaseReg = RegInfo.createVirtualRegister(Mips::CPURegsRegisterClass);
- bool Ok = TII->copyRegToReg(FirstMBB, MBBI, GlobalBaseReg, Mips::GP,
- Mips::CPURegsRegisterClass,
- Mips::CPURegsRegisterClass,
- DebugLoc());
- assert(Ok && "Couldn't assign to global base register!");
- Ok = Ok; // Silence warning when assertions are turned off.
+ BuildMI(FirstMBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),
+ GlobalBaseReg).addReg(Mips::GP);
RegInfo.addLiveIn(Mips::GP);
MipsFI->setGlobalBaseReg(GlobalBaseReg);
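copyRegToReg returned a bool and left the caller to cope with failure; its replacement copyPhysReg must always succeed, takes the DebugLoc up front, tests membership with TargetRegisterClass::contains(DestReg, SrcReg) instead of comparing class pointers, and propagates the kill flag. The getGlobalBaseReg change above also hints at why the fold hooks below can go: plain copies are now the target-independent TargetOpcode::COPY. A condensed sketch of the new hook for a hypothetical target Foo:

    void FooInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
      // contains(DestReg, SrcReg) is true only if both registers are
      // members of the class, so one test covers the same-class copy.
      if (Foo::GPRRegClass.contains(DestReg, SrcReg)) {
        BuildMI(MBB, I, DL, get(Foo::MOV), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
        return;
      }
      // Unsupported pairs are a bug in the caller, not a soft failure.
      llvm_unreachable("Impossible reg-to-reg copy");
    }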
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
index 7919d9a..d6f87f9 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
@@ -204,13 +204,12 @@ public:
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
@@ -223,18 +222,6 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- return 0;
- }
-
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
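InsertBranch now threads a DebugLoc through from the caller instead of fabricating an empty one inside the hook (the deleted FIXME above). A hypothetical call site, assuming the block still ends in the branch being replaced:

    // Reuse the location of the terminator being rewritten so the
    // re-inserted branch keeps its source position.
    DebugLoc DL = MBB.getFirstTerminator()->getDebugLoc();
    TII->InsertBranch(MBB, TBB, FBB, Cond, DL);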
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
index 2b9e941..5337c9f 100644
--- a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
@@ -541,7 +541,7 @@ let Predicates = [HasSwap] in {
def MIPS_CMOV_ZERO : PatLeaf<(i32 0)>;
def MIPS_CMOV_NZERO : PatLeaf<(i32 1)>;
-let Predicates = [HasCondMov], isTwoAddress = 1 in {
+let Predicates = [HasCondMov], Constraints = "$F = $dst" in {
def MOVN : CondMov<0x0a, "movn", MIPS_CMOV_NZERO>;
def MOVZ : CondMov<0x0b, "movz", MIPS_CMOV_ZERO>;
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsMCAsmInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsMCAsmInfo.cpp
index 89e3e11..fe48ab7 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsMCAsmInfo.cpp
@@ -14,7 +14,7 @@
#include "MipsMCAsmInfo.h"
using namespace llvm;
-MipsMCAsmInfo::MipsMCAsmInfo(const Target &T, const StringRef &TT) {
+MipsMCAsmInfo::MipsMCAsmInfo(const Target &T, StringRef TT) {
AlignmentIsInBytes = false;
Data16bitsDirective = "\t.half\t";
Data32bitsDirective = "\t.word\t";
diff --git a/contrib/llvm/lib/Target/Mips/MipsMCAsmInfo.h b/contrib/llvm/lib/Target/Mips/MipsMCAsmInfo.h
index 33a4b5e..15a867e 100644
--- a/contrib/llvm/lib/Target/Mips/MipsMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsMCAsmInfo.h
@@ -14,15 +14,15 @@
#ifndef MIPSTARGETASMINFO_H
#define MIPSTARGETASMINFO_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
class Target;
- class StringRef;
class MipsMCAsmInfo : public MCAsmInfo {
public:
- explicit MipsMCAsmInfo(const Target &T, const StringRef &TT);
+ explicit MipsMCAsmInfo(const Target &T, StringRef TT);
};
} // namespace llvm
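StringRef is a pointer-and-length pair, so passing it by const reference only adds an indirection; these constructors now take it by value, which in turn requires the real header rather than a forward declaration. The idiom, as a sketch (noteTriple is an illustrative name):

    #include "llvm/ADT/StringRef.h"

    // By value: copying two words is as cheap as passing a reference,
    // and the callee gets its own copy free of aliasing concerns.
    void noteTriple(llvm::StringRef TT);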
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
index 5e719af..e15f0a5 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -116,34 +116,6 @@ getCalleeSavedRegs(const MachineFunction *MF) const
return BitMode32CalleeSavedRegs;
}
-/// Mips Callee Saved Register Classes
-const TargetRegisterClass* const*
-MipsRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const
-{
- static const TargetRegisterClass * const SingleFloatOnlyCalleeSavedRC[] = {
- &Mips::CPURegsRegClass, &Mips::CPURegsRegClass, &Mips::CPURegsRegClass,
- &Mips::CPURegsRegClass, &Mips::CPURegsRegClass, &Mips::CPURegsRegClass,
- &Mips::CPURegsRegClass, &Mips::CPURegsRegClass,
- &Mips::FGR32RegClass, &Mips::FGR32RegClass, &Mips::FGR32RegClass,
- &Mips::FGR32RegClass, &Mips::FGR32RegClass, &Mips::FGR32RegClass,
- &Mips::FGR32RegClass, &Mips::FGR32RegClass, &Mips::FGR32RegClass,
- &Mips::FGR32RegClass, &Mips::FGR32RegClass, 0
- };
-
- static const TargetRegisterClass * const BitMode32CalleeSavedRC[] = {
- &Mips::CPURegsRegClass, &Mips::CPURegsRegClass, &Mips::CPURegsRegClass,
- &Mips::CPURegsRegClass, &Mips::CPURegsRegClass, &Mips::CPURegsRegClass,
- &Mips::CPURegsRegClass, &Mips::CPURegsRegClass,
- &Mips::FGR32RegClass, &Mips::FGR32RegClass, &Mips::FGR32RegClass,
- &Mips::FGR32RegClass, &Mips::FGR32RegClass, &Mips::FGR32RegClass, 0
- };
-
- if (Subtarget.isSingleFloat())
- return SingleFloatOnlyCalleeSavedRC;
- else
- return BitMode32CalleeSavedRC;
-}
-
BitVector MipsRegisterInfo::
getReservedRegs(const MachineFunction &MF) const
{
@@ -279,7 +251,8 @@ void MipsRegisterInfo::adjustMipsStackFrame(MachineFunction &MF) const
StackOffset = ((StackOffset+StackAlign-1)/StackAlign*StackAlign);
for (unsigned i = 0, e = CSI.size(); i != e ; ++i) {
- if (CSI[i].getRegClass() != Mips::CPURegsRegisterClass)
+ unsigned Reg = CSI[i].getReg();
+ if (!Mips::CPURegsRegisterClass->contains(Reg))
break;
MFI->setObjectOffset(CSI[i].getFrameIdx(), StackOffset);
TopCPUSavedRegOff = StackOffset;
@@ -311,7 +284,8 @@ void MipsRegisterInfo::adjustMipsStackFrame(MachineFunction &MF) const
// Adjust FPU Callee Saved Registers Area. This Area must be
// aligned to the default Stack Alignment requirements.
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- if (CSI[i].getRegClass() == Mips::CPURegsRegisterClass)
+ unsigned Reg = CSI[i].getReg();
+ if (Mips::CPURegsRegisterClass->contains(Reg))
continue;
MFI->setObjectOffset(CSI[i].getFrameIdx(), StackOffset);
TopFPUSavedRegOff = StackOffset;
@@ -528,4 +502,3 @@ getDwarfRegNum(unsigned RegNum, bool isEH) const {
}
#include "MipsGenRegisterInfo.inc"
-
diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
index bc857b8..b500a65 100644
--- a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h
@@ -42,9 +42,6 @@ struct MipsRegisterInfo : public MipsGenRegisterInfo {
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
- const TargetRegisterClass* const*
- getCalleeSavedRegClasses(const MachineFunction* MF = 0) const;
-
BitVector getReservedRegs(const MachineFunction &MF) const;
bool hasFP(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.cpp b/contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.cpp
index 6a4d0d6..7a948de 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.cpp
+++ b/contrib/llvm/lib/Target/PIC16/PIC16DebugInfo.cpp
@@ -416,7 +416,7 @@ void PIC16DbgInfo::EmitAuxEntry(const std::string VarName, int Aux[], int Num,
if (!TagName.empty()) Tmp += ", " + TagName;
for (int i = 0; i<Num; i++)
- Tmp += "," + utostr(Aux[i] && 0xff);
+ Tmp += "," + utostr(Aux[i] & 0xff);
OS.EmitRawText("\n\t.dim " + Twine(VarName) + ", 1" + Tmp);
}
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.cpp b/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.cpp
index f479f46..54a6a28 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.cpp
@@ -672,7 +672,8 @@ SDValue PIC16TargetLowering::ExpandGlobalAddress(SDNode *N,
// FIXME there isn't really debug info here
DebugLoc dl = G->getDebugLoc();
- SDValue TGA = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i8,
+ SDValue TGA = DAG.getTargetGlobalAddress(G->getGlobal(), N->getDebugLoc(),
+ MVT::i8,
G->getOffset());
SDValue Offset = DAG.getConstant(0, MVT::i8);
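DAG.getTargetGlobalAddress gained a DebugLoc parameter, and the PIC16 and PowerPC call sites in this patch are updated to pass the originating node's location. The new call shape, with GV and the offset as placeholders:

    // The target global-address node now records its source location.
    SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, /*Offset=*/0);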
@@ -1120,6 +1121,7 @@ SDValue PIC16TargetLowering::
LowerIndirectCallArguments(SDValue Chain, SDValue InFlag,
SDValue DataAddr_Lo, SDValue DataAddr_Hi,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG) const {
unsigned NumOps = Outs.size();
@@ -1136,7 +1138,7 @@ LowerIndirectCallArguments(SDValue Chain, SDValue InFlag,
unsigned RetVals = Ins.size();
for (unsigned i = 0, ArgOffset = RetVals; i < NumOps; i++) {
// Get the arguments
- Arg = Outs[i].Val;
+ Arg = OutVals[i];
Ops.clear();
Ops.push_back(Chain);
@@ -1158,6 +1160,7 @@ LowerIndirectCallArguments(SDValue Chain, SDValue InFlag,
SDValue PIC16TargetLowering::
LowerDirectCallArguments(SDValue ArgLabel, SDValue Chain, SDValue InFlag,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
unsigned NumOps = Outs.size();
std::string Name;
@@ -1183,7 +1186,7 @@ LowerDirectCallArguments(SDValue ArgLabel, SDValue Chain, SDValue InFlag,
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
for (unsigned i=0, Offset = 0; i<NumOps; i++) {
// Get the argument
- Arg = Outs[i].Val;
+ Arg = OutVals[i];
StoreOffset = (Offset + AddressOffset);
// Store the argument on frame
@@ -1282,6 +1285,7 @@ SDValue
PIC16TargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
// Number of values to return
@@ -1298,7 +1302,7 @@ PIC16TargetLowering::LowerReturn(SDValue Chain,
SDValue BS = DAG.getConstant(1, MVT::i8);
SDValue RetVal;
for(unsigned i=0;i<NumRet; ++i) {
- RetVal = Outs[i].Val;
+ RetVal = OutVals[i];
Chain = DAG.getNode (PIC16ISD::PIC16Store, dl, MVT::Other, Chain, RetVal,
ES, BS,
DAG.getConstant (i, MVT::i8));
@@ -1374,6 +1378,7 @@ PIC16TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -1428,7 +1433,7 @@ PIC16TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Considering the GlobalAddressNode case here.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
const GlobalValue *GV = G->getGlobal();
- Callee = DAG.getTargetGlobalAddress(GV, MVT::i8);
+ Callee = DAG.getTargetGlobalAddress(GV, dl, MVT::i8);
Name = G->getGlobal()->getName();
} else {// Considering the ExternalSymbol case here
ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Callee);
@@ -1461,12 +1466,13 @@ PIC16TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SDValue CallArgs;
if (IsDirectCall) {
CallArgs = LowerDirectCallArguments(ArgLabel, Chain, OperFlag,
- Outs, dl, DAG);
+ Outs, OutVals, dl, DAG);
Chain = getChain(CallArgs);
OperFlag = getOutFlag(CallArgs);
} else {
CallArgs = LowerIndirectCallArguments(Chain, OperFlag, DataAddr_Lo,
- DataAddr_Hi, Outs, Ins, dl, DAG);
+ DataAddr_Hi, Outs, OutVals, Ins,
+ dl, DAG);
Chain = getChain(CallArgs);
OperFlag = getOutFlag(CallArgs);
}
@@ -1791,14 +1797,14 @@ static PIC16CC::CondCodes IntCCToPIC16CC(ISD::CondCode CC) {
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
ISD::CondCode CC, unsigned &SPCC) {
if (isa<ConstantSDNode>(RHS) &&
- cast<ConstantSDNode>(RHS)->getZExtValue() == 0 &&
+ cast<ConstantSDNode>(RHS)->isNullValue() &&
CC == ISD::SETNE &&
(LHS.getOpcode() == PIC16ISD::SELECT_ICC &&
LHS.getOperand(3).getOpcode() == PIC16ISD::SUBCC) &&
isa<ConstantSDNode>(LHS.getOperand(0)) &&
isa<ConstantSDNode>(LHS.getOperand(1)) &&
- cast<ConstantSDNode>(LHS.getOperand(0))->getZExtValue() == 1 &&
- cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 0) {
+ cast<ConstantSDNode>(LHS.getOperand(0))->isOne() &&
+ cast<ConstantSDNode>(LHS.getOperand(1))->isNullValue()) {
SDValue CMPCC = LHS.getOperand(3);
SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
LHS = CMPCC.getOperand(0);
@@ -1928,15 +1934,12 @@ PIC16TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
- E = BB->succ_end(); I != E; ++I)
- sinkMBB->addSuccessor(*I);
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while (!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
// Next, add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
@@ -1953,11 +1956,12 @@ PIC16TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(BB, dl, TII.get(PIC16::PHI), MI->getOperand(0).getReg())
+ BuildMI(*BB, BB->begin(), dl,
+ TII.get(PIC16::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
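The custom inserters in this patch stop copying successor lists by hand. The new pattern splits the block after the pseudo with splice(), lets transferSuccessorsAndUpdatePHIs() move the successor edges and fix up PHIs that named the old block, and finally erases the pseudo from its parent instead of going through the MachineFunction. Condensed to its core, using the block names from the comments above:

    // Everything after the pseudo MI moves to sinkMBB, together with
    // BB's successor edges and the PHIs that referenced BB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    // BB now ends at the branch; wire up the diamond explicitly.
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    MI->eraseFromParent(); // the pseudo instruction is gone now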
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.h b/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.h
index eea17f8..0a7506c 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.h
+++ b/contrib/llvm/lib/Target/PIC16/PIC16ISelLowering.h
@@ -106,12 +106,14 @@ namespace llvm {
SDValue
LowerDirectCallArguments(SDValue ArgLabel, SDValue Chain, SDValue InFlag,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
SDValue
LowerIndirectCallArguments(SDValue Chain, SDValue InFlag,
SDValue DataAddr_Lo, SDValue DataAddr_Hi,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG) const;
@@ -143,6 +145,7 @@ namespace llvm {
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -151,6 +154,7 @@ namespace llvm {
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
SDValue ExpandStore(SDNode *N, SelectionDAG &DAG) const;
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.cpp b/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.cpp
index 793dd9f..e784f74 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.cpp
@@ -151,25 +151,20 @@ void PIC16InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
llvm_unreachable("Can't load this register from stack slot");
}
-bool PIC16InstrInfo::copyRegToReg (MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
-
- if (DestRC == PIC16::FSR16RegisterClass) {
- BuildMI(MBB, I, DL, get(PIC16::copy_fsr), DestReg).addReg(SrcReg);
- return true;
- }
-
- if (DestRC == PIC16::GPRRegisterClass) {
- BuildMI(MBB, I, DL, get(PIC16::copy_w), DestReg).addReg(SrcReg);
- return true;
- }
+void PIC16InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ unsigned Opc;
+ if (PIC16::FSR16RegClass.contains(DestReg, SrcReg))
+ Opc = PIC16::copy_fsr;
+ else if (PIC16::GPRRegClass.contains(DestReg, SrcReg))
+ Opc = PIC16::copy_w;
+ else
+ llvm_unreachable("Impossible reg-to-reg copy");
- // Not yet supported.
- return false;
+ BuildMI(MBB, I, DL, get(Opc), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
}
bool PIC16InstrInfo::isMoveInstr(const MachineInstr &MI,
@@ -196,15 +191,15 @@ bool PIC16InstrInfo::isMoveInstr(const MachineInstr &MI,
unsigned PIC16InstrInfo::
InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
if (FBB == 0) { // One way branch.
if (Cond.empty()) {
// Unconditional branch?
- DebugLoc dl;
- BuildMI(&MBB, dl, get(PIC16::br_uncond)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(PIC16::br_uncond)).addMBB(TBB);
}
return 1;
}
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.h b/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.h
index 40a4cb4..a3a77f1 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.h
+++ b/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.h
@@ -57,12 +57,10 @@ public:
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
virtual bool isMoveInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
@@ -70,7 +68,8 @@ public:
virtual
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.td b/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.td
index 24df251..86d36cb 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.td
+++ b/contrib/llvm/lib/Target/PIC16/PIC16InstrInfo.td
@@ -134,7 +134,7 @@ include "PIC16InstrFormats.td"
//===----------------------------------------------------------------------===//
// W = W Op F : Load the value from F and do Op to W.
-let isTwoAddress = 1, mayLoad = 1 in
+let Constraints = "$src = $dst", mayLoad = 1 in
class BinOpFW<bits<6> OpCode, string OpcStr, SDNode OpNode>:
ByteFormat<OpCode, (outs GPR:$dst),
(ins GPR:$src, i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi),
@@ -146,7 +146,7 @@ class BinOpFW<bits<6> OpCode, string OpcStr, SDNode OpNode>:
// F = F Op W : Load the value from F, do op with W and store in F.
// This insn class is not marked as TwoAddress because the reg is
// being used as a source operand only. (Remember a TwoAddress insn
-// needs a copyRegToReg.)
+// needs a copy.)
let mayStore = 1 in
class BinOpWF<bits<6> OpCode, string OpcStr, SDNode OpNode>:
ByteFormat<OpCode, (outs),
@@ -160,7 +160,7 @@ class BinOpWF<bits<6> OpCode, string OpcStr, SDNode OpNode>:
)]>;
// W = W Op L : Do Op of L with W and place result in W.
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
class BinOpWL<bits<6> opcode, string OpcStr, SDNode OpNode> :
LiteralFormat<opcode, (outs GPR:$dst),
(ins GPR:$src, i8imm:$literal),
@@ -220,7 +220,7 @@ def set_fsrlo:
"movwf ${fsr}L",
[]>;
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
def set_fsrhi:
ByteFormat<0, (outs FSR16:$dst),
(ins FSR16:$src, GPR:$val),
@@ -234,8 +234,8 @@ def set_pclath:
[(set PCLATHR:$dst , (MTPCLATH GPR:$val))]>;
//----------------------------
-// copyRegToReg
-// copyRegToReg insns. These are dummy. They should always be deleted
+// copyPhysReg
+// copyPhysReg insns. These are dummy. They should always be deleted
// by the optimizer and never be present in the final generated code.
// if they are, then we have to write correct macros for these insns.
//----------------------------
@@ -362,7 +362,7 @@ def addwfc: BinOpWF<0, "addwfc", adde>; // With Carry.
}
// W -= [F] ; load from F and sub the value from W.
-let isTwoAddress = 1, mayLoad = 1 in
+let Constraints = "$src = $dst", mayLoad = 1 in
class SUBFW<bits<6> OpCode, string OpcStr, SDNode OpNode>:
ByteFormat<OpCode, (outs GPR:$dst),
(ins GPR:$src, i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi),
@@ -418,7 +418,7 @@ def orlw : BinOpWL<0, "iorlw", or>;
// sublw
// W = C - W ; sub W from literal. (Without borrow).
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
class SUBLW<bits<6> opcode, string OpcStr, SDNode OpNode> :
LiteralFormat<opcode, (outs GPR:$dst),
(ins GPR:$src, i8imm:$literal),
@@ -426,7 +426,7 @@ class SUBLW<bits<6> opcode, string OpcStr, SDNode OpNode> :
[(set GPR:$dst, (OpNode (i8 imm:$literal), GPR:$src))]>;
// subwl
// W = W - C ; sub literal from W (Without borrow).
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
class SUBWL<bits<6> opcode, string OpcStr, SDNode OpNode> :
LiteralFormat<opcode, (outs GPR:$dst),
(ins GPR:$src, i8imm:$literal),
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.cpp b/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.cpp
index b080542..1bcc497 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.cpp
@@ -20,7 +20,7 @@
#include "PIC16ISelLowering.h"
using namespace llvm;
-PIC16MCAsmInfo::PIC16MCAsmInfo(const Target &T, const StringRef &TT) {
+PIC16MCAsmInfo::PIC16MCAsmInfo(const Target &T, StringRef TT) {
CommentString = ";";
GlobalPrefix = PAN::getTagName(PAN::PREFIX_SYMBOL);
GlobalDirective = "\tglobal\t";
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.h b/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.h
index e84db85..6e1c111 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.h
+++ b/contrib/llvm/lib/Target/PIC16/PIC16MCAsmInfo.h
@@ -25,7 +25,7 @@ namespace llvm {
const char *RomData16bitsDirective;
const char *RomData32bitsDirective;
public:
- PIC16MCAsmInfo(const Target &T, const StringRef &TT);
+ PIC16MCAsmInfo(const Target &T, StringRef TT);
virtual const char *getDataASDirective(unsigned size, unsigned AS) const;
};
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16MemSelOpt.cpp b/contrib/llvm/lib/Target/PIC16/PIC16MemSelOpt.cpp
index ab81ed1..241170b 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16MemSelOpt.cpp
+++ b/contrib/llvm/lib/Target/PIC16/PIC16MemSelOpt.cpp
@@ -117,7 +117,7 @@ bool MemSelOpt::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
DebugLoc dl = I->getDebugLoc();
BuildMI(*MBB, I, dl, TII->get(PIC16::pagesel)).addExternalSymbol("$");
Changed = true;
- PageChanged = 0;
+ PageChanged = 0;
}
}
}
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp b/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp
index c282521..27f1cf5 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp
+++ b/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp
@@ -150,8 +150,8 @@ void PIC16Cloner::markCallGraph(CallGraphNode *CGN, string StringMark) {
// For PIC16, automatic variables of a function are emitted as globals.
-// Clone the auto variables of a function and put them in ValueMap,
-// this ValueMap will be used while
+// Clone the auto variables of a function and put them in VMap,
+// this VMap will be used while
// Cloning the code of function itself.
//
void PIC16Cloner::CloneAutos(Function *F) {
@@ -160,11 +160,11 @@ void PIC16Cloner::CloneAutos(Function *F) {
Module *M = F->getParent();
Module::GlobalListType &Globals = M->getGlobalList();
- // Clear the leftovers in ValueMap by any previous cloning.
- ValueMap.clear();
+ // Clear the leftovers in VMap by any previous cloning.
+ VMap.clear();
// Find the auto globls for this function and clone them, and put them
- // in ValueMap.
+ // in VMap.
std::string FnName = F->getName().str();
std::string VarName, ClonedVarName;
for (Module::global_iterator I = M->global_begin(), E = M->global_end();
@@ -182,8 +182,8 @@ void PIC16Cloner::CloneAutos(Function *F) {
// Add these new globals to module's globals list.
Globals.push_back(ClonedGV);
- // Update ValueMap.
- ValueMap[GV] = ClonedGV;
+ // Update VMap.
+ VMap[GV] = ClonedGV;
}
}
}
@@ -236,10 +236,10 @@ void PIC16Cloner::cloneSharedFunctions(CallGraphNode *CGN) {
}
// Clone the given function and return it.
-// Note: it uses the ValueMap member of the class, which is already populated
+// Note: it uses the VMap member of the class, which is already populated
// by cloneAutos by the time we reach here.
-// FIXME: Should we just pass ValueMap's ref as a parameter here? rather
-// than keeping the ValueMap as a member.
+// FIXME: Should we just pass VMap's ref as a parameter here? rather
+// than keeping the VMap as a member.
Function *
PIC16Cloner::cloneFunction(Function *OrgF) {
Function *ClonedF;
@@ -252,11 +252,11 @@ PIC16Cloner::cloneFunction(Function *OrgF) {
}
// Clone does not exist.
- // First clone the autos, and populate ValueMap.
+ // First clone the autos, and populate VMap.
CloneAutos(OrgF);
// Now create the clone.
- ClonedF = CloneFunction(OrgF, ValueMap);
+ ClonedF = CloneFunction(OrgF, VMap);
// The new function should be for interrupt line. Therefore should have
// the name suffixed with IL and section attribute marked with IL.
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h b/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h
index 24c1152..e8b5aa4 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h
+++ b/contrib/llvm/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h
@@ -15,7 +15,7 @@
#ifndef PIC16CLONER_H
#define PIC16CLONER_H
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ValueMap.h"
using namespace llvm;
using std::vector;
@@ -72,7 +72,7 @@ namespace llvm {
// the corresponding cloned auto variable of the cloned function.
// This value map is passed during the function cloning so that all the
// uses of auto variables be updated properly.
- DenseMap<const Value*, Value*> ValueMap;
+ ValueMap<const Value*, Value*> VMap;
// Map of a already cloned functions.
map<Function *, Function *> ClonedFunctionMap;
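Swapping the DenseMap member for a ValueMap is not just a rename: ValueMap installs callback handles on its keys, so if a mapped Value is RAUW'd or deleted during cloning the entry follows the replacement or disappears, where a DenseMap would keep a dangling pointer. Minimal usage sketch against the llvm/ADT/ValueMap.h interface included above (GV and ClonedGV stand in for the globals handled in CloneAutos):

    llvm::ValueMap<const llvm::Value*, llvm::Value*> VMap;

    // Keys are tracked: replaceAllUsesWith on GV re-keys this entry,
    // and deleting GV removes it, instead of leaving a stale pointer.
    VMap[GV] = ClonedGV;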
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.cpp b/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.cpp
index 30a1d4a..dff98d1 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.cpp
@@ -35,13 +35,6 @@ getCalleeSavedRegs(const MachineFunction *MF) const {
return CalleeSavedRegs;
}
-// PIC16 Callee Saved Reg Classes
-const TargetRegisterClass* const*
-PIC16RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- static const TargetRegisterClass * const CalleeSavedRegClasses[] = { 0 };
- return CalleeSavedRegClasses;
-}
-
BitVector PIC16RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
return Reserved;
diff --git a/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.h b/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.h
index 6a9a038..5536a61 100644
--- a/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.h
+++ b/contrib/llvm/lib/Target/PIC16/PIC16RegisterInfo.h
@@ -41,10 +41,6 @@ class PIC16RegisterInfo : public PIC16GenRegisterInfo {
virtual const unsigned*
getCalleeSavedRegs(const MachineFunction *MF = 0) const;
- // PIC16 callee saved register classes
- virtual const TargetRegisterClass* const *
- getCalleeSavedRegClasses(const MachineFunction *MF) const;
-
virtual BitVector getReservedRegs(const MachineFunction &MF) const;
virtual bool hasFP(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
index 66dfd4b..db11fde 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCHazardRecognizers.cpp
@@ -78,7 +78,7 @@ PPCHazardRecognizer970::GetInstrType(unsigned Opcode,
isLoad = TID.mayLoad();
isStore = TID.mayStore();
- unsigned TSFlags = TID.TSFlags;
+ uint64_t TSFlags = TID.TSFlags;
isFirst = TSFlags & PPCII::PPC970_First;
isSingle = TSFlags & PPCII::PPC970_Single;
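TSFlags in TargetInstrDesc is a uint64_t; copying it into an unsigned would silently truncate any flag bits above bit 31 before the PPC970 masks are applied. Keeping the full width is the whole fix; as a standalone sketch:

    // A 32-bit local would drop high flag bits before these tests.
    uint64_t TSFlags = TID.TSFlags;
    bool isFirst  = (TSFlags & PPCII::PPC970_First) != 0;
    bool isSingle = (TSFlags & PPCII::PPC970_Single) != 0;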
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 10b516a..d47d989 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1203,11 +1203,11 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
- const GlobalValue *GV = GSDN->getGlobal();
- SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
- SDValue Zero = DAG.getConstant(0, PtrVT);
// FIXME there isn't really any debug info here
DebugLoc dl = GSDN->getDebugLoc();
+ const GlobalValue *GV = GSDN->getGlobal();
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GSDN->getOffset());
+ SDValue Zero = DAG.getConstant(0, PtrVT);
const TargetMachine &TM = DAG.getTarget();
@@ -1631,7 +1631,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8;
int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
- isImmutable, false);
+ isImmutable);
// Create load nodes to retrieve arguments from the stack.
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
@@ -1700,8 +1700,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
FuncInfo->setVarArgsStackOffset(
MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
- CCInfo.getNextStackOffset(),
- true, false));
+ CCInfo.getNextStackOffset(), true));
FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
@@ -1911,7 +1910,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
CurArgOffset = CurArgOffset + (4 - ObjSize);
}
// The value of the object is its address.
- int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true, false);
+ int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
InVals.push_back(FIN);
if (ObjSize==1 || ObjSize==2) {
@@ -1936,7 +1935,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
// the object.
if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
- int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true, false);
+ int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0,
@@ -2062,7 +2061,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
if (needsLoad) {
int FI = MFI->CreateFixedObject(ObjSize,
CurArgOffset + (ArgSize - ObjSize),
- isImmutable, false);
+ isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0,
false, false, 0);
@@ -2097,7 +2096,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
FuncInfo->setVarArgsFrameIndex(
MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
- Depth, true, false));
+ Depth, true));
SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
// If this function is vararg, store any remaining integer argument regs
@@ -2137,6 +2136,7 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
unsigned CC,
const SmallVectorImpl<ISD::OutputArg>
&Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
unsigned &nAltivecParamsAtEnd) {
// Count how many bytes are to be pushed on the stack, including the linkage
// area, and parameter passing area. We start with 24/48 bytes, which is
@@ -2153,9 +2153,9 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
// 16-byte aligned.
nAltivecParamsAtEnd = 0;
for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
- EVT ArgVT = Arg.getValueType();
+ EVT ArgVT = Outs[i].VT;
// Varargs Altivec parameters are padded to a 16 byte boundary.
if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
@@ -2314,8 +2314,7 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
int NewRetAddrLoc = SPDiff + PPCFrameInfo::getReturnSaveOffset(isPPC64,
isDarwinABI);
int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
- NewRetAddrLoc,
- true, false);
+ NewRetAddrLoc, true);
EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
@@ -2328,7 +2327,7 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
int NewFPLoc =
SPDiff + PPCFrameInfo::getFramePointerSaveOffset(isPPC64, isDarwinABI);
int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
- true, false);
+ true);
SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
PseudoSourceValue::getFixedStack(NewFPIdx), 0,
@@ -2346,7 +2345,7 @@ CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
int Offset = ArgOffset + SPDiff;
uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
- int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true,false);
+ int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
SDValue FIN = DAG.getFrameIndex(FI, VT);
TailCallArgumentInfo Info;
@@ -2472,7 +2471,8 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), Callee.getValueType());
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
+ Callee.getValueType());
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType());
else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
@@ -2705,6 +2705,7 @@ PPCTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -2714,11 +2715,11 @@ PPCTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) {
return LowerCall_SVR4(Chain, Callee, CallConv, isVarArg,
- isTailCall, Outs, Ins,
+ isTailCall, Outs, OutVals, Ins,
dl, DAG, InVals);
} else {
return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
- isTailCall, Outs, Ins,
+ isTailCall, Outs, OutVals, Ins,
dl, DAG, InVals);
}
}
@@ -2728,6 +2729,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -2737,7 +2739,6 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
assert((CallConv == CallingConv::C ||
CallConv == CallingConv::Fast) && "Unknown calling convention!");
- EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
unsigned PtrByteSize = 4;
MachineFunction &MF = DAG.getMachineFunction();
@@ -2769,7 +2770,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
unsigned NumArgs = Outs.size();
for (unsigned i = 0; i != NumArgs; ++i) {
- EVT ArgVT = Outs[i].Val.getValueType();
+ EVT ArgVT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
bool Result;
@@ -2838,7 +2839,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
i != e;
++i) {
CCValAssign &VA = ArgLocs[i];
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
if (Flags.isByVal()) {
@@ -2934,6 +2935,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -2961,7 +2963,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// prereserved space for [SP][CR][LR][3 x unused].
unsigned NumBytes =
CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv,
- Outs,
+ Outs, OutVals,
nAltivecParamsAtEnd);
// Calculate by how many bytes the stack has to be adjusted in case of tail
@@ -3025,7 +3027,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
SmallVector<SDValue, 8> MemOpChains;
for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
// PtrOff will be used to store the current argument to the stack if a
@@ -3051,7 +3053,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// Everything else is passed left-justified.
EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
if (GPR_idx != NumGPRs) {
- SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
+ SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, dl, Chain, Arg,
NULL, 0, VT, false, false, 0);
MemOpChains.push_back(Load.getValue(1));
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
@@ -3228,8 +3230,8 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
ArgOffset = ((ArgOffset+15)/16)*16;
ArgOffset += 12*16;
for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = Outs[i].Val;
- EVT ArgType = Arg.getValueType();
+ SDValue Arg = OutVals[i];
+ EVT ArgType = Outs[i].VT;
if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
if (++j > NumVRs) {
@@ -3297,6 +3299,7 @@ SDValue
PPCTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
SmallVector<CCValAssign, 16> RVLocs;
@@ -3318,7 +3321,7 @@ PPCTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- Outs[i].Val, Flag);
+ OutVals[i], Flag);
Flag = Chain.getValue(1);
}
@@ -3376,8 +3379,7 @@ PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
// Find out what the fix offset of the frame pointer save area.
int LROffset = PPCFrameInfo::getReturnSaveOffset(isPPC64, isDarwinABI);
// Allocate the frame index for frame pointer save area.
- RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset,
- true, false);
+ RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true);
// Save the result.
FI->setReturnAddrSaveIndex(RASI);
}
@@ -3403,8 +3405,7 @@ PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
isDarwinABI);
// Allocate the frame index for frame pointer save area.
- FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset,
- true, false);
+ FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
// Save the result.
FI->setFramePointerSaveIndex(FPSI);
}
@@ -4518,7 +4519,10 @@ PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
F->insert(It, loopMBB);
F->insert(It, exitMBB);
- exitMBB->transferSuccessors(BB);
+ exitMBB->splice(exitMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
unsigned TmpReg = (!BinOpcode) ? incr :
@@ -4583,7 +4587,10 @@ PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
F->insert(It, loopMBB);
F->insert(It, exitMBB);
- exitMBB->transferSuccessors(BB);
+ exitMBB->splice(exitMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
const TargetRegisterClass *RC =
@@ -4716,23 +4723,22 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
unsigned SelectPred = MI->getOperand(4).getImm();
DebugLoc dl = MI->getDebugLoc();
- BuildMI(BB, dl, TII->get(PPC::BCC))
- .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
- E = BB->succ_end(); I != E; ++I)
- sinkMBB->addSuccessor(*I);
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while (!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
// Next, add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
+ BuildMI(BB, dl, TII->get(PPC::BCC))
+ .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
+
// copy0MBB:
// %FalseValue = ...
// # fallthrough to sinkMBB
@@ -4745,7 +4751,8 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(BB, dl, TII->get(PPC::PHI), MI->getOperand(0).getReg())
+ BuildMI(*BB, BB->begin(), dl,
+ TII->get(PPC::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
}
@@ -4831,7 +4838,10 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
F->insert(It, loop2MBB);
F->insert(It, midMBB);
F->insert(It, exitMBB);
- exitMBB->transferSuccessors(BB);
+ exitMBB->splice(exitMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// thisMBB:
// ...
@@ -4899,7 +4909,10 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
F->insert(It, loop2MBB);
F->insert(It, midMBB);
F->insert(It, exitMBB);
- exitMBB->transferSuccessors(BB);
+ exitMBB->splice(exitMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
const TargetRegisterClass *RC =
@@ -5025,7 +5038,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
llvm_unreachable("Unexpected instr type to insert");
}
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -5042,19 +5055,19 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
default: break;
case PPCISD::SHL:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- if (C->getZExtValue() == 0) // 0 << V -> 0.
+ if (C->isNullValue()) // 0 << V -> 0.
return N->getOperand(0);
}
break;
case PPCISD::SRL:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- if (C->getZExtValue() == 0) // 0 >>u V -> 0.
+ if (C->isNullValue()) // 0 >>u V -> 0.
return N->getOperand(0);
}
break;
case PPCISD::SRA:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- if (C->getZExtValue() == 0 || // 0 >>s V -> 0.
+ if (C->isNullValue() || // 0 >>s V -> 0.
C->isAllOnesValue()) // -1 >>s V -> -1.
return N->getOperand(0);
}
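The getZExtValue() == 0 comparisons become isNullValue() (and == 1 becomes isOne()) because getZExtValue() asserts when the constant does not fit in 64 bits, while the APInt-based predicates work at any width. The pattern:

    // Width-safe test: valid even for constants wider than 64 bits.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0)))
      if (C->isNullValue()) // 0 << V -> 0.
        return N->getOperand(0);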
@@ -5380,11 +5393,8 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
-/// vector. If it is invalid, don't add anything to Ops. If hasMemory is true
-/// it means one of the asm constraint of the inline asm instruction being
-/// processed is 'm'.
+/// vector. If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Letter,
- bool hasMemory,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
SDValue Result(0,0);
@@ -5443,7 +5453,7 @@ void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Letter,
}
// Handle standard constraint letters.
- TargetLowering::LowerAsmOperandForConstraint(Op, Letter, hasMemory, Ops, DAG);
+ TargetLowering::LowerAsmOperandForConstraint(Op, Letter, Ops, DAG);
}
// isLegalAddressingMode - Return true if the addressing mode represented
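LowerAsmOperandForConstraint loses its hasMemory flag; as the updated doc comment says, targets no longer need to know whether an 'm' constraint is in play, so overrides simply stop forwarding it when delegating:

    // Delegate the standard constraint letters to the default handling;
    // no memory-constraint flag is threaded through any more.
    TargetLowering::LowerAsmOperandForConstraint(Op, Letter, Ops, DAG);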
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 6dcaf1e..700816f 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -318,12 +318,9 @@ namespace llvm {
unsigned getByValTypeAlignment(const Type *Ty) const;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
- /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
- /// true it means one of the asm constraint of the inline asm instruction
- /// being processed is 'm'.
+ /// vector. If it is invalid, don't add anything to Ops.
virtual void LowerAsmOperandForConstraint(SDValue Op,
char ConstraintLetter,
- bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
@@ -438,6 +435,7 @@ namespace llvm {
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -446,6 +444,7 @@ namespace llvm {
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
SDValue
@@ -465,6 +464,7 @@ namespace llvm {
LowerCall_Darwin(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -472,6 +472,7 @@ namespace llvm {
LowerCall_SVR4(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 1b7a778..1574aa3 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -316,9 +316,8 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
unsigned
PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc argument
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
@@ -327,50 +326,46 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
// One-way branch.
if (FBB == 0) {
if (Cond.empty()) // Unconditional branch
- BuildMI(&MBB, dl, get(PPC::B)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB);
else // Conditional branch
- BuildMI(&MBB, dl, get(PPC::BCC))
+ BuildMI(&MBB, DL, get(PPC::BCC))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
return 1;
}
// Two-way Conditional Branch.
- BuildMI(&MBB, dl, get(PPC::BCC))
+ BuildMI(&MBB, DL, get(PPC::BCC))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
- BuildMI(&MBB, dl, get(PPC::B)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB);
return 2;
}
-bool PPCInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
- if (DestRC != SrcRC) {
- // Not yet supported!
- return false;
- }
-
- if (DestRC == PPC::GPRCRegisterClass) {
- BuildMI(MBB, MI, DL, get(PPC::OR), DestReg).addReg(SrcReg).addReg(SrcReg);
- } else if (DestRC == PPC::G8RCRegisterClass) {
- BuildMI(MBB, MI, DL, get(PPC::OR8), DestReg).addReg(SrcReg).addReg(SrcReg);
- } else if (DestRC == PPC::F4RCRegisterClass ||
- DestRC == PPC::F8RCRegisterClass) {
- BuildMI(MBB, MI, DL, get(PPC::FMR), DestReg).addReg(SrcReg);
- } else if (DestRC == PPC::CRRCRegisterClass) {
- BuildMI(MBB, MI, DL, get(PPC::MCRF), DestReg).addReg(SrcReg);
- } else if (DestRC == PPC::VRRCRegisterClass) {
- BuildMI(MBB, MI, DL, get(PPC::VOR), DestReg).addReg(SrcReg).addReg(SrcReg);
- } else if (DestRC == PPC::CRBITRCRegisterClass) {
- BuildMI(MBB, MI, DL, get(PPC::CROR), DestReg).addReg(SrcReg).addReg(SrcReg);
- } else {
- // Attempt to copy register that is not GPR or FPR
- return false;
- }
-
- return true;
+void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ unsigned Opc;
+ if (PPC::GPRCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::OR;
+ else if (PPC::G8RCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::OR8;
+ else if (PPC::F4RCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::FMR;
+ else if (PPC::CRRCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::MCRF;
+ else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::VOR;
+ else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
+ Opc = PPC::CROR;
+ else
+ llvm_unreachable("Impossible reg-to-reg copy");
+
+ const TargetInstrDesc &TID = get(Opc);
+ if (TID.getNumOperands() == 3)
+ BuildMI(MBB, I, DL, TID, DestReg)
+ .addReg(SrcReg).addReg(SrcReg, getKillRegState(KillSrc));
+ else
+ BuildMI(MBB, I, DL, TID, DestReg).addReg(SrcReg, getKillRegState(KillSrc));
}
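The copyPhysReg rewrite above replaces the per-class if/else bodies with a single opcode selection followed by one BuildMI call, and asserts on impossible copies instead of returning false. A toy standalone model of the contains(DestReg, SrcReg) dispatch; the register ranges here are invented:

#include <cstdio>
#include <stdexcept>

enum Opcode { OR, OR8, FMR };

struct RegClass {
  unsigned Lo, Hi;                      // hypothetical contiguous reg range
  bool contains(unsigned A, unsigned B) const {
    return A >= Lo && A <= Hi && B >= Lo && B <= Hi;
  }
};

static const RegClass GPRC{0, 31}, G8RC{32, 63}, F4RC{64, 95};

Opcode copyOpcode(unsigned Dst, unsigned Src) {
  if (GPRC.contains(Dst, Src)) return OR;
  if (G8RC.contains(Dst, Src)) return OR8;
  if (F4RC.contains(Dst, Src)) return FMR;
  throw std::logic_error("impossible reg-to-reg copy");
}

int main() {
  std::printf("%d\n", copyOpcode(3, 7));  // 0 (OR): both regs are in GPRC
}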
bool
@@ -654,121 +649,6 @@ PPCInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
return &*MIB;
}
-/// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
-/// copy instructions, turning them into load/store instructions.
-MachineInstr *PPCInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const {
- if (Ops.size() != 1) return NULL;
-
- // Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
- // it takes more than one instruction to store it.
- unsigned Opc = MI->getOpcode();
- unsigned OpNum = Ops[0];
-
- MachineInstr *NewMI = NULL;
- if ((Opc == PPC::OR &&
- MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
- if (OpNum == 0) { // move -> store
- unsigned InReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::STW))
- .addReg(InReg,
- getKillRegState(isKill) |
- getUndefRegState(isUndef)),
- FrameIndex);
- } else { // move -> load
- unsigned OutReg = MI->getOperand(0).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::LWZ))
- .addReg(OutReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef)),
- FrameIndex);
- }
- } else if ((Opc == PPC::OR8 &&
- MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
- if (OpNum == 0) { // move -> store
- unsigned InReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::STD))
- .addReg(InReg,
- getKillRegState(isKill) |
- getUndefRegState(isUndef)),
- FrameIndex);
- } else { // move -> load
- unsigned OutReg = MI->getOperand(0).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::LD))
- .addReg(OutReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef)),
- FrameIndex);
- }
- } else if (Opc == PPC::FMR || Opc == PPC::FMRSD) {
- // The register may be F4RC or F8RC, and that determines the memory op.
- unsigned OrigReg = MI->getOperand(OpNum).getReg();
- // We cannot tell the register class from a physreg alone.
- if (TargetRegisterInfo::isPhysicalRegister(OrigReg))
- return NULL;
- const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(OrigReg);
- const bool is64 = RC == PPC::F8RCRegisterClass;
-
- if (OpNum == 0) { // move -> store
- unsigned InReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(),
- get(is64 ? PPC::STFD : PPC::STFS))
- .addReg(InReg,
- getKillRegState(isKill) |
- getUndefRegState(isUndef)),
- FrameIndex);
- } else { // move -> load
- unsigned OutReg = MI->getOperand(0).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(),
- get(is64 ? PPC::LFD : PPC::LFS))
- .addReg(OutReg,
- RegState::Define |
- getDeadRegState(isDead) |
- getUndefRegState(isUndef)),
- FrameIndex);
- }
- }
-
- return NewMI;
-}
-
-bool PPCInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const {
- if (Ops.size() != 1) return false;
-
- // Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
- // it takes more than one instruction to store it.
- unsigned Opc = MI->getOpcode();
-
- if ((Opc == PPC::OR &&
- MI->getOperand(1).getReg() == MI->getOperand(2).getReg()))
- return true;
- else if ((Opc == PPC::OR8 &&
- MI->getOperand(1).getReg() == MI->getOperand(2).getReg()))
- return true;
- else if (Opc == PPC::FMR || Opc == PPC::FMRSD)
- return true;
-
- return false;
-}
-
-
bool PPCInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
index 7a9e11b..eadb21e 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.h
@@ -109,13 +109,12 @@ public:
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -135,23 +134,6 @@ public:
const MDNode *MDPtr,
DebugLoc DL) const;
- /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
- /// copy instructions, turning them into load/store instructions.
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- return 0;
- }
-
- virtual bool canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const;
-
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 0ff852c..4d6132a9 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -269,140 +269,6 @@ PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return Subtarget.isPPC64() ? SVR4_64_CalleeSavedRegs : SVR4_CalleeSavedRegs;
}
-const TargetRegisterClass* const*
-PPCRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- // 32-bit Darwin calling convention.
- static const TargetRegisterClass * const Darwin32_CalleeSavedRegClasses[] = {
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
-
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,
-
- &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
-
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
-
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
-
- &PPC::GPRCRegClass, 0
- };
-
- // 32-bit SVR4 calling convention.
- static const TargetRegisterClass * const SVR4_CalleeSavedRegClasses[] = {
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
- &PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,&PPC::GPRCRegClass,
-
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,
-
- &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
-
- &PPC::VRSAVERCRegClass,
-
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
-
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
-
- 0
- };
-
- // 64-bit Darwin calling convention.
- static const TargetRegisterClass * const Darwin64_CalleeSavedRegClasses[] = {
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
-
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,
-
- &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
-
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
-
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
-
- &PPC::G8RCRegClass, 0
- };
-
- // 64-bit SVR4 calling convention.
- static const TargetRegisterClass * const SVR4_64_CalleeSavedRegClasses[] = {
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
- &PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,&PPC::G8RCRegClass,
-
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,&PPC::F8RCRegClass,
- &PPC::F8RCRegClass,&PPC::F8RCRegClass,
-
- &PPC::CRRCRegClass,&PPC::CRRCRegClass,&PPC::CRRCRegClass,
-
- &PPC::VRSAVERCRegClass,
-
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
- &PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,&PPC::VRRCRegClass,
-
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,&PPC::CRBITRCRegClass,
- &PPC::CRBITRCRegClass,
-
- 0
- };
-
- if (Subtarget.isDarwinABI())
- return Subtarget.isPPC64() ? Darwin64_CalleeSavedRegClasses :
- Darwin32_CalleeSavedRegClasses;
-
- return Subtarget.isPPC64() ? SVR4_64_CalleeSavedRegClasses
- : SVR4_CalleeSavedRegClasses;
-}
-
// needsFP - Return true if the specified function should have a dedicated frame
// pointer register. This is true if the function has variable sized allocas or
// if frame pointer elimination is disabled.
@@ -1060,8 +926,7 @@ PPCRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(isPPC64,
isDarwinABI);
// Allocate the frame index for frame pointer save area.
- FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset,
- true, false);
+ FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
// Save the result.
FI->setFramePointerSaveIndex(FPSI);
}
@@ -1069,8 +934,7 @@ PPCRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// Reserve stack space to move the linkage area to in case of a tail call.
int TCSPDelta = 0;
if (GuaranteedTailCallOpt && (TCSPDelta = FI->getTailCallSPDelta()) < 0) {
- MF.getFrameInfo()->CreateFixedObject(-1 * TCSPDelta, TCSPDelta,
- true, false);
+ MF.getFrameInfo()->CreateFixedObject(-1 * TCSPDelta, TCSPDelta, true);
}
// Reserve a slot closest to SP or frame pointer if we have a dynalloc or
@@ -1127,9 +991,7 @@ PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- const TargetRegisterClass *RC = CSI[i].getRegClass();
-
- if (RC == PPC::GPRCRegisterClass) {
+ if (PPC::GPRCRegisterClass->contains(Reg)) {
HasGPSaveArea = true;
GPRegs.push_back(CSI[i]);
@@ -1137,7 +999,7 @@ PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
if (Reg < MinGPR) {
MinGPR = Reg;
}
- } else if (RC == PPC::G8RCRegisterClass) {
+ } else if (PPC::G8RCRegisterClass->contains(Reg)) {
HasG8SaveArea = true;
G8Regs.push_back(CSI[i]);
@@ -1145,7 +1007,7 @@ PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
if (Reg < MinG8R) {
MinG8R = Reg;
}
- } else if (RC == PPC::F8RCRegisterClass) {
+ } else if (PPC::F8RCRegisterClass->contains(Reg)) {
HasFPSaveArea = true;
FPRegs.push_back(CSI[i]);
@@ -1154,12 +1016,12 @@ PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
MinFPR = Reg;
}
// FIXME SVR4: Disable CR save area for now.
- } else if ( RC == PPC::CRBITRCRegisterClass
- || RC == PPC::CRRCRegisterClass) {
+ } else if (PPC::CRBITRCRegisterClass->contains(Reg)
+ || PPC::CRRCRegisterClass->contains(Reg)) {
// HasCRSaveArea = true;
- } else if (RC == PPC::VRSAVERCRegisterClass) {
+ } else if (PPC::VRSAVERCRegisterClass->contains(Reg)) {
HasVRSAVESaveArea = true;
- } else if (RC == PPC::VRRCRegisterClass) {
+ } else if (PPC::VRRCRegisterClass->contains(Reg)) {
HasVRSaveArea = true;
VRegs.push_back(CSI[i]);
@@ -1240,9 +1102,10 @@ PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
// which have the CR/CRBIT register class?
// Adjust the frame index of the CR spill slot.
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- const TargetRegisterClass *RC = CSI[i].getRegClass();
+ unsigned Reg = CSI[i].getReg();
- if (RC == PPC::CRBITRCRegisterClass || RC == PPC::CRRCRegisterClass) {
+ if (PPC::CRBITRCRegisterClass->contains(Reg) ||
+ PPC::CRRCRegisterClass->contains(Reg)) {
int FI = CSI[i].getFrameIdx();
FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
@@ -1257,9 +1120,9 @@ PPCRegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF)
// which have the VRSAVE register class?
// Adjust the frame index of the VRSAVE spill slot.
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
- const TargetRegisterClass *RC = CSI[i].getRegClass();
+ unsigned Reg = CSI[i].getReg();
- if (RC == PPC::VRSAVERCRegisterClass) {
+ if (PPC::VRSAVERCRegisterClass->contains(Reg)) {
int FI = CSI[i].getFrameIdx();
FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI));
@@ -1762,4 +1625,3 @@ int PPCRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
}
#include "PPCGenRegisterInfo.inc"
-
diff --git a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
index 43cf535..f026847 100644
--- a/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
+++ b/contrib/llvm/lib/Target/PowerPC/PPCRegisterInfo.h
@@ -42,9 +42,6 @@ public:
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
- const TargetRegisterClass* const*
- getCalleeSavedRegClasses(const MachineFunction *MF = 0) const;
-
BitVector getReservedRegs(const MachineFunction &MF) const;
/// targetHandlesStackFrameRounding - Returns true if the target is
diff --git a/contrib/llvm/lib/Target/README.txt b/contrib/llvm/lib/Target/README.txt
index 7fa73ed..4d7ee08 100644
--- a/contrib/llvm/lib/Target/README.txt
+++ b/contrib/llvm/lib/Target/README.txt
@@ -300,6 +300,14 @@ unsigned long reverse(unsigned v) {
return v ^ (t >> 8);
}
+Neither is this (a very standard idiom):
+
+int f(int n)
+{
+ return (((n) << 24) | (((n) & 0xff00) << 8)
+ | (((n) >> 8) & 0xff00) | ((n) >> 24));
+}
+
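For reference, the idiom above is a 32-bit byte swap. A runnable check against a reference implementation, written on uint32_t to sidestep the signed-shift questions the int version raises:

#include <cassert>
#include <cstdint>

uint32_t f(uint32_t n) {
  return (n << 24) | ((n & 0xff00) << 8) | ((n >> 8) & 0xff00) | (n >> 24);
}

uint32_t ref(uint32_t n) {                 // reference byte swap
  return ((n & 0x000000ffu) << 24) | ((n & 0x0000ff00u) << 8) |
         ((n & 0x00ff0000u) >> 8)  | ((n & 0xff000000u) >> 24);
}

int main() {
  for (uint32_t v : {0u, 0x12345678u, 0xffffffffu, 0x80000001u})
    assert(f(v) == ref(v));
}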
//===---------------------------------------------------------------------===//
[LOOP RECOGNITION]
@@ -898,17 +906,6 @@ The expression should optimize to something like
//===---------------------------------------------------------------------===//
-From GCC Bug 3756:
-int
-pn (int n)
-{
- return (n >= 0 ? 1 : -1);
-}
-Should combine to (n >> 31) | 1. Currently not optimized with "clang
--emit-llvm-bc | opt -std-compile-opts | llc".
-
-//===---------------------------------------------------------------------===//
-
void a(int variable)
{
if (variable == 4 || variable == 6)
@@ -1439,33 +1436,6 @@ This pattern repeats several times, basically doing:
//===---------------------------------------------------------------------===//
-186.crafty contains this interesting pattern:
-
-%77 = call i8* @strstr(i8* getelementptr ([6 x i8]* @"\01LC5", i32 0, i32 0),
- i8* %30)
-%phitmp648 = icmp eq i8* %77, getelementptr ([6 x i8]* @"\01LC5", i32 0, i32 0)
-br i1 %phitmp648, label %bb70, label %bb76
-
-bb70: ; preds = %OptionMatch.exit91, %bb69
- %78 = call i32 @strlen(i8* %30) nounwind readonly align 1 ; <i32> [#uses=1]
-
-This is basically:
- cststr = "abcdef";
- if (strstr(cststr, P) == cststr) {
- x = strlen(P);
- ...
-
-The strstr call would be significantly cheaper written as:
-
-cststr = "abcdef";
-if (memcmp(P, str, strlen(P)))
- x = strlen(P);
-
-This is memcmp+strlen instead of strstr. This also makes the strlen fully
-redundant.
-
-//===---------------------------------------------------------------------===//
-
186.crafty also contains this code:
%1906 = call i32 @strlen(i8* getelementptr ([32 x i8]* @pgn_event, i32 0,i32 0))
@@ -1863,3 +1833,91 @@ LLVM prefers comparisons with zero over non-zero in general, but in this
case it chooses instead to keep the max operation obvious.
//===---------------------------------------------------------------------===//
+
+Take the following testcase on x86-64 (similar testcases exist for all targets
+with addc/adde):
+
+define void @a(i64* nocapture %s, i64* nocapture %t, i64 %a, i64 %b,
+i64 %c) nounwind {
+entry:
+ %0 = zext i64 %a to i128 ; <i128> [#uses=1]
+ %1 = zext i64 %b to i128 ; <i128> [#uses=1]
+ %2 = add i128 %1, %0 ; <i128> [#uses=2]
+ %3 = zext i64 %c to i128 ; <i128> [#uses=1]
+ %4 = shl i128 %3, 64 ; <i128> [#uses=1]
+ %5 = add i128 %4, %2 ; <i128> [#uses=1]
+ %6 = lshr i128 %5, 64 ; <i128> [#uses=1]
+ %7 = trunc i128 %6 to i64 ; <i64> [#uses=1]
+ store i64 %7, i64* %s, align 8
+ %8 = trunc i128 %2 to i64 ; <i64> [#uses=1]
+ store i64 %8, i64* %t, align 8
+ ret void
+}
+
+Generated code:
+ addq %rcx, %rdx
+ movl $0, %eax
+ adcq $0, %rax
+ addq %r8, %rax
+ movq %rax, (%rdi)
+ movq %rdx, (%rsi)
+ ret
+
+Expected code:
+ addq %rcx, %rdx
+ adcq $0, %r8
+ movq %r8, (%rdi)
+ movq %rdx, (%rsi)
+ ret
+
+The generated SelectionDAG has an ADD of an ADDE, where both operands of the
+ADDE are zero. Replacing one of the operands of the ADDE with the other operand
+of the ADD, and replacing the ADD with the ADDE, should give the desired result.
+
+(That said, we are doing a lot better than gcc on this testcase. :) )
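What the testcase computes, restated as plain C++ (assuming a compiler with unsigned __int128, e.g. gcc or clang on x86-64): t receives the low 64 bits of a+b, and s receives c plus the carry out of that addition, which is why a single addq/adcq pair is the expected lowering. The function f below mirrors @a:

#include <cassert>
#include <cstdint>

void f(uint64_t *s, uint64_t *t, uint64_t a, uint64_t b, uint64_t c) {
  unsigned __int128 lo = (unsigned __int128)a + b;            // addq
  unsigned __int128 sum = lo + ((unsigned __int128)c << 64);
  *s = (uint64_t)(sum >> 64);   // c plus the carry out of a+b -> adcq
  *t = (uint64_t)lo;            // low 64 bits of a+b
}

int main() {
  uint64_t s, t;
  f(&s, &t, ~0ull, 1, 5);       // a+b wraps, producing a carry
  assert(t == 0 && s == 6);     // s == c + carry == 5 + 1
}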
+
+//===---------------------------------------------------------------------===//
+
+Switch lowering generates less than ideal code for the following switch:
+define void @a(i32 %x) nounwind {
+entry:
+ switch i32 %x, label %if.end [
+ i32 0, label %if.then
+ i32 1, label %if.then
+ i32 2, label %if.then
+ i32 3, label %if.then
+ i32 5, label %if.then
+ ]
+if.then:
+ tail call void @foo() nounwind
+ ret void
+if.end:
+ ret void
+}
+declare void @foo()
+
+Generated code on x86-64 (other platforms give similar results):
+a:
+ cmpl $5, %edi
+ ja .LBB0_2
+ movl %edi, %eax
+ movl $47, %ecx
+ btq %rax, %rcx
+ jb .LBB0_3
+.LBB0_2:
+ ret
+.LBB0_3:
+ jmp foo # TAILCALL
+
+The movl+movl+btq+jb could be simplified to a cmpl+jne.
+
+Or, if we wanted to be really clever, we could simplify the whole thing to
+something like the following, which eliminates a branch:
+ xorl $1, %edi
+ cmpl $4, %edi
+ jbe .LBB0_2
+ ret
+.LBB0_2:
+ jmp foo # TAILCALL
+
+//===---------------------------------------------------------------------===//
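The clever lowering rests on the identity x in {0,1,2,3,5} <=> (x ^ 1) <= 4 as an unsigned comparison: xor with 1 maps the case set onto the contiguous range 0..4, so values in the set branch to the tail call on below-or-equal and everything else falls through to the return. A standalone check:

#include <cassert>
#include <cstdint>

bool inSet(uint32_t x) {
  return x == 0 || x == 1 || x == 2 || x == 3 || x == 5;
}

bool viaXor(uint32_t x) { return (x ^ 1u) <= 4u; }

int main() {
  for (uint32_t x = 0; x < 256; ++x)
    assert(inSet(x) == viaXor(x));
  assert(viaXor(0xffffffffu) == false);   // large values stay out of range
}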
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index f47e53a..4099a62 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -38,6 +38,7 @@ SDValue
SparcTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to locations.
@@ -66,7 +67,7 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- Outs[i].Val, Flag);
+ OutVals[i], Flag);
// Guarantee that all emitted copies are stuck together with flags.
Flag = Chain.getValue(1);
@@ -133,7 +134,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
InVals.push_back(Arg);
} else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
- true, false);
+ true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
SDValue Load;
if (ObjectVT == MVT::i32) {
@@ -146,7 +147,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8);
FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
DAG.getConstant(Offset, MVT::i32));
- Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
+ Load = DAG.getExtLoad(LoadOp, MVT::i32, dl, Chain, FIPtr,
NULL, 0, ObjectVT, false, false, 0);
Load = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Load);
}
@@ -169,7 +170,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
InVals.push_back(Arg);
} else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
- true, false);
+ true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
SDValue Load = DAG.getLoad(MVT::f32, dl, Chain, FIPtr, NULL, 0,
false, false, 0);
@@ -192,7 +193,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
} else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
- true, false);
+ true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0,
false, false, 0);
@@ -205,7 +206,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
LoVal = DAG.getCopyFromReg(Chain, dl, VRegLo, MVT::i32);
} else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset+4,
- true, false);
+ true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0,
false, false, 0);
@@ -239,7 +240,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
- true, false);
+ true);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, NULL, 0,
@@ -262,6 +263,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -283,7 +285,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Count the size of the outgoing arguments.
unsigned ArgsSize = 0;
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- switch (Outs[i].Val.getValueType().getSimpleVT().SimpleTy) {
+ switch (Outs[i].VT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unknown value type!");
case MVT::i1:
case MVT::i8:
@@ -316,7 +318,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
// Promote the value if needed.
switch (VA.getLocInfo()) {
@@ -358,8 +360,8 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
unsigned ArgOffset = 68;
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
- SDValue Val = Outs[i].Val;
- EVT ObjectVT = Val.getValueType();
+ SDValue Val = OutVals[i];
+ EVT ObjectVT = Outs[i].VT;
SDValue ValToStore(0, 0);
unsigned ObjSize;
switch (ObjectVT.getSimpleVT().SimpleTy) {
@@ -478,7 +480,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
@@ -737,7 +739,7 @@ void SparcTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
ISD::CondCode CC, unsigned &SPCC) {
if (isa<ConstantSDNode>(RHS) &&
- cast<ConstantSDNode>(RHS)->getZExtValue() == 0 &&
+ cast<ConstantSDNode>(RHS)->isNullValue() &&
CC == ISD::SETNE &&
((LHS.getOpcode() == SPISD::SELECT_ICC &&
LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
@@ -745,8 +747,8 @@ static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
isa<ConstantSDNode>(LHS.getOperand(0)) &&
isa<ConstantSDNode>(LHS.getOperand(1)) &&
- cast<ConstantSDNode>(LHS.getOperand(0))->getZExtValue() == 1 &&
- cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 0) {
+ cast<ConstantSDNode>(LHS.getOperand(0))->isOne() &&
+ cast<ConstantSDNode>(LHS.getOperand(1))->isNullValue()) {
SDValue CMPCC = LHS.getOperand(3);
SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
LHS = CMPCC.getOperand(0);
@@ -759,7 +761,7 @@ SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
// FIXME there isn't really any debug info here
DebugLoc dl = Op.getDebugLoc();
- SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32);
SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, GA);
SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, GA);
@@ -1007,21 +1009,20 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineFunction *F = BB->getParent();
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
+ // Add the true and fallthrough blocks as its successors.
+ BB->addSuccessor(copy0MBB);
+ BB->addSuccessor(sinkMBB);
+
BuildMI(BB, dl, TII.get(BROpcode)).addMBB(sinkMBB).addImm(CC);
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
- E = BB->succ_end(); I != E; ++I)
- sinkMBB->addSuccessor(*I);
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while (!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
- // Next, add the true and fallthrough blocks as its successors.
- BB->addSuccessor(copy0MBB);
- BB->addSuccessor(sinkMBB);
// copy0MBB:
// %FalseValue = ...
@@ -1035,11 +1036,11 @@ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(BB, dl, TII.get(SP::PHI), MI->getOperand(0).getReg())
+ BuildMI(*BB, BB->begin(), dl, TII.get(SP::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
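The splice/transferSuccessorsAndUpdatePHIs pattern used in these custom inserters moves everything after the pseudo instruction into the new sink block in O(1), without copying instructions, much like std::list::splice. A toy model of just the splice step:

#include <iostream>
#include <iterator>
#include <list>

int main() {
  std::list<const char *> BB{"cmp", "SELECT_CC", "use1", "use2"};
  std::list<const char *> sinkMBB;

  auto MI = std::next(BB.begin());               // iterator at the pseudo op
  sinkMBB.splice(sinkMBB.begin(), BB, std::next(MI), BB.end());

  for (auto *I : BB) std::cout << I << ' ';      // cmp SELECT_CC
  std::cout << "| ";
  for (auto *I : sinkMBB) std::cout << I << ' '; // use1 use2
  std::cout << '\n';
}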
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
index 5ebdcac..db39e08 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -86,6 +86,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -94,6 +95,7 @@ namespace llvm {
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index 8e49eca..3a4c80a 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -109,38 +109,29 @@ unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
unsigned
SparcInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond)const{
- // FIXME this should probably take a DebugLoc argument
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL)const{
// Can only insert uncond branches so far.
assert(Cond.empty() && !FBB && TBB && "Can only handle uncond branches!");
- BuildMI(&MBB, dl, get(SP::BA)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB);
return 1;
}
-bool SparcInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
- if (DestRC != SrcRC) {
- // Not yet supported!
- return false;
- }
-
- if (DestRC == SP::IntRegsRegisterClass)
- BuildMI(MBB, I, DL, get(SP::ORrr), DestReg).addReg(SP::G0).addReg(SrcReg);
- else if (DestRC == SP::FPRegsRegisterClass)
- BuildMI(MBB, I, DL, get(SP::FMOVS), DestReg).addReg(SrcReg);
- else if (DestRC == SP::DFPRegsRegisterClass)
- BuildMI(MBB, I, DL, get(Subtarget.isV9() ? SP::FMOVD : SP::FpMOVD),DestReg)
- .addReg(SrcReg);
+void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ if (SP::IntRegsRegClass.contains(DestReg, SrcReg))
+ BuildMI(MBB, I, DL, get(SP::ORrr), DestReg).addReg(SP::G0)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (SP::FPRegsRegClass.contains(DestReg, SrcReg))
+ BuildMI(MBB, I, DL, get(SP::FMOVS), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (SP::DFPRegsRegClass.contains(DestReg, SrcReg))
+ BuildMI(MBB, I, DL, get(Subtarget.isV9() ? SP::FMOVD : SP::FpMOVD), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
else
- // Can't copy this register
- return false;
-
- return true;
+ llvm_unreachable("Impossible reg-to-reg copy");
}
void SparcInstrInfo::
@@ -183,61 +174,6 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
llvm_unreachable("Can't load this register from stack slot");
}
-MachineInstr *SparcInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FI) const {
- if (Ops.size() != 1) return NULL;
-
- unsigned OpNum = Ops[0];
- bool isFloat = false;
- MachineInstr *NewMI = NULL;
- switch (MI->getOpcode()) {
- case SP::ORrr:
- if (MI->getOperand(1).isReg() && MI->getOperand(1).getReg() == SP::G0&&
- MI->getOperand(0).isReg() && MI->getOperand(2).isReg()) {
- if (OpNum == 0) // COPY -> STORE
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(SP::STri))
- .addFrameIndex(FI)
- .addImm(0)
- .addReg(MI->getOperand(2).getReg());
- else // COPY -> LOAD
- NewMI = BuildMI(MF, MI->getDebugLoc(), get(SP::LDri),
- MI->getOperand(0).getReg())
- .addFrameIndex(FI)
- .addImm(0);
- }
- break;
- case SP::FMOVS:
- isFloat = true;
- // FALLTHROUGH
- case SP::FMOVD:
- if (OpNum == 0) { // COPY -> STORE
- unsigned SrcReg = MI->getOperand(1).getReg();
- bool isKill = MI->getOperand(1).isKill();
- bool isUndef = MI->getOperand(1).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(),
- get(isFloat ? SP::STFri : SP::STDFri))
- .addFrameIndex(FI)
- .addImm(0)
- .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef));
- } else { // COPY -> LOAD
- unsigned DstReg = MI->getOperand(0).getReg();
- bool isDead = MI->getOperand(0).isDead();
- bool isUndef = MI->getOperand(0).isUndef();
- NewMI = BuildMI(MF, MI->getDebugLoc(),
- get(isFloat ? SP::LDFri : SP::LDDFri))
- .addReg(DstReg, RegState::Define |
- getDeadRegState(isDead) | getUndefRegState(isUndef))
- .addFrameIndex(FI)
- .addImm(0);
- }
- break;
- }
-
- return NewMI;
-}
-
unsigned SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const
{
SparcMachineFunctionInfo *SparcFI = MF->getInfo<SparcMachineFunctionInfo>();
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
index a00ba39..1334718 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
@@ -68,14 +68,13 @@ public:
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -89,18 +88,6 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
-
- virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const {
- return 0;
- }
-
unsigned getGlobalBaseReg(MachineFunction *MF) const;
};
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
index 9489580..ddadd51 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -665,7 +665,7 @@ let Defs = [FCC] in {
//===----------------------------------------------------------------------===//
// V9 Conditional Moves.
-let Predicates = [HasV9], isTwoAddress = 1 in {
+let Predicates = [HasV9], Constraints = "$T = $dst" in {
// Move Integer Register on Condition (MOVcc) p. 194 of the V9 manual.
// FIXME: Add instruction encodings for the JIT some day.
def MOVICCrr
diff --git a/contrib/llvm/lib/Target/Sparc/SparcMCAsmInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcMCAsmInfo.cpp
index 535c6f7..d37d6d2 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcMCAsmInfo.cpp
@@ -12,10 +12,9 @@
//===----------------------------------------------------------------------===//
#include "SparcMCAsmInfo.h"
-#include "llvm/ADT/SmallVector.h"
using namespace llvm;
-SparcELFMCAsmInfo::SparcELFMCAsmInfo(const Target &T, const StringRef &TT) {
+SparcELFMCAsmInfo::SparcELFMCAsmInfo(const Target &T, StringRef TT) {
Data16bitsDirective = "\t.half\t";
Data32bitsDirective = "\t.word\t";
Data64bitsDirective = 0; // .xword is only supported by V9.
diff --git a/contrib/llvm/lib/Target/Sparc/SparcMCAsmInfo.h b/contrib/llvm/lib/Target/Sparc/SparcMCAsmInfo.h
index 12d6ef4..0cb6827 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcMCAsmInfo.h
@@ -14,13 +14,14 @@
#ifndef SPARCTARGETASMINFO_H
#define SPARCTARGETASMINFO_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
class Target;
- class StringRef;
+
struct SparcELFMCAsmInfo : public MCAsmInfo {
- explicit SparcELFMCAsmInfo(const Target &T, const StringRef &TT);
+ explicit SparcELFMCAsmInfo(const Target &T, StringRef TT);
};
} // namespace llvm
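The signature change reflects that a StringRef is only a pointer/length pair, so passing it by value costs the same as passing a reference and removes an indirection. A minimal stand-in (not the real llvm::StringRef):

#include <cstddef>
#include <cstdio>
#include <cstring>

struct MiniStringRef {
  const char *Data;
  size_t Length;
  MiniStringRef(const char *S) : Data(S), Length(std::strlen(S)) {}
};

void useTriple(MiniStringRef TT) {        // by value: two machine words
  std::printf("%.*s\n", (int)TT.Length, TT.Data);
}

int main() { useTriple("sparc-unknown-linux"); }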
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
index 08373bb8..427cc7f 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
@@ -52,13 +52,6 @@ BitVector SparcRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
-
-const TargetRegisterClass* const*
-SparcRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- static const TargetRegisterClass * const CalleeSavedRegClasses[] = { 0 };
- return CalleeSavedRegClasses;
-}
-
bool SparcRegisterInfo::hasFP(const MachineFunction &MF) const {
return false;
}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
index 24d43e3..9f0cda7 100644
--- a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
@@ -32,9 +32,6 @@ struct SparcRegisterInfo : public SparcGenRegisterInfo {
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
- const TargetRegisterClass* const* getCalleeSavedRegClasses(
- const MachineFunction *MF = 0) const;
-
BitVector getReservedRegs(const MachineFunction &MF) const;
bool hasFP(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/SystemZ/AsmPrinter/SystemZAsmPrinter.cpp b/contrib/llvm/lib/Target/SystemZ/AsmPrinter/SystemZAsmPrinter.cpp
index 90be222..d7ac8f5 100644
--- a/contrib/llvm/lib/Target/SystemZ/AsmPrinter/SystemZAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/AsmPrinter/SystemZAsmPrinter.cpp
@@ -124,7 +124,7 @@ void SystemZAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
unsigned Reg = MO.getReg();
if (Modifier && strncmp(Modifier, "subreg", 6) == 0) {
if (strncmp(Modifier + 7, "even", 4) == 0)
- Reg = TM.getRegisterInfo()->getSubReg(Reg, SystemZ::subreg_even32);
+ Reg = TM.getRegisterInfo()->getSubReg(Reg, SystemZ::subreg_32bit);
else if (strncmp(Modifier + 7, "odd", 3) == 0)
Reg = TM.getRegisterInfo()->getSubReg(Reg, SystemZ::subreg_odd32);
else
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index bb2952a..ed290ca 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -670,7 +670,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
// Copy the remainder (even subreg) result, if it is needed.
if (!SDValue(Node, 1).use_empty()) {
unsigned SubRegIdx = (is32Bit ?
- SystemZ::subreg_even32 : SystemZ::subreg_even);
+ SystemZ::subreg_32bit : SystemZ::subreg_even);
SDNode *Rem = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
dl, NVT,
SDValue(Result, 0),
@@ -754,7 +754,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
// Copy the remainder (even subreg) result, if it is needed.
if (!SDValue(Node, 1).use_empty()) {
unsigned SubRegIdx = (is32Bit ?
- SystemZ::subreg_even32 : SystemZ::subreg_even);
+ SystemZ::subreg_32bit : SystemZ::subreg_even);
SDNode *Rem = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
dl, NVT,
SDValue(Result, 0),
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 76f2901..67f739f 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -254,6 +254,7 @@ SystemZTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -266,7 +267,7 @@ SystemZTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
case CallingConv::Fast:
case CallingConv::C:
return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
- Outs, Ins, dl, DAG, InVals);
+ Outs, OutVals, Ins, dl, DAG, InVals);
}
}
@@ -334,7 +335,7 @@ SystemZTargetLowering::LowerCCCArguments(SDValue Chain,
// Create the nodes corresponding to a load from this parameter slot.
// Create the frame index object for this incoming parameter...
int FI = MFI->CreateFixedObject(LocVT.getSizeInBits()/8,
- VA.getLocMemOffset(), true, false);
+ VA.getLocMemOffset(), true);
// Create the SelectionDAG nodes corresponding to a load
// from this parameter
@@ -372,6 +373,7 @@ SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg>
&Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -402,7 +404,7 @@ SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
// Promote the value if needed.
switch (VA.getLocInfo()) {
@@ -464,7 +466,7 @@ SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy());
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy());
@@ -550,6 +552,7 @@ SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to a location
@@ -575,7 +578,7 @@ SystemZTargetLowering::LowerReturn(SDValue Chain,
// Copy the result values into the output registers.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
- SDValue ResValue = Outs[i].Val;
+ SDValue ResValue = OutVals[i];
assert(VA.isRegLoc() && "Can only return in registers!");
// If this is an 8/16/32-bit value, it is really should be passed promoted
@@ -729,14 +732,14 @@ SDValue SystemZTargetLowering::LowerGlobalAddress(SDValue Op,
SDValue Result;
if (!IsPic && !ExtraLoadRequired) {
- Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
+ Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
Offset = 0;
} else {
unsigned char OpFlags = 0;
if (ExtraLoadRequired)
OpFlags = SystemZII::MO_GOTENT;
- Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), 0, OpFlags);
+ Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
}
Result = DAG.getNode(SystemZISD::PCRelativeWrapper, dl,
@@ -827,16 +830,20 @@ SystemZTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);
SystemZCC::CondCodes CC = (SystemZCC::CondCodes)MI->getOperand(3).getImm();
- BuildMI(BB, dl, TII.getBrCond(CC)).addMBB(copy1MBB);
F->insert(I, copy0MBB);
F->insert(I, copy1MBB);
// Update machine-CFG edges by transferring all successors of the current
// block to the new block which will contain the Phi node for the select.
- copy1MBB->transferSuccessors(BB);
+ copy1MBB->splice(copy1MBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
// Next, add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(copy1MBB);
+ BuildMI(BB, dl, TII.getBrCond(CC)).addMBB(copy1MBB);
+
// copy0MBB:
// %FalseValue = ...
// # fallthrough to copy1MBB
@@ -849,11 +856,11 @@ SystemZTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = copy1MBB;
- BuildMI(BB, dl, TII.get(SystemZ::PHI),
+ BuildMI(*BB, BB->begin(), dl, TII.get(SystemZ::PHI),
MI->getOperand(0).getReg())
.addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 94bd906..51d2df3 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -98,6 +98,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -126,6 +127,7 @@ namespace llvm {
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -134,6 +136,7 @@ namespace llvm {
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
const SystemZSubtarget &Subtarget;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
index 8c5e905..a658280 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
@@ -126,7 +126,7 @@ def FNABS64rr : Pseudo<(outs FP64:$dst), (ins FP64:$src),
(implicit PSW)]>;
}
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Defs = [PSW] in {
let isCommutable = 1 in { // X = ADD Y, Z == X = ADD Z, Y
def FADD32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2),
@@ -237,7 +237,7 @@ def FDIV64rm : Pseudo<(outs FP64:$dst), (ins FP64:$src1, rriaddr12:$src2),
"ddb\t{$dst, $src2}",
[(set FP64:$dst, (fdiv FP64:$src1, (load rriaddr12:$src2)))]>;
-} // isTwoAddress = 1
+} // Constraints = "$src1 = $dst"
def FSQRT32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src),
"sqebr\t{$dst, $src}",
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 043686c..c03864f 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -117,59 +117,28 @@ void SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
}
-bool SystemZInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
-
- // Determine if DstRC and SrcRC have a common superclass.
- const TargetRegisterClass *CommonRC = DestRC;
- if (DestRC == SrcRC)
- /* Same regclass for source and dest */;
- else if (CommonRC->hasSuperClass(SrcRC))
- CommonRC = SrcRC;
- else if (!CommonRC->hasSubClass(SrcRC))
- CommonRC = 0;
-
- if (CommonRC) {
- if (CommonRC == &SystemZ::GR64RegClass ||
- CommonRC == &SystemZ::ADDR64RegClass) {
- BuildMI(MBB, I, DL, get(SystemZ::MOV64rr), DestReg).addReg(SrcReg);
- } else if (CommonRC == &SystemZ::GR32RegClass ||
- CommonRC == &SystemZ::ADDR32RegClass) {
- BuildMI(MBB, I, DL, get(SystemZ::MOV32rr), DestReg).addReg(SrcReg);
- } else if (CommonRC == &SystemZ::GR64PRegClass) {
- BuildMI(MBB, I, DL, get(SystemZ::MOV64rrP), DestReg).addReg(SrcReg);
- } else if (CommonRC == &SystemZ::GR128RegClass) {
- BuildMI(MBB, I, DL, get(SystemZ::MOV128rr), DestReg).addReg(SrcReg);
- } else if (CommonRC == &SystemZ::FP32RegClass) {
- BuildMI(MBB, I, DL, get(SystemZ::FMOV32rr), DestReg).addReg(SrcReg);
- } else if (CommonRC == &SystemZ::FP64RegClass) {
- BuildMI(MBB, I, DL, get(SystemZ::FMOV64rr), DestReg).addReg(SrcReg);
- } else {
- return false;
- }
-
- return true;
- }
-
- if ((SrcRC == &SystemZ::GR64RegClass &&
- DestRC == &SystemZ::ADDR64RegClass) ||
- (DestRC == &SystemZ::GR64RegClass &&
- SrcRC == &SystemZ::ADDR64RegClass)) {
- BuildMI(MBB, I, DL, get(SystemZ::MOV64rr), DestReg).addReg(SrcReg);
- return true;
- } else if ((SrcRC == &SystemZ::GR32RegClass &&
- DestRC == &SystemZ::ADDR32RegClass) ||
- (DestRC == &SystemZ::GR32RegClass &&
- SrcRC == &SystemZ::ADDR32RegClass)) {
- BuildMI(MBB, I, DL, get(SystemZ::MOV32rr), DestReg).addReg(SrcReg);
- return true;
- }
-
- return false;
+void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ unsigned Opc;
+ if (SystemZ::GR64RegClass.contains(DestReg, SrcReg))
+ Opc = SystemZ::MOV64rr;
+ else if (SystemZ::GR32RegClass.contains(DestReg, SrcReg))
+ Opc = SystemZ::MOV32rr;
+ else if (SystemZ::GR64PRegClass.contains(DestReg, SrcReg))
+ Opc = SystemZ::MOV64rrP;
+ else if (SystemZ::GR128RegClass.contains(DestReg, SrcReg))
+ Opc = SystemZ::MOV128rr;
+ else if (SystemZ::FP32RegClass.contains(DestReg, SrcReg))
+ Opc = SystemZ::FMOV32rr;
+ else if (SystemZ::FP64RegClass.contains(DestReg, SrcReg))
+ Opc = SystemZ::FMOV64rr;
+ else
+ llvm_unreachable("Impossible reg-to-reg copy");
+
+ BuildMI(MBB, I, DL, get(Opc), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
}
bool
@@ -286,8 +255,7 @@ SystemZInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
unsigned LowReg = 0, HighReg = 0, StartOffset = -1U, EndOffset = 0;
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- const TargetRegisterClass *RegClass = CSI[i].getRegClass();
- if (RegClass != &SystemZ::FP64RegClass) {
+ if (!SystemZ::FP64RegClass.contains(Reg)) {
unsigned Offset = RegSpillOffsets[Reg];
CalleeFrameSize += 8;
if (StartOffset > Offset) {
@@ -332,11 +300,10 @@ SystemZInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
// Save FPRs
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- const TargetRegisterClass *RegClass = CSI[i].getRegClass();
- if (RegClass == &SystemZ::FP64RegClass) {
+ if (SystemZ::FP64RegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
- storeRegToStackSlot(MBB, MI, Reg, true, CSI[i].getFrameIdx(), RegClass,
- &RI);
+ storeRegToStackSlot(MBB, MI, Reg, true, CSI[i].getFrameIdx(),
+ &SystemZ::FP64RegClass, &RI);
}
}
@@ -361,9 +328,9 @@ SystemZInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
// Restore FP registers
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- const TargetRegisterClass *RegClass = CSI[i].getRegClass();
- if (RegClass == &SystemZ::FP64RegClass)
- loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RegClass, &RI);
+ if (SystemZ::FP64RegClass.contains(Reg))
+ loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
+ &SystemZ::FP64RegClass, &RI);
}
// Restore GP registers
@@ -523,9 +490,8 @@ unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
unsigned
SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME: this should probably have a DebugLoc operand
- DebugLoc DL;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index a753f14..0559619 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -60,11 +60,10 @@ public:
///
virtual const SystemZRegisterInfo &getRegisterInfo() const { return RI; }
- bool copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
bool isMoveInstr(const MachineInstr& MI,
unsigned &SrcReg, unsigned &DstReg,
@@ -102,7 +101,8 @@ public:
bool AllowModify) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
SystemZCC::CondCodes getOppositeCondition(SystemZCC::CondCodes CC) const;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
index 22bde4e..8df07c0 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -478,7 +478,8 @@ def MOV64rmm : RSYI<0x04EB,
"lmg\t{$from, $to, $dst}",
[]>;
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isTwoAddress = 1 in {
+let isReMaterializable = 1, isAsCheapAsAMove = 1,
+ Constraints = "$src = $dst" in {
def MOV64Pr0_even : Pseudo<(outs GR64P:$dst), (ins GR64P:$src),
"lhi\t${dst:subreg_even}, 0",
[]>;
@@ -537,7 +538,7 @@ def NEG64rr32 : RREI<0xB913, (outs GR64:$dst), (ins GR32:$src),
(implicit PSW)]>;
}
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Defs = [PSW] in {
@@ -924,12 +925,12 @@ def UDIVREM64m : RXYI<0xE387, (outs GR128:$dst), (ins GR128:$src1, rriaddr:$src2
"dlg\t{$dst, $src2}",
[]>;
} // mayLoad
-} // isTwoAddress = 1
+} // Constraints = "$src1 = $dst"
//===----------------------------------------------------------------------===//
// Shifts
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
def SRL32rri : RSI<0x88,
(outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
"srl\t{$src, $amt}",
@@ -939,7 +940,7 @@ def SRL64rri : RSYI<0xEB0C,
"srlg\t{$dst, $src, $amt}",
[(set GR64:$dst, (srl GR64:$src, riaddr:$amt))]>;
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
def SHL32rri : RSI<0x89,
(outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
"sll\t{$src, $amt}",
@@ -950,7 +951,7 @@ def SHL64rri : RSYI<0xEB0D,
[(set GR64:$dst, (shl GR64:$src, riaddr:$amt))]>;
let Defs = [PSW] in {
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
def SRA32rri : RSI<0x8A,
(outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
"sra\t{$src, $amt}",
@@ -1129,13 +1130,13 @@ def : Pat<(mulhs GR32:$src1, GR32:$src2),
(EXTRACT_SUBREG (MUL64rrP (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
GR32:$src1, subreg_odd32),
GR32:$src2),
- subreg_even32)>;
+ subreg_32bit)>;
def : Pat<(mulhu GR32:$src1, GR32:$src2),
(EXTRACT_SUBREG (UMUL64rrP (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
GR32:$src1, subreg_odd32),
GR32:$src2),
- subreg_even32)>;
+ subreg_32bit)>;
def : Pat<(mulhu GR64:$src1, GR64:$src2),
(EXTRACT_SUBREG (UMUL128rrP (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
GR64:$src1, subreg_odd),
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.cpp
index f9ccc47..4f7f70b 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.cpp
@@ -16,7 +16,7 @@
#include "llvm/MC/MCSectionELF.h"
using namespace llvm;
-SystemZMCAsmInfo::SystemZMCAsmInfo(const Target &T, const StringRef &TT) {
+SystemZMCAsmInfo::SystemZMCAsmInfo(const Target &T, StringRef TT) {
PrivateGlobalPrefix = ".L";
WeakRefDirective = "\t.weak\t";
PCSymbol = ".";
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.h
index 87908f2..a6a27e2 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMCAsmInfo.h
@@ -21,7 +21,7 @@ namespace llvm {
class StringRef;
struct SystemZMCAsmInfo : public MCAsmInfo {
- explicit SystemZMCAsmInfo(const Target &T, const StringRef &TT);
+ explicit SystemZMCAsmInfo(const Target &T, StringRef TT);
virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const;
};
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
index 638fd17..ae96b0b 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
@@ -47,22 +47,6 @@ SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
return CalleeSavedRegs;
}
-const TargetRegisterClass* const*
-SystemZRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- static const TargetRegisterClass * const CalleeSavedRegClasses[] = {
- &SystemZ::GR64RegClass, &SystemZ::GR64RegClass,
- &SystemZ::GR64RegClass, &SystemZ::GR64RegClass,
- &SystemZ::GR64RegClass, &SystemZ::GR64RegClass,
- &SystemZ::GR64RegClass, &SystemZ::GR64RegClass,
- &SystemZ::GR64RegClass, &SystemZ::GR64RegClass,
- &SystemZ::FP64RegClass, &SystemZ::FP64RegClass,
- &SystemZ::FP64RegClass, &SystemZ::FP64RegClass,
- &SystemZ::FP64RegClass, &SystemZ::FP64RegClass,
- &SystemZ::FP64RegClass, &SystemZ::FP64RegClass, 0
- };
- return CalleeSavedRegClasses;
-}
-
BitVector SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
if (hasFP(MF))
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
index 42aa5dd..670025f 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
@@ -32,9 +32,6 @@ struct SystemZRegisterInfo : public SystemZGenRegisterInfo {
/// Code Generation virtual methods...
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
- const TargetRegisterClass* const* getCalleeSavedRegClasses(
- const MachineFunction *MF = 0) const;
-
BitVector getReservedRegs(const MachineFunction &MF) const;
bool hasReservedCallFrame(MachineFunction &MF) const { return true; }
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
index b561744..33be8dd 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
@@ -55,7 +55,6 @@ class FPRL<bits<4> num, string n, list<Register> subregs>
let Namespace = "SystemZ" in {
def subreg_32bit : SubRegIndex;
-def subreg_even32 : SubRegIndex;
def subreg_odd32 : SubRegIndex;
def subreg_even : SubRegIndex;
def subreg_odd : SubRegIndex;
@@ -99,7 +98,7 @@ def R15D : GPR64<15, "r15", [R15W]>, DwarfRegNum<[15]>;
}
// Register pairs
-let SubRegIndices = [subreg_even32, subreg_odd32] in {
+let SubRegIndices = [subreg_32bit, subreg_odd32] in {
def R0P : GPR64< 0, "r0", [R0W, R1W], [R0D, R1D]>, DwarfRegNum<[0]>;
def R2P : GPR64< 2, "r2", [R2W, R3W], [R2D, R3D]>, DwarfRegNum<[2]>;
def R4P : GPR64< 4, "r4", [R4W, R5W], [R4D, R5D]>, DwarfRegNum<[4]>;
@@ -111,8 +110,7 @@ def R14P : GPR64<14, "r14", [R14W, R15W], [R14D, R15D]>, DwarfRegNum<[14]>;
}
let SubRegIndices = [subreg_even, subreg_odd],
- CompositeIndices = [(subreg_even32 subreg_even, subreg_32bit),
- (subreg_odd32 subreg_odd, subreg_32bit)] in {
+ CompositeIndices = [(subreg_odd32 subreg_odd, subreg_32bit)] in {
def R0Q : GPR128< 0, "r0", [R0D, R1D], [R0P]>, DwarfRegNum<[0]>;
def R2Q : GPR128< 2, "r2", [R2D, R3D], [R2P]>, DwarfRegNum<[2]>;
def R4Q : GPR128< 4, "r4", [R4D, R5D], [R4P]>, DwarfRegNum<[4]>;
@@ -355,7 +353,7 @@ def ADDR64 : RegisterClass<"SystemZ", [i64], 64,
def GR64P : RegisterClass<"SystemZ", [v2i32], 64,
[R0P, R2P, R4P, R6P, R8P, R10P, R12P, R14P]>
{
- let SubRegClasses = [(GR32 subreg_even32, subreg_odd32)];
+ let SubRegClasses = [(GR32 subreg_32bit, subreg_odd32)];
let MethodProtos = [{
iterator allocation_order_begin(const MachineFunction &MF) const;
iterator allocation_order_end(const MachineFunction &MF) const;
@@ -391,7 +389,7 @@ def GR64P : RegisterClass<"SystemZ", [v2i32], 64,
def GR128 : RegisterClass<"SystemZ", [v2i64], 128,
[R0Q, R2Q, R4Q, R6Q, R8Q, R10Q, R12Q, R14Q]>
{
- let SubRegClasses = [(GR32 subreg_even32, subreg_odd32),
+ let SubRegClasses = [(GR32 subreg_32bit, subreg_odd32),
(GR64 subreg_even, subreg_odd)];
let MethodProtos = [{
iterator allocation_order_begin(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Target/TargetInstrInfo.cpp b/contrib/llvm/lib/Target/TargetInstrInfo.cpp
index 094a57e..c099a7e 100644
--- a/contrib/llvm/lib/Target/TargetInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetInstrInfo.cpp
@@ -28,6 +28,10 @@ const TargetRegisterClass *
TargetOperandInfo::getRegClass(const TargetRegisterInfo *TRI) const {
if (isLookupPtrRegClass())
return TRI->getPointerRegClass(RegClass);
+ // Instructions like INSERT_SUBREG do not have fixed register classes.
+ if (RegClass < 0)
+ return 0;
+ // Otherwise just look it up normally.
return TRI->getRegClass(RegClass);
}
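
A toy illustration of the sentinel convention the added check relies on (plain C++, hypothetical names; the real descriptor tables are TableGen-generated): a negative class index means the operand has no fixed register class, so the lookup must bail out before indexing.

#include <cassert>

// Toy operand descriptor; index -1 plays the INSERT_SUBREG-style
// "no fixed register class" role the new RegClass < 0 check handles.
struct ToyOperandInfo { int RegClass; };

static const char *const ClassNames[] = { "GR32", "GR64" };

const char *lookupClass(const ToyOperandInfo &OI) {
  if (OI.RegClass < 0)
    return 0;                      // unconstrained: nothing to look up
  return ClassNames[OI.RegClass];  // otherwise a normal table lookup
}

int main() {
  ToyOperandInfo Fixed = { 1 }, Unconstrained = { -1 };
  assert(lookupClass(Fixed) != 0 && lookupClass(Unconstrained) == 0);
}
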
diff --git a/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp b/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
index b9372d0..dd7b532 100644
--- a/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
+++ b/contrib/llvm/lib/Target/TargetLoweringObjectFile.cpp
@@ -101,7 +101,7 @@ static bool IsNullTerminatedString(const Constant *C) {
ConstantInt *Null =
dyn_cast<ConstantInt>(CVA->getOperand(ATy->getNumElements()-1));
- if (Null == 0 || Null->getZExtValue() != 0)
+ if (Null == 0 || !Null->isZero())
return false; // Not null terminated.
// Verify that the null doesn't occur anywhere else in the string.
diff --git a/contrib/llvm/lib/Target/TargetMachine.cpp b/contrib/llvm/lib/Target/TargetMachine.cpp
index df52368..47c91df 100644
--- a/contrib/llvm/lib/Target/TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/TargetMachine.cpp
@@ -294,7 +294,7 @@ namespace llvm {
/// option is specified on the command line. If this returns false (default),
/// the code generator is not allowed to assume that FP arithmetic arguments
/// and results are never NaNs or +-Infs.
- bool FiniteOnlyFPMath() { return UnsafeFPMath || FiniteOnlyFPMathOption; }
+ bool FiniteOnlyFPMath() { return FiniteOnlyFPMathOption; }
/// HonorSignDependentRoundingFPMath - Return true if the codegen must assume
/// that the rounding mode of the FPU can change from its default.
diff --git a/contrib/llvm/lib/Target/TargetRegisterInfo.cpp b/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
index dcc5f61..49bfad5 100644
--- a/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/TargetRegisterInfo.cpp
@@ -39,20 +39,20 @@ TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterDesc *D, unsigned NR,
TargetRegisterInfo::~TargetRegisterInfo() {}
-/// getPhysicalRegisterRegClass - Returns the Register Class of a physical
-/// register of the given type. If type is EVT::Other, then just return any
-/// register class the register belongs to.
+/// getMinimalPhysRegClass - Returns the Register Class of a physical
+/// register of the given type, picking the smallest register class of
+/// the right type that contains this physreg.
const TargetRegisterClass *
-TargetRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, EVT VT) const {
+TargetRegisterInfo::getMinimalPhysRegClass(unsigned reg, EVT VT) const {
assert(isPhysicalRegister(reg) && "reg must be a physical register");
- // Pick the most super register class of the right type that contains
+ // Pick the smallest register class of the right type that contains
// this physreg.
const TargetRegisterClass* BestRC = 0;
for (regclass_iterator I = regclass_begin(), E = regclass_end(); I != E; ++I){
const TargetRegisterClass* RC = *I;
if ((VT == MVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
- (!BestRC || BestRC->hasSuperClass(RC)))
+ (!BestRC || BestRC->hasSubClass(RC)))
BestRC = RC;
}
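
A self-contained sketch of the super-to-sub flip (toy types, not the LLVM API): modelling a register class as a set of registers, replacing the best-so-far whenever it has the new candidate as a subclass converges on the smallest class containing the register, where the old hasSuperClass test converged on the largest.

#include <algorithm>
#include <set>
#include <string>
#include <vector>

// Toy register class: a named set of register numbers.
struct ToyRC {
  std::string Name;
  std::set<unsigned> Regs;
  bool contains(unsigned R) const { return Regs.count(R) != 0; }
  // RC is a subclass of *this when its registers are a subset of ours.
  bool hasSubClass(const ToyRC *RC) const {
    return std::includes(Regs.begin(), Regs.end(),
                         RC->Regs.begin(), RC->Regs.end());
  }
};

// Mirrors the patched loop: replace the best candidate whenever it has
// the new class as a subclass, i.e. keep shrinking toward minimality.
const ToyRC *getMinimalClass(const std::vector<ToyRC> &All, unsigned Reg) {
  const ToyRC *Best = 0;
  for (unsigned i = 0, e = All.size(); i != e; ++i)
    if (All[i].contains(Reg) && (!Best || Best->hasSubClass(&All[i])))
      Best = &All[i];
  return Best;
}

int main() {
  std::vector<ToyRC> All;
  All.push_back(ToyRC{"GR64",   {0, 1, 2, 3}});
  All.push_back(ToyRC{"ADDR64", {1, 2, 3}});    // hypothetical subclass
  // hasSubClass picks ADDR64; the old hasSuperClass version picked GR64.
  return getMinimalClass(All, 2)->Name == "ADDR64" ? 0 : 1;
}
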
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
index a58f58e..26797ab 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmLexer.cpp
@@ -33,13 +33,11 @@ class X86AsmLexer : public TargetAsmLexer {
}
const AsmToken &lexDefinite() {
- if(tentativeIsValid) {
+ if (tentativeIsValid) {
tentativeIsValid = false;
return tentativeToken;
}
- else {
- return getLexer()->Lex();
- }
+ return getLexer()->Lex();
}
AsmToken LexTokenATT();
@@ -72,38 +70,65 @@ public:
static unsigned MatchRegisterName(StringRef Name);
AsmToken X86AsmLexer::LexTokenATT() {
- const AsmToken lexedToken = lexDefinite();
+ AsmToken lexedToken = lexDefinite();
switch (lexedToken.getKind()) {
default:
- return AsmToken(lexedToken);
+ return lexedToken;
case AsmToken::Error:
SetError(Lexer->getErrLoc(), Lexer->getErr());
- return AsmToken(lexedToken);
- case AsmToken::Percent:
- {
+ return lexedToken;
+
+ case AsmToken::Percent: {
const AsmToken &nextToken = lexTentative();
- if (nextToken.getKind() == AsmToken::Identifier) {
- unsigned regID = MatchRegisterName(nextToken.getString());
+ if (nextToken.getKind() != AsmToken::Identifier)
+ return lexedToken;
+
- if (regID) {
- lexDefinite();
+ if (unsigned regID = MatchRegisterName(nextToken.getString())) {
+ lexDefinite();
+ // FIXME: This is completely wrong when there is a space or other
+ // punctuation between the % and the register name.
+ StringRef regStr(lexedToken.getString().data(),
+ lexedToken.getString().size() +
+ nextToken.getString().size());
+
+ return AsmToken(AsmToken::Register, regStr,
+ static_cast<int64_t>(regID));
+ }
+
+ // Matching the register name failed. If this is "db[0-7]", match it as
+ // an alias for dr[0-7].
+ if (nextToken.getString().size() == 3 &&
+ nextToken.getString().startswith("db")) {
+ int RegNo = -1;
+ switch (nextToken.getString()[2]) {
+ case '0': RegNo = X86::DR0; break;
+ case '1': RegNo = X86::DR1; break;
+ case '2': RegNo = X86::DR2; break;
+ case '3': RegNo = X86::DR3; break;
+ case '4': RegNo = X86::DR4; break;
+ case '5': RegNo = X86::DR5; break;
+ case '6': RegNo = X86::DR6; break;
+ case '7': RegNo = X86::DR7; break;
+ }
+
+ if (RegNo != -1) {
+ lexDefinite();
+
+ // FIXME: This is completely wrong when there is a space or other
+ // punctuation between the % and the register name.
StringRef regStr(lexedToken.getString().data(),
lexedToken.getString().size() +
nextToken.getString().size());
-
- return AsmToken(AsmToken::Register,
- regStr,
- static_cast<int64_t>(regID));
- }
- else {
- return AsmToken(lexedToken);
+ return AsmToken(AsmToken::Register, regStr,
+ static_cast<int64_t>(RegNo));
}
}
- else {
- return AsmToken(lexedToken);
- }
+
+
+ return lexedToken;
}
}
}
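
A minimal model of the token-merging trick used above (plain C++, not the MC API): the merged register string is built from the first token's data pointer plus both token lengths, which is only correct when the '%' and the identifier are adjacent in the underlying buffer -- precisely the limitation the FIXME comments call out.

#include <cassert>
#include <cstring>

int main() {
  const char Buf[] = "%rax";   // '%' token at Buf[0], "rax" at Buf[1]
  const char *Percent = Buf;   // first token: data pointer, length 1
  const char *Ident = Buf + 1; // second token: data pointer, length 3
  // The merge assumes adjacency; a space between the tokens breaks it.
  assert(Percent + 1 == Ident);
  // Spanning string, analogous to the regStr construction: "%rax".
  assert(std::strncmp(Percent, "%rax", 1 + 3) == 0);
  return 0;
}
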
@@ -113,26 +138,22 @@ AsmToken X86AsmLexer::LexTokenIntel() {
switch(lexedToken.getKind()) {
default:
- return AsmToken(lexedToken);
+ return lexedToken;
case AsmToken::Error:
SetError(Lexer->getErrLoc(), Lexer->getErr());
- return AsmToken(lexedToken);
- case AsmToken::Identifier:
- {
+ return lexedToken;
+ case AsmToken::Identifier: {
std::string upperCase = lexedToken.getString().str();
std::string lowerCase = LowercaseString(upperCase);
StringRef lowerRef(lowerCase);
unsigned regID = MatchRegisterName(lowerRef);
- if (regID) {
+ if (regID)
return AsmToken(AsmToken::Register,
lexedToken.getString(),
static_cast<int64_t>(regID));
- }
- else {
- return AsmToken(lexedToken);
- }
+ return lexedToken;
}
}
}
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 40a6a7b..f1e66ab 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -65,7 +65,7 @@ public:
X86ATTAsmParser(const Target &T, MCAsmParser &_Parser)
: TargetAsmParser(T), Parser(_Parser) {}
- virtual bool ParseInstruction(const StringRef &Name, SMLoc NameLoc,
+ virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands);
virtual bool ParseDirective(AsmToken DirectiveID);
@@ -412,6 +412,28 @@ bool X86ATTAsmParser::ParseRegister(unsigned &RegNo,
return false;
}
+ // If this is "db[0-7]", match it as an alias
+ // for dr[0-7].
+ if (RegNo == 0 && Tok.getString().size() == 3 &&
+ Tok.getString().startswith("db")) {
+ switch (Tok.getString()[2]) {
+ case '0': RegNo = X86::DR0; break;
+ case '1': RegNo = X86::DR1; break;
+ case '2': RegNo = X86::DR2; break;
+ case '3': RegNo = X86::DR3; break;
+ case '4': RegNo = X86::DR4; break;
+ case '5': RegNo = X86::DR5; break;
+ case '6': RegNo = X86::DR6; break;
+ case '7': RegNo = X86::DR7; break;
+ }
+
+ if (RegNo != 0) {
+ EndLoc = Tok.getLoc();
+ Parser.Lex(); // Eat it.
+ return false;
+ }
+ }
+
if (RegNo == 0)
return Error(Tok.getLoc(), "invalid register name");
@@ -580,7 +602,7 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) {
}
bool X86ATTAsmParser::
-ParseInstruction(const StringRef &Name, SMLoc NameLoc,
+ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// The various flavors of pushf and popf use Requires<In32BitMode> and
// Requires<In64BitMode>, but the assembler doesn't yet implement that.
@@ -590,6 +612,8 @@ ParseInstruction(const StringRef &Name, SMLoc NameLoc,
return Error(NameLoc, "popfl cannot be encoded in 64-bit mode");
else if (Name == "pushfl")
return Error(NameLoc, "pushfl cannot be encoded in 64-bit mode");
+ else if (Name == "pusha")
+ return Error(NameLoc, "pusha cannot be encoded in 64-bit mode");
} else {
if (Name == "popfq")
return Error(NameLoc, "popfq cannot be encoded in 32-bit mode");
@@ -597,6 +621,16 @@ ParseInstruction(const StringRef &Name, SMLoc NameLoc,
return Error(NameLoc, "pushfq cannot be encoded in 32-bit mode");
}
+ // The "Jump if rCX Zero" form jcxz is not allowed in 64-bit mode and
+ // the form jrcxz is not allowed in 32-bit mode.
+ if (Is64Bit) {
+ if (Name == "jcxz")
+ return Error(NameLoc, "jcxz cannot be encoded in 64-bit mode");
+ } else {
+ if (Name == "jrcxz")
+ return Error(NameLoc, "jrcxz cannot be encoded in 32-bit mode");
+ }
+
// FIXME: Hack to recognize "sal..." and "rep..." for now. We need a way to
// represent alternative syntaxes in the .td file, without requiring
// instruction duplication.
@@ -617,6 +651,23 @@ ParseInstruction(const StringRef &Name, SMLoc NameLoc,
.Case("setnz", "setne")
.Case("jz", "je")
.Case("jnz", "jne")
+ .Case("jc", "jb")
+ // FIXME: in 32-bit mode jcxz requires an AdSize prefix. In 64-bit mode
+ // jecxz requires an AdSize prefix, but in 32-bit mode jecxz needs no
+ // prefix at all.
+ .Case("jecxz", "jcxz")
+ .Case("jrcxz", "jcxz")
+ .Case("jna", "jbe")
+ .Case("jnae", "jb")
+ .Case("jnb", "jae")
+ .Case("jnbe", "ja")
+ .Case("jnc", "jae")
+ .Case("jng", "jle")
+ .Case("jnge", "jl")
+ .Case("jnl", "jge")
+ .Case("jnle", "jg")
+ .Case("jpe", "jp")
+ .Case("jpo", "jnp")
.Case("cmovcl", "cmovbl")
.Case("cmovcl", "cmovbl")
.Case("cmovnal", "cmovbel")
@@ -631,36 +682,64 @@ ParseInstruction(const StringRef &Name, SMLoc NameLoc,
.Case("cmovnlel", "cmovgl")
.Case("cmovnzl", "cmovnel")
.Case("cmovzl", "cmovel")
+ .Case("fwait", "wait")
+ .Case("movzx", "movzb")
.Default(Name);
// FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
const MCExpr *ExtraImmOp = 0;
- if (PatchedName.startswith("cmp") &&
+ if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
(PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
+ bool IsVCMP = PatchedName.startswith("vcmp");
+ unsigned SSECCIdx = IsVCMP ? 4 : 3;
unsigned SSEComparisonCode = StringSwitch<unsigned>(
- PatchedName.slice(3, PatchedName.size() - 2))
- .Case("eq", 0)
- .Case("lt", 1)
- .Case("le", 2)
- .Case("unord", 3)
- .Case("neq", 4)
- .Case("nlt", 5)
- .Case("nle", 6)
- .Case("ord", 7)
+ PatchedName.slice(SSECCIdx, PatchedName.size() - 2))
+ .Case("eq", 0)
+ .Case("lt", 1)
+ .Case("le", 2)
+ .Case("unord", 3)
+ .Case("neq", 4)
+ .Case("nlt", 5)
+ .Case("nle", 6)
+ .Case("ord", 7)
+ .Case("eq_uq", 8)
+ .Case("nge", 9)
+ .Case("ngt", 0x0A)
+ .Case("false", 0x0B)
+ .Case("neq_oq", 0x0C)
+ .Case("ge", 0x0D)
+ .Case("gt", 0x0E)
+ .Case("true", 0x0F)
+ .Case("eq_os", 0x10)
+ .Case("lt_oq", 0x11)
+ .Case("le_oq", 0x12)
+ .Case("unord_s", 0x13)
+ .Case("neq_us", 0x14)
+ .Case("nlt_uq", 0x15)
+ .Case("nle_uq", 0x16)
+ .Case("ord_s", 0x17)
+ .Case("eq_us", 0x18)
+ .Case("nge_uq", 0x19)
+ .Case("ngt_uq", 0x1A)
+ .Case("false_os", 0x1B)
+ .Case("neq_os", 0x1C)
+ .Case("ge_oq", 0x1D)
+ .Case("gt_oq", 0x1E)
+ .Case("true_us", 0x1F)
.Default(~0U);
if (SSEComparisonCode != ~0U) {
ExtraImmOp = MCConstantExpr::Create(SSEComparisonCode,
getParser().getContext());
if (PatchedName.endswith("ss")) {
- PatchedName = "cmpss";
+ PatchedName = IsVCMP ? "vcmpss" : "cmpss";
} else if (PatchedName.endswith("sd")) {
- PatchedName = "cmpsd";
+ PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
} else if (PatchedName.endswith("ps")) {
- PatchedName = "cmpps";
+ PatchedName = IsVCMP ? "vcmpps" : "cmpps";
} else {
assert(PatchedName.endswith("pd") && "Unexpected mnemonic!");
- PatchedName = "cmppd";
+ PatchedName = IsVCMP ? "vcmppd" : "cmppd";
}
}
}
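
A sketch of the slicing arithmetic the hunk introduces (standalone C++; the input mnemonic is made up): the condition-code substring sits between the cmp/vcmp prefix and the two-character type suffix, so it starts at index 4 for AVX mnemonics and 3 otherwise.

#include <cstddef>
#include <cstdio>
#include <string>

int main() {
  std::string Name = "vcmpnltps";                 // hypothetical input
  bool IsVCMP = Name.compare(0, 4, "vcmp") == 0;
  std::size_t CCIdx = IsVCMP ? 4 : 3;
  // Everything between the prefix and the 2-char suffix is the SSE CC.
  std::string CC = Name.substr(CCIdx, Name.size() - 2 - CCIdx);
  std::string Patched =
      (IsVCMP ? "vcmp" : "cmp") + Name.substr(Name.size() - 2);
  // Prints: vcmpnltps -> vcmpps, cc = nlt (which encodes as immediate 5)
  std::printf("%s -> %s, cc = %s\n", Name.c_str(), Patched.c_str(),
              CC.c_str());
}
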
diff --git a/contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp b/contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
index 0b64cb4..f2cdb5b 100644
--- a/contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
@@ -85,11 +85,18 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
}
}
-void X86ATTInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
+void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
const MCOperand &BaseReg = MI->getOperand(Op);
const MCOperand &IndexReg = MI->getOperand(Op+2);
const MCOperand &DispSpec = MI->getOperand(Op+3);
+ const MCOperand &SegReg = MI->getOperand(Op+4);
+
+ // If this has a segment register, print it.
+ if (SegReg.getReg()) {
+ printOperand(MI, Op+4, O);
+ O << ':';
+ }
if (DispSpec.isImm()) {
int64_t DispVal = DispSpec.getImm();
@@ -115,13 +122,3 @@ void X86ATTInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op,
O << ')';
}
}
-
-void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
- // If this has a segment register, print it.
- if (MI->getOperand(Op+4).getReg()) {
- printOperand(MI, Op+4, O);
- O << ':';
- }
- printLeaMemReference(MI, Op, O);
-}
diff --git a/contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h b/contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h
index 8d5d508..3be4bae 100644
--- a/contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h
+++ b/contrib/llvm/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h
@@ -34,7 +34,6 @@ public:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &OS);
- void printLeaMemReference(const MCInst *MI, unsigned Op, raw_ostream &OS);
void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &OS);
void print_pcrel_imm(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
@@ -69,14 +68,8 @@ public:
void printf128mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
printMemReference(MI, OpNo, O);
}
- void printlea32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- printLeaMemReference(MI, OpNo, O);
- }
- void printlea64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- printLeaMemReference(MI, OpNo, O);
- }
- void printlea64_32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- printLeaMemReference(MI, OpNo, O);
+ void printf256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ printMemReference(MI, OpNo, O);
}
};
diff --git a/contrib/llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp b/contrib/llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp
index 183213d..08e6486 100644
--- a/contrib/llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp
@@ -17,7 +17,6 @@
#include "X86IntelInstPrinter.h"
#include "X86MCInstLower.h"
#include "X86.h"
-#include "X86COFF.h"
#include "X86COFFMachineModuleInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
@@ -35,6 +34,7 @@
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/Support/COFF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetOptions.h"
@@ -60,8 +60,10 @@ bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
if (Subtarget->isTargetCOFF()) {
bool Intrn = MF.getFunction()->hasInternalLinkage();
OutStreamer.BeginCOFFSymbolDef(CurrentFnSym);
- OutStreamer.EmitCOFFSymbolStorageClass(Intrn ? COFF::C_STAT : COFF::C_EXT);
- OutStreamer.EmitCOFFSymbolType(COFF::DT_FCN << COFF::N_BTSHFT);
+ OutStreamer.EmitCOFFSymbolStorageClass(Intrn ? COFF::IMAGE_SYM_CLASS_STATIC
+ : COFF::IMAGE_SYM_CLASS_EXTERNAL);
+ OutStreamer.EmitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
+ << COFF::SCT_COMPLEX_TYPE_SHIFT);
OutStreamer.EndCOFFSymbolDef();
}
@@ -200,6 +202,11 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO,
case X86II::MO_GOT: O << "@GOT"; break;
case X86II::MO_GOTOFF: O << "@GOTOFF"; break;
case X86II::MO_PLT: O << "@PLT"; break;
+ case X86II::MO_TLVP: O << "@TLVP"; break;
+ case X86II::MO_TLVP_PIC_BASE:
+ O << "@TLVP" << '-';
+ PrintPICBaseSymbol(O);
+ break;
}
}
@@ -383,6 +390,8 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
}
if (MO.isGlobal() || MO.isCPI() || MO.isJTI() || MO.isSymbol()) {
printSymbolOperand(MO, O);
+ if (Subtarget->isPICStyleRIPRel())
+ O << "(%rip)";
return false;
}
if (MO.isReg()) {
@@ -575,8 +584,9 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
E = COFFMMI.externals_end();
I != E; ++I) {
OutStreamer.BeginCOFFSymbolDef(CurrentFnSym);
- OutStreamer.EmitCOFFSymbolStorageClass(COFF::C_EXT);
- OutStreamer.EmitCOFFSymbolType(COFF::DT_FCN << COFF::N_BTSHFT);
+ OutStreamer.EmitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
+ OutStreamer.EmitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
+ << COFF::SCT_COMPLEX_TYPE_SHIFT);
OutStreamer.EndCOFFSymbolDef();
}
diff --git a/contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp b/contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp
index 7e0a9bb..a632047 100644
--- a/contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp
@@ -81,12 +81,19 @@ void X86IntelInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
}
}
-void X86IntelInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
+void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
+ raw_ostream &O) {
const MCOperand &BaseReg = MI->getOperand(Op);
unsigned ScaleVal = MI->getOperand(Op+1).getImm();
const MCOperand &IndexReg = MI->getOperand(Op+2);
const MCOperand &DispSpec = MI->getOperand(Op+3);
+ const MCOperand &SegReg = MI->getOperand(Op+4);
+
+ // If this has a segment register, print it.
+ if (SegReg.getReg()) {
+ printOperand(MI, Op+4, O);
+ O << ':';
+ }
O << '[';
@@ -104,7 +111,7 @@ void X86IntelInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op,
NeedPlus = true;
}
-
+
if (!DispSpec.isImm()) {
if (NeedPlus) O << " + ";
assert(DispSpec.isExpr() && "non-immediate displacement for LEA?");
@@ -126,13 +133,3 @@ void X86IntelInstPrinter::printLeaMemReference(const MCInst *MI, unsigned Op,
O << ']';
}
-
-void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op,
- raw_ostream &O) {
- // If this has a segment register, print it.
- if (MI->getOperand(Op+4).getReg()) {
- printOperand(MI, Op+4, O);
- O << ':';
- }
- printLeaMemReference(MI, Op, O);
-}
diff --git a/contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h b/contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h
index a0beeb2..4d68074 100644
--- a/contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h
+++ b/contrib/llvm/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h
@@ -36,7 +36,6 @@ public:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &O);
- void printLeaMemReference(const MCInst *MI, unsigned Op, raw_ostream &O);
void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &O);
void print_pcrel_imm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
@@ -81,17 +80,9 @@ public:
O << "XMMWORD PTR ";
printMemReference(MI, OpNo, O);
}
- void printlea32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "DWORD PTR ";
- printLeaMemReference(MI, OpNo, O);
- }
- void printlea64mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "QWORD PTR ";
- printLeaMemReference(MI, OpNo, O);
- }
- void printlea64_32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
- O << "QWORD PTR ";
- printLeaMemReference(MI, OpNo, O);
+ void printf256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
+ O << "YMMWORD PTR ";
+ printMemReference(MI, OpNo, O);
}
};
diff --git a/contrib/llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp b/contrib/llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
index 4edeca9..e67fc06 100644
--- a/contrib/llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
@@ -152,6 +152,15 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
case X86II::MO_DARWIN_STUB:
break;
+ case X86II::MO_TLVP: RefKind = MCSymbolRefExpr::VK_TLVP; break;
+ case X86II::MO_TLVP_PIC_BASE:
+ Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
+ // Subtract the pic base.
+ Expr = MCBinaryExpr::CreateSub(Expr,
+ MCSymbolRefExpr::Create(GetPICBaseSymbol(),
+ Ctx),
+ Ctx);
+ break;
case X86II::MO_TLSGD: RefKind = MCSymbolRefExpr::VK_TLSGD; break;
case X86II::MO_GOTTPOFF: RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break;
case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break;
@@ -266,10 +275,21 @@ static void SimplifyShortMoveForm(MCInst &Inst, unsigned Opcode) {
return;
// Check whether this is an absolute address.
- if (Inst.getOperand(AddrBase + 0).getReg() != 0 ||
- Inst.getOperand(AddrBase + 2).getReg() != 0 ||
- Inst.getOperand(AddrBase + 4).getReg() != 0 ||
- Inst.getOperand(AddrBase + 1).getImm() != 1)
+ // FIXME: We know TLVP symbol refs aren't, but there should be a better way
+ // to do this here.
+ bool Absolute = true;
+ if (Inst.getOperand(AddrOp).isExpr()) {
+ const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr();
+ if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
+ if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
+ Absolute = false;
+ }
+
+ if (Absolute &&
+ (Inst.getOperand(AddrBase + 0).getReg() != 0 ||
+ Inst.getOperand(AddrBase + 2).getReg() != 0 ||
+ Inst.getOperand(AddrBase + 4).getReg() != 0 ||
+ Inst.getOperand(AddrBase + 1).getImm() != 1))
return;
// If so, rewrite the instruction.
@@ -327,6 +347,15 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
switch (OutMI.getOpcode()) {
case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand.
lower_lea64_32mem(&OutMI, 1);
+ // FALL THROUGH.
+ case X86::LEA64r:
+ case X86::LEA16r:
+ case X86::LEA32r:
+ // LEA should have a segment register, but it must be empty.
+ assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&
+ "Unexpected # of LEA operands");
+ assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
+ "LEA has segment specified!");
break;
case X86::MOVZX16rr8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break;
case X86::MOVZX16rm8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;
@@ -364,10 +393,9 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
break;
- // TAILJMPr, TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have
+ // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have
// register inputs modeled as normal uses instead of implicit uses. As such,
// truncate off all but the first operand (the callee). FIXME: Change isel.
- case X86::TAILJMPr:
case X86::TAILJMPr64:
case X86::CALL64r:
case X86::CALL64pcrel32: {
@@ -380,11 +408,20 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
}
// TAILJMPd, TAILJMPd64 - Lower to the correct jump instructions.
+ case X86::TAILJMPr:
case X86::TAILJMPd:
case X86::TAILJMPd64: {
+ unsigned Opcode;
+ switch (OutMI.getOpcode()) {
+ default: assert(0 && "Invalid opcode");
+ case X86::TAILJMPr: Opcode = X86::JMP32r; break;
+ case X86::TAILJMPd:
+ case X86::TAILJMPd64: Opcode = X86::JMP_1; break;
+ }
+
MCOperand Saved = OutMI.getOperand(0);
OutMI = MCInst();
- OutMI.setOpcode(X86::TAILJMP_1);
+ OutMI.setOpcode(Opcode);
OutMI.addOperand(Saved);
break;
}
@@ -483,8 +520,12 @@ void X86AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
O << V.getName();
O << " <- ";
// Frame address. Currently handles register +- offset only.
- assert(MI->getOperand(0).isReg() && MI->getOperand(3).isImm());
- O << '['; printOperand(MI, 0, O); O << '+'; printOperand(MI, 3, O);
+ O << '[';
+ if (MI->getOperand(0).isReg() && MI->getOperand(0).getReg())
+ printOperand(MI, 0, O);
+ else
+ O << "undef";
+ O << '+'; printOperand(MI, 3, O);
O << ']';
O << "+";
printOperand(MI, NOps-2, O);
@@ -495,8 +536,9 @@ X86AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const {
MachineLocation Location;
assert (MI->getNumOperands() == 7 && "Invalid no. of machine operands!");
// Frame address. Currently handles register +- offset only.
- assert(MI->getOperand(0).isReg() && MI->getOperand(3).isImm());
- Location.set(MI->getOperand(0).getReg(), MI->getOperand(3).getImm());
+
+ if (MI->getOperand(0).isReg() && MI->getOperand(3).isImm())
+ Location.set(MI->getOperand(0).getReg(), MI->getOperand(3).getImm());
return Location;
}
@@ -513,6 +555,13 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
}
return;
+ case X86::TAILJMPr:
+ case X86::TAILJMPd:
+ case X86::TAILJMPd64:
+ // Lower these as normal, but add some comments.
+ OutStreamer.AddComment("TAILCALL");
+ break;
+
case X86::MOVPC32r: {
MCInst TmpInst;
// This is a pseudo op for a two instruction sequence with a label, which
@@ -578,7 +627,6 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCInst TmpInst;
MCInstLowering.Lower(MI, TmpInst);
-
OutStreamer.EmitInstruction(TmpInst);
}
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/CMakeLists.txt b/contrib/llvm/lib/Target/X86/Disassembler/CMakeLists.txt
index 9f91060..97589c0 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/CMakeLists.txt
+++ b/contrib/llvm/lib/Target/X86/Disassembler/CMakeLists.txt
@@ -4,8 +4,8 @@ add_llvm_library(LLVMX86Disassembler
X86Disassembler.cpp
X86DisassemblerDecoder.c
)
-# workaround for hanging compilation on MSVC9
-if( MSVC_VERSION EQUAL 1500 )
+# workaround for hanging compilation on MSVC9 and 10
+if( MSVC_VERSION EQUAL 1500 OR MSVC_VERSION EQUAL 1600 )
set_property(
SOURCE X86Disassembler.cpp
PROPERTY COMPILE_FLAGS "/Od"
diff --git a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
index 8a5a630..09f1584 100644
--- a/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/contrib/llvm/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -252,13 +252,8 @@ static bool translateRMRegister(MCInst &mcInst,
/// @param mcInst - The MCInst to append to.
/// @param insn - The instruction to extract Mod, R/M, and SIB fields
/// from.
-/// @param sr - Whether or not to emit the segment register. The
-/// LEA instruction does not expect a segment-register
-/// operand.
/// @return - 0 on success; nonzero otherwise
-static bool translateRMMemory(MCInst &mcInst,
- InternalInstruction &insn,
- bool sr) {
+static bool translateRMMemory(MCInst &mcInst, InternalInstruction &insn) {
// Addresses in an MCInst are represented as five operands:
// 1. basereg (register) The R/M base, or (if there is a SIB) the
// SIB base
@@ -385,10 +380,7 @@ static bool translateRMMemory(MCInst &mcInst,
mcInst.addOperand(scaleAmount);
mcInst.addOperand(indexReg);
mcInst.addOperand(displacement);
-
- if (sr)
- mcInst.addOperand(segmentReg);
-
+ mcInst.addOperand(segmentReg);
return false;
}
@@ -439,9 +431,8 @@ static bool translateRM(MCInst &mcInst,
case TYPE_M1616:
case TYPE_M1632:
case TYPE_M1664:
- return translateRMMemory(mcInst, insn, true);
case TYPE_LEA:
- return translateRMMemory(mcInst, insn, false);
+ return translateRMMemory(mcInst, insn);
}
}
diff --git a/contrib/llvm/lib/Target/X86/README-SSE.txt b/contrib/llvm/lib/Target/X86/README-SSE.txt
index e5f84e8..b6aba93 100644
--- a/contrib/llvm/lib/Target/X86/README-SSE.txt
+++ b/contrib/llvm/lib/Target/X86/README-SSE.txt
@@ -36,62 +36,6 @@ The pattern isel got this one right.
//===---------------------------------------------------------------------===//
-SSE doesn't have [mem] op= reg instructions. If we have an SSE instruction
-like this:
-
- X += y
-
-and the register allocator decides to spill X, it is cheaper to emit this as:
-
-Y += [xslot]
-store Y -> [xslot]
-
-than as:
-
-tmp = [xslot]
-tmp += y
-store tmp -> [xslot]
-
-..and this uses one fewer register (so this should be done at load folding
-time, not at spiller time). *Note* however that this can only be done
-if Y is dead. Here's a testcase:
-
-@.str_3 = external global [15 x i8]
-declare void @printf(i32, ...)
-define void @main() {
-build_tree.exit:
- br label %no_exit.i7
-
-no_exit.i7: ; preds = %no_exit.i7, %build_tree.exit
- %tmp.0.1.0.i9 = phi double [ 0.000000e+00, %build_tree.exit ],
- [ %tmp.34.i18, %no_exit.i7 ]
- %tmp.0.0.0.i10 = phi double [ 0.000000e+00, %build_tree.exit ],
- [ %tmp.28.i16, %no_exit.i7 ]
- %tmp.28.i16 = fadd double %tmp.0.0.0.i10, 0.000000e+00
- %tmp.34.i18 = fadd double %tmp.0.1.0.i9, 0.000000e+00
- br i1 false, label %Compute_Tree.exit23, label %no_exit.i7
-
-Compute_Tree.exit23: ; preds = %no_exit.i7
- tail call void (i32, ...)* @printf( i32 0 )
- store double %tmp.34.i18, double* null
- ret void
-}
-
-We currently emit:
-
-.BBmain_1:
- xorpd %XMM1, %XMM1
- addsd %XMM0, %XMM1
-*** movsd %XMM2, QWORD PTR [%ESP + 8]
-*** addsd %XMM2, %XMM1
-*** movsd QWORD PTR [%ESP + 8], %XMM2
- jmp .BBmain_1 # no_exit.i7
-
-This is a bugpoint-reduced testcase, which is why it doesn't make
-much sense (e.g. it's an infinite loop). :)
-
-//===---------------------------------------------------------------------===//
-
SSE should implement 'select_cc' using 'emulated conditional moves' that use
pcmp/pand/pandn/por to do a selection instead of a conditional branch:
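
For reference, the emulated conditional move mentioned here follows the standard mask-select identity result = (mask & a) | (~mask & b). A minimal sketch with SSE2 intrinsics (assuming an all-ones/all-zeros lane mask, e.g. produced by pcmpgt):

#include <emmintrin.h>

// Branchless per-lane select: where Mask lanes are all-ones take A,
// where they are all-zeros take B.
__m128i Select128(__m128i Mask, __m128i A, __m128i B) {
  return _mm_or_si128(_mm_and_si128(Mask, A),      // mask & a
                      _mm_andnot_si128(Mask, B));  // ~mask & b
}
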
@@ -122,12 +66,6 @@ LBB_X_2:
//===---------------------------------------------------------------------===//
-It's not clear whether we should use pxor or xorps / xorpd to clear XMM
-registers. The choice may depend on subtarget information. We should do some
-more experiments on different x86 machines.
-
-//===---------------------------------------------------------------------===//
-
Lower memcpy / memset to a series of SSE 128 bit move instructions when it's
feasible.
@@ -151,45 +89,6 @@ Perhaps use pxor / xorp* to clear a XMM register first?
//===---------------------------------------------------------------------===//
-How to decide when to use the "floating point version" of logical ops? Here are
-some code fragments:
-
- movaps LCPI5_5, %xmm2
- divps %xmm1, %xmm2
- mulps %xmm2, %xmm3
- mulps 8656(%ecx), %xmm3
- addps 8672(%ecx), %xmm3
- andps LCPI5_6, %xmm2
- andps LCPI5_1, %xmm3
- por %xmm2, %xmm3
- movdqa %xmm3, (%edi)
-
- movaps LCPI5_5, %xmm1
- divps %xmm0, %xmm1
- mulps %xmm1, %xmm3
- mulps 8656(%ecx), %xmm3
- addps 8672(%ecx), %xmm3
- andps LCPI5_6, %xmm1
- andps LCPI5_1, %xmm3
- orps %xmm1, %xmm3
- movaps %xmm3, 112(%esp)
- movaps %xmm3, (%ebx)
-
-Due to some minor source change, the later case ended up using orps and movaps
-instead of por and movdqa. Does it matter?
-
-//===---------------------------------------------------------------------===//
-
-X86RegisterInfo::copyRegToReg() returns X86::MOVAPSrr for VR128. Is it possible
-to choose between movaps, movapd, and movdqa based on types of source and
-destination?
-
-How about andps, andpd, and pand? Do we really care about the type of the packed
-elements? If not, why not always use the "ps" variants which are likely to be
-shorter.
-
-//===---------------------------------------------------------------------===//
-
External test Nurbs exposed some problems. Look for
__ZN15Nurbs_SSE_Cubic17TessellateSurfaceE, bb cond_next140. This is what icc
emits:
@@ -278,41 +177,6 @@ It also exposes some other problems. See MOV32ri -3 and the spills.
//===---------------------------------------------------------------------===//
-http://gcc.gnu.org/bugzilla/show_bug.cgi?id=25500
-
-LLVM is producing bad code.
-
-LBB_main_4: # cond_true44
- addps %xmm1, %xmm2
- subps %xmm3, %xmm2
- movaps (%ecx), %xmm4
- movaps %xmm2, %xmm1
- addps %xmm4, %xmm1
- addl $16, %ecx
- incl %edx
- cmpl $262144, %edx
- movaps %xmm3, %xmm2
- movaps %xmm4, %xmm3
- jne LBB_main_4 # cond_true44
-
-There are two problems. 1) No need for two loop induction variables. We can
-compare against 262144 * 16. 2) Known register coalescer issue. We should
-be able to eliminate one of the movaps:
-
- addps %xmm2, %xmm1 <=== Commute!
- subps %xmm3, %xmm1
- movaps (%ecx), %xmm4
- movaps %xmm1, %xmm1 <=== Eliminate!
- addps %xmm4, %xmm1
- addl $16, %ecx
- incl %edx
- cmpl $262144, %edx
- movaps %xmm3, %xmm2
- movaps %xmm4, %xmm3
- jne LBB_main_4 # cond_true44
-
-//===---------------------------------------------------------------------===//
-
Consider:
__m128 test(float a) {
@@ -382,22 +246,6 @@ elements are fixed zeros.
//===---------------------------------------------------------------------===//
-__m128d test1( __m128d A, __m128d B) {
- return _mm_shuffle_pd(A, B, 0x3);
-}
-
-compiles to
-
-shufpd $3, %xmm1, %xmm0
-
-Perhaps it's better to use unpckhpd instead?
-
-unpckhpd %xmm1, %xmm0
-
-Don't know if unpckhpd is faster. But it is shorter.
-
-//===---------------------------------------------------------------------===//
-
This code generates ugly code, probably due to costs being off or something:
define void @test(float* %P, <4 x float>* %P2 ) {
@@ -549,6 +397,7 @@ entry:
%tmp20 = tail call i64 @ccoshf( float %tmp6, float %z.0 ) nounwind readonly
ret i64 %tmp20
}
+declare i64 @ccoshf(float %z.0, float %z.1) nounwind readonly
This currently compiles to:
@@ -987,3 +836,34 @@ This would be better kept in the SSE unit by treating XMM0 as a 4xfloat and
doing a shuffle from v[1] to v[0] then a float store.
//===---------------------------------------------------------------------===//
+
+On SSE4 machines, we compile this code:
+
+define <2 x float> @test2(<2 x float> %Q, <2 x float> %R,
+ <2 x float> *%P) nounwind {
+ %Z = fadd <2 x float> %Q, %R
+
+ store <2 x float> %Z, <2 x float> *%P
+ ret <2 x float> %Z
+}
+
+into:
+
+_test2: ## @test2
+## BB#0:
+ insertps $0, %xmm2, %xmm2
+ insertps $16, %xmm3, %xmm2
+ insertps $0, %xmm0, %xmm3
+ insertps $16, %xmm1, %xmm3
+ addps %xmm2, %xmm3
+ movq %xmm3, (%rdi)
+ movaps %xmm3, %xmm0
+ pshufd $1, %xmm3, %xmm1
+ ## kill: XMM1<def> XMM1<kill>
+ ret
+
+The insertps's of $0 are pointless complex copies.
+
+//===---------------------------------------------------------------------===//
+
+
diff --git a/contrib/llvm/lib/Target/X86/README-X86-64.txt b/contrib/llvm/lib/Target/X86/README-X86-64.txt
index e8f7c5d..78c4dc0 100644
--- a/contrib/llvm/lib/Target/X86/README-X86-64.txt
+++ b/contrib/llvm/lib/Target/X86/README-X86-64.txt
@@ -1,27 +1,5 @@
//===- README_X86_64.txt - Notes for X86-64 code gen ----------------------===//
-Implement different PIC models? Right now we only support Mac OS X with small
-PIC code model.
-
-//===---------------------------------------------------------------------===//
-
-For this:
-
-extern void xx(void);
-void bar(void) {
- xx();
-}
-
-gcc compiles to:
-
-.globl _bar
-_bar:
- jmp _xx
-
-We need to do the tailcall optimization as well.
-
-//===---------------------------------------------------------------------===//
-
AMD64 Optimization Manual 8.2 has some nice information about optimizing integer
multiplication by a constant. How much of it applies to Intel's X86-64
implementation? There are definite trade-offs to consider: latency vs. register
@@ -96,123 +74,14 @@ gcc:
movq %rax, (%rdx)
ret
-//===---------------------------------------------------------------------===//
-
-Vararg function prologue can be further optimized. Currently all XMM registers
-are stored into register save area. Most of them can be eliminated since the
-upper bound of the number of XMM registers used are passed in %al. gcc produces
-something like the following:
-
- movzbl %al, %edx
- leaq 0(,%rdx,4), %rax
- leaq 4+L2(%rip), %rdx
- leaq 239(%rsp), %rax
- jmp *%rdx
- movaps %xmm7, -15(%rax)
- movaps %xmm6, -31(%rax)
- movaps %xmm5, -47(%rax)
- movaps %xmm4, -63(%rax)
- movaps %xmm3, -79(%rax)
- movaps %xmm2, -95(%rax)
- movaps %xmm1, -111(%rax)
- movaps %xmm0, -127(%rax)
-L2:
-
-It jumps over the movaps that do not need to be stored. Hard to see this being
-significant as it added 5 instructions (including an indirect branch) to avoid
-executing 0 to 8 stores in the function prologue.
-
-Perhaps we can optimize for the common case where no XMM registers are used for
-parameter passing, i.e. if %al == 0, jump over all stores. Or in the case of a
-leaf function where we can determine that no XMM input parameter is needed,
-avoid emitting the stores at all.
-
-//===---------------------------------------------------------------------===//
+And the codegen is even worse for the following
+(from http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33103):
+ void fill1(char *s, int a)
+ {
+ __builtin_memset(s, a, 15);
+ }
-AMD64 has a complex calling convention for aggregate passing by value:
-
-1. If the size of an object is larger than two eightbytes, or in C++, is a non-
- POD structure or union type, or contains unaligned fields, it has class
- MEMORY.
-2. Both eightbytes get initialized to class NO_CLASS.
-3. Each field of an object is classified recursively so that always two fields
- are considered. The resulting class is calculated according to the classes
- of the fields in the eightbyte:
- (a) If both classes are equal, this is the resulting class.
- (b) If one of the classes is NO_CLASS, the resulting class is the other
- class.
- (c) If one of the classes is MEMORY, the result is the MEMORY class.
- (d) If one of the classes is INTEGER, the result is the INTEGER.
- (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, MEMORY is used as
- class.
- (f) Otherwise class SSE is used.
-4. Then a post merger cleanup is done:
- (a) If one of the classes is MEMORY, the whole argument is passed in memory.
- (b) If SSEUP is not preceded by SSE, it is converted to SSE.
-
-Currently llvm frontend does not handle this correctly.
-
-Problem 1:
- typedef struct { int i; double d; } QuadWordS;
-It is currently passed in two i64 integer registers. However, gcc compiled
-callee expects the second element 'd' to be passed in XMM0.
-
-Problem 2:
- typedef struct { int32_t i; float j; double d; } QuadWordS;
-The size of the first two fields == i64 so they will be combined and passed in
-an integer register, RDI. The third field is still passed in XMM0.
-
-Problem 3:
- typedef struct { int64_t i; int8_t j; int64_t d; } S;
- void test(S s)
-The size of this aggregate is greater than two i64 so it should be passed in
-memory. Currently llvm breaks this down and passes it in three integer
-registers.
-
-Problem 4:
-Taking problem 3 one step further, a function may expect an aggregate value
-in memory followed by more parameter(s) passed in register(s).
- void test(S s, int b)
-
-LLVM IR does not allow parameter passing by aggregates, therefore it must break
-the aggregate value (in problems 3 and 4) into a number of scalar values:
- void %test(long %s.i, byte %s.j, long %s.d);
-
-However, if the backend were to lower this code literally it would pass the 3
-values in integer registers. To force it to be passed in memory, the frontend
-should change the function signature to:
- void %test(long %undef1, long %undef2, long %undef3, long %undef4,
- long %undef5, long %undef6,
- long %s.i, byte %s.j, long %s.d);
-And the call site would look something like this:
- call void %test( undef, undef, undef, undef, undef, undef,
- %tmp.s.i, %tmp.s.j, %tmp.s.d );
-The first 6 undef parameters would exhaust the 6 integer registers used for
-parameter passing. The following three integer values would then be forced into
-memory.
-
-For problem 4, the parameter 'b' would be moved to the front of the parameter
-list so it will be passed in register:
- void %test(int %b,
- long %undef1, long %undef2, long %undef3, long %undef4,
- long %undef5, long %undef6,
- long %s.i, byte %s.j, long %s.d);
-
-//===---------------------------------------------------------------------===//
-
-Right now the asm printer assumes GlobalAddress are accessed via RIP relative
-addressing. Therefore, it is not possible to generate this:
- movabsq $__ZTV10polynomialIdE+16, %rax
-
-That is ok for now since we currently only support small model. So the above
-is selected as
- leaq __ZTV10polynomialIdE+16(%rip), %rax
-
-This is probably slightly slower but is much shorter than movabsq. However, if
-we were to support medium or larger code models, we would need to use the movabs
-instruction. We should probably introduce something like AbsoluteAddress to
-distinguish it from GlobalAddress so the asm printer and JIT code emitter can
-do the right thing.
+For this version, we duplicate the computation of the constant to store.
//===---------------------------------------------------------------------===//
@@ -298,3 +167,107 @@ be able to recognize the zero extend. This could also presumably be implemented
if we have whole-function selectiondags.
//===---------------------------------------------------------------------===//
+
+Take the following C code
+(from http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43640):
+
+struct u1
+{
+ float x;
+ float y;
+};
+
+float foo(struct u1 u)
+{
+ return u.x + u.y;
+}
+
+Optimizes to the following IR:
+define float @foo(double %u.0) nounwind readnone {
+entry:
+ %tmp8 = bitcast double %u.0 to i64 ; <i64> [#uses=2]
+ %tmp6 = trunc i64 %tmp8 to i32 ; <i32> [#uses=1]
+ %tmp7 = bitcast i32 %tmp6 to float ; <float> [#uses=1]
+ %tmp2 = lshr i64 %tmp8, 32 ; <i64> [#uses=1]
+ %tmp3 = trunc i64 %tmp2 to i32 ; <i32> [#uses=1]
+ %tmp4 = bitcast i32 %tmp3 to float ; <float> [#uses=1]
+ %0 = fadd float %tmp7, %tmp4 ; <float> [#uses=1]
+ ret float %0
+}
+
+And current llvm-gcc/clang output:
+ movd %xmm0, %rax
+ movd %eax, %xmm1
+ shrq $32, %rax
+ movd %eax, %xmm0
+ addss %xmm1, %xmm0
+ ret
+
+We really shouldn't move the floats to RAX, only to immediately move them
+straight back to the XMM registers.
+
+There really isn't any good way to handle this purely in IR optimizers; it
+could possibly be handled by changing the output of the frontend, though. It
+would also be feasible to add an x86-specific DAGCombine to optimize the
+bitcast+trunc+(lshr+)bitcast combination.
+
+//===---------------------------------------------------------------------===//
+
+Take the following code
+(from http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34653):
+extern unsigned long table[];
+unsigned long foo(unsigned char *p) {
+ unsigned long tag = *p;
+ return table[tag >> 4] + table[tag & 0xf];
+}
+
+Current code generated:
+ movzbl (%rdi), %eax
+ movq %rax, %rcx
+ andq $240, %rcx
+ shrq %rcx
+ andq $15, %rax
+ movq table(,%rax,8), %rax
+ addq table(%rcx), %rax
+ ret
+
+Issues:
+1. First movq should be movl; saves a byte.
+2. Both andq's should be andl; saves another two bytes. I think this was
+ implemented at one point, but subsequently regressed.
+3. shrq should be shrl; saves another byte.
+4. The first andq can be completely eliminated by using a slightly more
+ expensive addressing mode.
+
+//===---------------------------------------------------------------------===//
+
+Consider the following (contrived testcase, but contains common factors):
+
+#include <stdarg.h>
+int test(int x, ...) {
+ int sum, i;
+ va_list l;
+ va_start(l, x);
+ for (i = 0; i < x; i++)
+ sum += va_arg(l, int);
+ va_end(l);
+ return sum;
+}
+
+Testcase given in C because fixing it will likely involve changing the IR
+generated for it. The primary issue with the result is that it doesn't do any
+of the optimizations which are possible if we know the address of a va_list
+in the current function is never taken:
+1. We shouldn't spill the XMM registers because we only call va_arg with "int".
+2. It would be nice if we could scalarrepl the va_list.
+3. Probably overkill, but it'd be cool if we could peel off the first five
+iterations of the loop.
+
+Other optimizations involving functions which use va_arg on floats which don't
+have the address of a va_list taken:
+1. Conversely to the above, we shouldn't spill general registers if we only
+ call va_arg on "double".
+2. If we know nothing more than 64 bits wide is read from the XMM registers,
+ we can change the spilling code to reduce the amount of stack used by half.
+
+//===---------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/X86/README.txt b/contrib/llvm/lib/Target/X86/README.txt
index d4545a6..efc0cd8 100644
--- a/contrib/llvm/lib/Target/X86/README.txt
+++ b/contrib/llvm/lib/Target/X86/README.txt
@@ -1103,57 +1103,6 @@ be folded into: shl [mem], 1
//===---------------------------------------------------------------------===//
-This testcase misses a read/modify/write opportunity (from PR1425):
-
-void vertical_decompose97iH1(int *b0, int *b1, int *b2, int width){
- int i;
- for(i=0; i<width; i++)
- b1[i] += (1*(b0[i] + b2[i])+0)>>0;
-}
-
-We compile it down to:
-
-LBB1_2: # bb
- movl (%esi,%edi,4), %ebx
- addl (%ecx,%edi,4), %ebx
- addl (%edx,%edi,4), %ebx
- movl %ebx, (%ecx,%edi,4)
- incl %edi
- cmpl %eax, %edi
- jne LBB1_2 # bb
-
-the inner loop should add to the memory location (%ecx,%edi,4), saving
-a mov. Something like:
-
- movl (%esi,%edi,4), %ebx
- addl (%edx,%edi,4), %ebx
- addl %ebx, (%ecx,%edi,4)
-
-Here is another interesting example:
-
-void vertical_compose97iH1(int *b0, int *b1, int *b2, int width){
- int i;
- for(i=0; i<width; i++)
- b1[i] -= (1*(b0[i] + b2[i])+0)>>0;
-}
-
-We miss the r/m/w opportunity here by using 2 subs instead of an add+sub[mem]:
-
-LBB9_2: # bb
- movl (%ecx,%edi,4), %ebx
- subl (%esi,%edi,4), %ebx
- subl (%edx,%edi,4), %ebx
- movl %ebx, (%ecx,%edi,4)
- incl %edi
- cmpl %eax, %edi
- jne LBB9_2 # bb
-
-Additionally, LSR should rewrite the exit condition of these loops to use
-a stride-4 IV, which would allow all the scales in the loop to go away.
-This would result in smaller code and more efficient microops.
-
-//===---------------------------------------------------------------------===//
-
In SSE mode, we turn abs and neg into a load from the constant pool plus a xor
or and instruction, for example:
@@ -1301,15 +1250,8 @@ FirstOnet:
xorl %eax, %eax
ret
-There are a few possible improvements here:
-1. We should be able to eliminate the dead load into %ecx
-2. We could change the "movl 8(%esp), %eax" into
- "movzwl 10(%esp), %eax"; this lets us change the cmpl
- into a testl, which is shorter, and eliminate the shift.
-
-We could also in theory eliminate the branch by using a conditional
-for the address of the load, but that seems unlikely to be worthwhile
-in general.
+We could change the "movl 8(%esp), %eax" into "movzwl 10(%esp), %eax"; this
+lets us change the cmpl into a testl, which is shorter, and eliminate the shift.
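+
+A sketch of the improved sequence (the branch and the surrounding code stay
+as they are):
+
+	movzwl	10(%esp), %eax
+	testl	%eax, %eax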
//===---------------------------------------------------------------------===//
@@ -1331,22 +1273,23 @@ bb7: ; preds = %entry
to:
-_foo:
+foo: # @foo
+# BB#0: # %entry
+ movl 4(%esp), %ecx
cmpb $0, 16(%esp)
- movl 12(%esp), %ecx
+ je .LBB0_2
+# BB#1: # %bb
movl 8(%esp), %eax
- movl 4(%esp), %edx
- je LBB1_2 # bb7
-LBB1_1: # bb
- addl %edx, %eax
+ addl %ecx, %eax
ret
-LBB1_2: # bb7
- movl %edx, %eax
- subl %ecx, %eax
+.LBB0_2: # %bb7
+ movl 12(%esp), %edx
+ movl %ecx, %eax
+ subl %edx, %eax
ret
-The coalescer could coalesce "edx" with "eax" to avoid the movl in LBB1_2
-if it commuted the addl in LBB1_1.
+There's an obviously unnecessary movl in .LBB0_2, and we could eliminate a
+couple more movls by putting 4(%esp) into %eax instead of %ecx.
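+
+A sketch of the tighter code we could emit (same frame layout as above):
+
+foo:                                    # @foo
+	movl	4(%esp), %eax
+	cmpb	$0, 16(%esp)
+	je	.LBB0_2
+	addl	8(%esp), %eax
+	ret
+.LBB0_2:
+	subl	12(%esp), %eax
+	ret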
//===---------------------------------------------------------------------===//
@@ -1396,8 +1339,7 @@ Also check why xmm7 is not used at all in the function.
//===---------------------------------------------------------------------===//
-Legalize loses track of the fact that bools are always zero extended when in
-memory. This causes us to compile abort_gzip (from 164.gzip) from:
+Take the following:
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
@@ -1416,16 +1358,15 @@ bb4.i: ; preds = %entry
}
declare void @exit(i32) noreturn nounwind
-into:
-
-_abort_gzip:
+This compiles into:
+_abort_gzip: ## @abort_gzip
+## BB#0: ## %entry
subl $12, %esp
movb _in_exit.4870.b, %al
- notb %al
- testb $1, %al
- jne LBB1_2 ## bb4.i
-LBB1_1: ## bb.i
- ...
+ cmpb $1, %al
+ jne LBB0_2
+
+We somehow miss folding the movb into the cmpb.
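+
+The folded form would presumably be just:
+
+	cmpb	$1, _in_exit.4870.b
+	jne	LBB0_2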
//===---------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/X86/X86.h b/contrib/llvm/lib/Target/X86/X86.h
index 22e89a5..677781d 100644
--- a/contrib/llvm/lib/Target/X86/X86.h
+++ b/contrib/llvm/lib/Target/X86/X86.h
@@ -35,6 +35,10 @@ class formatted_raw_ostream;
FunctionPass *createX86ISelDag(X86TargetMachine &TM,
CodeGenOpt::Level OptLevel);
+/// createGlobalBaseRegPass - This pass initializes a global base
+/// register for PIC on x86-32.
+FunctionPass* createGlobalBaseRegPass();
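+// Typical use (a sketch, not from the tree): the pass is added to the codegen
+// pipeline like any other FunctionPass, e.g.
+//   PM.add(createGlobalBaseRegPass());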
+
/// createX86FloatingPointStackifierPass - This function returns a pass which
/// converts floating point register references and pseudo instructions into
/// floating point stack references and physical instructions.
diff --git a/contrib/llvm/lib/Target/X86/X86AsmBackend.cpp b/contrib/llvm/lib/Target/X86/X86AsmBackend.cpp
index 151087f..2cf65c1 100644
--- a/contrib/llvm/lib/Target/X86/X86AsmBackend.cpp
+++ b/contrib/llvm/lib/Target/X86/X86AsmBackend.cpp
@@ -23,13 +23,13 @@
#include "llvm/Target/TargetAsmBackend.h"
using namespace llvm;
-namespace {
static unsigned getFixupKindLog2Size(unsigned Kind) {
switch (Kind) {
default: assert(0 && "invalid fixup kind!");
case X86::reloc_pcrel_1byte:
case FK_Data_1: return 0;
+ case X86::reloc_pcrel_2byte:
case FK_Data_2: return 1;
case X86::reloc_pcrel_4byte:
case X86::reloc_riprel_4byte:
@@ -39,6 +39,7 @@ static unsigned getFixupKindLog2Size(unsigned Kind) {
}
}
+namespace {
class X86AsmBackend : public TargetAsmBackend {
public:
X86AsmBackend(const Target &T)
@@ -60,6 +61,7 @@ public:
bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const;
};
+} // end anonymous namespace
static unsigned getRelaxedOpcode(unsigned Op) {
switch (Op) {
@@ -75,7 +77,6 @@ static unsigned getRelaxedOpcode(unsigned Op) {
case X86::JG_1: return X86::JG_4;
case X86::JLE_1: return X86::JLE_4;
case X86::JL_1: return X86::JL_4;
- case X86::TAILJMP_1:
case X86::JMP_1: return X86::JMP_4;
case X86::JNE_1: return X86::JNE_4;
case X86::JNO_1: return X86::JNO_4;
@@ -180,6 +181,7 @@ bool X86AsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
/* *** */
+namespace {
class ELFX86AsmBackend : public X86AsmBackend {
public:
ELFX86AsmBackend(const Target &T)
@@ -281,7 +283,7 @@ public:
}
};
-}
+} // end anonymous namespace
TargetAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
const std::string &TT) {
diff --git a/contrib/llvm/lib/Target/X86/X86COFF.h b/contrib/llvm/lib/Target/X86/X86COFF.h
deleted file mode 100644
index 0a8e4e6..0000000
--- a/contrib/llvm/lib/Target/X86/X86COFF.h
+++ /dev/null
@@ -1,95 +0,0 @@
-//===--- X86COFF.h - Some definitions from COFF documentations ------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file just defines some symbols found in COFF documentation. They are
-// used to emit function type information for COFF targets (Cygwin/Mingw32).
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86COFF_H
-#define X86COFF_H
-
-namespace COFF
-{
-/// Storage class tells where and what the symbol represents
-enum StorageClass {
- C_EFCN = -1, ///< Physical end of function
- C_NULL = 0, ///< No symbol
- C_AUTO = 1, ///< External definition
- C_EXT = 2, ///< External symbol
- C_STAT = 3, ///< Static
- C_REG = 4, ///< Register variable
- C_EXTDEF = 5, ///< External definition
- C_LABEL = 6, ///< Label
- C_ULABEL = 7, ///< Undefined label
- C_MOS = 8, ///< Member of structure
- C_ARG = 9, ///< Function argument
- C_STRTAG = 10, ///< Structure tag
- C_MOU = 11, ///< Member of union
- C_UNTAG = 12, ///< Union tag
- C_TPDEF = 13, ///< Type definition
- C_USTATIC = 14, ///< Undefined static
- C_ENTAG = 15, ///< Enumeration tag
- C_MOE = 16, ///< Member of enumeration
- C_REGPARM = 17, ///< Register parameter
- C_FIELD = 18, ///< Bit field
-
- C_BLOCK = 100, ///< ".bb" or ".eb" - beginning or end of block
- C_FCN = 101, ///< ".bf" or ".ef" - beginning or end of function
- C_EOS = 102, ///< End of structure
- C_FILE = 103, ///< File name
- C_LINE = 104, ///< Line number, reformatted as symbol
- C_ALIAS = 105, ///< Duplicate tag
- C_HIDDEN = 106 ///< External symbol in dmert public lib
-};
-
-/// The type of the symbol. This is made up of a base type and a derived type.
-/// For example, pointer to int is "pointer to T" and "int"
-enum SymbolType {
- T_NULL = 0, ///< No type info
- T_ARG = 1, ///< Void function argument (only used by compiler)
- T_VOID = 1, ///< The same as above. Just named differently in some specs.
- T_CHAR = 2, ///< Character
- T_SHORT = 3, ///< Short integer
- T_INT = 4, ///< Integer
- T_LONG = 5, ///< Long integer
- T_FLOAT = 6, ///< Floating point
- T_DOUBLE = 7, ///< Double word
- T_STRUCT = 8, ///< Structure
- T_UNION = 9, ///< Union
- T_ENUM = 10, ///< Enumeration
- T_MOE = 11, ///< Member of enumeration
- T_UCHAR = 12, ///< Unsigned character
- T_USHORT = 13, ///< Unsigned short
- T_UINT = 14, ///< Unsigned integer
- T_ULONG = 15 ///< Unsigned long
-};
-
-/// Derived type of symbol
-enum SymbolDerivedType {
- DT_NON = 0, ///< No derived type
- DT_PTR = 1, ///< Pointer to T
- DT_FCN = 2, ///< Function returning T
- DT_ARY = 3 ///< Array of T
-};
-
-/// Masks for extracting parts of type
-enum SymbolTypeMasks {
- N_BTMASK = 017, ///< Mask for base type
- N_TMASK = 060 ///< Mask for derived type
-};
-
-/// Offsets of parts of type
-enum Shifts {
- N_BTSHFT = 4 ///< Type is formed as (base + derived << N_BTSHIFT)
-};
-
-}
-
-#endif // X86COFF_H
diff --git a/contrib/llvm/lib/Target/X86/X86CallingConv.td b/contrib/llvm/lib/Target/X86/X86CallingConv.td
index a5774e1..a6a1e4e 100644
--- a/contrib/llvm/lib/Target/X86/X86CallingConv.td
+++ b/contrib/llvm/lib/Target/X86/X86CallingConv.td
@@ -42,7 +42,7 @@ def RetCC_X86Common : CallingConv<[
// MMX vector types are always returned in MM0. If the target doesn't have
// MM0, it doesn't support these vector types.
- CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32], CCAssignToReg<[MM0]>>,
+ CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToReg<[MM0]>>,
// Long double types are always returned in ST0 (even with SSE).
CCIfType<[f80], CCAssignToReg<[ST0, ST1]>>
@@ -89,7 +89,7 @@ def RetCC_X86_64_C : CallingConv<[
// returned in RAX. This disagrees with ABI documentation but is bug
// compatible with gcc.
CCIfType<[v1i64], CCAssignToReg<[RAX]>>,
- CCIfType<[v8i8, v4i16, v2i32, v2f32], CCAssignToReg<[XMM0, XMM1]>>,
+ CCIfType<[v8i8, v4i16, v2i32], CCAssignToReg<[XMM0, XMM1]>>,
CCDelegateTo<RetCC_X86Common>
]>;
@@ -155,7 +155,7 @@ def CC_X86_64_C : CallingConv<[
// The first 8 MMX (except for v1i64) vector arguments are passed in XMM
// registers on Darwin.
- CCIfType<[v8i8, v4i16, v2i32, v2f32],
+ CCIfType<[v8i8, v4i16, v2i32],
CCIfSubtarget<"isTargetDarwin()",
CCIfSubtarget<"hasSSE2()",
CCPromoteToType<v2i64>>>>,
@@ -177,7 +177,7 @@ def CC_X86_64_C : CallingConv<[
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
// __m64 vectors get 8-byte stack slots that are 8-byte aligned.
- CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32], CCAssignToStack<8, 8>>
+ CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>>
]>;
// Calling convention used on Win64
@@ -195,7 +195,7 @@ def CC_X86_Win64_C : CallingConv<[
CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
// The first 4 MMX vector arguments are passed in GPRs.
- CCIfType<[v8i8, v4i16, v2i32, v1i64, v2f32],
+ CCIfType<[v8i8, v4i16, v2i32, v1i64],
CCBitConvertToType<i64>>,
// The first 4 integer arguments are passed in integer registers.
@@ -254,7 +254,7 @@ def CC_X86_32_Common : CallingConv<[
// The first 3 __m64 (except for v1i64) vector arguments are passed in mmx
// registers if the call is not a vararg call.
- CCIfNotVarArg<CCIfType<[v8i8, v4i16, v2i32, v2f32],
+ CCIfNotVarArg<CCIfType<[v8i8, v4i16, v2i32],
CCAssignToReg<[MM0, MM1, MM2]>>>,
// Integer/Float values get stored in stack slots that are 4 bytes in
diff --git a/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp b/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
index 8f02604..f13669b 100644
--- a/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86CodeEmitter.cpp
@@ -138,7 +138,7 @@ bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
// MOVPC32r is basically a call plus a pop instruction.
if (Desc.getOpcode() == X86::MOVPC32r)
emitInstruction(*I, &II->get(X86::POP32r));
- NumEmitted++; // Keep track of the # of mi's emitted
+ ++NumEmitted; // Keep track of the # of mi's emitted
}
}
} while (MCE.finishFunction(MF));
@@ -730,9 +730,9 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI,
case X86II::MRMDestMem: {
MCE.emitByte(BaseOpcode);
emitMemModRMByte(MI, CurOp,
- getX86RegNum(MI.getOperand(CurOp + X86AddrNumOperands)
+ getX86RegNum(MI.getOperand(CurOp + X86::AddrNumOperands)
.getReg()));
- CurOp += X86AddrNumOperands + 1;
+ CurOp += X86::AddrNumOperands + 1;
if (CurOp != NumOps)
emitConstant(MI.getOperand(CurOp++).getImm(),
X86II::getSizeOfImm(Desc->TSFlags));
@@ -750,13 +750,7 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI,
break;
case X86II::MRMSrcMem: {
- // FIXME: Maybe lea should have its own form?
- int AddrOperands;
- if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
- Opcode == X86::LEA16r || Opcode == X86::LEA32r)
- AddrOperands = X86AddrNumOperands - 1; // No segment register
- else
- AddrOperands = X86AddrNumOperands;
+ int AddrOperands = X86::AddrNumOperands;
intptr_t PCAdj = (CurOp + AddrOperands + 1 != NumOps) ?
X86II::getSizeOfImm(Desc->TSFlags) : 0;
@@ -810,14 +804,14 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI,
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m: {
- intptr_t PCAdj = (CurOp + X86AddrNumOperands != NumOps) ?
- (MI.getOperand(CurOp+X86AddrNumOperands).isImm() ?
+ intptr_t PCAdj = (CurOp + X86::AddrNumOperands != NumOps) ?
+ (MI.getOperand(CurOp+X86::AddrNumOperands).isImm() ?
X86II::getSizeOfImm(Desc->TSFlags) : 4) : 0;
MCE.emitByte(BaseOpcode);
emitMemModRMByte(MI, CurOp, (Desc->TSFlags & X86II::FormMask)-X86II::MRM0m,
PCAdj);
- CurOp += X86AddrNumOperands;
+ CurOp += X86::AddrNumOperands;
if (CurOp == NumOps)
break;
diff --git a/contrib/llvm/lib/Target/X86/X86FastISel.cpp b/contrib/llvm/lib/Target/X86/X86FastISel.cpp
index 1bc5eb7..ce13707 100644
--- a/contrib/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FastISel.cpp
@@ -23,7 +23,9 @@
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -52,20 +54,7 @@ class X86FastISel : public FastISel {
bool X86ScalarSSEf32;
public:
- explicit X86FastISel(MachineFunction &mf,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
- DenseMap<const AllocaInst *, int> &am,
- std::vector<std::pair<MachineInstr*, unsigned> > &pn
-#ifndef NDEBUG
- , SmallSet<const Instruction *, 8> &cil
-#endif
- )
- : FastISel(mf, vm, bm, am, pn
-#ifndef NDEBUG
- , cil
-#endif
- ) {
+ explicit X86FastISel(FunctionLoweringInfo &funcInfo) : FastISel(funcInfo) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
X86ScalarSSEf64 = Subtarget->hasSSE2();
@@ -96,6 +85,8 @@ private:
bool X86SelectStore(const Instruction *I);
+ bool X86SelectRet(const Instruction *I);
+
bool X86SelectCmp(const Instruction *I);
bool X86SelectZExt(const Instruction *I);
@@ -117,6 +108,7 @@ private:
bool X86SelectCall(const Instruction *I);
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isTailCall = false);
+ CCAssignFn *CCAssignFnForRet(CallingConv::ID CC, bool isTailCall = false);
const X86InstrInfo *getInstrInfo() const {
return getTargetMachine()->getInstrInfo();
@@ -190,6 +182,20 @@ CCAssignFn *X86FastISel::CCAssignFnForCall(CallingConv::ID CC,
return CC_X86_32_C;
}
+/// CCAssignFnForRet - Selects the correct CCAssignFn for a given calling
+/// convention.
+CCAssignFn *X86FastISel::CCAssignFnForRet(CallingConv::ID CC,
+ bool isTailCall) {
+ if (Subtarget->is64Bit()) {
+ if (Subtarget->isTargetWin64())
+ return RetCC_X86_Win64_C;
+ else
+ return RetCC_X86_64_C;
+ }
+
+ return RetCC_X86_32_C;
+}
+
/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
@@ -242,7 +248,8 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
}
ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(Opc), ResultReg), AM);
return true;
}
@@ -261,7 +268,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
case MVT::i1: {
// Mask out all but lowest bit.
unsigned AndResult = createResultReg(X86::GR8RegisterClass);
- BuildMI(MBB, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
Val = AndResult;
}
@@ -278,7 +285,8 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
break;
}
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(Opc)), AM).addReg(Val);
return true;
}
@@ -306,7 +314,8 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
}
if (Opc) {
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM)
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
+ DL, TII.get(Opc)), AM)
.addImm(Signed ? (uint64_t) CI->getSExtValue() :
CI->getZExtValue());
return true;
@@ -342,6 +351,12 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
const User *U = NULL;
unsigned Opcode = Instruction::UserOp1;
if (const Instruction *I = dyn_cast<Instruction>(V)) {
+ // Don't walk into other basic blocks; it's possible we haven't
+ // visited them yet, so the instructions may not yet be assigned
+ // virtual registers.
+ if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB)
+ return false;
+
Opcode = I->getOpcode();
U = I;
} else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
@@ -349,6 +364,12 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
U = C;
}
+ if (const PointerType *Ty = dyn_cast<PointerType>(V->getType()))
+ if (Ty->getAddressSpace() > 255)
+ // Fast instruction selection doesn't support the special
+ // address spaces.
+ return false;
+
switch (Opcode) {
default: break;
case Instruction::BitCast:
@@ -370,8 +391,9 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
case Instruction::Alloca: {
// Do static allocas.
const AllocaInst *A = cast<AllocaInst>(V);
- DenseMap<const AllocaInst*, int>::iterator SI = StaticAllocaMap.find(A);
- if (SI != StaticAllocaMap.end()) {
+ DenseMap<const AllocaInst*, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(A);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
AM.BaseType = X86AddressMode::FrameIndexBase;
AM.Base.FrameIndex = SI->second;
return true;
@@ -411,20 +433,33 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
Disp += SL->getElementOffset(Idx);
} else {
uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
- if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
- // Constant-offset addressing.
- Disp += CI->getSExtValue() * S;
- } else if (IndexReg == 0 &&
- (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
- (S == 1 || S == 2 || S == 4 || S == 8)) {
- // Scaled-index addressing.
- Scale = S;
- IndexReg = getRegForGEPIndex(Op).first;
- if (IndexReg == 0)
- return false;
- } else
- // Unsupported.
- goto unsupported_gep;
+ SmallVector<const Value *, 4> Worklist;
+ Worklist.push_back(Op);
+ do {
+ Op = Worklist.pop_back_val();
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ // Constant-offset addressing.
+ Disp += CI->getSExtValue() * S;
+ } else if (isa<AddOperator>(Op) &&
+ isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
+ // An add with a constant operand. Fold the constant.
+ ConstantInt *CI =
+ cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+ Disp += CI->getSExtValue() * S;
+ // Add the other operand back to the work list.
+ Worklist.push_back(cast<AddOperator>(Op)->getOperand(0));
+ } else if (IndexReg == 0 &&
+ (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
+ (S == 1 || S == 2 || S == 4 || S == 8)) {
+ // Scaled-index addressing.
+ Scale = S;
+ IndexReg = getRegForGEPIndex(Op).first;
+ if (IndexReg == 0)
+ return false;
+ } else
+ // Unsupported.
+ goto unsupported_gep;
+ } while (!Worklist.empty());
}
}
// Check for displacement overflow.
@@ -473,7 +508,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
// If this reference is relative to the pic base, set it now.
if (isGlobalRelativeToPICBase(GVFlags)) {
// FIXME: How do we know Base.Reg is free??
- AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(&MF);
+ AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
}
// Unless the ABI requires an extra load, return a direct reference to
@@ -504,6 +539,9 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
StubAM.GV = GV;
StubAM.GVOpFlags = GVFlags;
+ // Prepare for inserting code in the local-value area.
+ SavePoint SaveInsertPt = enterLocalValueArea();
+
if (TLI.getPointerTy() == MVT::i64) {
Opc = X86::MOV64rm;
RC = X86::GR64RegisterClass;
@@ -516,8 +554,13 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
}
LoadReg = createResultReg(RC);
- addFullAddress(BuildMI(MBB, DL, TII.get(Opc), LoadReg), StubAM);
-
+ MachineInstrBuilder LoadMI =
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
+ addFullAddress(LoadMI, StubAM);
+
+ // Ok, back to normal mode.
+ leaveLocalValueArea(SaveInsertPt);
+
// Prevent loading GV stub multiple times in same MBB.
LocalValueMap[V] = LoadReg;
}
@@ -642,6 +685,93 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
return X86FastEmitStore(VT, I->getOperand(0), AM);
}
+/// X86SelectRet - Select and emit code to implement ret instructions.
+bool X86FastISel::X86SelectRet(const Instruction *I) {
+ const ReturnInst *Ret = cast<ReturnInst>(I);
+ const Function &F = *I->getParent()->getParent();
+
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ CallingConv::ID CC = F.getCallingConv();
+ if (CC != CallingConv::C &&
+ CC != CallingConv::Fast &&
+ CC != CallingConv::X86_FastCall)
+ return false;
+
+ if (Subtarget->isTargetWin64())
+ return false;
+
+ // Don't handle popping bytes on return for now.
+ if (FuncInfo.MF->getInfo<X86MachineFunctionInfo>()
+ ->getBytesToPopOnReturn() != 0)
+ return false;
+
+ // fastcc with -tailcallopt is intended to provide a guaranteed
+ // tail call optimization. Fastisel doesn't know how to do that.
+ if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
+ return false;
+
+ // Let SDISel handle vararg functions.
+ if (F.isVarArg())
+ return false;
+
+ if (Ret->getNumOperands() > 0) {
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
+ Outs, TLI);
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ValLocs;
+ CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
+ CCInfo.AnalyzeReturn(Outs, CCAssignFnForRet(CC));
+
+ const Value *RV = Ret->getOperand(0);
+ unsigned Reg = getRegForValue(RV);
+ if (Reg == 0)
+ return false;
+
+ // Only handle a single return value for now.
+ if (ValLocs.size() != 1)
+ return false;
+
+ CCValAssign &VA = ValLocs[0];
+
+ // Don't bother handling odd stuff for now.
+ if (VA.getLocInfo() != CCValAssign::Full)
+ return false;
+ // Only handle register returns for now.
+ if (!VA.isRegLoc())
+ return false;
+ // TODO: For now, don't try to handle cases where getLocInfo()
+ // says Full but the types don't match.
+ if (VA.getValVT() != TLI.getValueType(RV->getType()))
+ return false;
+
+ // The calling-convention tables for x87 returns don't tell
+ // the whole story.
+ if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1)
+ return false;
+
+ // Make the copy.
+ unsigned SrcReg = Reg + VA.getValNo();
+ unsigned DstReg = VA.getLocReg();
+ const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
+ // Avoid a cross-class copy. This is very unlikely.
+ if (!SrcRC->contains(DstReg))
+ return false;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ DstReg).addReg(SrcReg);
+
+ // Mark the register as live out of the function.
+ MRI.addLiveOut(VA.getLocReg());
+ }
+
+ // Now emit the RET.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
+ return true;
+}
+
/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(const Instruction *I) {
@@ -661,15 +791,15 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) {
return false;
}
-static unsigned X86ChooseCmpOpcode(EVT VT) {
+static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
switch (VT.getSimpleVT().SimpleTy) {
default: return 0;
case MVT::i8: return X86::CMP8rr;
case MVT::i16: return X86::CMP16rr;
case MVT::i32: return X86::CMP32rr;
case MVT::i64: return X86::CMP64rr;
- case MVT::f32: return X86::UCOMISSrr;
- case MVT::f64: return X86::UCOMISDrr;
+ case MVT::f32: return Subtarget->hasSSE1() ? X86::UCOMISSrr : 0;
+ case MVT::f64: return Subtarget->hasSSE2() ? X86::UCOMISDrr : 0;
}
}
@@ -706,18 +836,21 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
// CMPri, otherwise use CMPrr.
if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
- BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg)
- .addImm(Op1C->getSExtValue());
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc))
+ .addReg(Op0Reg)
+ .addImm(Op1C->getSExtValue());
return true;
}
}
- unsigned CompareOpc = X86ChooseCmpOpcode(VT);
+ unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
if (CompareOpc == 0) return false;
unsigned Op1Reg = getRegForValue(Op1);
if (Op1Reg == 0) return false;
- BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
+ .addReg(Op0Reg)
+ .addReg(Op1Reg);
return true;
}
@@ -739,9 +872,10 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
unsigned EReg = createResultReg(&X86::GR8RegClass);
unsigned NPReg = createResultReg(&X86::GR8RegClass);
- BuildMI(MBB, DL, TII.get(X86::SETEr), EReg);
- BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg);
- BuildMI(MBB, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::SETNPr), NPReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
UpdateValueMap(I, ResultReg);
return true;
@@ -752,9 +886,13 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
unsigned NEReg = createResultReg(&X86::GR8RegClass);
unsigned PReg = createResultReg(&X86::GR8RegClass);
- BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg);
- BuildMI(MBB, DL, TII.get(X86::SETPr), PReg);
- BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::SETNEr), NEReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::SETPr), PReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::OR8rr), ResultReg)
+ .addReg(PReg).addReg(NEReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -793,7 +931,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;
- BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -819,8 +957,8 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
// Unconditional branches are selected by tablegen-generated code.
// Handle a conditional branch.
const BranchInst *BI = cast<BranchInst>(I);
- MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
- MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];
+ MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
+ MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
// Fold the common case of a conditional branch with a comparison.
if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
@@ -829,7 +967,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
// Try to take advantage of fallthrough opportunities.
CmpInst::Predicate Predicate = CI->getPredicate();
- if (MBB->isLayoutSuccessor(TrueMBB)) {
+ if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
std::swap(TrueMBB, FalseMBB);
Predicate = CmpInst::getInversePredicate(Predicate);
}
@@ -878,16 +1016,18 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;
- BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
+ .addMBB(TrueMBB);
if (Predicate == CmpInst::FCMP_UNE) {
// X86 requires a second branch to handle UNE (and OEQ,
// which is mapped to UNE above).
- BuildMI(MBB, DL, TII.get(X86::JP_4)).addMBB(TrueMBB);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4))
+ .addMBB(TrueMBB);
}
- FastEmitBranch(FalseMBB);
- MBB->addSuccessor(TrueMBB);
+ FastEmitBranch(FalseMBB, DL);
+ FuncInfo.MBB->addSuccessor(TrueMBB);
return true;
}
} else if (ExtractValueInst *EI =
@@ -910,10 +1050,11 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
if (CI->getIntrinsicID() == Intrinsic::sadd_with_overflow ||
CI->getIntrinsicID() == Intrinsic::uadd_with_overflow) {
const MachineInstr *SetMI = 0;
- unsigned Reg = lookUpRegForValue(EI);
+ unsigned Reg = getRegForValue(EI);
for (MachineBasicBlock::const_reverse_iterator
- RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) {
+ RI = FuncInfo.MBB->rbegin(), RE = FuncInfo.MBB->rend();
+ RI != RE; ++RI) {
const MachineInstr &MI = *RI;
if (MI.definesRegister(Reg)) {
@@ -938,11 +1079,11 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
unsigned OpCode = SetMI->getOpcode();
if (OpCode == X86::SETOr || OpCode == X86::SETBr) {
- BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ?
- X86::JO_4 : X86::JB_4))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(OpCode == X86::SETOr ? X86::JO_4 : X86::JB_4))
.addMBB(TrueMBB);
- FastEmitBranch(FalseMBB);
- MBB->addSuccessor(TrueMBB);
+ FastEmitBranch(FalseMBB, DL);
+ FuncInfo.MBB->addSuccessor(TrueMBB);
return true;
}
}
@@ -954,10 +1095,12 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
unsigned OpReg = getRegForValue(BI->getCondition());
if (OpReg == 0) return false;
- BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
- BuildMI(MBB, DL, TII.get(X86::JNE_4)).addMBB(TrueMBB);
- FastEmitBranch(FalseMBB);
- MBB->addSuccessor(TrueMBB);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
+ .addReg(OpReg).addReg(OpReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4))
+ .addMBB(TrueMBB);
+ FastEmitBranch(FalseMBB, DL);
+ FuncInfo.MBB->addSuccessor(TrueMBB);
return true;
}
@@ -1014,7 +1157,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
// Fold immediate in shl(x,3).
if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
unsigned ResultReg = createResultReg(RC);
- BuildMI(MBB, DL, TII.get(OpImm),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm),
ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
UpdateValueMap(I, ResultReg);
return true;
@@ -1022,17 +1165,19 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
unsigned Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
- TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC, DL);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ CReg).addReg(Op1Reg);
// The shift instruction uses X86::CL. If we defined a super-register
- // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what
- // we're doing here.
+ // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
if (CReg != X86::CL)
- BuildMI(MBB, DL, TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL)
- .addReg(CReg).addImm(X86::sub_8bit);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(TargetOpcode::KILL), X86::CL)
+ .addReg(CReg, RegState::Kill);
unsigned ResultReg = createResultReg(RC);
- BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg)
+ .addReg(Op0Reg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1064,9 +1209,11 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
unsigned Op2Reg = getRegForValue(I->getOperand(2));
if (Op2Reg == 0) return false;
- BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
+ .addReg(Op0Reg).addReg(Op0Reg);
unsigned ResultReg = createResultReg(RC);
- BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ .addReg(Op1Reg).addReg(Op2Reg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1080,7 +1227,9 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
- BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::CVTSS2SDrr), ResultReg)
+ .addReg(OpReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1097,7 +1246,9 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
- BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(X86::CVTSD2SSrr), ResultReg)
+ .addReg(OpReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1128,11 +1279,11 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
return false;
// First issue a copy to GR16_ABCD or GR32_ABCD.
- unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16rr : X86::MOV32rr;
const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
unsigned CopyReg = createResultReg(CopyRC);
- BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ CopyReg).addReg(InputReg);
// Then issue an extract_subreg.
unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
@@ -1153,14 +1304,18 @@ bool X86FastISel::X86SelectExtractValue(const Instruction *I) {
switch (CI->getIntrinsicID()) {
default: break;
case Intrinsic::sadd_with_overflow:
- case Intrinsic::uadd_with_overflow:
+ case Intrinsic::uadd_with_overflow: {
// Cheat a little. We know that the registers for "add" and "seto" are
// allocated sequentially. However, we only keep track of the register
// for "add" in the value map. Use extractvalue's index to get the
// correct register for "seto".
- UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
+ unsigned OpReg = getRegForValue(Agg);
+ if (OpReg == 0)
+ return false;
+ UpdateValueMap(I, OpReg + *EI->idx_begin());
return true;
}
+ }
}
return false;
@@ -1174,8 +1329,8 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
// Emit code inline code to store the stack guard onto the stack.
EVT PtrTy = TLI.getPointerTy();
- const Value *Op1 = I.getOperand(1); // The guard's value.
- const AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
+ const Value *Op1 = I.getArgOperand(0); // The guard's value.
+ const AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
// Grab the frame index.
X86AddressMode AM;
@@ -1186,7 +1341,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
return true;
}
case Intrinsic::objectsize: {
- ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(2));
+ ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
const Type *Ty = I.getCalledFunction()->getReturnType();
assert(CI && "Non-constant type in Intrinsic::objectsize?");
@@ -1204,8 +1359,8 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
return false;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(MBB, DL, TII.get(OpC), ResultReg).
- addImm(CI->getZExtValue() == 0 ? -1ULL : 0);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg).
+ addImm(CI->isZero() ? -1ULL : 0);
UpdateValueMap(&I, ResultReg);
return true;
}
@@ -1218,12 +1373,12 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
// FIXME may need to add RegState::Debug to any registers produced,
// although ESP/EBP should be the only ones at the moment.
- addFullAddress(BuildMI(MBB, DL, II), AM).addImm(0).
- addMetadata(DI->getVariable());
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM).
+ addImm(0).addMetadata(DI->getVariable());
return true;
}
case Intrinsic::trap: {
- BuildMI(MBB, DL, TII.get(X86::TRAP));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TRAP));
return true;
}
case Intrinsic::sadd_with_overflow:
@@ -1241,8 +1396,8 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
if (!isTypeLegal(RetTy, VT))
return false;
- const Value *Op1 = I.getOperand(1);
- const Value *Op2 = I.getOperand(2);
+ const Value *Op1 = I.getArgOperand(0);
+ const Value *Op2 = I.getArgOperand(1);
unsigned Reg1 = getRegForValue(Op1);
unsigned Reg2 = getRegForValue(Op2);
@@ -1259,7 +1414,8 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
return false;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
+ .addReg(Reg1).addReg(Reg2);
unsigned DestReg1 = UpdateValueMap(&I, ResultReg);
// If the add with overflow is an intra-block value then we just want to
@@ -1277,7 +1433,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
unsigned Opc = X86::SETBr;
if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
Opc = X86::SETOr;
- BuildMI(MBB, DL, TII.get(Opc), ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg);
return true;
}
}
@@ -1285,7 +1441,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
bool X86FastISel::X86SelectCall(const Instruction *I) {
const CallInst *CI = cast<CallInst>(I);
- const Value *Callee = I->getOperand(0);
+ const Value *Callee = CI->getCalledValue();
// Can't handle inline asm yet.
if (isa<InlineAsm>(Callee))
@@ -1314,6 +1470,10 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
if (FTy->isVarArg())
return false;
+ // Fast-isel doesn't know about callee-pop yet.
+ if (Subtarget->IsCalleePop(FTy->isVarArg(), CC))
+ return false;
+
// Handle *simple* calls for now.
const Type *RetTy = CS.getType();
EVT RetVT;
@@ -1387,6 +1547,12 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, false, TM, ArgLocs, I->getParent()->getContext());
+
+ // Allocate shadow area for Win64
+ if (Subtarget->isTargetWin64()) {
+ CCInfo.AllocateStack(32, 8);
+ }
+
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CCAssignFnForCall(CC));
// Get a count of how many bytes are to be pushed on the stack.
@@ -1394,7 +1560,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// Issue CALLSEQ_START
unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
- BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown))
+ .addImm(NumBytes);
// Process argument: walk the register/memloc assignments, inserting
// copies / loads.
@@ -1449,11 +1616,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
}
if (VA.isRegLoc()) {
- TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT);
- bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(),
- Arg, RC, RC, DL);
- assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
- Emitted = true;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ VA.getLocReg()).addReg(Arg);
RegArgs.push_back(VA.getLocReg());
} else {
unsigned LocMemOffset = VA.getLocMemOffset();
@@ -1475,12 +1639,9 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// ELF / PIC requires GOT in the EBX register before function calls via PLT
// GOT pointer.
if (Subtarget->isPICStyleGOT()) {
- TargetRegisterClass *RC = X86::GR32RegisterClass;
- unsigned Base = getInstrInfo()->getGlobalBaseReg(&MF);
- bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC,
- DL);
- assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
- Emitted = true;
+ unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ X86::EBX).addReg(Base);
}
// Issue the call.
@@ -1488,7 +1649,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
if (CalleeOp) {
// Register-indirect call.
unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r;
- MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp);
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
+ .addReg(CalleeOp);
} else {
// Direct call.
@@ -1517,7 +1679,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
}
- MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV, 0, OpFlags);
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
+ .addGlobalAddress(GV, 0, OpFlags);
}
// Add an implicit use GOT pointer in EBX.
@@ -1530,9 +1693,11 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// Issue CALLSEQ_END
unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
- BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
+ .addImm(NumBytes).addImm(0);
// Now handle call return value (if any).
+ SmallVector<unsigned, 4> UsedRegs;
if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
@@ -1542,7 +1707,6 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
EVT CopyVT = RVLocs[0].getValVT();
TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
- TargetRegisterClass *SrcRC = DstRC;
// If this is a call to a function that returns an fp value on the x87 fp
// stack, but where we prefer to use the value in xmm registers, copy it
@@ -1551,15 +1715,14 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
RVLocs[0].getLocReg() == X86::ST1) &&
isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
CopyVT = MVT::f80;
- SrcRC = X86::RSTRegisterClass;
DstRC = X86::RFP80RegisterClass;
}
unsigned ResultReg = createResultReg(DstRC);
- bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
- RVLocs[0].getLocReg(), DstRC, SrcRC, DL);
- assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
- Emitted = true;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ ResultReg).addReg(RVLocs[0].getLocReg());
+ UsedRegs.push_back(RVLocs[0].getLocReg());
+
if (CopyVT != RVLocs[0].getValVT()) {
// Round the F80 the right size, which also moves to the appropriate xmm
// register. This is accomplished by storing the F80 value in memory and
@@ -1568,18 +1731,21 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
unsigned MemSize = ResVT.getSizeInBits()/8;
int FI = MFI.CreateStackObject(MemSize, MemSize, false);
- addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc)), FI)
+ .addReg(ResultReg);
DstRC = ResVT == MVT::f32
? X86::FR32RegisterClass : X86::FR64RegisterClass;
Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
ResultReg = createResultReg(DstRC);
- addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg), FI);
}
if (AndToI1) {
// Mask out all but lowest bit for some call which produces an i1.
unsigned AndResult = createResultReg(X86::GR8RegisterClass);
- BuildMI(MBB, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
ResultReg = AndResult;
}
@@ -1587,6 +1753,9 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
UpdateValueMap(I, ResultReg);
}
+ // Set all unused physreg defs as dead.
+ static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
+
return true;
}
@@ -1599,6 +1768,8 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) {
return X86SelectLoad(I);
case Instruction::Store:
return X86SelectStore(I);
+ case Instruction::Ret:
+ return X86SelectRet(I);
case Instruction::ICmp:
case Instruction::FCmp:
return X86SelectCmp(I);
@@ -1699,7 +1870,8 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
else
Opc = X86::LEA64r;
unsigned ResultReg = createResultReg(RC);
- addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg), AM);
return ResultReg;
}
return 0;
@@ -1717,10 +1889,10 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
unsigned char OpFlag = 0;
if (Subtarget->isPICStyleStubPIC()) { // Not dynamic-no-pic
OpFlag = X86II::MO_PIC_BASE_OFFSET;
- PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
+ PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
} else if (Subtarget->isPICStyleGOT()) {
OpFlag = X86II::MO_GOTOFF;
- PICBase = getInstrInfo()->getGlobalBaseReg(&MF);
+ PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
} else if (Subtarget->isPICStyleRIPRel() &&
TM.getCodeModel() == CodeModel::Small) {
PICBase = X86::RIP;
@@ -1729,7 +1901,8 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
// Create the load from the constant pool.
unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
unsigned ResultReg = createResultReg(RC);
- addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg),
+ addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg),
MCPOffset, PICBase, OpFlag);
return ResultReg;
@@ -1743,7 +1916,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
// various places, but TargetMaterializeAlloca also needs a check
// in order to avoid recursion between getRegForValue,
// X86SelectAddress, and TargetMaterializeAlloca.
- if (!StaticAllocaMap.count(C))
+ if (!FuncInfo.StaticAllocaMap.count(C))
return 0;
X86AddressMode AM;
@@ -1752,24 +1925,13 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned ResultReg = createResultReg(RC);
- addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ TII.get(Opc), ResultReg), AM);
return ResultReg;
}
namespace llvm {
- llvm::FastISel *X86::createFastISel(MachineFunction &mf,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
- DenseMap<const AllocaInst *, int> &am,
- std::vector<std::pair<MachineInstr*, unsigned> > &pn
-#ifndef NDEBUG
- , SmallSet<const Instruction *, 8> &cil
-#endif
- ) {
- return new X86FastISel(mf, vm, bm, am, pn
-#ifndef NDEBUG
- , cil
-#endif
- );
+ llvm::FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo) {
+ return new X86FastISel(funcInfo);
}
}
diff --git a/contrib/llvm/lib/Target/X86/X86FixupKinds.h b/contrib/llvm/lib/Target/X86/X86FixupKinds.h
index a8117d4..96e0aae 100644
--- a/contrib/llvm/lib/Target/X86/X86FixupKinds.h
+++ b/contrib/llvm/lib/Target/X86/X86FixupKinds.h
@@ -17,6 +17,7 @@ namespace X86 {
enum Fixups {
reloc_pcrel_4byte = FirstTargetFixupKind, // 32-bit pcrel, e.g. a branch.
reloc_pcrel_1byte, // 8-bit pcrel, e.g. branch_1
+ reloc_pcrel_2byte, // 16-bit pcrel, e.g. callw
reloc_riprel_4byte, // 32-bit rip-relative
reloc_riprel_4byte_movq_load // 32-bit rip-relative in movq
};
diff --git a/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp b/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
index 93460ef..cee4ad7 100644
--- a/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -133,7 +133,7 @@ namespace {
// Emit an fxch to update the runtime processors version of the state.
BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(STReg);
- NumFXCH++;
+ ++NumFXCH;
}
void duplicateToTop(unsigned RegNo, unsigned AsReg, MachineInstr *I) {
@@ -164,6 +164,8 @@ namespace {
void handleCompareFP(MachineBasicBlock::iterator &I);
void handleCondMovFP(MachineBasicBlock::iterator &I);
void handleSpecialFP(MachineBasicBlock::iterator &I);
+
+ bool translateCopy(MachineInstr*);
};
char FPS::ID = 0;
}
@@ -232,12 +234,15 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) {
for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) {
MachineInstr *MI = I;
- unsigned Flags = MI->getDesc().TSFlags;
+ uint64_t Flags = MI->getDesc().TSFlags;
unsigned FPInstClass = Flags & X86II::FPTypeMask;
if (MI->isInlineAsm())
FPInstClass = X86II::SpecialFP;
-
+
+ if (MI->isCopy() && translateCopy(MI))
+ FPInstClass = X86II::SpecialFP;
+
if (FPInstClass == X86II::NotFP)
continue; // Efficiently ignore non-fp insts!
@@ -628,7 +633,7 @@ void FPS::handleZeroArgFP(MachineBasicBlock::iterator &I) {
void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) {
MachineInstr *MI = I;
unsigned NumOps = MI->getDesc().getNumOperands();
- assert((NumOps == X86AddrNumOperands + 1 || NumOps == 1) &&
+ assert((NumOps == X86::AddrNumOperands + 1 || NumOps == 1) &&
"Can only handle fst* & ftst instructions!");
// Is this the last use of the source register?
@@ -1001,15 +1006,17 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
case X86::FpSET_ST0_32:
case X86::FpSET_ST0_64:
case X86::FpSET_ST0_80: {
+ // FpSET_ST0_80 is generated by copyRegToReg for setting up inline asm
+ // arguments that use an st constraint. We expect a sequence of
+ // instructions: Fp_SET_ST0 Fp_SET_ST1? INLINEASM
unsigned Op0 = getFPReg(MI->getOperand(0));
- // FpSET_ST0_80 is generated by copyRegToReg for both function return
- // and inline assembly with the "st" constrain. In the latter case,
- // it is possible for ST(0) to be alive after this instruction.
if (!MI->killsRegister(X86::FP0 + Op0)) {
- // Duplicate Op0
- duplicateToTop(0, 7 /*temp register*/, I);
+ // Duplicate Op0 into a temporary on the stack top.
+ // This actually assumes that FP7 is dead.
+ duplicateToTop(Op0, 7, I);
} else {
+ // Op0 is killed, so just swap it into position.
moveToTop(Op0, I);
}
--StackTop; // "Forget" we have something on the top of stack!
@@ -1017,17 +1024,29 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
}
case X86::FpSET_ST1_32:
case X86::FpSET_ST1_64:
- case X86::FpSET_ST1_80:
- // StackTop can be 1 if a FpSET_ST0_* was before this. Exchange them.
- if (StackTop == 1) {
- BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(X86::ST1);
- NumFXCH++;
- StackTop = 0;
- break;
+ case X86::FpSET_ST1_80: {
+ // Set up st(1) for inline asm. We are assuming that st(0) has already been
+ // set up by FpSET_ST0, and our StackTop is off by one because of it.
+ unsigned Op0 = getFPReg(MI->getOperand(0));
+ // Restore the actual StackTop from before Fp_SET_ST0.
+ // Note we can't handle Fp_SET_ST1 without a preceding Fp_SET_ST0, and we
+ // are not enforcing the constraint.
+ ++StackTop;
+ unsigned RegOnTop = getStackEntry(0); // This reg must remain in st(0).
+ if (!MI->killsRegister(X86::FP0 + Op0)) {
+ // Assume FP6 is not live, use it as a scratch register.
+ duplicateToTop(Op0, 6, I);
+ moveToTop(RegOnTop, I);
+ } else if (getSTReg(Op0) != X86::ST1) {
+ // We have the wrong value at st(1). Shuffle! Untested!
+ moveToTop(getStackEntry(1), I);
+ moveToTop(Op0, I);
+ moveToTop(RegOnTop, I);
}
- assert(StackTop == 2 && "Stack should have two element on it to return!");
- --StackTop; // "Forget" we have something on the top of stack!
+ assert(StackTop >= 2 && "Too few live registers");
+ StackTop -= 2; // "Forget" both st(0) and st(1).
break;
+ }
case X86::MOV_Fp3232:
case X86::MOV_Fp3264:
case X86::MOV_Fp6432:
@@ -1041,32 +1060,6 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
unsigned SrcReg = getFPReg(MO1);
const MachineOperand &MO0 = MI->getOperand(0);
- // These can be created due to inline asm. Two address pass can introduce
- // copies from RFP registers to virtual registers.
- if (MO0.getReg() == X86::ST0 && SrcReg == 0) {
- assert(MO1.isKill());
- // Treat %ST0<def> = MOV_Fp8080 %FP0<kill>
- // like FpSET_ST0_80 %FP0<kill>, %ST0<imp-def>
- assert((StackTop == 1 || StackTop == 2)
- && "Stack should have one or two element on it to return!");
- --StackTop; // "Forget" we have something on the top of stack!
- break;
- } else if (MO0.getReg() == X86::ST1 && SrcReg == 1) {
- assert(MO1.isKill());
- // Treat %ST1<def> = MOV_Fp8080 %FP1<kill>
- // like FpSET_ST1_80 %FP0<kill>, %ST1<imp-def>
- // StackTop can be 1 if a FpSET_ST0_* was before this. Exchange them.
- if (StackTop == 1) {
- BuildMI(*MBB, I, dl, TII->get(X86::XCH_F)).addReg(X86::ST1);
- NumFXCH++;
- StackTop = 0;
- break;
- }
- assert(StackTop == 2 && "Stack should have two element on it to return!");
- --StackTop; // "Forget" we have something on the top of stack!
- break;
- }
-
unsigned DestReg = getFPReg(MO0);
if (MI->killsRegister(X86::FP0+SrcReg)) {
// If the input operand is killed, we can just change the owner of the
@@ -1206,3 +1199,33 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) {
I = MBB->erase(I); // Remove the pseudo instruction
--I;
}
+
+// Translate a COPY instruction to a pseudo-op that handleSpecialFP understands.
+bool FPS::translateCopy(MachineInstr *MI) {
+ unsigned DstReg = MI->getOperand(0).getReg();
+ unsigned SrcReg = MI->getOperand(1).getReg();
+
+ if (DstReg == X86::ST0) {
+ MI->setDesc(TII->get(X86::FpSET_ST0_80));
+ MI->RemoveOperand(0);
+ return true;
+ }
+ if (DstReg == X86::ST1) {
+ MI->setDesc(TII->get(X86::FpSET_ST1_80));
+ MI->RemoveOperand(0);
+ return true;
+ }
+ if (SrcReg == X86::ST0) {
+ MI->setDesc(TII->get(X86::FpGET_ST0_80));
+ return true;
+ }
+ if (SrcReg == X86::ST1) {
+ MI->setDesc(TII->get(X86::FpGET_ST1_80));
+ return true;
+ }
+ if (X86::RFP80RegClass.contains(DstReg, SrcReg)) {
+ MI->setDesc(TII->get(X86::MOV_Fp8080));
+ return true;
+ }
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/X86/X86FloatingPointRegKill.cpp b/contrib/llvm/lib/Target/X86/X86FloatingPointRegKill.cpp
index 747683d..2c98b96 100644
--- a/contrib/llvm/lib/Target/X86/X86FloatingPointRegKill.cpp
+++ b/contrib/llvm/lib/Target/X86/X86FloatingPointRegKill.cpp
@@ -72,18 +72,15 @@ static bool isFPStackVReg(unsigned RegNo, const MachineRegisterInfo &MRI) {
/// stack code, and thus needs an FP_REG_KILL.
static bool ContainsFPStackCode(MachineBasicBlock *MBB,
const MachineRegisterInfo &MRI) {
- // Scan the block, looking for instructions that define fp stack vregs.
+ // Scan the block, looking for instructions that define or use fp stack vregs.
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
I != E; ++I) {
- if (I->getNumOperands() == 0 || !I->getOperand(0).isReg())
- continue;
-
for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
- if (!I->getOperand(op).isReg() || !I->getOperand(op).isDef())
+ if (!I->getOperand(op).isReg())
continue;
-
- if (isFPStackVReg(I->getOperand(op).getReg(), MRI))
- return true;
+ if (unsigned Reg = I->getOperand(op).getReg())
+ if (isFPStackVReg(Reg, MRI))
+ return true;
}
}
@@ -108,8 +105,8 @@ static bool ContainsFPStackCode(MachineBasicBlock *MBB,
bool FPRegKiller::runOnMachineFunction(MachineFunction &MF) {
// If we are emitting FP stack code, scan the basic block to determine if this
- // block defines any FP values. If so, put an FP_REG_KILL instruction before
- // the terminator of the block.
+ // block defines or uses any FP values. If so, put an FP_REG_KILL instruction
+ // before the terminator of the block.
// Note that FP stack instructions are used in all modes for long double,
// so we always need to do this check.
diff --git a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 0f64383..72f2bc1 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -137,21 +137,6 @@ namespace {
}
namespace {
- class X86ISelListener : public SelectionDAG::DAGUpdateListener {
- SmallSet<SDNode*, 4> Deletes;
- public:
- explicit X86ISelListener() {}
- virtual void NodeDeleted(SDNode *N, SDNode *E) {
- Deletes.insert(N);
- }
- virtual void NodeUpdated(SDNode *N) {
- // Ignore updates.
- }
- bool IsDeleted(SDNode *N) {
- return Deletes.count(N);
- }
- };
-
//===--------------------------------------------------------------------===//
/// ISel - X86 specific code to select X86 machine instructions for
/// SelectionDAG operations.
@@ -199,16 +184,17 @@ namespace {
bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
- X86ISelListener &DeadNodes,
unsigned Depth);
bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
bool SelectAddr(SDNode *Op, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index, SDValue &Disp,
SDValue &Segment);
bool SelectLEAAddr(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Scale, SDValue &Index, SDValue &Disp);
+ SDValue &Scale, SDValue &Index, SDValue &Disp,
+ SDValue &Segment);
bool SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
- SDValue &Scale, SDValue &Index, SDValue &Disp);
+ SDValue &Scale, SDValue &Index, SDValue &Disp,
+ SDValue &Segment);
bool SelectScalarSSELoad(SDNode *Root, SDValue N,
SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp,
@@ -239,7 +225,8 @@ namespace {
// These are 32-bit even in 64-bit mode since RIP relative offset
// is 32-bit.
if (AM.GV)
- Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp,
+ Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(),
+ MVT::i32, AM.Disp,
AM.SymbolFlags);
else if (AM.CP)
Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
@@ -386,14 +373,14 @@ static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
}
for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
Ops.push_back(OrigChain.getOperand(i));
- CurDAG->UpdateNodeOperands(OrigChain, &Ops[0], Ops.size());
- CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
+ CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
+ CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
Load.getOperand(1), Load.getOperand(2));
Ops.clear();
Ops.push_back(SDValue(Load.getNode(), 1));
for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
Ops.push_back(Call.getOperand(i));
- CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
+ CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size());
}
/// isCalleeLoad - Return true if call address is a load and it can be
@@ -515,7 +502,7 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
N->getOperand(0),
MemTmp, NULL, 0, MemVT,
false, false, 0);
- SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
+ SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, DstVT, dl, Store, MemTmp,
NULL, 0, MemVT, false, false, 0);
// We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
@@ -664,8 +651,7 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
- X86ISelListener DeadNodes;
- if (MatchAddressRecursively(N, AM, DeadNodes, 0))
+ if (MatchAddressRecursively(N, AM, 0))
return true;
// Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
@@ -713,7 +699,6 @@ static bool isLogicallyAddWithConstant(SDValue V, SelectionDAG *CurDAG) {
}
bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
- X86ISelListener &DeadNodes,
unsigned Depth) {
bool is64Bit = Subtarget->is64Bit();
DebugLoc dl = N.getDebugLoc();
@@ -876,13 +861,13 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
// other uses, since it avoids a two-address sub instruction, however
// it costs an additional mov if the index register has other uses.
+ // Add an artificial use to this node so that we can keep track of
+ // it if it gets CSE'd with a different node.
+ HandleSDNode Handle(N);
+
// Test if the LHS of the sub can be folded.
X86ISelAddressMode Backup = AM;
- if (MatchAddressRecursively(N.getNode()->getOperand(0), AM,
- DeadNodes, Depth+1) ||
- // If it is successful but the recursive update causes N to be deleted,
- // then it's not safe to continue.
- DeadNodes.IsDeleted(N.getNode())) {
+ if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
AM = Backup;
break;
}
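The HandleSDNode trick used above, isolated into a minimal sketch; HandleSDNode and getValue() are the documented SelectionDAG API, the surrounding calls are illustrative:

  // A HandleSDNode is a real use of N, so ReplaceAllUsesWith/CSE during the
  // recursive match updates the handle instead of leaving a dangling SDValue.
  HandleSDNode Handle(N);
  MatchSomethingThatMayCSE(N);          // may delete or replace N
  SDValue Survivor = Handle.getValue(); // always the live replacement of N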
@@ -893,7 +878,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
}
int Cost = 0;
- SDValue RHS = N.getNode()->getOperand(1);
+ SDValue RHS = Handle.getValue().getNode()->getOperand(1);
// If the RHS involves a register with multiple uses, this
// transformation incurs an extra mov, due to the neg instruction
// clobbering its operand.
@@ -944,35 +929,27 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
}
case ISD::ADD: {
+ // Add an artificial use to this node so that we can keep track of
+ // it if it gets CSE'd with a different node.
+ HandleSDNode Handle(N);
+ SDValue LHS = Handle.getValue().getNode()->getOperand(0);
+ SDValue RHS = Handle.getValue().getNode()->getOperand(1);
+
X86ISelAddressMode Backup = AM;
- if (!MatchAddressRecursively(N.getNode()->getOperand(0), AM,
- DeadNodes, Depth+1)) {
- if (DeadNodes.IsDeleted(N.getNode()))
- // If it is successful but the recursive update causes N to be deleted,
- // then it's not safe to continue.
- return true;
- if (!MatchAddressRecursively(N.getNode()->getOperand(1), AM,
- DeadNodes, Depth+1))
- // If it is successful but the recursive update causes N to be deleted,
- // then it's not safe to continue.
- return DeadNodes.IsDeleted(N.getNode());
- }
+ if (!MatchAddressRecursively(LHS, AM, Depth+1) &&
+ !MatchAddressRecursively(RHS, AM, Depth+1))
+ return false;
+ AM = Backup;
+ LHS = Handle.getValue().getNode()->getOperand(0);
+ RHS = Handle.getValue().getNode()->getOperand(1);
// Try again after commuting the operands.
+ if (!MatchAddressRecursively(RHS, AM, Depth+1) &&
+ !MatchAddressRecursively(LHS, AM, Depth+1))
+ return false;
AM = Backup;
- if (!MatchAddressRecursively(N.getNode()->getOperand(1), AM,
- DeadNodes, Depth+1)) {
- if (DeadNodes.IsDeleted(N.getNode()))
- // If it is successful but the recursive update causes N to be deleted,
- // then it's not safe to continue.
- return true;
- if (!MatchAddressRecursively(N.getNode()->getOperand(0), AM,
- DeadNodes, Depth+1))
- // If it is successful but the recursive update causes N to be deleted,
- // then it's not safe to continue.
- return DeadNodes.IsDeleted(N.getNode());
- }
- AM = Backup;
+ LHS = Handle.getValue().getNode()->getOperand(0);
+ RHS = Handle.getValue().getNode()->getOperand(1);
// If we couldn't fold both operands into the address at the same time,
// see if we can just put each operand into a register and fold at least
@@ -980,8 +957,8 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
if (AM.BaseType == X86ISelAddressMode::RegBase &&
!AM.Base_Reg.getNode() &&
!AM.IndexReg.getNode()) {
- AM.Base_Reg = N.getNode()->getOperand(0);
- AM.IndexReg = N.getNode()->getOperand(1);
+ AM.Base_Reg = LHS;
+ AM.IndexReg = RHS;
AM.Scale = 1;
return false;
}
@@ -996,7 +973,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
uint64_t Offset = CN->getSExtValue();
// Start with the LHS as an addr mode.
- if (!MatchAddressRecursively(N.getOperand(0), AM, DeadNodes, Depth+1) &&
+ if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
// Address could not have picked a GV address for the displacement.
AM.GV == NULL &&
// On x86-64, the resultant disp must fit in 32-bits.
@@ -1073,7 +1050,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
CurDAG->RepositionNode(N.getNode(), Shl.getNode());
Shl.getNode()->setNodeId(N.getNode()->getNodeId());
}
- CurDAG->ReplaceAllUsesWith(N, Shl, &DeadNodes);
+ CurDAG->ReplaceAllUsesWith(N, Shl);
AM.IndexReg = And;
AM.Scale = (1 << ScaleLog);
return false;
@@ -1124,7 +1101,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
}
- CurDAG->ReplaceAllUsesWith(N, NewSHIFT, &DeadNodes);
+ CurDAG->ReplaceAllUsesWith(N, NewSHIFT);
AM.Scale = 1 << ShiftCst;
AM.IndexReg = NewAND;
@@ -1230,7 +1207,8 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
SDValue &Base, SDValue &Scale,
- SDValue &Index, SDValue &Disp) {
+ SDValue &Index, SDValue &Disp,
+ SDValue &Segment) {
X86ISelAddressMode AM;
// Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
@@ -1284,7 +1262,6 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
if (Complexity <= 2)
return false;
- SDValue Segment;
getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
return true;
}
@@ -1292,10 +1269,10 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index,
- SDValue &Disp) {
+ SDValue &Disp, SDValue &Segment) {
assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
-
+
X86ISelAddressMode AM;
AM.GV = GA->getGlobal();
AM.Disp += GA->getOffset();
@@ -1309,7 +1286,6 @@ bool X86DAGToDAGISel::SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
}
- SDValue Segment;
getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
return true;
}
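With these signature changes, all three address selectors now produce the same five components. A hedged summary of what each slot holds (the struct itself is illustrative only, not from the source):

  struct X86AddressParts {
    SDValue Base;    // base register or frame index
    SDValue Scale;   // constant 1, 2, 4 or 8
    SDValue Index;   // index register, multiplied by Scale
    SDValue Disp;    // immediate displacement
    SDValue Segment; // segment override, or register 0 when unused
  };
  // Effective address: Segment:[Base + Scale*Index + Disp]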
@@ -1672,6 +1648,26 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
}
+ // Prevent use of AH in a REX instruction by referencing AX instead.
+ if (HiReg == X86::AH && Subtarget->is64Bit() &&
+ !SDValue(Node, 1).use_empty()) {
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ X86::AX, MVT::i16, InFlag);
+ InFlag = Result.getValue(2);
+ // Get the low part if needed. Don't use getCopyFromReg for aliasing
+ // registers.
+ if (!SDValue(Node, 0).use_empty())
+ ReplaceUses(SDValue(Node, 0),
+ CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
+
+ // Shift AX down 8 bits.
+ Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
+ Result,
+ CurDAG->getTargetConstant(8, MVT::i8)), 0);
+ // Then truncate it down to i8.
+ ReplaceUses(SDValue(Node, 1),
+ CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
+ }
// Copy the low half of the result, if it is needed.
if (!SDValue(Node, 0).use_empty()) {
SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
@@ -1682,24 +1678,9 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
// Copy the high half of the result, if it is needed.
if (!SDValue(Node, 1).use_empty()) {
- SDValue Result;
- if (HiReg == X86::AH && Subtarget->is64Bit()) {
- // Prevent use of AH in a REX instruction by referencing AX instead.
- // Shift it down 8 bits.
- Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- X86::AX, MVT::i16, InFlag);
- InFlag = Result.getValue(2);
- Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
- Result,
- CurDAG->getTargetConstant(8, MVT::i8)), 0);
- // Then truncate it down to i8.
- Result = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
- MVT::i8, Result);
- } else {
- Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- HiReg, NVT, InFlag);
- InFlag = Result.getValue(2);
- }
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ HiReg, NVT, InFlag);
+ InFlag = Result.getValue(2);
ReplaceUses(SDValue(Node, 1), Result);
DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
}
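The encoding rule behind both AH hunks, sketched for reference: an instruction carrying a REX prefix (required in 64-bit mode to reach SIL/DIL/BPL/SPL or R8B-R15B) cannot also encode the legacy high-byte registers AH/BH/CH/DH.

  // Not encodable:  movzbl %ah, %r9d   (REX.R is needed for r9d, but a REX
  //                                     prefix re-maps AH's encoding to SPL)
  // Emitted instead, per the code above:
  //   copy AX                 ; CopyFromReg of the full 16-bit register
  //   shrw $8                 ; SHR16ri moves the old AH into the low byte
  //   extract sub_8bit        ; plain sub-register extract, REX-safe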
@@ -1812,6 +1793,29 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
}
+ // Prevent use of AH in a REX instruction by referencing AX instead.
+ // Shift it down 8 bits.
+ if (HiReg == X86::AH && Subtarget->is64Bit() &&
+ !SDValue(Node, 1).use_empty()) {
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ X86::AX, MVT::i16, InFlag);
+ InFlag = Result.getValue(2);
+
+ // If we also need AL (the quotient), get it by extracting a subreg from
+ // Result. The fast register allocator does not like multiple CopyFromReg
+ // nodes using aliasing registers.
+ if (!SDValue(Node, 0).use_empty())
+ ReplaceUses(SDValue(Node, 0),
+ CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
+
+ // Shift AX right by 8 bits instead of using AH.
+ Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
+ Result,
+ CurDAG->getTargetConstant(8, MVT::i8)),
+ 0);
+ ReplaceUses(SDValue(Node, 1),
+ CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
+ }
// Copy the division (low) result, if it is needed.
if (!SDValue(Node, 0).use_empty()) {
SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
@@ -1822,25 +1826,9 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
}
// Copy the remainder (high) result, if it is needed.
if (!SDValue(Node, 1).use_empty()) {
- SDValue Result;
- if (HiReg == X86::AH && Subtarget->is64Bit()) {
- // Prevent use of AH in a REX instruction by referencing AX instead.
- // Shift it down 8 bits.
- Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- X86::AX, MVT::i16, InFlag);
- InFlag = Result.getValue(2);
- Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
- Result,
- CurDAG->getTargetConstant(8, MVT::i8)),
- 0);
- // Then truncate it down to i8.
- Result = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
- MVT::i8, Result);
- } else {
- Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
- HiReg, NVT, InFlag);
- InFlag = Result.getValue(2);
- }
+ SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
+ HiReg, NVT, InFlag);
+ InFlag = Result.getValue(2);
ReplaceUses(SDValue(Node, 1), Result);
DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
}
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
index b02c33d..b3c4886 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -62,21 +62,19 @@ static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
SDValue V2);
static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
- switch (TM.getSubtarget<X86Subtarget>().TargetType) {
- default: llvm_unreachable("unknown subtarget type");
- case X86Subtarget::isDarwin:
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
- return new X8664_MachoTargetObjectFile();
+
+ bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
+
+ if (TM.getSubtarget<X86Subtarget>().isTargetDarwin()) {
+ if (is64Bit) return new X8664_MachoTargetObjectFile();
return new TargetLoweringObjectFileMachO();
- case X86Subtarget::isELF:
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
- return new X8664_ELFTargetObjectFile(TM);
+ } else if (TM.getSubtarget<X86Subtarget>().isTargetELF()) {
+ if (is64Bit) return new X8664_ELFTargetObjectFile(TM);
return new X8632_ELFTargetObjectFile(TM);
- case X86Subtarget::isMingw:
- case X86Subtarget::isCygwin:
- case X86Subtarget::isWindows:
+ } else if (TM.getSubtarget<X86Subtarget>().isTargetCOFF()) {
return new TargetLoweringObjectFileCOFF();
- }
+ }
+ llvm_unreachable("unknown subtarget type");
}
X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
@@ -347,6 +345,12 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
if (!Subtarget->hasSSE2())
setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);
+ // On X86 and X86-64, atomic operations are lowered to locked instructions.
+ // Locked instructions, in turn, have implicit fence semantics (all memory
+ // operations are flushed before issuing the locked instruction, and they
+ // are not buffered), so we can fold away the common pattern of
+ // fence-atomic-fence.
+ setShouldFoldAtomicFences(true);
// Expand certain atomics
setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Custom);
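A hedged C++11-style sketch of the fence-atomic-fence shape this flag lets the backend fold; the commit predates in-tree C++11 atomics, so this only illustrates the pattern:

  #include <atomic>

  std::atomic<int> Counter;

  void Bump() {
    std::atomic_thread_fence(std::memory_order_seq_cst);
    Counter.fetch_add(1, std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_seq_cst);
    // On x86 the locked add is already a full barrier, so both explicit
    // fences fold away, leaving a single "lock addl $1, Counter(%rip)".
  }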
@@ -611,7 +615,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
addRegisterClass(MVT::v8i8, X86::VR64RegisterClass, false);
addRegisterClass(MVT::v4i16, X86::VR64RegisterClass, false);
addRegisterClass(MVT::v2i32, X86::VR64RegisterClass, false);
- addRegisterClass(MVT::v2f32, X86::VR64RegisterClass, false);
+
addRegisterClass(MVT::v1i64, X86::VR64RegisterClass, false);
setOperationAction(ISD::ADD, MVT::v8i8, Legal);
@@ -657,14 +661,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
- setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
- AddPromotedToType (ISD::LOAD, MVT::v2f32, MVT::v1i64);
setOperationAction(ISD::LOAD, MVT::v1i64, Legal);
setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
@@ -672,7 +673,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
- setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f32, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);
@@ -691,7 +691,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::BIT_CONVERT, MVT::v8i8, Custom);
setOperationAction(ISD::BIT_CONVERT, MVT::v4i16, Custom);
setOperationAction(ISD::BIT_CONVERT, MVT::v2i32, Custom);
- setOperationAction(ISD::BIT_CONVERT, MVT::v2f32, Custom);
setOperationAction(ISD::BIT_CONVERT, MVT::v1i64, Custom);
}
}
@@ -792,9 +791,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
EVT VT = SVT;
// Do not attempt to promote non-128-bit vectors
- if (!VT.is128BitVector()) {
+ if (!VT.is128BitVector())
continue;
- }
setOperationAction(ISD::AND, SVT, Promote);
AddPromotedToType (ISD::AND, SVT, MVT::v2i64);
@@ -825,6 +823,17 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
}
if (Subtarget->hasSSE41()) {
+ setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
+ setOperationAction(ISD::FCEIL, MVT::f32, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
+ setOperationAction(ISD::FRINT, MVT::f32, Legal);
+ setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
+ setOperationAction(ISD::FCEIL, MVT::f64, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
+ setOperationAction(ISD::FRINT, MVT::f64, Legal);
+ setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
+
// FIXME: Do we need to handle scalar-to-vector here?
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
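These all map onto the SSE4.1 ROUNDSS/ROUNDSD family; a hedged example of the effect on ordinary code:

  #include <cmath>

  // With SSE4.1 available, each of these becomes a single roundss/roundsd
  // with the matching rounding-mode immediate instead of a libm call
  // (sketch; the exact immediates are an encoding detail).
  float  FloorF(float X)  { return std::floor(X); }
  double TruncD(double X) { return std::trunc(X); }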
@@ -965,15 +974,24 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// Add/Sub/Mul with overflow operations are custom lowered.
setOperationAction(ISD::SADDO, MVT::i32, Custom);
- setOperationAction(ISD::SADDO, MVT::i64, Custom);
setOperationAction(ISD::UADDO, MVT::i32, Custom);
- setOperationAction(ISD::UADDO, MVT::i64, Custom);
setOperationAction(ISD::SSUBO, MVT::i32, Custom);
- setOperationAction(ISD::SSUBO, MVT::i64, Custom);
setOperationAction(ISD::USUBO, MVT::i32, Custom);
- setOperationAction(ISD::USUBO, MVT::i64, Custom);
setOperationAction(ISD::SMULO, MVT::i32, Custom);
- setOperationAction(ISD::SMULO, MVT::i64, Custom);
+
+ // Only custom-lower the 64-bit SADDO and friends on 64-bit targets,
+ // because we don't handle type legalization for these operations here.
+ //
+ // FIXME: We really should do custom legalization for addition and
+ // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
+ // than generic legalization for 64-bit multiplication-with-overflow, though.
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::SADDO, MVT::i64, Custom);
+ setOperationAction(ISD::UADDO, MVT::i64, Custom);
+ setOperationAction(ISD::SSUBO, MVT::i64, Custom);
+ setOperationAction(ISD::USUBO, MVT::i64, Custom);
+ setOperationAction(ISD::SMULO, MVT::i64, Custom);
+ }
if (!Subtarget->is64Bit()) {
// These libcalls are not available in 32-bit.
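A hedged illustration of what the custom overflow lowering produces on x86-64; __builtin_saddl_overflow is a later GCC/Clang extension, used here only to surface the SADDO node:

  // The add instruction itself sets OF, so the check is one flag read:
  //   addq %rsi, %rdi
  //   seto %al
  bool AddOverflows(long A, long B, long *Out) {
    return __builtin_saddl_overflow(A, B, Out);
  }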
@@ -992,7 +1010,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setTargetDAGCombine(ISD::SRL);
setTargetDAGCombine(ISD::OR);
setTargetDAGCombine(ISD::STORE);
- setTargetDAGCombine(ISD::MEMBARRIER);
setTargetDAGCombine(ISD::ZERO_EXTEND);
if (Subtarget->is64Bit())
setTargetDAGCombine(ISD::MUL);
@@ -1172,6 +1189,27 @@ unsigned X86TargetLowering::getFunctionAlignment(const Function *F) const {
return F->hasFnAttr(Attribute::OptimizeForSize) ? 0 : 4;
}
+bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
+ unsigned &Offset) const {
+ if (!Subtarget->isTargetLinux())
+ return false;
+
+ if (Subtarget->is64Bit()) {
+ // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
+ Offset = 0x28;
+ if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
+ AddressSpace = 256;
+ else
+ AddressSpace = 257;
+ } else {
+ // %gs:0x14 on i386
+ Offset = 0x14;
+ AddressSpace = 256;
+ }
+ return true;
+}
+
+
//===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
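A hedged sketch of the locations getStackCookieLocation reports, written as the load a compiled prologue performs (GNU inline asm, illustration only):

  static unsigned long ReadStackGuard() {
    unsigned long Val;
  #if defined(__x86_64__)
    __asm__("movq %%fs:0x28, %0" : "=r"(Val)); // address space 257 == %fs
  #else
    __asm__("movl %%gs:0x14, %0" : "=r"(Val)); // address space 256 == %gs
  #endif
    return Val;
  }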
@@ -1180,19 +1218,19 @@ unsigned X86TargetLowering::getFunctionAlignment(const Function *F) const {
bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
- SelectionDAG &DAG) const {
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
- RVLocs, *DAG.getContext());
- return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_X86);
+ RVLocs, Context);
+ return CCInfo.CheckReturn(Outs, RetCC_X86);
}
SDValue
X86TargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
@@ -1220,7 +1258,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- SDValue ValToCopy = Outs[i].Val;
+ SDValue ValToCopy = OutVals[i];
// Returns in ST0/ST1 are handled specially: these are pushed as operands to
// the RET instruction and handled by the FP Stackifier.
@@ -1308,17 +1346,34 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
report_fatal_error("SSE register return with SSE disabled");
}
+ SDValue Val;
+
// If this is a call to a function that returns an fp value on the floating
- // point stack, but where we prefer to use the value in xmm registers, copy
- // it out as F80 and use a truncate to move it from fp stack reg to xmm reg.
- if ((VA.getLocReg() == X86::ST0 ||
- VA.getLocReg() == X86::ST1) &&
- isScalarFPTypeInSSEReg(VA.getValVT())) {
- CopyVT = MVT::f80;
- }
+ // point stack, we must guarantee that the value is popped from the stack, so
+ // a CopyFromReg is not good enough - the copy instruction may be eliminated
+ // if the return value is not used. We use the FpGET_ST0 instructions
+ // instead.
+ if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) {
+ // If we prefer to use the value in xmm registers, copy it out as f80 and
+ // use a truncate to move it from fp stack reg to xmm reg.
+ if (isScalarFPTypeInSSEReg(VA.getValVT())) CopyVT = MVT::f80;
+ bool isST0 = VA.getLocReg() == X86::ST0;
+ unsigned Opc = 0;
+ if (CopyVT == MVT::f32) Opc = isST0 ? X86::FpGET_ST0_32:X86::FpGET_ST1_32;
+ if (CopyVT == MVT::f64) Opc = isST0 ? X86::FpGET_ST0_64:X86::FpGET_ST1_64;
+ if (CopyVT == MVT::f80) Opc = isST0 ? X86::FpGET_ST0_80:X86::FpGET_ST1_80;
+ SDValue Ops[] = { Chain, InFlag };
+ Chain = SDValue(DAG.getMachineNode(Opc, dl, CopyVT, MVT::Other, MVT::Flag,
+ Ops, 2), 1);
+ Val = Chain.getValue(0);
- SDValue Val;
- if (Is64Bit && CopyVT.isVector() && CopyVT.getSizeInBits() == 64) {
+ // Round the f80 to the right size, which also moves it to the appropriate
+ // xmm register.
+ if (CopyVT != VA.getValVT())
+ Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
+ // This truncation won't change the value.
+ DAG.getIntPtrConstant(1));
+ } else if (Is64Bit && CopyVT.isVector() && CopyVT.getSizeInBits() == 64) {
// For x86-64, MMX values are returned in XMM0 / XMM1 except for v1i64.
if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
@@ -1338,15 +1393,6 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
Val = Chain.getValue(0);
}
InFlag = Chain.getValue(2);
-
- if (CopyVT != VA.getValVT()) {
- // Round the F80 the right size, which also moves to the appropriate xmm
- // register.
- Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
- // This truncation won't change the value.
- DAG.getIntPtrConstant(1));
- }
-
InVals.push_back(Val);
}
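A hedged example of the failure mode the FpGET pseudo-ops prevent:

  // A CopyFromReg of ST0 can be deleted when the result is unused, but the
  // x87 stack still holds the pushed value; after eight unpopped returns
  // the register stack overflows. FpGET_ST0/FpGET_ST1 keep the pop.
  long double Produce(void);
  void Caller(void) {
    Produce(); // result ignored, yet an fstp must still be emitted
  }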
@@ -1383,29 +1429,6 @@ ArgsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
return Ins[0].Flags.isSRet();
}
-/// IsCalleePop - Determines whether the callee is required to pop its
-/// own arguments. Callee pop is necessary to support tail calls.
-bool X86TargetLowering::IsCalleePop(bool IsVarArg,
- CallingConv::ID CallingConv) const {
- if (IsVarArg)
- return false;
-
- switch (CallingConv) {
- default:
- return false;
- case CallingConv::X86_StdCall:
- return !Subtarget->is64Bit();
- case CallingConv::X86_FastCall:
- return !Subtarget->is64Bit();
- case CallingConv::X86_ThisCall:
- return !Subtarget->is64Bit();
- case CallingConv::Fast:
- return GuaranteedTailCallOpt;
- case CallingConv::GHC:
- return GuaranteedTailCallOpt;
- }
-}
-
/// CCAssignFnForNode - Selects the correct CCAssignFn for a the
/// given CallingConvention value.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
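The callee-pop predicate now lives on X86Subtarget; for reference, a hedged example of a convention it must report true for on 32-bit targets:

  // stdcall: the callee removes its own arguments, returning with "ret $8"
  // here, so the caller performs no stack adjustment after the call.
  __attribute__((stdcall)) int Sum2(int A, int B);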
@@ -1483,11 +1506,11 @@ X86TargetLowering::LowerMemArgument(SDValue Chain,
// could be overwritten by lowering of arguments in case of a tail call.
if (Flags.isByVal()) {
int FI = MFI->CreateFixedObject(Flags.getByValSize(),
- VA.getLocMemOffset(), isImmutable, false);
+ VA.getLocMemOffset(), isImmutable);
return DAG.getFrameIndex(FI, getPointerTy());
} else {
int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
- VA.getLocMemOffset(), isImmutable, false);
+ VA.getLocMemOffset(), isImmutable);
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
return DAG.getLoad(ValVT, dl, Chain, FIN,
PseudoSourceValue::getFixedStack(FI), 0,
@@ -1615,8 +1638,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
if (isVarArg) {
if (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
CallConv != CallingConv::X86_ThisCall)) {
- FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize,
- true, false));
+ FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));
}
if (Is64Bit) {
unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0;
@@ -1722,7 +1744,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
}
// Some CCs need callee pop.
- if (IsCalleePop(isVarArg, CallConv)) {
+ if (Subtarget->IsCalleePop(isVarArg, CallConv)) {
FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
} else {
FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
@@ -1788,7 +1810,7 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
// Calculate the new stack slot for the return address.
int SlotSize = Is64Bit ? 8 : 4;
int NewReturnAddrFI =
- MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false, false);
+ MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false);
EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
@@ -1802,6 +1824,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -1814,7 +1837,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
- Outs, Ins, DAG);
+ Outs, OutVals, Ins, DAG);
// Sibcalls are automatically detected tailcalls which do not require
// ABI changes.
@@ -1874,7 +1897,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
EVT RegVT = VA.getLocVT();
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
bool isByVal = Flags.isByVal();
@@ -2013,12 +2036,12 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (VA.isRegLoc())
continue;
assert(VA.isMemLoc());
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
// Create frame index.
int32_t Offset = VA.getLocMemOffset()+FPDiff;
uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
- FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true, false);
+ FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
FIN = DAG.getFrameIndex(FI, getPointerTy());
if (Flags.isByVal()) {
@@ -2059,7 +2082,6 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
FPDiff, dl);
}
- bool WasGlobalOrExternal = false;
if (getTargetMachine().getCodeModel() == CodeModel::Large) {
assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
// In the 64-bit large code model, we have to make all calls
@@ -2067,7 +2089,6 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// pc-relative offset may not be large enough to hold the whole
// address.
} else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- WasGlobalOrExternal = true;
// If the callee is a GlobalAddress node (quite common, every direct call
// is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
// it.
@@ -2095,11 +2116,10 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
OpFlags = X86II::MO_DARWIN_STUB;
}
- Callee = DAG.getTargetGlobalAddress(GV, getPointerTy(),
+ Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
G->getOffset(), OpFlags);
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- WasGlobalOrExternal = true;
unsigned char OpFlags = 0;
// On ELF targets, in either X86-64 or X86-32 mode, direct calls to external
@@ -2153,17 +2173,12 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Ops.push_back(InFlag);
if (isTailCall) {
- // If this is the first return lowered for this function, add the regs
- // to the liveout set for the function.
- if (MF.getRegInfo().liveout_empty()) {
- SmallVector<CCValAssign, 16> RVLocs;
- CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs,
- *DAG.getContext());
- CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
- for (unsigned i = 0; i != RVLocs.size(); ++i)
- if (RVLocs[i].isRegLoc())
- MF.getRegInfo().addLiveOut(RVLocs[i].getLocReg());
- }
+ // We used to do:
+ //// If this is the first return lowered for this function, add the regs
+ //// to the liveout set for the function.
+ // This isn't right, although it's probably harmless on x86; liveouts
+ // should be computed from returns, not tail calls. Consider a void
+ // function making a tail call to a function returning int.
return DAG.getNode(X86ISD::TC_RETURN, dl,
NodeTys, &Ops[0], Ops.size());
}
@@ -2173,7 +2188,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Create the CALLSEQ_END node.
unsigned NumBytesForCalleeToPush;
- if (IsCalleePop(isVarArg, CallConv))
+ if (Subtarget->IsCalleePop(isVarArg, CallConv))
NumBytesForCalleeToPush = NumBytes; // Callee pops everything
else if (!Is64Bit && !IsTailCallConvention(CallConv) && IsStructRet)
// If this is a call to a struct-return function, the callee
@@ -2314,6 +2329,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
bool isCalleeStructRet,
bool isCallerStructRet,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const {
if (!IsTailCallConvention(CalleeCC) &&
@@ -2332,8 +2348,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
return false;
}
- // Look for obvious safe cases to perform tail call optimization that does not
- // requite ABI changes. This is what gcc calls sibcall.
+ // Look for obvious safe cases to perform tail call optimization that do not
+ // require ABI changes. This is what gcc calls sibcall.
// Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
// emit a special epilogue.
@@ -2427,8 +2443,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
((X86TargetMachine&)getTargetMachine()).getInstrInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- EVT RegVT = VA.getLocVT();
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
if (VA.getLocInfo() == CCValAssign::Indirect)
return false;
@@ -2439,26 +2454,38 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
}
}
}
+
+ // If the tailcall address may be in a register, then make sure it's
+ // possible to register allocate for it. In 32-bit, the call address can
+ // only target EAX, EDX, or ECX since the tail call must be scheduled after
+ // callee-saved registers are restored. These happen to be the same
+ // registers used to pass 'inreg' arguments, so watch out for those.
+ if (!Subtarget->is64Bit() &&
+ !isa<GlobalAddressSDNode>(Callee) &&
+ !isa<ExternalSymbolSDNode>(Callee)) {
+ unsigned NumInRegs = 0;
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ if (!VA.isRegLoc())
+ continue;
+ unsigned Reg = VA.getLocReg();
+ switch (Reg) {
+ default: break;
+ case X86::EAX: case X86::EDX: case X86::ECX:
+ if (++NumInRegs == 3)
+ return false;
+ break;
+ }
+ }
+ }
}
return true;
}
FastISel *
-X86TargetLowering::createFastISel(MachineFunction &mf,
- DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock*, MachineBasicBlock*> &bm,
- DenseMap<const AllocaInst *, int> &am,
- std::vector<std::pair<MachineInstr*, unsigned> > &pn
-#ifndef NDEBUG
- , SmallSet<const Instruction *, 8> &cil
-#endif
- ) const {
- return X86::createFastISel(mf, vm, bm, am, pn
-#ifndef NDEBUG
- , cil
-#endif
- );
+X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const {
+ return X86::createFastISel(funcInfo);
}
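A hedged illustration of the new 32-bit check; regparm(3) is a GNU extension that passes the first three integer arguments in EAX, EDX and ECX, exactly the registers available to hold an indirect sibcall target:

  typedef int (*Fn)(int, int, int) __attribute__((regparm(3)));

  int Caller(Fn F, int A, int B, int C) {
    // The outgoing arguments claim EAX, EDX and ECX, leaving no register
    // for F once callee-saved registers are restored, so the check above
    // rejects the sibcall and a normal call is emitted instead.
    return F(A, B, C);
  }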
@@ -2476,7 +2503,7 @@ SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
// Set up a frame object for the return address.
uint64_t SlotSize = TD->getPointerSize();
ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
- false, false);
+ false);
FuncInfo->setRAIndex(ReturnAddrIndex);
}
@@ -3175,7 +3202,7 @@ unsigned X86::getShufflePALIGNRImmediate(SDNode *N) {
/// constant +0.0.
bool X86::isZeroNode(SDValue Elt) {
return ((isa<ConstantSDNode>(Elt) &&
- cast<ConstantSDNode>(Elt)->getZExtValue() == 0) ||
+ cast<ConstantSDNode>(Elt)->isNullValue()) ||
(isa<ConstantFPSDNode>(Elt) &&
cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}
@@ -4433,7 +4460,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
}
/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
-/// ones, or rewriting v4i32 / v2f32 as 2 wide ones if possible. This can be
+/// ones, or rewriting v4i32 / v2i32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements point to elements in
/// the right sequence. e.g.
/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
@@ -4447,7 +4474,6 @@ SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
unsigned NumElems = VT.getVectorNumElements();
unsigned NewWidth = (NumElems == 4) ? 2 : 4;
EVT MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
- EVT MaskEltVT = MaskVT.getVectorElementType();
EVT NewVT = MaskVT;
switch (VT.getSimpleVT().SimpleTy) {
default: assert(false && "Unexpected!");
@@ -5059,13 +5085,9 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
SDValue
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
- if (Op.getValueType() == MVT::v2f32)
- return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f32,
- DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i32,
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32,
- Op.getOperand(0))));
-
- if (Op.getValueType() == MVT::v1i64 && Op.getOperand(0).getValueType() == MVT::i64)
+
+ if (Op.getValueType() == MVT::v1i64 &&
+ Op.getOperand(0).getValueType() == MVT::i64)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
@@ -5230,10 +5252,10 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
if (OpFlags == X86II::MO_NO_FLAG &&
X86::isOffsetSuitableForCodeModel(Offset, M)) {
// A direct static reference to a global.
- Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
+ Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
Offset = 0;
} else {
- Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), 0, OpFlags);
+ Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
}
if (Subtarget->isPICStyleRIPRel() &&
@@ -5278,7 +5300,7 @@ GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
DebugLoc dl = GA->getDebugLoc();
- SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
+ SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
GA->getValueType(0),
GA->getOffset(),
OperandFlags);
@@ -5351,7 +5373,8 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
// emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
// exec)
- SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
+ SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
+ GA->getValueType(0),
GA->getOffset(), OperandFlags);
SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
@@ -5366,33 +5389,78 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
- // TODO: implement the "local dynamic" model
- // TODO: implement the "initial exec"model for pic executables
- assert(Subtarget->isTargetELF() &&
- "TLS not implemented for non-ELF targets");
+
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GA->getGlobal();
- // If GV is an alias then use the aliasee for determining
- // thread-localness.
- if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
- GV = GA->resolveAliasedGlobal(false);
-
- TLSModel::Model model = getTLSModel(GV,
- getTargetMachine().getRelocationModel());
-
- switch (model) {
- case TLSModel::GeneralDynamic:
- case TLSModel::LocalDynamic: // not implemented
- if (Subtarget->is64Bit())
- return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
- return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
+ if (Subtarget->isTargetELF()) {
+ // TODO: implement the "local dynamic" model
+ // TODO: implement the "initial exec" model for PIC executables
+
+ // If GV is an alias then use the aliasee for determining
+ // thread-localness.
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
+ GV = GA->resolveAliasedGlobal(false);
+
+ TLSModel::Model model
+ = getTLSModel(GV, getTargetMachine().getRelocationModel());
+
+ switch (model) {
+ case TLSModel::GeneralDynamic:
+ case TLSModel::LocalDynamic: // not implemented
+ if (Subtarget->is64Bit())
+ return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
+ return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
+
+ case TLSModel::InitialExec:
+ case TLSModel::LocalExec:
+ return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
+ Subtarget->is64Bit());
+ }
+ } else if (Subtarget->isTargetDarwin()) {
+ // Darwin only has one model of TLS. Lower to that.
+ unsigned char OpFlag = 0;
+ unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
+ X86ISD::WrapperRIP : X86ISD::Wrapper;
+
+ // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
+ // global base reg.
+ bool PIC32 = (getTargetMachine().getRelocationModel() == Reloc::PIC_) &&
+ !Subtarget->is64Bit();
+ if (PIC32)
+ OpFlag = X86II::MO_TLVP_PIC_BASE;
+ else
+ OpFlag = X86II::MO_TLVP;
+ DebugLoc DL = Op.getDebugLoc();
+ SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
+ getPointerTy(),
+ GA->getOffset(), OpFlag);
+ SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
+
+ // With PIC32, the address is actually $g + Offset.
+ if (PIC32)
+ Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
+ DAG.getNode(X86ISD::GlobalBaseReg,
+ DebugLoc(), getPointerTy()),
+ Offset);
+
+ // Lowering the machine ISD node will make sure everything ends up in the
+ // right location.
+ SDValue Args[] = { Offset };
+ SDValue Chain = DAG.getNode(X86ISD::TLSCALL, DL, MVT::Other, Args, 1);
+
+ // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
+ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI->setAdjustsStack(true);
- case TLSModel::InitialExec:
- case TLSModel::LocalExec:
- return LowerToTLSExecModel(GA, DAG, getPointerTy(), model,
- Subtarget->is64Bit());
+ // And our return value (tls address) is in the standard call return value
+ // location.
+ unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
+ return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
}
+
+ assert(false &&
+ "TLS not implemented for this target.");
llvm_unreachable("Unreachable");
return SDValue();
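A hedged end-to-end example of what the new Darwin branch lowers:

  // On Darwin, each __thread access is an indirect call through the
  // variable's thread-local-variable descriptor; the address comes back in
  // EAX/RAX, which is why the lowering emits X86ISD::TLSCALL and marks the
  // frame with setAdjustsStack(true).
  __thread int Counter;
  int Next(void) { return ++Counter; }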
@@ -5715,7 +5783,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
// Load the value out, extending it from f32 to f80.
// FIXME: Avoid the extend by constructing the right constant pool?
- SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
+ SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, MVT::f80, dl, DAG.getEntryNode(),
FudgePtr, PseudoSourceValue::getConstantPool(),
0, MVT::f32, false, false, 4);
// Extend everything to 80 bits to force it to be done on x87.
@@ -5964,6 +6032,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
bool NeedCF = false;
bool NeedOF = false;
switch (X86CC) {
+ default: break;
case X86::COND_A: case X86::COND_AE:
case X86::COND_B: case X86::COND_BE:
NeedCF = true;
@@ -5973,120 +6042,129 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC,
case X86::COND_O: case X86::COND_NO:
NeedOF = true;
break;
- default: break;
}
// See if we can use the EFLAGS value from the operand instead of
// doing a separate TEST. TEST always sets OF and CF to 0, so unless
// we prove that the arithmetic won't overflow, we can't use OF or CF.
- if (Op.getResNo() == 0 && !NeedOF && !NeedCF) {
- unsigned Opcode = 0;
- unsigned NumOperands = 0;
- switch (Op.getNode()->getOpcode()) {
- case ISD::ADD:
- // Due to an isel shortcoming, be conservative if this add is
- // likely to be selected as part of a load-modify-store
- // instruction. When the root node in a match is a store, isel
- // doesn't know how to remap non-chain non-flag uses of other
- // nodes in the match, such as the ADD in this case. This leads
- // to the ADD being left around and reselected, with the result
- // being two adds in the output. Alas, even if none our users
- // are stores, that doesn't prove we're O.K. Ergo, if we have
- // any parents that aren't CopyToReg or SETCC, eschew INC/DEC.
- // A better fix seems to require climbing the DAG back to the
- // root, and it doesn't seem to be worth the effort.
- for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
- UE = Op.getNode()->use_end(); UI != UE; ++UI)
- if (UI->getOpcode() != ISD::CopyToReg && UI->getOpcode() != ISD::SETCC)
- goto default_case;
- if (ConstantSDNode *C =
- dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) {
- // An add of one will be selected as an INC.
- if (C->getAPIntValue() == 1) {
- Opcode = X86ISD::INC;
- NumOperands = 1;
- break;
- }
- // An add of negative one (subtract of one) will be selected as a DEC.
- if (C->getAPIntValue().isAllOnesValue()) {
- Opcode = X86ISD::DEC;
- NumOperands = 1;
- break;
- }
+ if (Op.getResNo() != 0 || NeedOF || NeedCF)
+ // Emit a CMP with 0, which is the TEST pattern.
+ return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
+ DAG.getConstant(0, Op.getValueType()));
+
+ unsigned Opcode = 0;
+ unsigned NumOperands = 0;
+ switch (Op.getNode()->getOpcode()) {
+ case ISD::ADD:
+ // Due to an isel shortcoming, be conservative if this add is likely to be
+ // selected as part of a load-modify-store instruction. When the root node
+ // in a match is a store, isel doesn't know how to remap non-chain non-flag
+ // uses of other nodes in the match, such as the ADD in this case. This
+ // leads to the ADD being left around and reselected, with the result being
+ // two adds in the output. Alas, even if none of our users are stores, that
+ // doesn't prove we're O.K. Ergo, if we have any parents that aren't
+ // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
+ // climbing the DAG back to the root, and it doesn't seem to be worth the
+ // effort.
+ for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
+ UE = Op.getNode()->use_end(); UI != UE; ++UI)
+ if (UI->getOpcode() != ISD::CopyToReg && UI->getOpcode() != ISD::SETCC)
+ goto default_case;
+
+ if (ConstantSDNode *C =
+ dyn_cast<ConstantSDNode>(Op.getNode()->getOperand(1))) {
+ // An add of one will be selected as an INC.
+ if (C->getAPIntValue() == 1) {
+ Opcode = X86ISD::INC;
+ NumOperands = 1;
+ break;
}
- // Otherwise use a regular EFLAGS-setting add.
- Opcode = X86ISD::ADD;
- NumOperands = 2;
- break;
- case ISD::AND: {
- // If the primary and result isn't used, don't bother using X86ISD::AND,
- // because a TEST instruction will be better.
- bool NonFlagUse = false;
- for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
- UE = Op.getNode()->use_end(); UI != UE; ++UI) {
- SDNode *User = *UI;
- unsigned UOpNo = UI.getOperandNo();
- if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
- // Look pass truncate.
- UOpNo = User->use_begin().getOperandNo();
- User = *User->use_begin();
- }
- if (User->getOpcode() != ISD::BRCOND &&
- User->getOpcode() != ISD::SETCC &&
- (User->getOpcode() != ISD::SELECT || UOpNo != 0)) {
- NonFlagUse = true;
- break;
- }
+
+ // An add of negative one (subtract of one) will be selected as a DEC.
+ if (C->getAPIntValue().isAllOnesValue()) {
+ Opcode = X86ISD::DEC;
+ NumOperands = 1;
+ break;
}
- if (!NonFlagUse)
+ }
+
+ // Otherwise use a regular EFLAGS-setting add.
+ Opcode = X86ISD::ADD;
+ NumOperands = 2;
+ break;
+ case ISD::AND: {
+ // If the primary result of the AND isn't used, don't bother using X86ISD::AND,
+ // because a TEST instruction will be better.
+ bool NonFlagUse = false;
+ for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
+ UE = Op.getNode()->use_end(); UI != UE; ++UI) {
+ SDNode *User = *UI;
+ unsigned UOpNo = UI.getOperandNo();
+ if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
+ // Look past the truncate.
+ UOpNo = User->use_begin().getOperandNo();
+ User = *User->use_begin();
+ }
+
+ if (User->getOpcode() != ISD::BRCOND &&
+ User->getOpcode() != ISD::SETCC &&
+ (User->getOpcode() != ISD::SELECT || UOpNo != 0)) {
+ NonFlagUse = true;
break;
+ }
}
+
+ if (!NonFlagUse)
+ break;
+ }
// FALL THROUGH
- case ISD::SUB:
- case ISD::OR:
- case ISD::XOR:
- // Due to the ISEL shortcoming noted above, be conservative if this op is
- // likely to be selected as part of a load-modify-store instruction.
- for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
+ case ISD::SUB:
+ case ISD::OR:
+ case ISD::XOR:
+ // Due to the ISEL shortcoming noted above, be conservative if this op is
+ // likely to be selected as part of a load-modify-store instruction.
+ for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
UE = Op.getNode()->use_end(); UI != UE; ++UI)
- if (UI->getOpcode() == ISD::STORE)
- goto default_case;
- // Otherwise use a regular EFLAGS-setting instruction.
- switch (Op.getNode()->getOpcode()) {
- case ISD::SUB: Opcode = X86ISD::SUB; break;
- case ISD::OR: Opcode = X86ISD::OR; break;
- case ISD::XOR: Opcode = X86ISD::XOR; break;
- case ISD::AND: Opcode = X86ISD::AND; break;
- default: llvm_unreachable("unexpected operator!");
- }
- NumOperands = 2;
- break;
- case X86ISD::ADD:
- case X86ISD::SUB:
- case X86ISD::INC:
- case X86ISD::DEC:
- case X86ISD::OR:
- case X86ISD::XOR:
- case X86ISD::AND:
- return SDValue(Op.getNode(), 1);
- default:
- default_case:
- break;
- }
- if (Opcode != 0) {
- SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
- SmallVector<SDValue, 4> Ops;
- for (unsigned i = 0; i != NumOperands; ++i)
- Ops.push_back(Op.getOperand(i));
- SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands);
- DAG.ReplaceAllUsesWith(Op, New);
- return SDValue(New.getNode(), 1);
+ if (UI->getOpcode() == ISD::STORE)
+ goto default_case;
+
+ // Otherwise use a regular EFLAGS-setting instruction.
+ switch (Op.getNode()->getOpcode()) {
+ default: llvm_unreachable("unexpected operator!");
+ case ISD::SUB: Opcode = X86ISD::SUB; break;
+ case ISD::OR: Opcode = X86ISD::OR; break;
+ case ISD::XOR: Opcode = X86ISD::XOR; break;
+ case ISD::AND: Opcode = X86ISD::AND; break;
}
+
+ NumOperands = 2;
+ break;
+ case X86ISD::ADD:
+ case X86ISD::SUB:
+ case X86ISD::INC:
+ case X86ISD::DEC:
+ case X86ISD::OR:
+ case X86ISD::XOR:
+ case X86ISD::AND:
+ return SDValue(Op.getNode(), 1);
+ default:
+ default_case:
+ break;
}
- // Otherwise just emit a CMP with 0, which is the TEST pattern.
- return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
- DAG.getConstant(0, Op.getValueType()));
+ if (Opcode == 0)
+ // Emit a CMP with 0, which is the TEST pattern.
+ return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
+ DAG.getConstant(0, Op.getValueType()));
+
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
+ SmallVector<SDValue, 4> Ops;
+ for (unsigned i = 0; i != NumOperands; ++i)
+ Ops.push_back(Op.getOperand(i));
+
+ SDValue New = DAG.getNode(Opcode, dl, VTs, &Ops[0], NumOperands);
+ DAG.ReplaceAllUsesWith(Op, New);
+ return SDValue(New.getNode(), 1);
}
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
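A short reminder of why a CMP against zero is called "the TEST pattern" above:

  //   cmpl $0, %eax    and    testl %eax, %eax
  // set ZF/SF/PF identically and both clear OF and CF, so an X86ISD::CMP
  // against a zero constant later selects to the shorter TEST encoding.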
@@ -6113,15 +6191,21 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
Op1 = Op1.getOperand(0);
SDValue LHS, RHS;
- if (Op1.getOpcode() == ISD::SHL) {
- if (ConstantSDNode *And10C = dyn_cast<ConstantSDNode>(Op1.getOperand(0)))
- if (And10C->getZExtValue() == 1) {
- LHS = Op0;
- RHS = Op1.getOperand(1);
- }
- } else if (Op0.getOpcode() == ISD::SHL) {
+ if (Op1.getOpcode() == ISD::SHL)
+ std::swap(Op0, Op1);
+ if (Op0.getOpcode() == ISD::SHL) {
if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
if (And00C->getZExtValue() == 1) {
+ // If we looked past a truncate, check that it's only truncating away
+ // known zeros.
+ unsigned BitWidth = Op0.getValueSizeInBits();
+ unsigned AndBitWidth = And.getValueSizeInBits();
+ if (BitWidth > AndBitWidth) {
+ APInt Mask = APInt::getAllOnesValue(BitWidth), Zeros, Ones;
+ DAG.ComputeMaskedBits(Op0, Mask, Zeros, Ones);
+ if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
+ return SDValue();
+ }
LHS = Op1;
RHS = Op0.getOperand(1);
}
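The source pattern LowerToBT is after, as a hedged C++ example:

  // (X & (1 << N)) != 0 selects to BT plus a carry-flag read:
  //   btl %esi, %edi
  //   setb %al
  bool TestBit(unsigned Word, unsigned N) {
    return Word & (1u << N);
  }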
@@ -6172,7 +6256,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
if (Op0.getOpcode() == ISD::AND &&
Op0.hasOneUse() &&
Op1.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(Op1)->getZExtValue() == 0 &&
+ cast<ConstantSDNode>(Op1)->isNullValue() &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
if (NewSetCC.getNode())
@@ -6552,15 +6636,16 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
(X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
CC = DAG.getConstant(CCode, MVT::i8);
- SDValue User = SDValue(*Op.getNode()->use_begin(), 0);
+ SDNode *User = *Op.getNode()->use_begin();
// Look for an unconditional branch following this conditional branch.
// We need this because we need to reverse the successors in order
// to implement FCMP_OEQ.
- if (User.getOpcode() == ISD::BR) {
- SDValue FalseBB = User.getOperand(1);
- SDValue NewBR =
- DAG.UpdateNodeOperands(User, User.getOperand(0), Dest);
+ if (User->getOpcode() == ISD::BR) {
+ SDValue FalseBB = User->getOperand(1);
+ SDNode *NewBR =
+ DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
assert(NewBR == User);
+ (void)NewBR;
Dest = FalseBB;
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
@@ -6632,7 +6717,6 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SDValue Flag;
- EVT IntPtr = getPointerTy();
EVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
Chain = DAG.getCopyToReg(Chain, dl, X86::EAX, Size, Flag);
@@ -6685,7 +6769,7 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
Store = DAG.getStore(Op.getOperand(0), dl,
DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
MVT::i32),
- FIN, SV, 0, false, false, 0);
+ FIN, SV, 4, false, false, 0);
MemOps.push_back(Store);
// Store ptr to overflow_arg_area
@@ -6693,7 +6777,7 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
FIN, DAG.getIntPtrConstant(4));
SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
getPointerTy());
- Store = DAG.getStore(Op.getOperand(0), dl, OVFIN, FIN, SV, 0,
+ Store = DAG.getStore(Op.getOperand(0), dl, OVFIN, FIN, SV, 8,
false, false, 0);
MemOps.push_back(Store);
@@ -6702,7 +6786,7 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
FIN, DAG.getIntPtrConstant(8));
SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
getPointerTy());
- Store = DAG.getStore(Op.getOperand(0), dl, RSFIN, FIN, SV, 0,
+ Store = DAG.getStore(Op.getOperand(0), dl, RSFIN, FIN, SV, 16,
false, false, 0);
MemOps.push_back(Store);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
@@ -6712,9 +6796,6 @@ SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
// X86-64 va_list is a struct { i32, i32, i8*, i8* }.
assert(Subtarget->is64Bit() && "This code only handles 64-bit va_arg!");
- SDValue Chain = Op.getOperand(0);
- SDValue SrcPtr = Op.getOperand(1);
- SDValue SrcSV = Op.getOperand(2);
report_fatal_error("VAArgInst is not yet implemented for x86-64!");
return SDValue();
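The corrected alias offsets in the VASTART stores above (4, 8 and 16) follow the SysV x86-64 va_list layout named in the comment; spelled out as a hedged sketch:

  // struct __va_list_tag per the SysV x86-64 ABI:
  struct VaListTag {
    unsigned GPOffset;      // byte 0:  next general-purpose register slot
    unsigned FPOffset;      // byte 4:  next floating-point register slot
    void *OverflowArgArea;  // byte 8:  stack-passed arguments
    void *RegSaveArea;      // byte 16: spilled argument registers
  };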
@@ -7733,6 +7814,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
case X86ISD::FRCP: return "X86ISD::FRCP";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
+ case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
case X86ISD::SegmentBaseAddress: return "X86ISD::SegmentBaseAddress";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
@@ -7917,7 +7999,6 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
unsigned immOpc,
unsigned LoadOpc,
unsigned CXchgOpc,
- unsigned copyOpc,
unsigned notOpc,
unsigned EAXreg,
TargetRegisterClass *RC,
@@ -7944,8 +8025,11 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
F->insert(MBBIter, newMBB);
F->insert(MBBIter, nextMBB);
- // Move all successors to thisMBB to nextMBB
- nextMBB->transferSuccessors(thisMBB);
+ // Transfer the remainder of thisMBB and its successor edges to nextMBB.
+ nextMBB->splice(nextMBB->begin(), thisMBB,
+ llvm::next(MachineBasicBlock::iterator(bInstr)),
+ thisMBB->end());
+ nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
// Update thisMBB to fall through to newMBB
thisMBB->addSuccessor(newMBB);
@@ -7955,17 +8039,17 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
newMBB->addSuccessor(newMBB);
// Insert instructions into newMBB based on incoming instruction
- assert(bInstr->getNumOperands() < X86AddrNumOperands + 4 &&
+ assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
"unexpected number of operands");
DebugLoc dl = bInstr->getDebugLoc();
MachineOperand& destOper = bInstr->getOperand(0);
- MachineOperand* argOpers[2 + X86AddrNumOperands];
+ MachineOperand* argOpers[2 + X86::AddrNumOperands];
int numArgs = bInstr->getNumOperands() - 1;
for (int i=0; i < numArgs; ++i)
argOpers[i] = &bInstr->getOperand(i+1);
// x86 address has 5 operands: base, index, scale, displacement, and segment
- int lastAddrIndx = X86AddrNumOperands - 1; // [0,3]
+ int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
int valArgIndx = lastAddrIndx + 1;
unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
@@ -7991,7 +8075,7 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
MIB.addReg(tt);
(*MIB).addOperand(*argOpers[valArgIndx]);
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), EAXreg);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg);
MIB.addReg(t1);
MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc));
@@ -8002,13 +8086,13 @@ X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
(*MIB).setMemRefs(bInstr->memoperands_begin(),
bInstr->memoperands_end());
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), destOper.getReg());
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
MIB.addReg(EAXreg);
// insert branch
BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);
- F->DeleteMachineInstr(bInstr); // The pseudo instruction is gone now.
+ bInstr->eraseFromParent(); // The pseudo instruction is gone now.
return nextMBB;
}
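
At the C level, the loop this custom inserter builds for the bitwise atomic pseudos is roughly the following (a hedged sketch of the 32-bit case; the real code keeps values in virtual registers and pins the compare value in EAXreg for LCMPXCHG):

#include <stdint.h>

// Sketch of the expansion for e.g. ATOMAND32 on *addr (assumption, not this file's code).
uint32_t atomic_and32(volatile uint32_t *addr, uint32_t val) {
  uint32_t old, upd;
  do {
    old = *addr;       // LoadOpc in newMBB
    upd = old & val;   // regOpc/immOpc; NAND additionally applies notOpc
  } while (!__sync_bool_compare_and_swap(addr, old, upd));  // LCMPXCHG + JNE_4 back edge
  return old;          // copied out of EAXreg into the dest operand
}
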
@@ -8038,7 +8122,6 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
const TargetRegisterClass *RC = X86::GR32RegisterClass;
const unsigned LoadOpc = X86::MOV32rm;
- const unsigned copyOpc = X86::MOV32rr;
const unsigned NotOpc = X86::NOT32r;
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
const BasicBlock *LLVM_BB = MBB->getBasicBlock();
@@ -8053,8 +8136,11 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
F->insert(MBBIter, newMBB);
F->insert(MBBIter, nextMBB);
- // Move all successors of thisMBB to nextMBB
- nextMBB->transferSuccessors(thisMBB);
+ // Transfer the remainder of thisMBB and its successor edges to nextMBB.
+ nextMBB->splice(nextMBB->begin(), thisMBB,
+ llvm::next(MachineBasicBlock::iterator(bInstr)),
+ thisMBB->end());
+ nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
// Update thisMBB to fall through to newMBB
thisMBB->addSuccessor(newMBB);
@@ -8066,12 +8152,12 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
DebugLoc dl = bInstr->getDebugLoc();
// Insert instructions into newMBB based on incoming instruction
// There are 8 "real" operands plus 9 implicit def/uses, ignored here.
- assert(bInstr->getNumOperands() < X86AddrNumOperands + 14 &&
+ assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 &&
"unexpected number of operands");
MachineOperand& dest1Oper = bInstr->getOperand(0);
MachineOperand& dest2Oper = bInstr->getOperand(1);
- MachineOperand* argOpers[2 + X86AddrNumOperands];
- for (int i=0; i < 2 + X86AddrNumOperands; ++i) {
+ MachineOperand* argOpers[2 + X86::AddrNumOperands];
+ for (int i=0; i < 2 + X86::AddrNumOperands; ++i) {
argOpers[i] = &bInstr->getOperand(i+2);
// We use some of the operands multiple times, so conservatively just
@@ -8081,7 +8167,7 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
}
// x86 address has 5 operands: base, index, scale, displacement, and segment.
- int lastAddrIndx = X86AddrNumOperands - 1; // [0,3]
+ int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1);
@@ -8145,14 +8231,14 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
MIB.addReg(t2);
(*MIB).addOperand(*argOpers[valArgIndx + 1]);
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::EAX);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
MIB.addReg(t1);
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::EDX);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX);
MIB.addReg(t2);
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::EBX);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX);
MIB.addReg(t5);
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::ECX);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX);
MIB.addReg(t6);
MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B));
@@ -8163,15 +8249,15 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
(*MIB).setMemRefs(bInstr->memoperands_begin(),
bInstr->memoperands_end());
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), t3);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t3);
MIB.addReg(X86::EAX);
- MIB = BuildMI(newMBB, dl, TII->get(copyOpc), t4);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t4);
MIB.addReg(X86::EDX);
// insert branch
BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);
- F->DeleteMachineInstr(bInstr); // The pseudo instruction is gone now.
+ bInstr->eraseFromParent(); // The pseudo instruction is gone now.
return nextMBB;
}
@@ -8205,8 +8291,11 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
F->insert(MBBIter, newMBB);
F->insert(MBBIter, nextMBB);
- // Move all successors of thisMBB to nextMBB
- nextMBB->transferSuccessors(thisMBB);
+ // Transfer the remainder of thisMBB and its successor edges to nextMBB.
+ nextMBB->splice(nextMBB->begin(), thisMBB,
+ llvm::next(MachineBasicBlock::iterator(mInstr)),
+ thisMBB->end());
+ nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
// Update thisMBB to fall through to newMBB
thisMBB->addSuccessor(newMBB);
@@ -8217,16 +8306,16 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
DebugLoc dl = mInstr->getDebugLoc();
// Insert instructions into newMBB based on incoming instruction
- assert(mInstr->getNumOperands() < X86AddrNumOperands + 4 &&
+ assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 &&
"unexpected number of operands");
MachineOperand& destOper = mInstr->getOperand(0);
- MachineOperand* argOpers[2 + X86AddrNumOperands];
+ MachineOperand* argOpers[2 + X86::AddrNumOperands];
int numArgs = mInstr->getNumOperands() - 1;
for (int i=0; i < numArgs; ++i)
argOpers[i] = &mInstr->getOperand(i+1);
// x86 address has 5 operands: base, index, scale, displacement, and segment
- int lastAddrIndx = X86AddrNumOperands - 1; // [0,3]
+ int lastAddrIndx = X86::AddrNumOperands - 1; // [0,4]
int valArgIndx = lastAddrIndx + 1;
unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
@@ -8241,12 +8330,12 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
if (argOpers[valArgIndx]->isReg())
- MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2);
else
MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
(*MIB).addOperand(*argOpers[valArgIndx]);
- MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), X86::EAX);
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX);
MIB.addReg(t1);
MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr));
@@ -8268,13 +8357,13 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
(*MIB).setMemRefs(mInstr->memoperands_begin(),
mInstr->memoperands_end());
- MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), destOper.getReg());
+ MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg());
MIB.addReg(X86::EAX);
// insert branch
BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB);
- F->DeleteMachineInstr(mInstr); // The pseudo instruction is gone now.
+ mInstr->eraseFromParent(); // The pseudo instruction is gone now.
return nextMBB;
}
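
The min/max flavor replaces the ALU op with a compare and a conditional move; the same loop, sketched for the signed-min, 32-bit case (an assumption in the style of the previous sketch):

#include <stdint.h>

int32_t atomic_min32(volatile int32_t *addr, int32_t val) {
  int32_t old, upd;
  do {
    old = *addr;                  // MOV32rm
    upd = old < val ? old : val;  // CMP32rr + cmovOpc select the smaller value
  } while (!__sync_bool_compare_and_swap(addr, old, upd));  // LCMPXCHG32 + JNE_4
  return old;                     // copied out of EAX into the dest operand
}
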
@@ -8284,7 +8373,6 @@ MachineBasicBlock *
X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
unsigned numArgs, bool memArg) const {
- MachineFunction *F = BB->getParent();
DebugLoc dl = MI->getDebugLoc();
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
@@ -8306,7 +8394,7 @@ X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB,
BuildMI(BB, dl, TII->get(X86::MOVAPSrr), MI->getOperand(0).getReg())
.addReg(X86::XMM0);
- F->DeleteMachineInstr(MI);
+ MI->eraseFromParent();
return BB;
}
@@ -8335,9 +8423,12 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
F->insert(MBBIter, XMMSaveMBB);
F->insert(MBBIter, EndMBB);
- // Set up the CFG.
- // Move any original successors of MBB to the end block.
- EndMBB->transferSuccessors(MBB);
+ // Transfer the remainder of MBB and its successor edges to EndMBB.
+ EndMBB->splice(EndMBB->begin(), MBB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ MBB->end());
+ EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
// The original block will now fall through to the XMM save block.
MBB->addSuccessor(XMMSaveMBB);
// The XMMSaveMBB will fall through to the end block.
@@ -8376,7 +8467,7 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
.addMemOperand(MMO);
}
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return EndMBB;
}
@@ -8405,24 +8496,39 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
MachineFunction *F = BB->getParent();
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
- unsigned Opc =
- X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
- BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
- E = BB->succ_end(); I != E; ++I)
- sinkMBB->addSuccessor(*I);
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while (!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
+
+ // If the EFLAGS register isn't dead in the terminator, then claim that it's
+ // live into the sink and copy blocks.
+ const MachineFunction *MF = BB->getParent();
+ const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
+ BitVector ReservedRegs = TRI->getReservedRegs(*MF);
+
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+ if (!MO.isReg() || !MO.isUse() || MO.isKill()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg != X86::EFLAGS) continue;
+ copy0MBB->addLiveIn(Reg);
+ sinkMBB->addLiveIn(Reg);
+ }
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
// Add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
+ // Create the conditional branch instruction.
+ unsigned Opc =
+ X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
+ BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
+
// copy0MBB:
// %FalseValue = ...
// # fallthrough to sinkMBB
@@ -8431,11 +8537,12 @@ X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
// sinkMBB:
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
- BuildMI(sinkMBB, DL, TII->get(X86::PHI), MI->getOperand(0).getReg())
+ BuildMI(*sinkMBB, sinkMBB->begin(), DL,
+ TII->get(X86::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return sinkMBB;
}
@@ -8444,21 +8551,70 @@ X86TargetLowering::EmitLoweredMingwAlloca(MachineInstr *MI,
MachineBasicBlock *BB) const {
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
- MachineFunction *F = BB->getParent();
// The lowering is pretty easy: we're just emitting the call to _alloca. The
// non-trivial part is impdef of ESP.
// FIXME: The code should be tweaked as soon as we'll try to do codegen for
// mingw-w64.
- BuildMI(BB, DL, TII->get(X86::CALLpcrel32))
+ BuildMI(*BB, MI, DL, TII->get(X86::CALLpcrel32))
.addExternalSymbol("_alloca")
.addReg(X86::EAX, RegState::Implicit)
.addReg(X86::ESP, RegState::Implicit)
.addReg(X86::EAX, RegState::Define | RegState::Implicit)
.addReg(X86::ESP, RegState::Define | RegState::Implicit);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
+ return BB;
+}
+
+MachineBasicBlock *
+X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+ // This is pretty easy. We're taking the value that we received from
+ // our load from the relocation, sticking it in either RDI (x86-64)
+ // or EAX and doing an indirect call. The return value will then
+ // be in the normal return register.
+ const X86InstrInfo *TII
+ = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo());
+ DebugLoc DL = MI->getDebugLoc();
+ MachineFunction *F = BB->getParent();
+
+ assert(MI->getOperand(3).isGlobal() && "This should be a global");
+
+ if (Subtarget->is64Bit()) {
+ MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
+ TII->get(X86::MOV64rm), X86::RDI)
+ .addReg(X86::RIP)
+ .addImm(0).addReg(0)
+ .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
+ MI->getOperand(3).getTargetFlags())
+ .addReg(0);
+ MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
+ addDirectMem(MIB, X86::RDI);
+ } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
+ MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
+ TII->get(X86::MOV32rm), X86::EAX)
+ .addReg(0)
+ .addImm(0).addReg(0)
+ .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
+ MI->getOperand(3).getTargetFlags())
+ .addReg(0);
+ MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
+ addDirectMem(MIB, X86::EAX);
+ } else {
+ MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
+ TII->get(X86::MOV32rm), X86::EAX)
+ .addReg(TII->getGlobalBaseReg(F))
+ .addImm(0).addReg(0)
+ .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
+ MI->getOperand(3).getTargetFlags())
+ .addReg(0);
+ MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
+ addDirectMem(MIB, X86::EAX);
+ }
+
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
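
The three branches correspond to machine sequences along these lines (a sketch; the @TLVP relocation spelling is assumed from the Darwin TLS convention, not taken from this diff):

// x86-64:          movq  _var@TLVP(%rip), %rdi   ; MOV64rm off RIP
//                  callq *(%rdi)                 ; CALL64m, variable address returned in %rax
// 32-bit non-PIC:  movl  _var@TLVP, %eax         ; absolute MOV32rm
//                  calll *(%eax)                 ; CALL32m, result in %eax
// 32-bit PIC:      movl  _var@TLVP(%ebx), %eax   ; MOV32rm off the global base register
//                  calll *(%eax)
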
@@ -8469,6 +8625,9 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
default: assert(false && "Unexpected instr type to insert");
case X86::MINGW_ALLOCA:
return EmitLoweredMingwAlloca(MI, BB);
+ case X86::TLSCall_32:
+ case X86::TLSCall_64:
+ return EmitLoweredTLSCall(MI, BB);
case X86::CMOV_GR8:
case X86::CMOV_V1I64:
case X86::CMOV_FR32:
@@ -8499,23 +8658,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// mode when truncating to an integer value.
MachineFunction *F = BB->getParent();
int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
- addFrameReference(BuildMI(BB, DL, TII->get(X86::FNSTCW16m)), CWFrameIdx);
+ addFrameReference(BuildMI(*BB, MI, DL,
+ TII->get(X86::FNSTCW16m)), CWFrameIdx);
// Load the old value of the high byte of the control word...
unsigned OldCW =
F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
- addFrameReference(BuildMI(BB, DL, TII->get(X86::MOV16rm), OldCW),
+ addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
CWFrameIdx);
// Set the high part to be round to zero...
- addFrameReference(BuildMI(BB, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
+ addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
.addImm(0xC7F);
// Reload the modified control word now...
- addFrameReference(BuildMI(BB, DL, TII->get(X86::FLDCW16m)), CWFrameIdx);
+ addFrameReference(BuildMI(*BB, MI, DL,
+ TII->get(X86::FLDCW16m)), CWFrameIdx);
// Restore the memory image of control word to original value
- addFrameReference(BuildMI(BB, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
+ addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
.addReg(OldCW);
// Get the X86 opcode to use.
@@ -8554,13 +8715,14 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
} else {
AM.Disp = Op.getImm();
}
- addFullAddress(BuildMI(BB, DL, TII->get(Opc)), AM)
- .addReg(MI->getOperand(X86AddrNumOperands).getReg());
+ addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
+ .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
// Reload the original control word now.
- addFrameReference(BuildMI(BB, DL, TII->get(X86::FLDCW16m)), CWFrameIdx);
+ addFrameReference(BuildMI(*BB, MI, DL,
+ TII->get(X86::FLDCW16m)), CWFrameIdx);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
// String/text processing lowering.
@@ -8577,25 +8739,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::ATOMAND32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
X86::AND32ri, X86::MOV32rm,
- X86::LCMPXCHG32, X86::MOV32rr,
+ X86::LCMPXCHG32,
X86::NOT32r, X86::EAX,
X86::GR32RegisterClass);
case X86::ATOMOR32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr,
X86::OR32ri, X86::MOV32rm,
- X86::LCMPXCHG32, X86::MOV32rr,
+ X86::LCMPXCHG32,
X86::NOT32r, X86::EAX,
X86::GR32RegisterClass);
case X86::ATOMXOR32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
X86::XOR32ri, X86::MOV32rm,
- X86::LCMPXCHG32, X86::MOV32rr,
+ X86::LCMPXCHG32,
X86::NOT32r, X86::EAX,
X86::GR32RegisterClass);
case X86::ATOMNAND32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
X86::AND32ri, X86::MOV32rm,
- X86::LCMPXCHG32, X86::MOV32rr,
+ X86::LCMPXCHG32,
X86::NOT32r, X86::EAX,
X86::GR32RegisterClass, true);
case X86::ATOMMIN32:
@@ -8610,25 +8772,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::ATOMAND16:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
X86::AND16ri, X86::MOV16rm,
- X86::LCMPXCHG16, X86::MOV16rr,
+ X86::LCMPXCHG16,
X86::NOT16r, X86::AX,
X86::GR16RegisterClass);
case X86::ATOMOR16:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr,
X86::OR16ri, X86::MOV16rm,
- X86::LCMPXCHG16, X86::MOV16rr,
+ X86::LCMPXCHG16,
X86::NOT16r, X86::AX,
X86::GR16RegisterClass);
case X86::ATOMXOR16:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr,
X86::XOR16ri, X86::MOV16rm,
- X86::LCMPXCHG16, X86::MOV16rr,
+ X86::LCMPXCHG16,
X86::NOT16r, X86::AX,
X86::GR16RegisterClass);
case X86::ATOMNAND16:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
X86::AND16ri, X86::MOV16rm,
- X86::LCMPXCHG16, X86::MOV16rr,
+ X86::LCMPXCHG16,
X86::NOT16r, X86::AX,
X86::GR16RegisterClass, true);
case X86::ATOMMIN16:
@@ -8643,25 +8805,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::ATOMAND8:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
X86::AND8ri, X86::MOV8rm,
- X86::LCMPXCHG8, X86::MOV8rr,
+ X86::LCMPXCHG8,
X86::NOT8r, X86::AL,
X86::GR8RegisterClass);
case X86::ATOMOR8:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr,
X86::OR8ri, X86::MOV8rm,
- X86::LCMPXCHG8, X86::MOV8rr,
+ X86::LCMPXCHG8,
X86::NOT8r, X86::AL,
X86::GR8RegisterClass);
case X86::ATOMXOR8:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr,
X86::XOR8ri, X86::MOV8rm,
- X86::LCMPXCHG8, X86::MOV8rr,
+ X86::LCMPXCHG8,
X86::NOT8r, X86::AL,
X86::GR8RegisterClass);
case X86::ATOMNAND8:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
X86::AND8ri, X86::MOV8rm,
- X86::LCMPXCHG8, X86::MOV8rr,
+ X86::LCMPXCHG8,
X86::NOT8r, X86::AL,
X86::GR8RegisterClass, true);
// FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
@@ -8669,25 +8831,25 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::ATOMAND64:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
X86::AND64ri32, X86::MOV64rm,
- X86::LCMPXCHG64, X86::MOV64rr,
+ X86::LCMPXCHG64,
X86::NOT64r, X86::RAX,
X86::GR64RegisterClass);
case X86::ATOMOR64:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr,
X86::OR64ri32, X86::MOV64rm,
- X86::LCMPXCHG64, X86::MOV64rr,
+ X86::LCMPXCHG64,
X86::NOT64r, X86::RAX,
X86::GR64RegisterClass);
case X86::ATOMXOR64:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
X86::XOR64ri32, X86::MOV64rm,
- X86::LCMPXCHG64, X86::MOV64rr,
+ X86::LCMPXCHG64,
X86::NOT64r, X86::RAX,
X86::GR64RegisterClass);
case X86::ATOMNAND64:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
X86::AND64ri32, X86::MOV64rm,
- X86::LCMPXCHG64, X86::MOV64rr,
+ X86::LCMPXCHG64,
X86::NOT64r, X86::RAX,
X86::GR64RegisterClass, true);
case X86::ATOMMIN64:
@@ -9513,8 +9675,10 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
if (ShAmt1.getOpcode() == ISD::SUB) {
SDValue Sum = ShAmt1.getOperand(0);
if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
- if (SumC->getSExtValue() == Bits &&
- ShAmt1.getOperand(1) == ShAmt0)
+ SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
+ if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
+ ShAmt1Op1 = ShAmt1Op1.getOperand(0);
+ if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
return DAG.getNode(Opc, DL, VT,
Op0, Op1,
DAG.getNode(ISD::TRUNCATE, DL,
@@ -9710,58 +9874,6 @@ static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
-// On X86 and X86-64, atomic operations are lowered to locked instructions.
-// Locked instructions, in turn, have implicit fence semantics (all memory
-// operations are flushed before issuing the locked instruction, and they
-// are not buffered), so we can fold away the common pattern of
-// fence-atomic-fence.
-static SDValue PerformMEMBARRIERCombine(SDNode* N, SelectionDAG &DAG) {
- SDValue atomic = N->getOperand(0);
- switch (atomic.getOpcode()) {
- case ISD::ATOMIC_CMP_SWAP:
- case ISD::ATOMIC_SWAP:
- case ISD::ATOMIC_LOAD_ADD:
- case ISD::ATOMIC_LOAD_SUB:
- case ISD::ATOMIC_LOAD_AND:
- case ISD::ATOMIC_LOAD_OR:
- case ISD::ATOMIC_LOAD_XOR:
- case ISD::ATOMIC_LOAD_NAND:
- case ISD::ATOMIC_LOAD_MIN:
- case ISD::ATOMIC_LOAD_MAX:
- case ISD::ATOMIC_LOAD_UMIN:
- case ISD::ATOMIC_LOAD_UMAX:
- break;
- default:
- return SDValue();
- }
-
- SDValue fence = atomic.getOperand(0);
- if (fence.getOpcode() != ISD::MEMBARRIER)
- return SDValue();
-
- switch (atomic.getOpcode()) {
- case ISD::ATOMIC_CMP_SWAP:
- return DAG.UpdateNodeOperands(atomic, fence.getOperand(0),
- atomic.getOperand(1), atomic.getOperand(2),
- atomic.getOperand(3));
- case ISD::ATOMIC_SWAP:
- case ISD::ATOMIC_LOAD_ADD:
- case ISD::ATOMIC_LOAD_SUB:
- case ISD::ATOMIC_LOAD_AND:
- case ISD::ATOMIC_LOAD_OR:
- case ISD::ATOMIC_LOAD_XOR:
- case ISD::ATOMIC_LOAD_NAND:
- case ISD::ATOMIC_LOAD_MIN:
- case ISD::ATOMIC_LOAD_MAX:
- case ISD::ATOMIC_LOAD_UMIN:
- case ISD::ATOMIC_LOAD_UMAX:
- return DAG.UpdateNodeOperands(atomic, fence.getOperand(0),
- atomic.getOperand(1), atomic.getOperand(2));
- default:
- return SDValue();
- }
-}
-
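
Concretely, the deleted combine serviced code of this shape, where both explicit barriers are redundant on x86 because the lock-prefixed RMW is itself a full barrier (a sketch, not code from this tree):

#include <stdint.h>

int32_t fetch_add_fenced(volatile int32_t *p) {
  __sync_synchronize();                      // leading ISD::MEMBARRIER
  int32_t old = __sync_fetch_and_add(p, 1);  // lowers to a locked RMW on x86
  __sync_synchronize();                      // trailing ISD::MEMBARRIER
  return old;
}
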
static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) {
// (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
// (and (i32 x86isd::setcc_carry), 1)
@@ -9809,7 +9921,6 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
case X86ISD::FAND: return PerformFANDCombine(N, DAG);
case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
- case ISD::MEMBARRIER: return PerformMEMBARRIERCombine(N, DAG);
case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG);
}
@@ -9932,8 +10043,8 @@ static bool LowerToBSwap(CallInst *CI) {
// so don't worry about this.
// Verify this is a simple bswap.
- if (CI->getNumOperands() != 2 ||
- CI->getType() != CI->getOperand(1)->getType() ||
+ if (CI->getNumArgOperands() != 1 ||
+ CI->getType() != CI->getArgOperand(0)->getType() ||
!CI->getType()->isIntegerTy())
return false;
@@ -9946,7 +10057,7 @@ static bool LowerToBSwap(CallInst *CI) {
Module *M = CI->getParent()->getParent()->getParent();
Constant *Int = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
- Value *Op = CI->getOperand(1);
+ Value *Op = CI->getArgOperand(0);
Op = CallInst::Create(Int, Op, CI->getName(), CI);
CI->replaceAllUsesWith(Op);
@@ -10079,7 +10190,6 @@ LowerXConstraint(EVT ConstraintVT) const {
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
char Constraint,
- bool hasMemory,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
SDValue Result(0, 0);
@@ -10121,9 +10231,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'e': {
// 32-bit signed value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
- const ConstantInt *CI = C->getConstantIntValue();
- if (CI->isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
- C->getSExtValue())) {
+ if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
+ C->getSExtValue())) {
// Widen to 64 bits here to get it sign extended.
Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
break;
@@ -10136,9 +10245,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
case 'Z': {
// 32-bit unsigned value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
- const ConstantInt *CI = C->getConstantIntValue();
- if (CI->isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
- C->getZExtValue())) {
+ if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
+ C->getZExtValue())) {
Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
break;
}
@@ -10155,6 +10263,12 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
break;
}
+ // In any sort of PIC mode, addresses need to be computed at runtime by
+ // adding in a register or via some sort of table lookup, so they can't
+ // be used as immediates.
+ if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
+ return;
+
// If we are in non-pic codegen mode, we allow the address of a global (with
// an optional displacement) to be used with 'i'.
GlobalAddressSDNode *GA = 0;
@@ -10190,11 +10304,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
getTargetMachine())))
return;
- if (hasMemory)
- Op = LowerGlobalAddress(GV, Op.getDebugLoc(), Offset, DAG);
- else
- Op = DAG.getTargetGlobalAddress(GV, GA->getValueType(0), Offset);
- Result = Op;
+ Result = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(),
+ GA->getValueType(0), Offset);
break;
}
}
@@ -10203,8 +10314,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
Ops.push_back(Result);
return;
}
- return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
- Ops, DAG);
+ return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
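
For context, the 'e' and 'Z' paths above service inline asm immediates like the following (a GCC/Clang-style sketch; the constraint letters are the ones handled here, the surrounding function is assumed):

// 'e' accepts a 32-bit sign-extended immediate; 'Z' a 32-bit zero-extended one.
long add_imm32(long v) {
  asm("addq %1, %0" : "+r"(v) : "e"(0x12345678L));  // constant fits a signed imm32
  return v;
}
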
std::vector<unsigned> X86TargetLowering::
diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.h b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
index 1ef1a7b..4e4daa4 100644
--- a/contrib/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.h
@@ -196,6 +196,10 @@ namespace llvm {
// TLSADDR - Thread Local Storage.
TLSADDR,
+
+ // TLSCALL - Thread Local Storage. A call to an OS-provided thunk at
+ // the address from an earlier relocation.
+ TLSCALL,
// SegmentBaseAddress - The address segment:0
SegmentBaseAddress,
@@ -496,7 +500,6 @@ namespace llvm {
/// being processed is 'm'.
virtual void LowerAsmOperandForConstraint(SDValue Op,
char ConstraintLetter,
- bool hasMemory,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
@@ -576,20 +579,17 @@ namespace llvm {
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
- virtual FastISel *
- createFastISel(MachineFunction &mf,
- DenseMap<const Value *, unsigned> &,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &,
- DenseMap<const AllocaInst *, int> &,
- std::vector<std::pair<MachineInstr*, unsigned> > &
-#ifndef NDEBUG
- , SmallSet<const Instruction *, 8> &
-#endif
- ) const;
+ virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *F) const;
+ /// getStackCookieLocation - Return true if the target stores stack
+ /// protector cookies at a fixed offset in some non-standard address
+ /// space, populating the address space and offset as appropriate.
+ virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const;
+
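
A hedged sketch of the x86 override this declaration corresponds to; the %fs:0x28 and %gs:0x14 slots are the usual Linux/glibc stack-guard locations, and LLVM models %gs/%fs as x86 address spaces 256/257 (the actual body is not part of this hunk and the isTargetLinux guard is an assumption):

bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
                                               unsigned &Offset) const {
  if (!Subtarget->isTargetLinux())  // glibc-style TLS guard slot only
    return false;
  AddressSpace = Subtarget->is64Bit() ? 257 : 256;  // %fs : %gs
  Offset = Subtarget->is64Bit() ? 0x28 : 0x14;      // TCB stack-guard offset
  return true;
}
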
private:
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
@@ -643,6 +643,7 @@ namespace llvm {
bool isCalleeStructRet,
bool isCallerStructRet,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const;
bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
@@ -725,6 +726,7 @@ namespace llvm {
LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -733,13 +735,13 @@ namespace llvm {
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
virtual bool
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
- SelectionDAG &DAG) const;
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const;
void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG, unsigned NewOp) const;
@@ -762,7 +764,6 @@ namespace llvm {
unsigned immOpc,
unsigned loadOpc,
unsigned cxchgOpc,
- unsigned copyOpc,
unsigned notOpc,
unsigned EAXreg,
TargetRegisterClass *RC,
@@ -794,6 +795,9 @@ namespace llvm {
MachineBasicBlock *EmitLoweredMingwAlloca(MachineInstr *MI,
MachineBasicBlock *BB) const;
+
+ MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
@@ -806,15 +810,7 @@ namespace llvm {
};
namespace X86 {
- FastISel *createFastISel(MachineFunction &mf,
- DenseMap<const Value *, unsigned> &,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &,
- DenseMap<const AllocaInst *, int> &,
- std::vector<std::pair<MachineInstr*, unsigned> > &
-#ifndef NDEBUG
- , SmallSet<const Instruction*, 8> &
-#endif
- );
+ FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
}
}
diff --git a/contrib/llvm/lib/Target/X86/X86Instr64bit.td b/contrib/llvm/lib/Target/X86/X86Instr64bit.td
index 97eb17c..42d0e7f 100644
--- a/contrib/llvm/lib/Target/X86/X86Instr64bit.td
+++ b/contrib/llvm/lib/Target/X86/X86Instr64bit.td
@@ -35,6 +35,14 @@ def i64i8imm : Operand<i64> {
let ParserMatchClass = ImmSExti64i8AsmOperand;
}
+def lea64_32mem : Operand<i32> {
+ let PrintMethod = "printi32mem";
+ let AsmOperandLowerMethod = "lower_lea64_32mem";
+ let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm, i8imm);
+ let ParserMatchClass = X86MemAsmOperand;
+}
+
+
// Special i64mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved register are popped.
@@ -44,29 +52,16 @@ def i64mem_TC : Operand<i64> {
let ParserMatchClass = X86MemAsmOperand;
}
-def lea64mem : Operand<i64> {
- let PrintMethod = "printlea64mem";
- let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm);
- let ParserMatchClass = X86NoSegMemAsmOperand;
-}
-
-def lea64_32mem : Operand<i32> {
- let PrintMethod = "printlea64_32mem";
- let AsmOperandLowerMethod = "lower_lea64_32mem";
- let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm);
- let ParserMatchClass = X86NoSegMemAsmOperand;
-}
-
//===----------------------------------------------------------------------===//
// Complex Pattern Definitions.
//
-def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
+def lea64addr : ComplexPattern<i64, 5, "SelectLEAAddr",
[add, sub, mul, X86mul_imm, shl, or, frameindex,
X86WrapperRIP], []>;
-def tls64addr : ComplexPattern<i64, 4, "SelectTLSADDRAddr",
+def tls64addr : ComplexPattern<i64, 5, "SelectTLSADDRAddr",
[tglobaltlsaddr], []>;
-
+
//===----------------------------------------------------------------------===//
// Pattern fragments.
//
@@ -289,11 +284,11 @@ def LEA64_32r : I<0x8D, MRMSrcMem,
[(set GR32:$dst, lea32addr:$src)]>, Requires<[In64BitMode]>;
let isReMaterializable = 1 in
-def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins lea64mem:$src),
+def LEA64r : RI<0x8D, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"lea{q}\t{$src|$dst}, {$dst|$src}",
[(set GR64:$dst, lea64addr:$src)]>;
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
"bswap{q}\t$dst",
[(set GR64:$dst, (bswap GR64:$src))]>, TB;
@@ -521,7 +516,7 @@ let Defs = [EFLAGS] in {
def ADD64i32 : RIi32<0x05, RawFrm, (outs), (ins i64i32imm:$src),
"add{q}\t{$src, %rax|%rax, $src}", []>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
// Register-Register Addition
@@ -559,7 +554,7 @@ def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst),
[(set GR64:$dst, EFLAGS,
(X86add_flag GR64:$src1, (load addr:$src2)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
// Memory-Register Addition
def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
@@ -580,7 +575,7 @@ let Uses = [EFLAGS] in {
def ADC64i32 : RIi32<0x15, RawFrm, (outs), (ins i64i32imm:$src),
"adc{q}\t{$src, %rax|%rax, $src}", []>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
@@ -606,7 +601,7 @@ def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst),
(ins GR64:$src1, i64i32imm:$src2),
"adc{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"adc{q}\t{$src2, $dst|$dst, $src2}",
@@ -621,7 +616,7 @@ def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
addr:$dst)]>;
} // Uses = [EFLAGS]
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
// Register-Register Subtraction
def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
@@ -653,7 +648,7 @@ def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86sub_flag GR64:$src1, i64immSExt32:$src2))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
def SUB64i32 : RIi32<0x2D, RawFrm, (outs), (ins i64i32imm:$src),
"sub{q}\t{$src, %rax|%rax, $src}", []>;
@@ -677,7 +672,7 @@ def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2),
(implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"sbb{q}\t{$src2, $dst|$dst, $src2}",
@@ -702,7 +697,7 @@ def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst),
(ins GR64:$src1, i64i32imm:$src2),
"sbb{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
def SBB64i32 : RIi32<0x1D, RawFrm, (outs), (ins i64i32imm:$src),
"sbb{q}\t{$src, %rax|%rax, $src}", []>;
@@ -736,7 +731,7 @@ def IMUL64m : RI<0xF7, MRM5m, (outs), (ins i64mem:$src),
}
let Defs = [EFLAGS] in {
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
// Register-Register Signed Integer Multiplication
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
@@ -751,7 +746,7 @@ def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst),
"imul{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86smul_flag GR64:$src1, (load addr:$src2)))]>, TB;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
// Surprisingly enough, these are not two-address instructions!
@@ -803,7 +798,7 @@ def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src),
// Unary instructions
let Defs = [EFLAGS], CodeSize = 2 in {
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
[(set GR64:$dst, (ineg GR64:$src)),
(implicit EFLAGS)]>;
@@ -811,14 +806,14 @@ def NEG64m : RI<0xF7, MRM3m, (outs), (ins i64mem:$dst), "neg{q}\t$dst",
[(store (ineg (loadi64 addr:$dst)), addr:$dst),
(implicit EFLAGS)]>;
-let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
+let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
[(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src))]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
[(store (add (loadi64 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)]>;
-let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
+let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
[(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src))]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
@@ -826,7 +821,7 @@ def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
(implicit EFLAGS)]>;
// In 64-bit mode, single byte INC and DEC cannot be encoded.
-let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
+let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src),
"inc{w}\t$dst",
@@ -844,38 +839,36 @@ def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src),
"dec{l}\t$dst",
[(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src))]>,
Requires<[In64BitMode]>;
-} // isConvertibleToThreeAddress
+} // Constraints = "$src = $dst", isConvertibleToThreeAddress
// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
-let isTwoAddress = 0, CodeSize = 2 in {
- def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
- [(store (add (loadi16 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize, Requires<[In64BitMode]>;
- def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
- [(store (add (loadi32 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
- Requires<[In64BitMode]>;
- def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
- [(store (add (loadi16 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize, Requires<[In64BitMode]>;
- def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
- [(store (add (loadi32 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
- Requires<[In64BitMode]>;
-}
+def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
+ [(store (add (loadi16 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
+ OpSize, Requires<[In64BitMode]>;
+def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
+ [(store (add (loadi32 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
+ Requires<[In64BitMode]>;
+def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
+ [(store (add (loadi16 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
+ OpSize, Requires<[In64BitMode]>;
+def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
+ [(store (add (loadi32 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
+ Requires<[In64BitMode]>;
} // Defs = [EFLAGS], CodeSize
let Defs = [EFLAGS] in {
// Shift instructions
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
+def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
"shl{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (shl GR64:$src, CL))]>;
+ [(set GR64:$dst, (shl GR64:$src1, CL))]>;
let isConvertibleToThreeAddress = 1 in // Can transform into LEA.
def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
@@ -885,7 +878,7 @@ def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
// 'add reg,reg' is cheaper.
def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
"shl{q}\t$dst", []>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
@@ -898,18 +891,18 @@ def SHL64m1 : RI<0xD1, MRM4m, (outs), (ins i64mem:$dst),
"shl{q}\t$dst",
[(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
+def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
"shr{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (srl GR64:$src, CL))]>;
+ [(set GR64:$dst, (srl GR64:$src1, CL))]>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
"shr{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
"shr{q}\t$dst",
[(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
@@ -922,11 +915,11 @@ def SHR64m1 : RI<0xD1, MRM5m, (outs), (ins i64mem:$dst),
"shr{q}\t$dst",
[(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
+def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
"sar{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (sra GR64:$src, CL))]>;
+ [(set GR64:$dst, (sra GR64:$src1, CL))]>;
def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"sar{q}\t{$src2, $dst|$dst, $src2}",
@@ -934,7 +927,7 @@ def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst),
def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
"sar{q}\t$dst",
[(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
@@ -949,7 +942,7 @@ def SAR64m1 : RI<0xD1, MRM7m, (outs), (ins i64mem:$dst),
// Rotate instructions
-let isTwoAddress = 1 in {
+let Constraints = "$src = $dst" in {
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src),
"rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
@@ -966,9 +959,8 @@ def RCL64rCL : RI<0xD3, MRM2r, (outs GR64:$dst), (ins GR64:$src),
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src),
"rcr{q}\t{%cl, $dst|$dst, CL}", []>;
}
-}
+} // Constraints = "$src = $dst"
-let isTwoAddress = 0 in {
def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
"rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, i8imm:$cnt),
@@ -984,13 +976,12 @@ def RCL64mCL : RI<0xD3, MRM2m, (outs), (ins i64mem:$dst),
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
"rcr{q}\t{%cl, $dst|$dst, CL}", []>;
}
-}
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
+def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
"rol{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (rotl GR64:$src, CL))]>;
+ [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"rol{q}\t{$src2, $dst|$dst, $src2}",
@@ -998,7 +989,7 @@ def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
"rol{q}\t$dst",
[(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
@@ -1011,11 +1002,11 @@ def ROL64m1 : RI<0xD1, MRM0m, (outs), (ins i64mem:$dst),
"rol{q}\t$dst",
[(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
+def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
"ror{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (rotr GR64:$src, CL))]>;
+ [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"ror{q}\t{$src2, $dst|$dst, $src2}",
@@ -1023,7 +1014,7 @@ def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
"ror{q}\t$dst",
[(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
@@ -1037,7 +1028,7 @@ def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst),
[(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Double shift instructions (generalizations of rotate)
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
@@ -1067,7 +1058,7 @@ def SHRD64rri8 : RIi8<0xAC, MRMDestReg,
(i8 imm:$src3)))]>,
TB;
} // isCommutable
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
@@ -1097,7 +1088,7 @@ def SHRD64mri8 : RIi8<0xAC, MRMDestMem,
// Logical Instructions...
//
-let isTwoAddress = 1 , AddedComplexity = 15 in
+let Constraints = "$src = $dst" , AddedComplexity = 15 in
def NOT64r : RI<0xF7, MRM2r, (outs GR64:$dst), (ins GR64:$src), "not{q}\t$dst",
[(set GR64:$dst, (not GR64:$src))]>;
def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
@@ -1107,7 +1098,7 @@ let Defs = [EFLAGS] in {
def AND64i32 : RIi32<0x25, RawFrm, (outs), (ins i64i32imm:$src),
"and{q}\t{$src, %rax|%rax, $src}", []>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def AND64rr : RI<0x21, MRMDestReg,
(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
@@ -1134,7 +1125,7 @@ def AND64ri32 : RIi32<0x81, MRM4r,
"and{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86and_flag GR64:$src1, i64immSExt32:$src2))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
def AND64mr : RI<0x21, MRMDestMem,
(outs), (ins i64mem:$dst, GR64:$src),
@@ -1152,7 +1143,7 @@ def AND64mi32 : RIi32<0x81, MRM4m,
[(store (and (loadi64 addr:$dst), i64immSExt32:$src), addr:$dst),
(implicit EFLAGS)]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
@@ -1179,7 +1170,7 @@ def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst),
"or{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86or_flag GR64:$src1, i64immSExt32:$src2))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
def OR64mr : RI<0x09, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"or{q}\t{$src, $dst|$dst, $src}",
@@ -1197,7 +1188,7 @@ def OR64mi32 : RIi32<0x81, MRM1m, (outs), (ins i64mem:$dst, i64i32imm:$src),
def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i64i32imm:$src),
"or{q}\t{$src, %rax|%rax, $src}", []>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
@@ -1224,7 +1215,7 @@ def XOR64ri32 : RIi32<0x81, MRM6r,
"xor{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86xor_flag GR64:$src1, i64immSExt32:$src2))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
def XOR64mr : RI<0x31, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"xor{q}\t{$src, $dst|$dst, $src}",
@@ -1366,7 +1357,7 @@ def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2),
} // Defs = [EFLAGS]
// Conditional moves
-let Uses = [EFLAGS], isTwoAddress = 1 in {
+let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
let isCommutable = 1 in {
def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
(outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
@@ -1530,7 +1521,7 @@ def CMOVNO64rm : RI<0x41, MRMSrcMem, // if !overflow, GR64 = [mem64]
"cmovno{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
X86_COND_NO, EFLAGS))]>, TB;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
// Use sbb to materialize carry flag into a GPR.
// FIXME: This are pseudo ops that should be replaced with Pat<> patterns.
@@ -1588,7 +1579,7 @@ def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
"cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
"cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}",
@@ -1601,7 +1592,7 @@ def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem,
[(set VR128:$dst,
(int_x86_sse2_cvtsi642sd VR128:$src1,
(loadi64 addr:$src2)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
// Signed i64 -> f32
def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src),
@@ -1611,7 +1602,7 @@ def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src),
"cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, GR64:$src2),
"cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}",
@@ -1625,7 +1616,7 @@ let isTwoAddress = 1 in {
[(set VR128:$dst,
(int_x86_sse_cvtsi642ss VR128:$src1,
(loadi64 addr:$src2)))]>;
-}
+} // Constraints = "$src1 = $dst"
// f32 -> signed i64
def CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
@@ -1691,6 +1682,7 @@ def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
// Thread Local Storage Instructions
//===----------------------------------------------------------------------===//
+// ELF TLS Support
// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
@@ -1700,7 +1692,7 @@ let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
Uses = [RSP] in
-def TLS_addr64 : I<0, Pseudo, (outs), (ins lea64mem:$sym),
+def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
".byte\t0x66; "
"leaq\t$sym(%rip), %rdi; "
".word\t0x6666; "
@@ -1709,6 +1701,17 @@ def TLS_addr64 : I<0, Pseudo, (outs), (ins lea64mem:$sym),
[(X86tlsaddr tls64addr:$sym)]>,
Requires<[In64BitMode]>;
+// Darwin TLS Support
+// For x86_64, the address of the thunk is passed in %rdi; on return,
+// the address of the variable is in %rax. All other registers are preserved.
+let Defs = [RAX],
+ Uses = [RDI],
+ usesCustomInserter = 1 in
+def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
+ "# TLSCall_64",
+ [(X86TLSCall addr:$sym)]>,
+ Requires<[In64BitMode]>;
+
let AddedComplexity = 5, isCodeGenOnly = 1 in
def MOV64GSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"movq\t%gs:$src, $dst",
@@ -1964,6 +1967,17 @@ def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
(TCRETURNdi64 texternalsym:$dst, imm:$off)>,
Requires<[In64BitMode]>;
+// TLS has some funny stuff here...
+// This corresponds to movabs $foo@tpoff, %rax
+def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
+ (MOV64ri tglobaltlsaddr :$dst)>;
+// This corresponds to add $foo@tpoff, %rax
+def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
+ (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;
+// This corresponds to mov foo@tpoff(%rbx), %eax
+def : Pat<(load (i64 (X86Wrapper tglobaltlsaddr :$dst))),
+ (MOV64rm tglobaltlsaddr :$dst)>;
+
// Comparisons.
// TEST R,R is smaller than CMP R,0
@@ -2332,45 +2346,3 @@ def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
-//===----------------------------------------------------------------------===//
-// X86-64 SSE4.1 Instructions
-//===----------------------------------------------------------------------===//
-
-/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
-multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
- def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set GR64:$dst,
- (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
- def mr : SS4AIi8<opc, MRMDestMem, (outs),
- (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
- addr:$dst)]>, OpSize, REX_W;
-}
-
-defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
-
-let isTwoAddress = 1 in {
- multiclass SS41I_insert64<bits<8> opc, string OpcodeStr> {
- def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
- OpSize, REX_W;
- def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
- imm:$src3)))]>, OpSize, REX_W;
- }
-}
-
-defm PINSRQ : SS41I_insert64<0x22, "pinsrq">;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrBuilder.h b/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
index 5a82a7b..2a6a71d 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
+++ b/contrib/llvm/lib/Target/X86/X86InstrBuilder.h
@@ -64,19 +64,15 @@ struct X86AddressMode {
///
static inline const MachineInstrBuilder &
addDirectMem(const MachineInstrBuilder &MIB, unsigned Reg) {
- // Because memory references are always represented with four
- // values, this adds: Reg, [1, NoReg, 0] to the instruction.
- return MIB.addReg(Reg).addImm(1).addReg(0).addImm(0);
+ // Because memory references are always represented with five
+ // values, this adds: Reg, 1, NoReg, 0, NoReg to the instruction.
+ return MIB.addReg(Reg).addImm(1).addReg(0).addImm(0).addReg(0);
}
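
As a usage sketch, the Darwin TLS lowering above (X86ISelLowering.cpp) drives this helper to emit an indirect call through a register, yielding the five operands Reg, 1, NoReg, 0, NoReg:

// Context (BB insertion point, MI, DL, TII) as in EmitLoweredTLSCall above.
MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
addDirectMem(MIB, X86::RDI);  // call *(%rdi): base=RDI, scale=1, no index,
                              // disp=0, no segment
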
-static inline const MachineInstrBuilder &
-addLeaOffset(const MachineInstrBuilder &MIB, int Offset) {
- return MIB.addImm(1).addReg(0).addImm(Offset);
-}
static inline const MachineInstrBuilder &
addOffset(const MachineInstrBuilder &MIB, int Offset) {
- return addLeaOffset(MIB, Offset).addReg(0);
+ return MIB.addImm(1).addReg(0).addImm(Offset).addReg(0);
}
/// addRegOffset - This function is used to add a memory reference of the form
@@ -89,25 +85,20 @@ addRegOffset(const MachineInstrBuilder &MIB,
return addOffset(MIB.addReg(Reg, getKillRegState(isKill)), Offset);
}
-static inline const MachineInstrBuilder &
-addLeaRegOffset(const MachineInstrBuilder &MIB,
- unsigned Reg, bool isKill, int Offset) {
- return addLeaOffset(MIB.addReg(Reg, getKillRegState(isKill)), Offset);
-}
-
/// addRegReg - This function is used to add a memory reference of the form:
/// [Reg + Reg].
static inline const MachineInstrBuilder &addRegReg(const MachineInstrBuilder &MIB,
unsigned Reg1, bool isKill1,
unsigned Reg2, bool isKill2) {
return MIB.addReg(Reg1, getKillRegState(isKill1)).addImm(1)
- .addReg(Reg2, getKillRegState(isKill2)).addImm(0);
+ .addReg(Reg2, getKillRegState(isKill2)).addImm(0).addReg(0);
}
static inline const MachineInstrBuilder &
-addLeaAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM) {
- assert (AM.Scale == 1 || AM.Scale == 2 || AM.Scale == 4 || AM.Scale == 8);
-
+addFullAddress(const MachineInstrBuilder &MIB,
+ const X86AddressMode &AM) {
+ assert(AM.Scale == 1 || AM.Scale == 2 || AM.Scale == 4 || AM.Scale == 8);
+
if (AM.BaseType == X86AddressMode::RegBase)
MIB.addReg(AM.Base.Reg);
else if (AM.BaseType == X86AddressMode::FrameIndexBase)
@@ -116,15 +107,11 @@ addLeaAddress(const MachineInstrBuilder &MIB, const X86AddressMode &AM) {
assert (0);
MIB.addImm(AM.Scale).addReg(AM.IndexReg);
if (AM.GV)
- return MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
+ MIB.addGlobalAddress(AM.GV, AM.Disp, AM.GVOpFlags);
else
- return MIB.addImm(AM.Disp);
-}
-
-static inline const MachineInstrBuilder &
-addFullAddress(const MachineInstrBuilder &MIB,
- const X86AddressMode &AM) {
- return addLeaAddress(MIB, AM).addReg(0);
+ MIB.addImm(AM.Disp);
+
+ return MIB.addReg(0);
}
/// addFrameReference - This function is used to add a reference to the base of
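With this change every x86 memory reference uniformly carries five operands: base, scale, index, displacement, and segment (hence the trailing .addReg(0) appended in each helper above). A minimal sketch of a call site under the new convention, assuming the surrounding LLVM headers; the helper name loadFromGlobal is illustrative, not part of the patch:

static void loadFromGlobal(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           const TargetInstrInfo &TII, unsigned DestReg,
                           const GlobalValue *GV) {
  X86AddressMode AM;   // defaults: RegBase with Base.Reg = 0, Scale = 1
  AM.GV = GV;          // address the global directly, no index register
  // addFullAddress appends exactly: base, scale, index, disp/GV, segment.
  addFullAddress(BuildMI(MBB, MI, DL, TII.get(X86::MOV32rm), DestReg), AM);
}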
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFPStack.td b/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
index 0aae4a8..da93de9 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFPStack.td
@@ -371,7 +371,7 @@ multiclass FPCMov<PatLeaf cc> {
Requires<[HasCMov]>;
}
-let Uses = [EFLAGS], isTwoAddress = 1 in {
+let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
defm CMOVB : FPCMov<X86_COND_B>;
defm CMOVBE : FPCMov<X86_COND_BE>;
defm CMOVE : FPCMov<X86_COND_E>;
@@ -380,7 +380,7 @@ defm CMOVNB : FPCMov<X86_COND_AE>;
defm CMOVNBE: FPCMov<X86_COND_A>;
defm CMOVNE : FPCMov<X86_COND_NE>;
defm CMOVNP : FPCMov<X86_COND_NP>;
-}
+} // Uses = [EFLAGS], Constraints = "$src1 = $dst"
let Predicates = [HasCMov] in {
// These are not factored because there's no clean way to pass DA/DB.
@@ -680,19 +680,19 @@ def : Pat<(X86fildflag addr:$src, i64), (ILD_Fp64m64 addr:$src)>;
// FP extensions map onto simple pseudo-value conversions if they are to/from
// the FP stack.
-def : Pat<(f64 (fextend RFP32:$src)), (MOV_Fp3264 RFP32:$src)>,
+def : Pat<(f64 (fextend RFP32:$src)), (COPY_TO_REGCLASS RFP32:$src, RFP64)>,
Requires<[FPStackf32]>;
-def : Pat<(f80 (fextend RFP32:$src)), (MOV_Fp3280 RFP32:$src)>,
+def : Pat<(f80 (fextend RFP32:$src)), (COPY_TO_REGCLASS RFP32:$src, RFP80)>,
Requires<[FPStackf32]>;
-def : Pat<(f80 (fextend RFP64:$src)), (MOV_Fp6480 RFP64:$src)>,
+def : Pat<(f80 (fextend RFP64:$src)), (COPY_TO_REGCLASS RFP64:$src, RFP80)>,
Requires<[FPStackf64]>;
// FP truncations map onto simple pseudo-value conversions if they are to/from
// the FP stack. We have validated that only value-preserving truncations make
// it through isel.
-def : Pat<(f32 (fround RFP64:$src)), (MOV_Fp6432 RFP64:$src)>,
+def : Pat<(f32 (fround RFP64:$src)), (COPY_TO_REGCLASS RFP64:$src, RFP32)>,
Requires<[FPStackf32]>;
-def : Pat<(f32 (fround RFP80:$src)), (MOV_Fp8032 RFP80:$src)>,
+def : Pat<(f32 (fround RFP80:$src)), (COPY_TO_REGCLASS RFP80:$src, RFP32)>,
Requires<[FPStackf32]>;
-def : Pat<(f64 (fround RFP80:$src)), (MOV_Fp8064 RFP80:$src)>,
+def : Pat<(f64 (fround RFP80:$src)), (COPY_TO_REGCLASS RFP80:$src, RFP64)>,
Requires<[FPStackf64]>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFormats.td b/contrib/llvm/lib/Target/X86/X86InstrFormats.td
index c4522f3..cc3fdf1 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFormats.td
@@ -50,9 +50,10 @@ def NoImm : ImmType<0>;
def Imm8 : ImmType<1>;
def Imm8PCRel : ImmType<2>;
def Imm16 : ImmType<3>;
-def Imm32 : ImmType<4>;
-def Imm32PCRel : ImmType<5>;
-def Imm64 : ImmType<6>;
+def Imm16PCRel : ImmType<4>;
+def Imm32 : ImmType<5>;
+def Imm32PCRel : ImmType<6>;
+def Imm64 : ImmType<7>;
// FPFormat - This specifies what form this FP instruction has. This is used by
// the Floating-Point stackifier pass.
@@ -101,6 +102,11 @@ class XS { bits<4> Prefix = 12; }
class T8 { bits<4> Prefix = 13; }
class TA { bits<4> Prefix = 14; }
class TF { bits<4> Prefix = 15; }
+class VEX { bit hasVEXPrefix = 1; }
+class VEX_W { bit hasVEX_WPrefix = 1; }
+class VEX_4V : VEX { bit hasVEX_4VPrefix = 1; }
+class VEX_I8IMM { bit hasVEX_i8ImmReg = 1; }
+class VEX_L { bit hasVEX_L = 1; }
class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
string AsmStr, Domain d = GenericDomain>
@@ -128,6 +134,12 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
bit hasLockPrefix = 0; // Does this inst have a 0xF0 prefix?
bits<2> SegOvrBits = 0; // Segment override prefix.
Domain ExeDomain = d;
+ bit hasVEXPrefix = 0; // Does this inst require a VEX prefix?
+ bit hasVEX_WPrefix = 0; // Does this inst set the VEX_W field?
+ bit hasVEX_4VPrefix = 0; // Does this inst require the VEX.VVVV field?
+ bit hasVEX_i8ImmReg = 0; // Does this inst require the last source register
+ // to be encoded in an immediate field?
+ bit hasVEX_L = 0; // Does this inst use large (256-bit) registers?
// TSFlags layout should be kept in sync with X86InstrInfo.h.
let TSFlags{5-0} = FormBits;
@@ -141,6 +153,11 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
let TSFlags{21-20} = SegOvrBits;
let TSFlags{23-22} = ExeDomain.Value;
let TSFlags{31-24} = Opcode;
+ let TSFlags{32} = hasVEXPrefix;
+ let TSFlags{33} = hasVEX_WPrefix;
+ let TSFlags{34} = hasVEX_4VPrefix;
+ let TSFlags{35} = hasVEX_i8ImmReg;
+ let TSFlags{36} = hasVEX_L;
}
class I<bits<8> o, Format f, dag outs, dag ins, string asm,
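The five new VEX bits sit above the opcode byte, at bits 32-36 of TSFlags, so anything consuming them must treat TSFlags as a 64-bit value. A small sketch of what decoding looks like, with the shift amounts taken directly from the let TSFlags{...} assignments above; the helper names are illustrative:

#include <cstdint>
static inline bool hasVEXPrefix(uint64_t TSFlags)    { return (TSFlags >> 32) & 1; }
static inline bool hasVEX_W(uint64_t TSFlags)        { return (TSFlags >> 33) & 1; }
static inline bool hasVEX_4V(uint64_t TSFlags)       { return (TSFlags >> 34) & 1; }
static inline bool hasVEX_i8ImmReg(uint64_t TSFlags) { return (TSFlags >> 35) & 1; }
static inline bool hasVEX_L(uint64_t TSFlags)        { return (TSFlags >> 36) & 1; }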
@@ -174,6 +191,13 @@ class Ii32<bits<8> o, Format f, dag outs, dag ins, string asm,
let CodeSize = 3;
}
+class Ii16PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : X86Inst<o, f, Imm16PCRel, outs, ins, asm> {
+ let Pattern = pattern;
+ let CodeSize = 3;
+}
+
class Ii32PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
list<dag> pattern>
: X86Inst<o, f, Imm32PCRel, outs, ins, asm> {
@@ -211,11 +235,56 @@ class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm,
let CodeSize = 3;
}
+// SI - SSE 1 & 2 scalar instructions
+class SI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
+ : I<o, F, outs, ins, asm, pattern> {
+ let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
+ !if(!eq(Prefix, 12 /* XS */), [HasSSE1], [HasSSE2]));
+
+ // AVX instructions have a 'v' prefix in the mnemonic
+ let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
+}
+
+// SIi8 - SSE 1 & 2 scalar instructions with 8-bit immediate
+class SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : Ii8<o, F, outs, ins, asm, pattern> {
+ let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
+ !if(!eq(Prefix, 12 /* XS */), [HasSSE1], [HasSSE2]));
+
+ // AVX instructions have a 'v' prefix in the mnemonic
+ let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
+}
+
+// PI - SSE 1 & 2 packed instructions
+class PI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern,
+ Domain d>
+ : I<o, F, outs, ins, asm, pattern, d> {
+ let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX],
+ !if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1]));
+
+ // AVX instructions have a 'v' prefix in the mnemonic
+ let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm);
+}
+
+// PIi8 - SSE 1 & 2 packed instructions with immediate
+class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern, Domain d>
+ : Ii8<o, F, outs, ins, asm, pattern, d> {
+ let Predicates = !if(hasVEX_4VPrefix /* VEX */, [HasAVX],
+ !if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1]));
+
+ // AVX instructions have a 'v' prefix in the mnemonic
+ let AsmString = !if(hasVEX_4VPrefix, !strconcat("v", asm), asm);
+}
+
// SSE1 Instruction Templates:
//
// SSI - SSE1 instructions with XS prefix.
// PSI - SSE1 instructions with TB prefix.
// PSIi8 - SSE1 instructions with ImmT == Imm8 and TB prefix.
+// VSSI - SSE1 instructions with XS prefix in AVX form.
+// VPSI - SSE1 instructions with TB prefix in AVX form.
class SSI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
: I<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE1]>;
@@ -229,6 +298,14 @@ class PSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
: Ii8<o, F, outs, ins, asm, pattern, SSEPackedSingle>, TB,
Requires<[HasSSE1]>;
+class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern>, XS,
+ Requires<[HasAVX]>;
+class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, SSEPackedSingle>,
+ Requires<[HasAVX]>;
// SSE2 Instruction Templates:
//
@@ -237,6 +314,8 @@ class PSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
// SSDIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix.
// PDI - SSE2 instructions with TB and OpSize prefixes.
// PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes.
+// VSDI - SSE2 instructions with XD prefix in AVX form.
+// VPDI - SSE2 instructions with TB and OpSize prefixes in AVX form.
class SDI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
: I<o, F, outs, ins, asm, pattern>, XD, Requires<[HasSSE2]>;
@@ -253,6 +332,14 @@ class PDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern>
: Ii8<o, F, outs, ins, asm, pattern, SSEPackedDouble>, TB, OpSize,
Requires<[HasSSE2]>;
+class VSDI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern>, XD,
+ Requires<[HasAVX]>;
+class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, SSEPackedDouble>,
+ OpSize, Requires<[HasAVX]>;
// SSE3 Instruction Templates:
//
diff --git a/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 6b9478d..71c4e8b 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -60,3 +60,339 @@ def mmx_pshufw : PatFrag<(ops node:$lhs, node:$rhs),
(vector_shuffle node:$lhs, node:$rhs), [{
return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], MMX_SHUFFLE_get_shuf_imm>;
+
+//===----------------------------------------------------------------------===//
+// SSE specific DAG Nodes.
+//===----------------------------------------------------------------------===//
+
+def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
+ SDTCisFP<0>, SDTCisInt<2> ]>;
+def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
+ SDTCisFP<1>, SDTCisVT<3, i8>]>;
+
+def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
+def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
+def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
+def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
+def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
+def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
+def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
+def X86pshufb : SDNode<"X86ISD::PSHUFB",
+ SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
+ SDTCisSameAs<0,2>]>>;
+def X86pextrb : SDNode<"X86ISD::PEXTRB",
+ SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
+def X86pextrw : SDNode<"X86ISD::PEXTRW",
+ SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
+def X86pinsrb : SDNode<"X86ISD::PINSRB",
+ SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
+ SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
+def X86pinsrw : SDNode<"X86ISD::PINSRW",
+ SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
+ SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
+def X86insrtps : SDNode<"X86ISD::INSERTPS",
+ SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
+ SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
+def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
+ SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
+def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
+ [SDNPHasChain, SDNPMayLoad]>;
+def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
+def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
+def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
+def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
+def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
+def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
+def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
+def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
+def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
+def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
+def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
+def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
+
+def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
+ SDTCisVT<1, v4f32>,
+ SDTCisVT<2, v4f32>]>;
+def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
+
+//===----------------------------------------------------------------------===//
+// SSE Complex Patterns
+//===----------------------------------------------------------------------===//
+
+// These are 'extloads' from a scalar to the low element of a vector, zeroing
+// the top elements. These are used for the SSE 'ss' and 'sd' instruction
+// forms.
+def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
+ [SDNPHasChain, SDNPMayLoad]>;
+def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
+ [SDNPHasChain, SDNPMayLoad]>;
+
+def ssmem : Operand<v4f32> {
+ let PrintMethod = "printf32mem";
+ let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
+ let ParserMatchClass = X86MemAsmOperand;
+}
+def sdmem : Operand<v2f64> {
+ let PrintMethod = "printf64mem";
+ let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
+ let ParserMatchClass = X86MemAsmOperand;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE pattern fragments
+//===----------------------------------------------------------------------===//
+
+def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
+def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
+def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
+def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
+
+// FIXME: move this to a more appropriate place after all AVX is done.
+def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
+def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
+def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
+def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;
+
+// Like 'store', but always requires vector alignment.
+def alignedstore : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getAlignment() >= 16;
+}]>;
+
+// Like 'load', but always requires vector alignment.
+def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return cast<LoadSDNode>(N)->getAlignment() >= 16;
+}]>;
+
+def alignedloadfsf32 : PatFrag<(ops node:$ptr),
+ (f32 (alignedload node:$ptr))>;
+def alignedloadfsf64 : PatFrag<(ops node:$ptr),
+ (f64 (alignedload node:$ptr))>;
+def alignedloadv4f32 : PatFrag<(ops node:$ptr),
+ (v4f32 (alignedload node:$ptr))>;
+def alignedloadv2f64 : PatFrag<(ops node:$ptr),
+ (v2f64 (alignedload node:$ptr))>;
+def alignedloadv4i32 : PatFrag<(ops node:$ptr),
+ (v4i32 (alignedload node:$ptr))>;
+def alignedloadv2i64 : PatFrag<(ops node:$ptr),
+ (v2i64 (alignedload node:$ptr))>;
+
+// FIXME: move this to a more appropriate place after all AVX is done.
+def alignedloadv8f32 : PatFrag<(ops node:$ptr),
+ (v8f32 (alignedload node:$ptr))>;
+def alignedloadv4f64 : PatFrag<(ops node:$ptr),
+ (v4f64 (alignedload node:$ptr))>;
+def alignedloadv8i32 : PatFrag<(ops node:$ptr),
+ (v8i32 (alignedload node:$ptr))>;
+def alignedloadv4i64 : PatFrag<(ops node:$ptr),
+ (v4i64 (alignedload node:$ptr))>;
+
+// Like 'load', but uses special alignment checks suitable for use in
+// memory operands in most SSE instructions, which are required to
+// be naturally aligned on some targets but not on others. If the subtarget
+// allows unaligned accesses, match any load, though this may require
+// setting a feature bit in the processor (on startup, for example).
+// Opteron 10h and later implement such a feature.
+def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return Subtarget->hasVectorUAMem()
+ || cast<LoadSDNode>(N)->getAlignment() >= 16;
+}]>;
+
+def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
+def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
+def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
+def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
+def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
+def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
+def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
+
+// FIXME: move this to a more appropriate place after all AVX is done.
+def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
+def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
+
+// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
+// 16-byte boundary.
+// FIXME: 8-byte alignment for MMX reads is not required
+def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+ return cast<LoadSDNode>(N)->getAlignment() >= 8;
+}]>;
+
+def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
+def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
+def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
+def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
+
+// MOVNT Support
+// Like 'store', but requires the non-temporal bit to be set
+def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
+ (st node:$val, node:$ptr), [{
+ if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+ return ST->isNonTemporal();
+ return false;
+}]>;
+
+def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
+ (st node:$val, node:$ptr), [{
+ if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+ return ST->isNonTemporal() && !ST->isTruncatingStore() &&
+ ST->getAddressingMode() == ISD::UNINDEXED &&
+ ST->getAlignment() >= 16;
+ return false;
+}]>;
+
+def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
+ (st node:$val, node:$ptr), [{
+ if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
+ return ST->isNonTemporal() &&
+ ST->getAlignment() < 16;
+ return false;
+}]>;
+
+def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
+def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
+def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
+def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
+def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
+def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
+
+def vzmovl_v2i64 : PatFrag<(ops node:$src),
+ (bitconvert (v2i64 (X86vzmovl
+ (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
+def vzmovl_v4i32 : PatFrag<(ops node:$src),
+ (bitconvert (v4i32 (X86vzmovl
+ (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;
+
+def vzload_v2i64 : PatFrag<(ops node:$src),
+ (bitconvert (v2i64 (X86vzload node:$src)))>;
+
+
+def fp32imm0 : PatLeaf<(f32 fpimm), [{
+ return N->isExactlyValue(+0.0);
+}]>;
+
+// BYTE_imm - Transform bit immediates into byte immediates.
+def BYTE_imm : SDNodeXForm<imm, [{
+ // Transformation function: imm >> 3
+ return getI32Imm(N->getZExtValue() >> 3);
+}]>;
+
+// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
+// SHUFP* etc. imm.
+def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
+ return getI8Imm(X86::getShuffleSHUFImmediate(N));
+}]>;
+
+// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
+// PSHUFHW imm.
+def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
+ return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
+}]>;
+
+// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
+// PSHUFLW imm.
+def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
+ return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
+}]>;
+
+// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
+// a PALIGNR imm.
+def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
+ return getI8Imm(X86::getShufflePALIGNRImmediate(N));
+}]>;
+
+def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+ return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
+}]>;
+
+def movddup : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movlp : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movl : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
+}]>;
+
+def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
+}], SHUFFLE_get_shuf_imm>;
+
+def shufp : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
+}], SHUFFLE_get_shuf_imm>;
+
+def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
+}], SHUFFLE_get_pshufhw_imm>;
+
+def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
+}], SHUFFLE_get_pshuflw_imm>;
+
+def palign : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
+}], SHUFFLE_get_palign_imm>;
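Each shuffle fragment above pairs a C++ predicate (does this mask fit the instruction?) with an SDNodeXForm that folds the shuffle mask into the instruction's 8-bit immediate. For the PSHUFD/SHUFP family that immediate packs one 2-bit source selector per destination lane; a worked sketch of the encoding, using a hypothetical helper rather than the real X86::getShuffleSHUFImmediate:

#include <cstdint>
static uint8_t encodeShuf4(const int Mask[4]) {
  // Destination lane i takes its 2-bit source selector in bits [2i+1:2i].
  uint8_t Imm = 0;
  for (int i = 3; i >= 0; --i)
    Imm = (Imm << 2) | (Mask[i] & 0x3);
  return Imm;
}
// The identity mask {0,1,2,3} encodes as 0xE4 (0b11100100).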
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp b/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
index 34e12ca..ce471ea 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -784,7 +784,9 @@ static bool isFrameLoadOpcode(int Opcode) {
case X86::MOV8rm:
case X86::MOV16rm:
case X86::MOV32rm:
+ case X86::MOV32rm_TC:
case X86::MOV64rm:
+ case X86::MOV64rm_TC:
case X86::LD_Fp64m:
case X86::MOVSSrm:
case X86::MOVSDrm:
@@ -805,7 +807,9 @@ static bool isFrameStoreOpcode(int Opcode) {
case X86::MOV8mr:
case X86::MOV16mr:
case X86::MOV32mr:
+ case X86::MOV32mr_TC:
case X86::MOV64mr:
+ case X86::MOV64mr_TC:
case X86::ST_FpP64m:
case X86::MOVSSmr:
case X86::MOVSDmr:
@@ -863,7 +867,7 @@ unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
if (isFrameStoreOpcode(MI->getOpcode()))
if (isFrameOperand(MI, 0, FrameIndex))
- return MI->getOperand(X86AddrNumOperands).getReg();
+ return MI->getOperand(X86::AddrNumOperands).getReg();
return 0;
}
@@ -1064,14 +1068,9 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const {
+ const TargetRegisterInfo &TRI) const {
DebugLoc DL = Orig->getDebugLoc();
- if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
- DestReg = TRI->getSubReg(DestReg, SubIdx);
- SubIdx = 0;
- }
-
// MOV32r0 etc. are implemented with xor which clobbers condition code.
// Re-materialize them as movri instructions to avoid side effects.
bool Clone = true;
@@ -1098,14 +1097,13 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
if (Clone) {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
- MI->getOperand(0).setReg(DestReg);
MBB.insert(I, MI);
} else {
- BuildMI(MBB, I, DL, get(Opc), DestReg).addImm(0);
+ BuildMI(MBB, I, DL, get(Opc)).addOperand(Orig->getOperand(0)).addImm(0);
}
MachineInstr *NewMI = prior(I);
- NewMI->getOperand(0).setSubReg(SubIdx);
+ NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
}
/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
@@ -1151,10 +1149,9 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
// least on modern x86 machines).
BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
MachineInstr *InsMI =
- BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::INSERT_SUBREG),leaInReg)
- .addReg(leaInReg)
- .addReg(Src, getKillRegState(isKill))
- .addImm(X86::sub_16bit);
+ BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
+ .addReg(leaInReg, RegState::Define, X86::sub_16bit)
+ .addReg(Src, getKillRegState(isKill));
MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
get(Opc), leaOutReg);
@@ -1165,20 +1162,20 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
case X86::SHL16ri: {
unsigned ShAmt = MI->getOperand(2).getImm();
MIB.addReg(0).addImm(1 << ShAmt)
- .addReg(leaInReg, RegState::Kill).addImm(0);
+ .addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
break;
}
case X86::INC16r:
case X86::INC64_16r:
- addLeaRegOffset(MIB, leaInReg, true, 1);
+ addRegOffset(MIB, leaInReg, true, 1);
break;
case X86::DEC16r:
case X86::DEC64_16r:
- addLeaRegOffset(MIB, leaInReg, true, -1);
+ addRegOffset(MIB, leaInReg, true, -1);
break;
case X86::ADD16ri:
case X86::ADD16ri8:
- addLeaRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
+ addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
break;
case X86::ADD16rr: {
unsigned Src2 = MI->getOperand(2).getReg();
@@ -1195,10 +1192,9 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
// well be shifting and then extracting the lower 16-bits.
BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
InsMI2 =
- BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::INSERT_SUBREG),leaInReg2)
- .addReg(leaInReg2)
- .addReg(Src2, getKillRegState(isKill2))
- .addImm(X86::sub_16bit);
+ BuildMI(*MFI, MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
+ .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
+ .addReg(Src2, getKillRegState(isKill2));
addRegReg(MIB, leaInReg, true, leaInReg2, true);
}
if (LV && isKill2 && InsMI2)
@@ -1209,10 +1205,9 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
MachineInstr *NewMI = MIB;
MachineInstr *ExtMI =
- BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::EXTRACT_SUBREG))
+ BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
- .addReg(leaOutReg, RegState::Kill)
- .addImm(X86::sub_16bit);
+ .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);
if (LV) {
// Update live variables
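The rewrites in this hunk replace explicit INSERT_SUBREG/EXTRACT_SUBREG instructions with the generic COPY opcode carrying a sub-register index on one operand. In isolation, the two directions of the idiom look like this (a sketch reusing only calls that appear in the patch; Dst32, Src16, Dst16, and Src32 are placeholder registers):

// Insert a 16-bit value into the low half of a 32-bit vreg:
//   Dst32:sub_16bit = COPY Src16
BuildMI(*MFI, MBBI, DL, get(TargetOpcode::COPY))
    .addReg(Dst32, RegState::Define, X86::sub_16bit)
    .addReg(Src16);
// Extract the low 16 bits back out:
//   Dst16 = COPY Src32:sub_16bit
BuildMI(*MFI, MBBI, DL, get(TargetOpcode::COPY), Dst16)
    .addReg(Src32, 0, X86::sub_16bit);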
@@ -1283,7 +1278,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
.addReg(0).addImm(1 << ShAmt)
.addReg(Src, getKillRegState(isKill))
- .addImm(0);
+ .addImm(0).addReg(0);
break;
}
case X86::SHL32ri: {
@@ -1297,7 +1292,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
.addReg(0).addImm(1 << ShAmt)
- .addReg(Src, getKillRegState(isKill)).addImm(0);
+ .addReg(Src, getKillRegState(isKill)).addImm(0).addReg(0);
break;
}
case X86::SHL16ri: {
@@ -1313,7 +1308,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
.addReg(0).addImm(1 << ShAmt)
.addReg(Src, getKillRegState(isKill))
- .addImm(0);
+ .addImm(0).addReg(0);
break;
}
default: {
@@ -1331,7 +1326,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
- NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, 1);
@@ -1353,7 +1348,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
- NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, -1);
@@ -1401,7 +1396,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD64ri32:
case X86::ADD64ri8:
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, MI->getOperand(2).getImm());
@@ -1410,7 +1405,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
case X86::ADD32ri8: {
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
- NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, MI->getOperand(2).getImm());
@@ -1421,7 +1416,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
.addReg(Dest, RegState::Define |
getDeadRegState(isDead)),
Src, isKill, MI->getOperand(2).getImm());
@@ -1845,9 +1840,8 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc operand
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
@@ -1856,7 +1850,7 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
if (Cond.empty()) {
// Unconditional branch?
assert(!FBB && "Unconditional branch with multiple successors!");
- BuildMI(&MBB, dl, get(X86::JMP_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(TBB);
return 1;
}
@@ -1866,27 +1860,27 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
switch (CC) {
case X86::COND_NP_OR_E:
// Synthesize NP_OR_E with two branches.
- BuildMI(&MBB, dl, get(X86::JNP_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JNP_4)).addMBB(TBB);
++Count;
- BuildMI(&MBB, dl, get(X86::JE_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JE_4)).addMBB(TBB);
++Count;
break;
case X86::COND_NE_OR_P:
// Synthesize NE_OR_P with two branches.
- BuildMI(&MBB, dl, get(X86::JNE_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JNE_4)).addMBB(TBB);
++Count;
- BuildMI(&MBB, dl, get(X86::JP_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JP_4)).addMBB(TBB);
++Count;
break;
default: {
unsigned Opc = GetCondBranchFromCond(CC);
- BuildMI(&MBB, dl, get(Opc)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
++Count;
}
}
if (FBB) {
// Two-way Conditional branch. Insert the second branch.
- BuildMI(&MBB, dl, get(X86::JMP_4)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(FBB);
++Count;
}
return Count;
@@ -1897,237 +1891,153 @@ static bool isHReg(unsigned Reg) {
return X86::GR8_ABCD_HRegClass.contains(Reg);
}
-bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
-
- // Determine if DstRC and SrcRC have a common superclass in common.
- const TargetRegisterClass *CommonRC = DestRC;
- if (DestRC == SrcRC)
- /* Source and destination have the same register class. */;
- else if (CommonRC->hasSuperClass(SrcRC))
- CommonRC = SrcRC;
- else if (!DestRC->hasSubClass(SrcRC)) {
- // Neither of GR64_NOREX or GR64_NOSP is a superclass of the other,
- // but we want to copy them as GR64. Similarly, for GR32_NOREX and
- // GR32_NOSP, copy as GR32.
- if (SrcRC->hasSuperClass(&X86::GR64RegClass) &&
- DestRC->hasSuperClass(&X86::GR64RegClass))
- CommonRC = &X86::GR64RegClass;
- else if (SrcRC->hasSuperClass(&X86::GR32RegClass) &&
- DestRC->hasSuperClass(&X86::GR32RegClass))
- CommonRC = &X86::GR32RegClass;
+void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ // First deal with the normal symmetric copies.
+ unsigned Opc = 0;
+ if (X86::GR64RegClass.contains(DestReg, SrcReg))
+ Opc = X86::MOV64rr;
+ else if (X86::GR32RegClass.contains(DestReg, SrcReg))
+ Opc = X86::MOV32rr;
+ else if (X86::GR16RegClass.contains(DestReg, SrcReg))
+ Opc = X86::MOV16rr;
+ else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
+ // Copying to or from a physical H register on x86-64 requires a NOREX
+ // move. Otherwise use a normal move.
+ if ((isHReg(DestReg) || isHReg(SrcReg)) &&
+ TM.getSubtarget<X86Subtarget>().is64Bit())
+ Opc = X86::MOV8rr_NOREX;
else
- CommonRC = 0;
- }
-
- if (CommonRC) {
- unsigned Opc;
- if (CommonRC == &X86::GR64RegClass || CommonRC == &X86::GR64_NOSPRegClass) {
- Opc = X86::MOV64rr;
- } else if (CommonRC == &X86::GR32RegClass ||
- CommonRC == &X86::GR32_NOSPRegClass) {
- Opc = X86::MOV32rr;
- } else if (CommonRC == &X86::GR16RegClass) {
- Opc = X86::MOV16rr;
- } else if (CommonRC == &X86::GR8RegClass) {
- // Copying to or from a physical H register on x86-64 requires a NOREX
- // move. Otherwise use a normal move.
- if ((isHReg(DestReg) || isHReg(SrcReg)) &&
- TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8rr_NOREX;
- else
- Opc = X86::MOV8rr;
- } else if (CommonRC == &X86::GR64_ABCDRegClass) {
- Opc = X86::MOV64rr;
- } else if (CommonRC == &X86::GR32_ABCDRegClass) {
- Opc = X86::MOV32rr;
- } else if (CommonRC == &X86::GR16_ABCDRegClass) {
- Opc = X86::MOV16rr;
- } else if (CommonRC == &X86::GR8_ABCD_LRegClass) {
Opc = X86::MOV8rr;
- } else if (CommonRC == &X86::GR8_ABCD_HRegClass) {
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8rr_NOREX;
- else
- Opc = X86::MOV8rr;
- } else if (CommonRC == &X86::GR64_NOREXRegClass ||
- CommonRC == &X86::GR64_NOREX_NOSPRegClass) {
- Opc = X86::MOV64rr;
- } else if (CommonRC == &X86::GR32_NOREXRegClass) {
- Opc = X86::MOV32rr;
- } else if (CommonRC == &X86::GR16_NOREXRegClass) {
- Opc = X86::MOV16rr;
- } else if (CommonRC == &X86::GR8_NOREXRegClass) {
- Opc = X86::MOV8rr;
- } else if (CommonRC == &X86::GR64_TCRegClass) {
- Opc = X86::MOV64rr_TC;
- } else if (CommonRC == &X86::GR32_TCRegClass) {
- Opc = X86::MOV32rr_TC;
- } else if (CommonRC == &X86::RFP32RegClass) {
- Opc = X86::MOV_Fp3232;
- } else if (CommonRC == &X86::RFP64RegClass || CommonRC == &X86::RSTRegClass) {
- Opc = X86::MOV_Fp6464;
- } else if (CommonRC == &X86::RFP80RegClass) {
- Opc = X86::MOV_Fp8080;
- } else if (CommonRC == &X86::FR32RegClass) {
- Opc = X86::FsMOVAPSrr;
- } else if (CommonRC == &X86::FR64RegClass) {
- Opc = X86::FsMOVAPDrr;
- } else if (CommonRC == &X86::VR128RegClass) {
- Opc = X86::MOVAPSrr;
- } else if (CommonRC == &X86::VR64RegClass) {
- Opc = X86::MMX_MOVQ64rr;
- } else {
- return false;
- }
- BuildMI(MBB, MI, DL, get(Opc), DestReg).addReg(SrcReg);
- return true;
+ } else if (X86::VR128RegClass.contains(DestReg, SrcReg))
+ Opc = X86::MOVAPSrr;
+ else if (X86::VR64RegClass.contains(DestReg, SrcReg))
+ Opc = X86::MMX_MOVQ64rr;
+
+ if (Opc) {
+ BuildMI(MBB, MI, DL, get(Opc), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
}
// Moving EFLAGS to / from another register requires a push and a pop.
- if (SrcRC == &X86::CCRRegClass) {
- if (SrcReg != X86::EFLAGS)
- return false;
- if (DestRC == &X86::GR64RegClass || DestRC == &X86::GR64_NOSPRegClass) {
+ if (SrcReg == X86::EFLAGS) {
+ if (X86::GR64RegClass.contains(DestReg)) {
BuildMI(MBB, MI, DL, get(X86::PUSHF64));
BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg);
- return true;
- } else if (DestRC == &X86::GR32RegClass ||
- DestRC == &X86::GR32_NOSPRegClass) {
+ return;
+ } else if (X86::GR32RegClass.contains(DestReg)) {
BuildMI(MBB, MI, DL, get(X86::PUSHF32));
BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg);
- return true;
+ return;
}
- } else if (DestRC == &X86::CCRRegClass) {
- if (DestReg != X86::EFLAGS)
- return false;
- if (SrcRC == &X86::GR64RegClass || DestRC == &X86::GR64_NOSPRegClass) {
- BuildMI(MBB, MI, DL, get(X86::PUSH64r)).addReg(SrcReg);
+ }
+ if (DestReg == X86::EFLAGS) {
+ if (X86::GR64RegClass.contains(SrcReg)) {
+ BuildMI(MBB, MI, DL, get(X86::PUSH64r))
+ .addReg(SrcReg, getKillRegState(KillSrc));
BuildMI(MBB, MI, DL, get(X86::POPF64));
- return true;
- } else if (SrcRC == &X86::GR32RegClass ||
- DestRC == &X86::GR32_NOSPRegClass) {
- BuildMI(MBB, MI, DL, get(X86::PUSH32r)).addReg(SrcReg);
+ return;
+ } else if (X86::GR32RegClass.contains(SrcReg)) {
+ BuildMI(MBB, MI, DL, get(X86::PUSH32r))
+ .addReg(SrcReg, getKillRegState(KillSrc));
BuildMI(MBB, MI, DL, get(X86::POPF32));
- return true;
- }
- }
-
- // Moving from ST(0) turns into FpGET_ST0_32 etc.
- if (SrcRC == &X86::RSTRegClass) {
- // Copying from ST(0)/ST(1).
- if (SrcReg != X86::ST0 && SrcReg != X86::ST1)
- // Can only copy from ST(0)/ST(1) right now
- return false;
- bool isST0 = SrcReg == X86::ST0;
- unsigned Opc;
- if (DestRC == &X86::RFP32RegClass)
- Opc = isST0 ? X86::FpGET_ST0_32 : X86::FpGET_ST1_32;
- else if (DestRC == &X86::RFP64RegClass)
- Opc = isST0 ? X86::FpGET_ST0_64 : X86::FpGET_ST1_64;
- else {
- if (DestRC != &X86::RFP80RegClass)
- return false;
- Opc = isST0 ? X86::FpGET_ST0_80 : X86::FpGET_ST1_80;
+ return;
}
- BuildMI(MBB, MI, DL, get(Opc), DestReg);
- return true;
}
- // Moving to ST(0) turns into FpSET_ST0_32 etc.
- if (DestRC == &X86::RSTRegClass) {
- // Copying to ST(0) / ST(1).
- if (DestReg != X86::ST0 && DestReg != X86::ST1)
- // Can only copy to TOS right now
- return false;
- bool isST0 = DestReg == X86::ST0;
- unsigned Opc;
- if (SrcRC == &X86::RFP32RegClass)
- Opc = isST0 ? X86::FpSET_ST0_32 : X86::FpSET_ST1_32;
- else if (SrcRC == &X86::RFP64RegClass)
- Opc = isST0 ? X86::FpSET_ST0_64 : X86::FpSET_ST1_64;
- else {
- if (SrcRC != &X86::RFP80RegClass)
- return false;
- Opc = isST0 ? X86::FpSET_ST0_80 : X86::FpSET_ST1_80;
- }
- BuildMI(MBB, MI, DL, get(Opc)).addReg(SrcReg);
- return true;
- }
-
- // Not yet supported!
- return false;
+ DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg)
+ << " to " << RI.getName(DestReg) << '\n');
+ llvm_unreachable("Cannot emit physreg copy instruction");
}
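copyPhysReg replaces the old copyRegToReg hook: callers pass two physical registers and a kill flag instead of register classes, and there is no failure return; a copy the target cannot emit is a bug and dies on the llvm_unreachable above. A sketch of a call site, with insertCopy as a made-up wrapper:

static void insertCopy(const X86InstrInfo &TII, MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator I, DebugLoc DL) {
  // GR32 <- GR32 becomes MOV32rr; EFLAGS copies expand to PUSHF/POP pairs.
  TII.copyPhysReg(MBB, I, DL, X86::EAX, X86::EBX, /*KillSrc=*/true);
}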
-static unsigned getStoreRegOpcode(unsigned SrcReg,
- const TargetRegisterClass *RC,
- bool isStackAligned,
- TargetMachine &TM) {
- unsigned Opc = 0;
- if (RC == &X86::GR64RegClass || RC == &X86::GR64_NOSPRegClass) {
- Opc = X86::MOV64mr;
- } else if (RC == &X86::GR32RegClass || RC == &X86::GR32_NOSPRegClass) {
- Opc = X86::MOV32mr;
- } else if (RC == &X86::GR16RegClass) {
- Opc = X86::MOV16mr;
- } else if (RC == &X86::GR8RegClass) {
+static unsigned getLoadStoreRegOpcode(unsigned Reg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ const TargetMachine &TM,
+ bool load) {
+ switch (RC->getID()) {
+ default:
+ llvm_unreachable("Unknown regclass");
+ case X86::GR64RegClassID:
+ case X86::GR64_NOSPRegClassID:
+ return load ? X86::MOV64rm : X86::MOV64mr;
+ case X86::GR32RegClassID:
+ case X86::GR32_NOSPRegClassID:
+ case X86::GR32_ADRegClassID:
+ return load ? X86::MOV32rm : X86::MOV32mr;
+ case X86::GR16RegClassID:
+ return load ? X86::MOV16rm : X86::MOV16mr;
+ case X86::GR8RegClassID:
// Copying to or from a physical H register on x86-64 requires a NOREX
// move. Otherwise use a normal move.
- if (isHReg(SrcReg) &&
+ if (isHReg(Reg) &&
TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8mr_NOREX;
+ return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
else
- Opc = X86::MOV8mr;
- } else if (RC == &X86::GR64_ABCDRegClass) {
- Opc = X86::MOV64mr;
- } else if (RC == &X86::GR32_ABCDRegClass) {
- Opc = X86::MOV32mr;
- } else if (RC == &X86::GR16_ABCDRegClass) {
- Opc = X86::MOV16mr;
- } else if (RC == &X86::GR8_ABCD_LRegClass) {
- Opc = X86::MOV8mr;
- } else if (RC == &X86::GR8_ABCD_HRegClass) {
+ return load ? X86::MOV8rm : X86::MOV8mr;
+ case X86::GR64_ABCDRegClassID:
+ return load ? X86::MOV64rm : X86::MOV64mr;
+ case X86::GR32_ABCDRegClassID:
+ return load ? X86::MOV32rm : X86::MOV32mr;
+ case X86::GR16_ABCDRegClassID:
+ return load ? X86::MOV16rm : X86::MOV16mr;
+ case X86::GR8_ABCD_LRegClassID:
+ return load ? X86::MOV8rm : X86::MOV8mr;
+ case X86::GR8_ABCD_HRegClassID:
if (TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8mr_NOREX;
+ return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
else
- Opc = X86::MOV8mr;
- } else if (RC == &X86::GR64_NOREXRegClass ||
- RC == &X86::GR64_NOREX_NOSPRegClass) {
- Opc = X86::MOV64mr;
- } else if (RC == &X86::GR32_NOREXRegClass) {
- Opc = X86::MOV32mr;
- } else if (RC == &X86::GR16_NOREXRegClass) {
- Opc = X86::MOV16mr;
- } else if (RC == &X86::GR8_NOREXRegClass) {
- Opc = X86::MOV8mr;
- } else if (RC == &X86::GR64_TCRegClass) {
- Opc = X86::MOV64mr_TC;
- } else if (RC == &X86::GR32_TCRegClass) {
- Opc = X86::MOV32mr_TC;
- } else if (RC == &X86::RFP80RegClass) {
- Opc = X86::ST_FpP80m; // pops
- } else if (RC == &X86::RFP64RegClass) {
- Opc = X86::ST_Fp64m;
- } else if (RC == &X86::RFP32RegClass) {
- Opc = X86::ST_Fp32m;
- } else if (RC == &X86::FR32RegClass) {
- Opc = X86::MOVSSmr;
- } else if (RC == &X86::FR64RegClass) {
- Opc = X86::MOVSDmr;
- } else if (RC == &X86::VR128RegClass) {
+ return load ? X86::MOV8rm : X86::MOV8mr;
+ case X86::GR64_NOREXRegClassID:
+ case X86::GR64_NOREX_NOSPRegClassID:
+ return load ? X86::MOV64rm : X86::MOV64mr;
+ case X86::GR32_NOREXRegClassID:
+ return load ? X86::MOV32rm : X86::MOV32mr;
+ case X86::GR16_NOREXRegClassID:
+ return load ? X86::MOV16rm : X86::MOV16mr;
+ case X86::GR8_NOREXRegClassID:
+ return load ? X86::MOV8rm : X86::MOV8mr;
+ case X86::GR64_TCRegClassID:
+ return load ? X86::MOV64rm_TC : X86::MOV64mr_TC;
+ case X86::GR32_TCRegClassID:
+ return load ? X86::MOV32rm_TC : X86::MOV32mr_TC;
+ case X86::RFP80RegClassID:
+ return load ? X86::LD_Fp80m : X86::ST_FpP80m;
+ case X86::RFP64RegClassID:
+ return load ? X86::LD_Fp64m : X86::ST_Fp64m;
+ case X86::RFP32RegClassID:
+ return load ? X86::LD_Fp32m : X86::ST_Fp32m;
+ case X86::FR32RegClassID:
+ return load ? X86::MOVSSrm : X86::MOVSSmr;
+ case X86::FR64RegClassID:
+ return load ? X86::MOVSDrm : X86::MOVSDmr;
+ case X86::VR128RegClassID:
// If stack is realigned we can use aligned stores.
- Opc = isStackAligned ? X86::MOVAPSmr : X86::MOVUPSmr;
- } else if (RC == &X86::VR64RegClass) {
- Opc = X86::MMX_MOVQ64mr;
- } else {
- llvm_unreachable("Unknown regclass");
+ if (isStackAligned)
+ return load ? X86::MOVAPSrm : X86::MOVAPSmr;
+ else
+ return load ? X86::MOVUPSrm : X86::MOVUPSmr;
+ case X86::VR64RegClassID:
+ return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
}
+}
+
+static unsigned getStoreRegOpcode(unsigned SrcReg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ TargetMachine &TM) {
+ return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, TM, false);
+}
- return Opc;
+
+static unsigned getLoadRegOpcode(unsigned DestReg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ const TargetMachine &TM) {
+ return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, TM, true);
}
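Folding the two opcode tables into getLoadStoreRegOpcode keeps every load/store pair on a single switch line, so the two directions can no longer drift apart. Per the VR128 case above, for instance, the same predicate now picks the aligned or unaligned XMM move for both spills and reloads (a usage sketch inside this file, since the helpers are static):

// isStackAligned selects between the aligned and unaligned XMM moves:
unsigned AlignedSpill   = getStoreRegOpcode(X86::XMM0, &X86::VR128RegClass,
                                            /*isStackAligned=*/true,  TM); // X86::MOVAPSmr
unsigned UnalignedSpill = getStoreRegOpcode(X86::XMM0, &X86::VR128RegClass,
                                            /*isStackAligned=*/false, TM); // X86::MOVUPSmr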
void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -2150,7 +2060,7 @@ void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- bool isAligned = (*MMOBegin)->getAlignment() >= 16;
+ bool isAligned = MMOBegin != MMOEnd && (*MMOBegin)->getAlignment() >= 16;
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
@@ -2161,72 +2071,6 @@ void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
NewMIs.push_back(MIB);
}
-static unsigned getLoadRegOpcode(unsigned DestReg,
- const TargetRegisterClass *RC,
- bool isStackAligned,
- const TargetMachine &TM) {
- unsigned Opc = 0;
- if (RC == &X86::GR64RegClass || RC == &X86::GR64_NOSPRegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32RegClass || RC == &X86::GR32_NOSPRegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16RegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8RegClass) {
- // Copying to or from a physical H register on x86-64 requires a NOREX
- // move. Otherwise use a normal move.
- if (isHReg(DestReg) &&
- TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8rm_NOREX;
- else
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR64_ABCDRegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32_ABCDRegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16_ABCDRegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8_ABCD_LRegClass) {
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR8_ABCD_HRegClass) {
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8rm_NOREX;
- else
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR64_NOREXRegClass ||
- RC == &X86::GR64_NOREX_NOSPRegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32_NOREXRegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16_NOREXRegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8_NOREXRegClass) {
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR64_TCRegClass) {
- Opc = X86::MOV64rm_TC;
- } else if (RC == &X86::GR32_TCRegClass) {
- Opc = X86::MOV32rm_TC;
- } else if (RC == &X86::RFP80RegClass) {
- Opc = X86::LD_Fp80m;
- } else if (RC == &X86::RFP64RegClass) {
- Opc = X86::LD_Fp64m;
- } else if (RC == &X86::RFP32RegClass) {
- Opc = X86::LD_Fp32m;
- } else if (RC == &X86::FR32RegClass) {
- Opc = X86::MOVSSrm;
- } else if (RC == &X86::FR64RegClass) {
- Opc = X86::MOVSDrm;
- } else if (RC == &X86::VR128RegClass) {
- // If stack is realigned we can use aligned loads.
- Opc = isStackAligned ? X86::MOVAPSrm : X86::MOVUPSrm;
- } else if (RC == &X86::VR64RegClass) {
- Opc = X86::MMX_MOVQ64rm;
- } else {
- llvm_unreachable("Unknown regclass");
- }
-
- return Opc;
-}
void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
@@ -2246,7 +2090,7 @@ void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- bool isAligned = (*MMOBegin)->getAlignment() >= 16;
+ bool isAligned = MMOBegin != MMOEnd && (*MMOBegin)->getAlignment() >= 16;
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
@@ -2277,18 +2121,17 @@ bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
unsigned Opc = is64Bit ? X86::PUSH64r : X86::PUSH32r;
for (unsigned i = CSI.size(); i != 0; --i) {
unsigned Reg = CSI[i-1].getReg();
- const TargetRegisterClass *RegClass = CSI[i-1].getRegClass();
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
if (Reg == FPReg)
// X86RegisterInfo::emitPrologue will handle spilling of frame register.
continue;
- if (RegClass != &X86::VR128RegClass && !isWin64) {
+ if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
CalleeFrameSize += SlotSize;
BuildMI(MBB, MI, DL, get(Opc)).addReg(Reg, RegState::Kill);
} else {
- storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(), RegClass,
- &RI);
+ storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
+ &X86::VR128RegClass, &RI);
}
}
@@ -2315,11 +2158,11 @@ bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
if (Reg == FPReg)
// X86RegisterInfo::emitEpilogue will handle restoring of frame register.
continue;
- const TargetRegisterClass *RegClass = CSI[i].getRegClass();
- if (RegClass != &X86::VR128RegClass && !isWin64) {
+ if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
BuildMI(MBB, MI, DL, get(Opc), Reg);
} else {
- loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RegClass, &RI);
+ loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
+ &X86::VR128RegClass, &RI);
}
}
return true;
@@ -2492,7 +2335,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
}
// No fusion
- if (PrintFailedFusing)
+ if (PrintFailedFusing && !MI->isCopy())
dbgs() << "We failed to fuse operand " << i << " in " << *MI;
return NULL;
}
@@ -2610,7 +2453,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
} else if (Ops.size() != 1)
return NULL;
- SmallVector<MachineOperand,X86AddrNumOperands> MOs;
+ SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
switch (LoadMI->getOpcode()) {
case X86::V_SET0PS:
case X86::V_SET0PD:
@@ -2632,7 +2475,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
if (TM.getSubtarget<X86Subtarget>().is64Bit())
PICBase = X86::RIP;
else
- // FIXME: PICBase = TM.getInstrInfo()->getGlobalBaseReg(&MF);
+ // FIXME: PICBase = getGlobalBaseReg(&MF);
// This doesn't work for several reasons.
// 1. GlobalBaseReg may have been spilled.
// 2. It may not be live at MI.
@@ -2664,7 +2507,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
default: {
// Folding a normal load. Just copy the load's address operands.
unsigned NumOps = LoadMI->getDesc().getNumOperands();
- for (unsigned i = NumOps - X86AddrNumOperands; i != NumOps; ++i)
+ for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
MOs.push_back(LoadMI->getOperand(i));
break;
}
@@ -2727,7 +2570,7 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
if (I != OpcodeTablePtr->end())
return true;
}
- return false;
+ return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
}
bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
@@ -2751,13 +2594,20 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
const TargetInstrDesc &TID = get(Opc);
const TargetOperandInfo &TOI = TID.OpInfo[Index];
const TargetRegisterClass *RC = TOI.getRegClass(&RI);
- SmallVector<MachineOperand, X86AddrNumOperands> AddrOps;
+ if (!MI->hasOneMemOperand() &&
+ RC == &X86::VR128RegClass &&
+ !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
+ // Without memoperands, loadRegFromAddr and storeRegToAddr will
+ // conservatively assume the address is unaligned. That's bad for
+ // performance.
+ return false;
+ SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
SmallVector<MachineOperand,2> BeforeOps;
SmallVector<MachineOperand,2> AfterOps;
SmallVector<MachineOperand,4> ImpOps;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI->getOperand(i);
- if (i >= Index && i < Index + X86AddrNumOperands)
+ if (i >= Index && i < Index + X86::AddrNumOperands)
AddrOps.push_back(Op);
else if (Op.isReg() && Op.isImplicit())
ImpOps.push_back(Op);
@@ -2776,7 +2626,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
if (UnfoldStore) {
// Address operands cannot be marked isKill.
- for (unsigned i = 1; i != 1 + X86AddrNumOperands; ++i) {
+ for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
MachineOperand &MO = NewMIs[0]->getOperand(i);
if (MO.isReg())
MO.setIsKill(false);
@@ -2873,7 +2723,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
unsigned NumOps = N->getNumOperands();
for (unsigned i = 0; i != NumOps-1; ++i) {
SDValue Op = N->getOperand(i);
- if (i >= Index-NumDefs && i < Index-NumDefs + X86AddrNumOperands)
+ if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
AddrOps.push_back(Op);
else if (i < Index-NumDefs)
BeforeOps.push_back(Op);
@@ -2892,7 +2742,12 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
MachineInstr::mmo_iterator> MMOs =
MF.extractLoadMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
cast<MachineSDNode>(N)->memoperands_end());
- bool isAligned = (*MMOs.first)->getAlignment() >= 16;
+ if (!(*MMOs.first) &&
+ RC == &X86::VR128RegClass &&
+ !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
+ // Do not introduce a slow unaligned load.
+ return false;
+ bool isAligned = (*MMOs.first) && (*MMOs.first)->getAlignment() >= 16;
Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, TM), dl,
VT, MVT::Other, &AddrOps[0], AddrOps.size());
NewNodes.push_back(Load);
@@ -2929,7 +2784,12 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
MachineInstr::mmo_iterator> MMOs =
MF.extractStoreMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
cast<MachineSDNode>(N)->memoperands_end());
- bool isAligned = (*MMOs.first)->getAlignment() >= 16;
+ if (!(*MMOs.first) &&
+ RC == &X86::VR128RegClass &&
+ !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
+ // Do not introduce a slow unaligned store.
+ return false;
+ bool isAligned = (*MMOs.first) && (*MMOs.first)->getAlignment() >= 16;
SDNode *Store = DAG.getMachineNode(getStoreRegOpcode(0, DstRC,
isAligned, TM),
dl, MVT::Other,
@@ -3065,16 +2925,16 @@ bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
EVT VT = Load1->getValueType(0);
switch (VT.getSimpleVT().SimpleTy) {
- default: {
+ default:
// XMM registers. In 64-bit mode we can be a bit more aggressive since we
// have 16 of them to play with.
if (TM.getSubtargetImpl()->is64Bit()) {
if (NumLoads >= 3)
return false;
- } else if (NumLoads)
+ } else if (NumLoads) {
return false;
+ }
break;
- }
case MVT::i8:
case MVT::i16:
case MVT::i32:
@@ -3083,6 +2943,7 @@ bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
case MVT::f64:
if (NumLoads)
return false;
+ break;
}
return true;
@@ -3123,6 +2984,8 @@ bool X86InstrInfo::isX86_64ExtendedReg(unsigned RegNo) {
case X86::R12B: case X86::R13B: case X86::R14B: case X86::R15B:
case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
+ case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11:
+ case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
return true;
}
return false;
@@ -3194,7 +3057,7 @@ unsigned X86InstrInfo::determineREX(const MachineInstr &MI) {
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m:
case X86II::MRMDestMem: {
- unsigned e = (isTwoAddr ? X86AddrNumOperands+1 : X86AddrNumOperands);
+ unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
i = isTwoAddr ? 1 : 0;
if (NumOps > e && isX86_64ExtendedReg(MI.getOperand(e)))
REX |= 1 << 2;
@@ -3546,7 +3409,7 @@ static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
case X86II::MRMDestMem: {
++FinalSize;
FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
- CurOp += X86AddrNumOperands + 1;
+ CurOp += X86::AddrNumOperands + 1;
if (CurOp != NumOps) {
++CurOp;
FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags));
@@ -3565,16 +3428,9 @@ static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
break;
case X86II::MRMSrcMem: {
- int AddrOperands;
- if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
- Opcode == X86::LEA16r || Opcode == X86::LEA32r)
- AddrOperands = X86AddrNumOperands - 1; // No segment register
- else
- AddrOperands = X86AddrNumOperands;
-
++FinalSize;
FinalSize += getMemModRMByteSize(MI, CurOp+1, IsPIC, Is64BitMode);
- CurOp += AddrOperands + 1;
+ CurOp += X86::AddrNumOperands + 1;
if (CurOp != NumOps) {
++CurOp;
FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags));
@@ -3628,7 +3484,7 @@ static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
++FinalSize;
FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
- CurOp += X86AddrNumOperands;
+ CurOp += X86::AddrNumOperands;
if (CurOp != NumOps) {
const MachineOperand &MO = MI.getOperand(CurOp++);
@@ -3694,6 +3550,8 @@ unsigned X86InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
/// the global base register value. Output instructions required to
/// initialize the register in the function entry block, if necessary.
///
+/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
+///
unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
assert(!TM.getSubtarget<X86Subtarget>().is64Bit() &&
"X86-64 PIC uses RIP relative addressing");
@@ -3703,30 +3561,10 @@ unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
if (GlobalBaseReg != 0)
return GlobalBaseReg;
- // Insert the set of GlobalBaseReg into the first MBB of the function
- MachineBasicBlock &FirstMBB = MF->front();
- MachineBasicBlock::iterator MBBI = FirstMBB.begin();
- DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
+ // Create the register. The code to initialize it is inserted
+ // later, by the CGBR pass (below).
MachineRegisterInfo &RegInfo = MF->getRegInfo();
- unsigned PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
-
- const TargetInstrInfo *TII = TM.getInstrInfo();
- // Operand of MovePCtoStack is completely ignored by asm printer. It's
- // only used in JIT code emission as displacement to pc.
- BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
-
- // If we're using vanilla 'GOT' PIC style, we should use relative addressing
- // not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
- if (TM.getSubtarget<X86Subtarget>().isPICStyleGOT()) {
- GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
- // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
- BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
- .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
- X86II::MO_GOT_ABSOLUTE_ADDRESS);
- } else {
- GlobalBaseReg = PC;
- }
-
+ GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
X86FI->setGlobalBaseReg(GlobalBaseReg);
return GlobalBaseReg;
}
@@ -3784,3 +3622,65 @@ void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
NopInst.setOpcode(X86::NOOP);
}
+namespace {
+ /// CGBR - Create Global Base Reg pass. This initializes the PIC
+ /// global base register for x86-32.
+ struct CGBR : public MachineFunctionPass {
+ static char ID;
+ CGBR() : MachineFunctionPass(&ID) {}
+
+ virtual bool runOnMachineFunction(MachineFunction &MF) {
+ const X86TargetMachine *TM =
+ static_cast<const X86TargetMachine *>(&MF.getTarget());
+
+ assert(!TM->getSubtarget<X86Subtarget>().is64Bit() &&
+ "X86-64 PIC uses RIP relative addressing");
+
+ // Only emit a global base reg in PIC mode.
+ if (TM->getRelocationModel() != Reloc::PIC_)
+ return false;
+
+ // Insert the set of GlobalBaseReg into the first MBB of the function
+ MachineBasicBlock &FirstMBB = MF.front();
+ MachineBasicBlock::iterator MBBI = FirstMBB.begin();
+ DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ const X86InstrInfo *TII = TM->getInstrInfo();
+
+ unsigned PC;
+ if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT())
+ PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
+ else
+ PC = TII->getGlobalBaseReg(&MF);
+
+ // The operand of MovePCtoStack is completely ignored by the asm printer.
+ // It's only used in JIT code emission as a displacement to the PC.
+ BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
+
+ // If we're using vanilla 'GOT' PIC style, we should use relative addressing
+ // not to the PC, but to the external _GLOBAL_OFFSET_TABLE_ symbol.
+ if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) {
+ unsigned GlobalBaseReg = TII->getGlobalBaseReg(&MF);
+ // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
+ BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
+ .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
+ X86II::MO_GOT_ABSOLUTE_ADDRESS);
+ }
+
+ return true;
+ }
+
+ virtual const char *getPassName() const {
+ return "X86 PIC Global Base Reg Initialization";
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ };
+}
+
+char CGBR::ID = 0;
+FunctionPass*
+llvm::createGlobalBaseRegPass() { return new CGBR(); }
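// A plausible registration point for the new pass (a sketch only; the real
// hook lives in X86TargetMachine.cpp and is not part of this hunk):
//
//   bool X86TargetMachine::addInstSelector(PassManagerBase &PM,
//                                          CodeGenOpt::Level OptLevel) {
//     PM.add(createX86ISelDag(*this, OptLevel));
//     if (!Subtarget.is64Bit())            // CGBR asserts !is64Bit()
//       PM.add(createGlobalBaseRegPass());
//     return false;
//   }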
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.h b/contrib/llvm/lib/Target/X86/X86InstrInfo.h
index 62d7c74..ad0217a 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.h
@@ -24,6 +24,24 @@ namespace llvm {
class X86TargetMachine;
namespace X86 {
+ // Enums for memory operand decoding. Each memory operand is represented by
+ // a five-operand sequence of the form:
+ //   [BaseReg, ScaleAmt, IndexReg, Disp, Segment]
+ // These enums help decode it.
+ enum {
+ AddrBaseReg = 0,
+ AddrScaleAmt = 1,
+ AddrIndexReg = 2,
+ AddrDisp = 3,
+
+ /// AddrSegmentReg - The operand # of the segment in the memory operand.
+ AddrSegmentReg = 4,
+
+ /// AddrNumOperands - Total number of operands in a memory reference.
+ AddrNumOperands = 5
+ };
+
+
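  // A minimal sketch of how these enums are meant to be used when walking a
  // decoded memory reference ('MemOpNo', the index of the first address
  // operand, is a hypothetical name):
  //
  //   unsigned Base  = MI->getOperand(MemOpNo + X86::AddrBaseReg).getReg();
  //   unsigned Index = MI->getOperand(MemOpNo + X86::AddrIndexReg).getReg();
  //   int64_t  Disp  = MI->getOperand(MemOpNo + X86::AddrDisp).getImm();
  //   // The first operand past the whole memory reference:
  //   unsigned NextOp = MemOpNo + X86::AddrNumOperands;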
// X86 specific condition code. These correspond to X86_*_COND in
// X86InstrInfo.td. They must be kept in synch.
enum CondCode {
@@ -173,7 +191,19 @@ namespace X86II {
/// indicates that the reference is actually to "FOO$non_lazy_ptr -PICBASE",
/// which is a PIC-base-relative reference to a hidden dyld lazy pointer
/// stub.
- MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE
+ MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE,
+
+ /// MO_TLVP - On a symbol operand this indicates that the immediate is
+ /// some TLS offset.
+ ///
+ /// This is the TLS offset for the Darwin TLS mechanism.
+ MO_TLVP,
+
+ /// MO_TLVP_PIC_BASE - On a symbol operand this indicates that the immediate
+ /// is some TLS offset from the picbase.
+ ///
+ /// This is the 32-bit TLS offset for Darwin TLS in PIC mode.
+ MO_TLVP_PIC_BASE
};
}
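// A sketch of how a flag such as MO_TLVP is attached when lowering a symbol
// operand (illustrative; 'GA' and the surrounding DAG context are assumed):
//
//   SDValue Sym = DAG.getTargetGlobalAddress(GA->getGlobal(),
//                                            GA->getValueType(0),
//                                            GA->getOffset(),
//                                            X86II::MO_TLVP);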
@@ -203,6 +233,7 @@ inline static bool isGlobalRelativeToPICBase(unsigned char TargetFlag) {
case X86II::MO_PIC_BASE_OFFSET: // Darwin local global.
case X86II::MO_DARWIN_NONLAZY_PIC_BASE: // Darwin/32 external global.
case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Darwin/32 hidden global.
+ case X86II::MO_TLVP: // ??? Pretty sure..
return true;
default:
return false;
@@ -347,9 +378,10 @@ namespace X86II {
Imm8 = 1 << ImmShift,
Imm8PCRel = 2 << ImmShift,
Imm16 = 3 << ImmShift,
- Imm32 = 4 << ImmShift,
- Imm32PCRel = 5 << ImmShift,
- Imm64 = 6 << ImmShift,
+ Imm16PCRel = 4 << ImmShift,
+ Imm32 = 5 << ImmShift,
+ Imm32PCRel = 6 << ImmShift,
+ Imm64 = 7 << ImmShift,
//===------------------------------------------------------------------===//
// FP Instruction Classification... Zero is non-fp instruction.
@@ -403,28 +435,53 @@ namespace X86II {
SSEDomainShift = 22,
OpcodeShift = 24,
- OpcodeMask = 0xFF << OpcodeShift
+ OpcodeMask = 0xFF << OpcodeShift,
+
+ //===------------------------------------------------------------------===//
+ // VEX - The opcode prefix used by AVX instructions
+ VEX = 1ULL << 32,
+
+ // VEX_W - Has an opcode-specific meaning, but is used in the same
+ // way as REX_W is for regular SSE instructions.
+ VEX_W = 1ULL << 33,
+
+ // VEX_4V - Used to specify an additional AVX/SSE register. Several
+ // two-address instructions in SSE are represented as three-address ones
+ // in AVX, and the additional register is encoded in the VEX_VVVV prefix.
+ VEX_4V = 1ULL << 34,
+
+ // VEX_I8IMM - Specifies that the last register used in an AVX instruction
+ // must be encoded in the i8 immediate field. This usually happens in
+ // instructions with 4 operands.
+ VEX_I8IMM = 1ULL << 35,
+
+ // VEX_L - Stands for a bit in the VEX opcode prefix meaning the current
+ // instruction uses 256-bit wide registers. This is usually auto-detected
+ // when a VR256 register is used, but some AVX instructions also have this
+ // field marked when using an f256 memory reference.
+ VEX_L = 1ULL << 36
};
// getBaseOpcodeFor - This function returns the "base" X86 opcode for the
// specified machine instruction.
//
- static inline unsigned char getBaseOpcodeFor(unsigned TSFlags) {
+ static inline unsigned char getBaseOpcodeFor(uint64_t TSFlags) {
return TSFlags >> X86II::OpcodeShift;
}
- static inline bool hasImm(unsigned TSFlags) {
+ static inline bool hasImm(uint64_t TSFlags) {
return (TSFlags & X86II::ImmMask) != 0;
}
/// getSizeOfImm - Decode the "size of immediate" field from the TSFlags field
/// of the specified instruction.
- static inline unsigned getSizeOfImm(unsigned TSFlags) {
+ static inline unsigned getSizeOfImm(uint64_t TSFlags) {
switch (TSFlags & X86II::ImmMask) {
default: assert(0 && "Unknown immediate size");
case X86II::Imm8:
case X86II::Imm8PCRel: return 1;
- case X86II::Imm16: return 2;
+ case X86II::Imm16:
+ case X86II::Imm16PCRel: return 2;
case X86II::Imm32:
case X86II::Imm32PCRel: return 4;
case X86II::Imm64: return 8;
@@ -433,23 +490,77 @@ namespace X86II {
/// isImmPCRel - Return true if the immediate of the specified instruction's
/// TSFlags indicates that it is pc relative.
- static inline unsigned isImmPCRel(unsigned TSFlags) {
+ static inline unsigned isImmPCRel(uint64_t TSFlags) {
switch (TSFlags & X86II::ImmMask) {
- default: assert(0 && "Unknown immediate size");
- case X86II::Imm8PCRel:
- case X86II::Imm32PCRel:
- return true;
- case X86II::Imm8:
- case X86II::Imm16:
- case X86II::Imm32:
- case X86II::Imm64:
- return false;
+ default: assert(0 && "Unknown immediate size");
+ case X86II::Imm8PCRel:
+ case X86II::Imm16PCRel:
+ case X86II::Imm32PCRel:
+ return true;
+ case X86II::Imm8:
+ case X86II::Imm16:
+ case X86II::Imm32:
+ case X86II::Imm64:
+ return false;
+ }
+ }
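  // A short sketch of how these accessors compose (illustrative only;
  // 'Desc' is assumed to be a TargetInstrDesc with valid TSFlags):
  //
  //   uint64_t TSFlags = Desc.TSFlags;
  //   if (X86II::hasImm(TSFlags)) {
  //     unsigned Bytes = X86II::getSizeOfImm(TSFlags); // 1, 2, 4, or 8
  //     bool PCRel = X86II::isImmPCRel(TSFlags);       // e.g. Imm16PCRel
  //   }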
+
+ /// getMemoryOperandNo - The function returns the MCInst operand # for the
+ /// first field of the memory operand. If the instruction doesn't have a
+ /// memory operand, this returns -1.
+ ///
+ /// Note that this ignores tied operands. If there is a tied register that
+ /// is duplicated in the MCInst (e.g. "EAX = addl EAX, [mem]"), it is only
+ /// counted as one operand.
+ ///
+ static inline int getMemoryOperandNo(uint64_t TSFlags) {
+ switch (TSFlags & X86II::FormMask) {
+ case X86II::MRMInitReg: assert(0 && "FIXME: Remove this form");
+ default: assert(0 && "Unknown FormMask value in getMemoryOperandNo!");
+ case X86II::Pseudo:
+ case X86II::RawFrm:
+ case X86II::AddRegFrm:
+ case X86II::MRMDestReg:
+ case X86II::MRMSrcReg:
+ return -1;
+ case X86II::MRMDestMem:
+ return 0;
+ case X86II::MRMSrcMem: {
+ bool HasVEX_4V = TSFlags & X86II::VEX_4V;
+ unsigned FirstMemOp = 1;
+ if (HasVEX_4V)
+ ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
+
+ // FIXME: Maybe lea should have its own form? This is a horrible hack.
+ //if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
+ // Opcode == X86::LEA16r || Opcode == X86::LEA32r)
+ return FirstMemOp;
}
- }
+ case X86II::MRM0r: case X86II::MRM1r:
+ case X86II::MRM2r: case X86II::MRM3r:
+ case X86II::MRM4r: case X86II::MRM5r:
+ case X86II::MRM6r: case X86II::MRM7r:
+ return -1;
+ case X86II::MRM0m: case X86II::MRM1m:
+ case X86II::MRM2m: case X86II::MRM3m:
+ case X86II::MRM4m: case X86II::MRM5m:
+ case X86II::MRM6m: case X86II::MRM7m:
+ return 0;
+ case X86II::MRM_C1:
+ case X86II::MRM_C2:
+ case X86II::MRM_C3:
+ case X86II::MRM_C4:
+ case X86II::MRM_C8:
+ case X86II::MRM_C9:
+ case X86II::MRM_E8:
+ case X86II::MRM_F0:
+ case X86II::MRM_F8:
+ case X86II::MRM_F9:
+ return -1;
+ }
+ }
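  // Intended caller pattern (a sketch; 'CurOp' is an assumed operand cursor):
  //
  //   int MemOp = X86II::getMemoryOperandNo(TSFlags);
  //   if (MemOp != -1) {
  //     unsigned FirstMemOp = CurOp + MemOp;
  //     // ... encode [Base, Scale, Index, Disp, Segment] here ...
  //     CurOp = FirstMemOp + X86::AddrNumOperands; // skip the whole group
  //   }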
}
-const int X86AddrNumOperands = 5;
-
inline static bool isScale(const MachineOperand &MO) {
return MO.isImm() &&
(MO.getImm() == 1 || MO.getImm() == 2 ||
@@ -555,7 +666,7 @@ public:
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterInfo &TRI) const;
/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
@@ -585,13 +696,12 @@ public:
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
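  // A sketch of a typical call site for the new hook (illustrative): physical
  // register copies no longer pass source/dest register classes explicitly.
  //
  //   TII->copyPhysReg(MBB, InsertPt, DL, X86::EAX, X86::ECX,
  //                    /*KillSrc=*/true);   // emits: movl %ecx, %eax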
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIndex,
diff --git a/contrib/llvm/lib/Target/X86/X86InstrInfo.td b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
index 0d59c42..1efef5a 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrInfo.td
@@ -72,6 +72,8 @@ def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
+
def SDT_X86SegmentBaseAddress : SDTypeProfile<1, 1, [SDTCisPtrTy<0>]>;
def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
@@ -182,6 +184,9 @@ def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
def X86MingwAlloca : SDNode<"X86ISD::MINGW_ALLOCA", SDTX86Void,
[SDNPHasChain, SDNPInFlag, SDNPOutFlag]>;
+
+def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
+ []>;
//===----------------------------------------------------------------------===//
// X86 Operand Definitions.
@@ -197,13 +202,9 @@ def X86MemAsmOperand : AsmOperandClass {
let Name = "Mem";
let SuperClasses = [];
}
-def X86NoSegMemAsmOperand : AsmOperandClass {
- let Name = "NoSegMem";
- let SuperClasses = [X86MemAsmOperand];
-}
def X86AbsMemAsmOperand : AsmOperandClass {
let Name = "AbsMem";
- let SuperClasses = [X86NoSegMemAsmOperand];
+ let SuperClasses = [X86MemAsmOperand];
}
class X86MemOperand<string printMethod> : Operand<iPTR> {
let PrintMethod = printMethod;
@@ -226,7 +227,7 @@ def f32mem : X86MemOperand<"printf32mem">;
def f64mem : X86MemOperand<"printf64mem">;
def f80mem : X86MemOperand<"printf80mem">;
def f128mem : X86MemOperand<"printf128mem">;
-//def f256mem : X86MemOperand<"printf256mem">;
+def f256mem : X86MemOperand<"printf256mem">;
// A version of i8mem for use on x86-64 that uses GR64_NOREX instead of
// plain GR64, so that it doesn't potentially require a REX prefix.
@@ -245,15 +246,11 @@ def i32mem_TC : Operand<i32> {
let ParserMatchClass = X86MemAsmOperand;
}
-def lea32mem : Operand<i32> {
- let PrintMethod = "printlea32mem";
- let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm);
- let ParserMatchClass = X86NoSegMemAsmOperand;
-}
let ParserMatchClass = X86AbsMemAsmOperand,
PrintMethod = "print_pcrel_imm" in {
def i32imm_pcrel : Operand<i32>;
+def i16imm_pcrel : Operand<i16>;
def offset8 : Operand<i64>;
def offset16 : Operand<i64>;
@@ -283,26 +280,31 @@ class ImmSExtAsmOperandClass : AsmOperandClass {
// 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
// (which will be a -1ULL), and "0xFF" (-1 in 16-bits).
-// [0, 0x7FFFFFFF] | [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
+// [0, 0x7FFFFFFF] |
+// [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
let Name = "ImmSExti64i32";
}
-// [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] | [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
+// [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
+// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
let Name = "ImmSExti16i8";
let SuperClasses = [ImmSExti64i32AsmOperand];
}
-// [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] | [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
+// [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
+// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
let Name = "ImmSExti32i8";
}
-// [0, 0x0000007F] | [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
+// [0, 0x0000007F] |
+// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
let Name = "ImmSExti64i8";
- let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand, ImmSExti64i32AsmOperand];
+ let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
+ ImmSExti64i32AsmOperand];
}
// A couple of more descriptive operand definitions.
@@ -321,10 +323,10 @@ def i32i8imm : Operand<i32> {
// Define X86 specific addressing mode.
def addr : ComplexPattern<iPTR, 5, "SelectAddr", [], []>;
-def lea32addr : ComplexPattern<i32, 4, "SelectLEAAddr",
+def lea32addr : ComplexPattern<i32, 5, "SelectLEAAddr",
[add, sub, mul, X86mul_imm, shl, or, frameindex],
[]>;
-def tls32addr : ComplexPattern<i32, 4, "SelectTLSADDRAddr",
+def tls32addr : ComplexPattern<i32, 5, "SelectTLSADDRAddr",
[tglobaltlsaddr], []>;
//===----------------------------------------------------------------------===//
@@ -704,6 +706,12 @@ let isCall = 1 in
"lcall{w}\t{*}$dst", []>, OpSize;
def FARCALL32m : I<0xFF, MRM3m, (outs), (ins opaque48mem:$dst),
"lcall{l}\t{*}$dst", []>;
+
+ // callw: a 16-bit call, provided only for the assembler.
+ let isAsmParserOnly = 1 in
+ def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
+ (outs), (ins i16imm_pcrel:$dst, variable_ops),
+ "callw\t$dst", []>, OpSize;
}
// Constructing a stack frame.
@@ -737,18 +745,10 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
"jmp\t$dst # TAILCALL",
[]>;
def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops),
- "jmp{l}\t{*}$dst # TAILCALL",
- []>;
+ "", []>; // FIXME: Remove encoding when JIT is dead.
let mayLoad = 1 in
def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst, variable_ops),
"jmp{l}\t{*}$dst # TAILCALL", []>;
-
- // FIXME: This is a hack so that MCInst lowering can preserve the TAILCALL
- // marker on instructions, while still being able to relax.
- let isCodeGenOnly = 1 in {
- def TAILJMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
- "jmp\t$dst # TAILCALL", []>;
- }
}
//===----------------------------------------------------------------------===//
@@ -815,7 +815,18 @@ def PUSHF32 : I<0x9C, RawFrm, (outs), (ins), "pushf{l|d}", []>,
Requires<[In32BitMode]>;
}
-let isTwoAddress = 1 in // GR32 = bswap GR32
+let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
+ mayLoad=1, neverHasSideEffects=1 in {
+def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l}", []>,
+ Requires<[In32BitMode]>;
+}
+let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
+ mayStore=1, neverHasSideEffects=1 in {
+def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l}", []>,
+ Requires<[In32BitMode]>;
+}
+
+let Uses = [EFLAGS], Constraints = "$src = $dst" in // GR32 = bswap GR32
def BSWAP32r : I<0xC8, AddRegFrm,
(outs GR32:$dst), (ins GR32:$src),
"bswap{l}\t$dst",
@@ -855,11 +866,11 @@ def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
let neverHasSideEffects = 1 in
def LEA16r : I<0x8D, MRMSrcMem,
- (outs GR16:$dst), (ins lea32mem:$src),
+ (outs GR16:$dst), (ins i32mem:$src),
"lea{w}\t{$src|$dst}, {$dst|$src}", []>, OpSize;
let isReMaterializable = 1 in
def LEA32r : I<0x8D, MRMSrcMem,
- (outs GR32:$dst), (ins lea32mem:$src),
+ (outs GR32:$dst), (ins i32mem:$src),
"lea{l}\t{$src|$dst}, {$dst|$src}",
[(set GR32:$dst, lea32addr:$src)]>, Requires<[In32BitMode]>;
@@ -1239,7 +1250,7 @@ def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src),
//===----------------------------------------------------------------------===//
// Two address Instructions.
//
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
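// The tied-operand constraint above replaces the old isTwoAddress bit; the
// same thing can also be spelled per-record (a TableGen sketch with a
// hypothetical instruction name):
//
//   def FOOrr : I<...> { let Constraints = "$src1 = $dst"; }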
// Conditional moves
let Uses = [EFLAGS] in {
@@ -1640,7 +1651,7 @@ def CMOVNO32rm : I<0x41, MRMSrcMem, // if !overflow, GR32 = [mem32]
// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
// clobber EFLAGS, because if one of the operands is zero, the expansion
// could involve an xor.
-let usesCustomInserter = 1, isTwoAddress = 0, Defs = [EFLAGS] in {
+let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
def CMOV_GR8 : I<0, Pseudo,
(outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
"#CMOV_GR8 PSEUDO!",
@@ -1659,86 +1670,106 @@ def CMOV_GR16 : I<0, Pseudo,
[(set GR16:$dst,
(X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
def CMOV_RFP32 : I<0, Pseudo,
- (outs RFP32:$dst), (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
+ (outs RFP32:$dst),
+ (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
"#CMOV_RFP32 PSEUDO!",
- [(set RFP32:$dst, (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
+ [(set RFP32:$dst,
+ (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
EFLAGS))]>;
def CMOV_RFP64 : I<0, Pseudo,
- (outs RFP64:$dst), (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
+ (outs RFP64:$dst),
+ (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
"#CMOV_RFP64 PSEUDO!",
- [(set RFP64:$dst, (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
+ [(set RFP64:$dst,
+ (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
EFLAGS))]>;
def CMOV_RFP80 : I<0, Pseudo,
- (outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
+ (outs RFP80:$dst),
+ (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
"#CMOV_RFP80 PSEUDO!",
- [(set RFP80:$dst, (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
+ [(set RFP80:$dst,
+ (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
EFLAGS))]>;
} // Predicates = [NoCMov]
-} // UsesCustomInserter = 1, isTwoAddress = 0, Defs = [EFLAGS]
+} // UsesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
} // Uses = [EFLAGS]
// unary instructions
let CodeSize = 2 in {
let Defs = [EFLAGS] in {
-def NEG8r : I<0xF6, MRM3r, (outs GR8 :$dst), (ins GR8 :$src), "neg{b}\t$dst",
- [(set GR8:$dst, (ineg GR8:$src)),
+def NEG8r : I<0xF6, MRM3r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "neg{b}\t$dst",
+ [(set GR8:$dst, (ineg GR8:$src1)),
(implicit EFLAGS)]>;
-def NEG16r : I<0xF7, MRM3r, (outs GR16:$dst), (ins GR16:$src), "neg{w}\t$dst",
- [(set GR16:$dst, (ineg GR16:$src)),
+def NEG16r : I<0xF7, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
+ "neg{w}\t$dst",
+ [(set GR16:$dst, (ineg GR16:$src1)),
(implicit EFLAGS)]>, OpSize;
-def NEG32r : I<0xF7, MRM3r, (outs GR32:$dst), (ins GR32:$src), "neg{l}\t$dst",
- [(set GR32:$dst, (ineg GR32:$src)),
+def NEG32r : I<0xF7, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
+ "neg{l}\t$dst",
+ [(set GR32:$dst, (ineg GR32:$src1)),
(implicit EFLAGS)]>;
-let isTwoAddress = 0 in {
- def NEG8m : I<0xF6, MRM3m, (outs), (ins i8mem :$dst), "neg{b}\t$dst",
+
+let Constraints = "" in {
+ def NEG8m : I<0xF6, MRM3m, (outs), (ins i8mem :$dst),
+ "neg{b}\t$dst",
[(store (ineg (loadi8 addr:$dst)), addr:$dst),
(implicit EFLAGS)]>;
- def NEG16m : I<0xF7, MRM3m, (outs), (ins i16mem:$dst), "neg{w}\t$dst",
+ def NEG16m : I<0xF7, MRM3m, (outs), (ins i16mem:$dst),
+ "neg{w}\t$dst",
[(store (ineg (loadi16 addr:$dst)), addr:$dst),
(implicit EFLAGS)]>, OpSize;
- def NEG32m : I<0xF7, MRM3m, (outs), (ins i32mem:$dst), "neg{l}\t$dst",
+ def NEG32m : I<0xF7, MRM3m, (outs), (ins i32mem:$dst),
+ "neg{l}\t$dst",
[(store (ineg (loadi32 addr:$dst)), addr:$dst),
(implicit EFLAGS)]>;
-}
+} // Constraints = ""
} // Defs = [EFLAGS]
// Match xor -1 to not. Favors these over a move imm + xor to save code size.
let AddedComplexity = 15 in {
-def NOT8r : I<0xF6, MRM2r, (outs GR8 :$dst), (ins GR8 :$src), "not{b}\t$dst",
- [(set GR8:$dst, (not GR8:$src))]>;
-def NOT16r : I<0xF7, MRM2r, (outs GR16:$dst), (ins GR16:$src), "not{w}\t$dst",
- [(set GR16:$dst, (not GR16:$src))]>, OpSize;
-def NOT32r : I<0xF7, MRM2r, (outs GR32:$dst), (ins GR32:$src), "not{l}\t$dst",
- [(set GR32:$dst, (not GR32:$src))]>;
+def NOT8r : I<0xF6, MRM2r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "not{b}\t$dst",
+ [(set GR8:$dst, (not GR8:$src1))]>;
+def NOT16r : I<0xF7, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
+ "not{w}\t$dst",
+ [(set GR16:$dst, (not GR16:$src1))]>, OpSize;
+def NOT32r : I<0xF7, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
+ "not{l}\t$dst",
+ [(set GR32:$dst, (not GR32:$src1))]>;
}
-let isTwoAddress = 0 in {
- def NOT8m : I<0xF6, MRM2m, (outs), (ins i8mem :$dst), "not{b}\t$dst",
+let Constraints = "" in {
+ def NOT8m : I<0xF6, MRM2m, (outs), (ins i8mem :$dst),
+ "not{b}\t$dst",
[(store (not (loadi8 addr:$dst)), addr:$dst)]>;
- def NOT16m : I<0xF7, MRM2m, (outs), (ins i16mem:$dst), "not{w}\t$dst",
+ def NOT16m : I<0xF7, MRM2m, (outs), (ins i16mem:$dst),
+ "not{w}\t$dst",
[(store (not (loadi16 addr:$dst)), addr:$dst)]>, OpSize;
- def NOT32m : I<0xF7, MRM2m, (outs), (ins i32mem:$dst), "not{l}\t$dst",
+ def NOT32m : I<0xF7, MRM2m, (outs), (ins i32mem:$dst),
+ "not{l}\t$dst",
[(store (not (loadi32 addr:$dst)), addr:$dst)]>;
-}
+} // Constraints = ""
} // CodeSize
// TODO: inc/dec is slow for P4, but fast for Pentium-M.
let Defs = [EFLAGS] in {
let CodeSize = 2 in
-def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src), "inc{b}\t$dst",
- [(set GR8:$dst, EFLAGS, (X86inc_flag GR8:$src))]>;
+def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "inc{b}\t$dst",
+ [(set GR8:$dst, EFLAGS, (X86inc_flag GR8:$src1))]>;
let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
-def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
+def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
"inc{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src))]>,
+ [(set GR16:$dst, EFLAGS, (X86inc_flag GR16:$src1))]>,
OpSize, Requires<[In32BitMode]>;
-def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
+def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
"inc{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src))]>,
+ [(set GR32:$dst, EFLAGS, (X86inc_flag GR32:$src1))]>,
Requires<[In32BitMode]>;
}
-let isTwoAddress = 0, CodeSize = 2 in {
+let Constraints = "", CodeSize = 2 in {
def INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst), "inc{b}\t$dst",
[(store (add (loadi8 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)]>;
@@ -1750,23 +1781,24 @@ let isTwoAddress = 0, CodeSize = 2 in {
[(store (add (loadi32 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)]>,
Requires<[In32BitMode]>;
-}
+} // Constraints = "", CodeSize = 2
let CodeSize = 2 in
-def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src), "dec{b}\t$dst",
- [(set GR8:$dst, EFLAGS, (X86dec_flag GR8:$src))]>;
+def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
+ "dec{b}\t$dst",
+ [(set GR8:$dst, EFLAGS, (X86dec_flag GR8:$src1))]>;
let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA.
-def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
+def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src1),
"dec{w}\t$dst",
- [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src))]>,
+ [(set GR16:$dst, EFLAGS, (X86dec_flag GR16:$src1))]>,
OpSize, Requires<[In32BitMode]>;
-def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
+def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src1),
"dec{l}\t$dst",
- [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src))]>,
+ [(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src1))]>,
Requires<[In32BitMode]>;
-}
+} // CodeSize = 2
-let isTwoAddress = 0, CodeSize = 2 in {
+let Constraints = "", CodeSize = 2 in {
def DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst), "dec{b}\t$dst",
[(store (add (loadi8 addr:$dst), -1), addr:$dst),
(implicit EFLAGS)]>;
@@ -1778,7 +1810,7 @@ let isTwoAddress = 0, CodeSize = 2 in {
[(store (add (loadi32 addr:$dst), -1), addr:$dst),
(implicit EFLAGS)]>,
Requires<[In32BitMode]>;
-}
+} // Constraints = "", CodeSize = 2
} // Defs = [EFLAGS]
// Logical operators...
@@ -1857,7 +1889,7 @@ def AND32ri8 : Ii8<0x83, MRM4r,
[(set GR32:$dst, EFLAGS, (X86and_flag GR32:$src1,
i32immSExt8:$src2))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def AND8mr : I<0x20, MRMDestMem,
(outs), (ins i8mem :$dst, GR8 :$src),
"and{b}\t{$src, $dst|$dst, $src}",
@@ -1909,7 +1941,7 @@ let isTwoAddress = 0 in {
def AND32i32 : Ii32<0x25, RawFrm, (outs), (ins i32imm:$src),
"and{l}\t{$src, %eax|%eax, $src}", []>;
-}
+} // Constraints = ""
let isCommutable = 1 in { // X = OR Y, Z --> X = OR Z, Y
@@ -1983,7 +2015,7 @@ def OR32ri8 : Ii8<0x83, MRM1r, (outs GR32:$dst),
"or{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, EFLAGS, (X86or_flag GR32:$src1,
i32immSExt8:$src2))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def OR8mr : I<0x08, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
"or{b}\t{$src, $dst|$dst, $src}",
[(store (or (load addr:$dst), GR8:$src), addr:$dst),
@@ -2025,7 +2057,7 @@ let isTwoAddress = 0 in {
"or{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def OR32i32 : Ii32 <0x0D, RawFrm, (outs), (ins i32imm:$src),
"or{l}\t{$src, %eax|%eax, $src}", []>;
-} // isTwoAddress = 0
+} // Constraints = ""
let isCommutable = 1 in { // X = XOR Y, Z --> X = XOR Z, Y
@@ -2102,7 +2134,7 @@ def XOR32ri8 : Ii8<0x83, MRM6r,
[(set GR32:$dst, EFLAGS, (X86xor_flag GR32:$src1,
i32immSExt8:$src2))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def XOR8mr : I<0x30, MRMDestMem,
(outs), (ins i8mem :$dst, GR8 :$src),
"xor{b}\t{$src, $dst|$dst, $src}",
@@ -2153,26 +2185,27 @@ let isTwoAddress = 0 in {
"xor{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def XOR32i32 : Ii32<0x35, RawFrm, (outs), (ins i32imm:$src),
"xor{l}\t{$src, %eax|%eax, $src}", []>;
-} // isTwoAddress = 0
+} // Constraints = ""
} // Defs = [EFLAGS]
// Shift instructions
let Defs = [EFLAGS] in {
let Uses = [CL] in {
-def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src),
+def SHL8rCL : I<0xD2, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1),
"shl{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (shl GR8:$src, CL))]>;
-def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src),
+ [(set GR8:$dst, (shl GR8:$src1, CL))]>;
+def SHL16rCL : I<0xD3, MRM4r, (outs GR16:$dst), (ins GR16:$src1),
"shl{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (shl GR16:$src, CL))]>, OpSize;
-def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src),
+ [(set GR16:$dst, (shl GR16:$src1, CL))]>, OpSize;
+def SHL32rCL : I<0xD3, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
"shl{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (shl GR32:$src, CL))]>;
+ [(set GR32:$dst, (shl GR32:$src1, CL))]>;
} // Uses = [CL]
def SHL8ri : Ii8<0xC0, MRM4r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
"shl{b}\t{$src2, $dst|$dst, $src2}",
[(set GR8:$dst, (shl GR8:$src1, (i8 imm:$src2)))]>;
+
let isConvertibleToThreeAddress = 1 in { // Can transform into LEA.
def SHL16ri : Ii8<0xC1, MRM4r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2),
"shl{w}\t{$src2, $dst|$dst, $src2}",
@@ -2193,7 +2226,7 @@ def SHL32r1 : I<0xD1, MRM4r, (outs GR32:$dst), (ins GR32:$src1),
} // isConvertibleToThreeAddress = 1
-let isTwoAddress = 0 in {
+let Constraints = "" in {
let Uses = [CL] in {
def SHL8mCL : I<0xD2, MRM4m, (outs), (ins i8mem :$dst),
"shl{b}\t{%cl, $dst|$dst, CL}",
@@ -2227,18 +2260,18 @@ let isTwoAddress = 0 in {
def SHL32m1 : I<0xD1, MRM4m, (outs), (ins i32mem:$dst),
"shl{l}\t$dst",
[(store (shl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-}
+} // Constraints = ""
let Uses = [CL] in {
-def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src),
+def SHR8rCL : I<0xD2, MRM5r, (outs GR8 :$dst), (ins GR8 :$src1),
"shr{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (srl GR8:$src, CL))]>;
-def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src),
+ [(set GR8:$dst, (srl GR8:$src1, CL))]>;
+def SHR16rCL : I<0xD3, MRM5r, (outs GR16:$dst), (ins GR16:$src1),
"shr{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (srl GR16:$src, CL))]>, OpSize;
-def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src),
+ [(set GR16:$dst, (srl GR16:$src1, CL))]>, OpSize;
+def SHR32rCL : I<0xD3, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
"shr{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (srl GR32:$src, CL))]>;
+ [(set GR32:$dst, (srl GR32:$src1, CL))]>;
}
def SHR8ri : Ii8<0xC0, MRM5r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
@@ -2262,7 +2295,7 @@ def SHR32r1 : I<0xD1, MRM5r, (outs GR32:$dst), (ins GR32:$src1),
"shr{l}\t$dst",
[(set GR32:$dst, (srl GR32:$src1, (i8 1)))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
let Uses = [CL] in {
def SHR8mCL : I<0xD2, MRM5m, (outs), (ins i8mem :$dst),
"shr{b}\t{%cl, $dst|$dst, CL}",
@@ -2296,18 +2329,18 @@ let isTwoAddress = 0 in {
def SHR32m1 : I<0xD1, MRM5m, (outs), (ins i32mem:$dst),
"shr{l}\t$dst",
[(store (srl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-}
+} // Constraints = ""
let Uses = [CL] in {
-def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src),
+def SAR8rCL : I<0xD2, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1),
"sar{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (sra GR8:$src, CL))]>;
-def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src),
+ [(set GR8:$dst, (sra GR8:$src1, CL))]>;
+def SAR16rCL : I<0xD3, MRM7r, (outs GR16:$dst), (ins GR16:$src1),
"sar{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (sra GR16:$src, CL))]>, OpSize;
-def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src),
+ [(set GR16:$dst, (sra GR16:$src1, CL))]>, OpSize;
+def SAR32rCL : I<0xD3, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
"sar{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (sra GR32:$src, CL))]>;
+ [(set GR32:$dst, (sra GR32:$src1, CL))]>;
}
def SAR8ri : Ii8<0xC0, MRM7r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
@@ -2332,7 +2365,7 @@ def SAR32r1 : I<0xD1, MRM7r, (outs GR32:$dst), (ins GR32:$src1),
"sar{l}\t$dst",
[(set GR32:$dst, (sra GR32:$src1, (i8 1)))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
let Uses = [CL] in {
def SAR8mCL : I<0xD2, MRM7m, (outs), (ins i8mem :$dst),
"sar{b}\t{%cl, $dst|$dst, CL}",
@@ -2366,65 +2399,65 @@ let isTwoAddress = 0 in {
def SAR32m1 : I<0xD1, MRM7m, (outs), (ins i32mem:$dst),
"sar{l}\t$dst",
[(store (sra (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-}
+} // Constraints = ""
// Rotate instructions
-def RCL8r1 : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src),
+def RCL8r1 : I<0xD0, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
"rcl{b}\t{1, $dst|$dst, 1}", []>;
let Uses = [CL] in {
-def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src),
+def RCL8rCL : I<0xD2, MRM2r, (outs GR8:$dst), (ins GR8:$src1),
"rcl{b}\t{%cl, $dst|$dst, CL}", []>;
}
-def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src, i8imm:$cnt),
+def RCL8ri : Ii8<0xC0, MRM2r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
"rcl{b}\t{$cnt, $dst|$dst, $cnt}", []>;
-def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src),
+def RCL16r1 : I<0xD1, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
"rcl{w}\t{1, $dst|$dst, 1}", []>, OpSize;
let Uses = [CL] in {
-def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src),
+def RCL16rCL : I<0xD3, MRM2r, (outs GR16:$dst), (ins GR16:$src1),
"rcl{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
}
-def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src, i8imm:$cnt),
+def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
"rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
-def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src),
+def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
"rcl{l}\t{1, $dst|$dst, 1}", []>;
let Uses = [CL] in {
-def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src),
+def RCL32rCL : I<0xD3, MRM2r, (outs GR32:$dst), (ins GR32:$src1),
"rcl{l}\t{%cl, $dst|$dst, CL}", []>;
}
-def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src, i8imm:$cnt),
+def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
"rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>;
-def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src),
+def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
"rcr{b}\t{1, $dst|$dst, 1}", []>;
let Uses = [CL] in {
-def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src),
+def RCR8rCL : I<0xD2, MRM3r, (outs GR8:$dst), (ins GR8:$src1),
"rcr{b}\t{%cl, $dst|$dst, CL}", []>;
}
-def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src, i8imm:$cnt),
+def RCR8ri : Ii8<0xC0, MRM3r, (outs GR8:$dst), (ins GR8:$src1, i8imm:$cnt),
"rcr{b}\t{$cnt, $dst|$dst, $cnt}", []>;
-def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src),
+def RCR16r1 : I<0xD1, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
"rcr{w}\t{1, $dst|$dst, 1}", []>, OpSize;
let Uses = [CL] in {
-def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src),
+def RCR16rCL : I<0xD3, MRM3r, (outs GR16:$dst), (ins GR16:$src1),
"rcr{w}\t{%cl, $dst|$dst, CL}", []>, OpSize;
}
-def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src, i8imm:$cnt),
+def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$cnt),
"rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize;
-def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src),
+def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
"rcr{l}\t{1, $dst|$dst, 1}", []>;
let Uses = [CL] in {
-def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src),
+def RCR32rCL : I<0xD3, MRM3r, (outs GR32:$dst), (ins GR32:$src1),
"rcr{l}\t{%cl, $dst|$dst, CL}", []>;
}
-def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src, i8imm:$cnt),
+def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$cnt),
"rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def RCL8m1 : I<0xD0, MRM2m, (outs), (ins i8mem:$dst),
"rcl{b}\t{1, $dst|$dst, 1}", []>;
def RCL8mi : Ii8<0xC0, MRM2m, (outs), (ins i8mem:$dst, i8imm:$cnt),
@@ -2464,19 +2497,19 @@ def RCR16mCL : I<0xD3, MRM3m, (outs), (ins i16mem:$dst),
def RCR32mCL : I<0xD3, MRM3m, (outs), (ins i32mem:$dst),
"rcr{l}\t{%cl, $dst|$dst, CL}", []>;
}
-}
+} // Constraints = ""
// FIXME: provide shorter instructions when imm8 == 1
let Uses = [CL] in {
-def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src),
+def ROL8rCL : I<0xD2, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1),
"rol{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (rotl GR8:$src, CL))]>;
-def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src),
+ [(set GR8:$dst, (rotl GR8:$src1, CL))]>;
+def ROL16rCL : I<0xD3, MRM0r, (outs GR16:$dst), (ins GR16:$src1),
"rol{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (rotl GR16:$src, CL))]>, OpSize;
-def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src),
+ [(set GR16:$dst, (rotl GR16:$src1, CL))]>, OpSize;
+def ROL32rCL : I<0xD3, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
"rol{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (rotl GR32:$src, CL))]>;
+ [(set GR32:$dst, (rotl GR32:$src1, CL))]>;
}
def ROL8ri : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
@@ -2501,7 +2534,7 @@ def ROL32r1 : I<0xD1, MRM0r, (outs GR32:$dst), (ins GR32:$src1),
"rol{l}\t$dst",
[(set GR32:$dst, (rotl GR32:$src1, (i8 1)))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
let Uses = [CL] in {
def ROL8mCL : I<0xD2, MRM0m, (outs), (ins i8mem :$dst),
"rol{b}\t{%cl, $dst|$dst, CL}",
@@ -2535,18 +2568,18 @@ let isTwoAddress = 0 in {
def ROL32m1 : I<0xD1, MRM0m, (outs), (ins i32mem:$dst),
"rol{l}\t$dst",
[(store (rotl (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-}
+} // Constraints = ""
let Uses = [CL] in {
-def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src),
+def ROR8rCL : I<0xD2, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1),
"ror{b}\t{%cl, $dst|$dst, CL}",
- [(set GR8:$dst, (rotr GR8:$src, CL))]>;
-def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src),
+ [(set GR8:$dst, (rotr GR8:$src1, CL))]>;
+def ROR16rCL : I<0xD3, MRM1r, (outs GR16:$dst), (ins GR16:$src1),
"ror{w}\t{%cl, $dst|$dst, CL}",
- [(set GR16:$dst, (rotr GR16:$src, CL))]>, OpSize;
-def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src),
+ [(set GR16:$dst, (rotr GR16:$src1, CL))]>, OpSize;
+def ROR32rCL : I<0xD3, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
"ror{l}\t{%cl, $dst|$dst, CL}",
- [(set GR32:$dst, (rotr GR32:$src, CL))]>;
+ [(set GR32:$dst, (rotr GR32:$src1, CL))]>;
}
def ROR8ri : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2),
@@ -2571,7 +2604,7 @@ def ROR32r1 : I<0xD1, MRM1r, (outs GR32:$dst), (ins GR32:$src1),
"ror{l}\t$dst",
[(set GR32:$dst, (rotr GR32:$src1, (i8 1)))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
let Uses = [CL] in {
def ROR8mCL : I<0xD2, MRM1m, (outs), (ins i8mem :$dst),
"ror{b}\t{%cl, $dst|$dst, CL}",
@@ -2605,8 +2638,7 @@ let isTwoAddress = 0 in {
def ROR32m1 : I<0xD1, MRM1m, (outs), (ins i32mem:$dst),
"ror{l}\t$dst",
[(store (rotr (loadi32 addr:$dst), (i8 1)), addr:$dst)]>;
-}
-
+} // Constraints = ""
// Double shift instructions (generalizations of rotate)
@@ -2662,7 +2694,7 @@ def SHRD16rri8 : Ii8<0xAC, MRMDestReg,
TB, OpSize;
}
-let isTwoAddress = 0 in {
+let Constraints = "" in {
let Uses = [CL] in {
def SHLD32mrCL : I<0xA5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
"shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}",
@@ -2708,7 +2740,7 @@ let isTwoAddress = 0 in {
[(store (X86shrd (loadi16 addr:$dst), GR16:$src2,
(i8 imm:$src3)), addr:$dst)]>,
TB, OpSize;
-}
+} // Constraints = ""
} // Defs = [EFLAGS]
@@ -2794,7 +2826,7 @@ def ADD32ri8 : Ii8<0x83, MRM0r, (outs GR32:$dst),
(X86add_flag GR32:$src1, i32immSExt8:$src2))]>;
}
-let isTwoAddress = 0 in {
+let Constraints = "" in {
// Memory-Register Addition
def ADD8mr : I<0x00, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
"add{b}\t{$src2, $dst|$dst, $src2}",
@@ -2838,7 +2870,7 @@ let isTwoAddress = 0 in {
"add{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def ADD32i32 : Ii32<0x05, RawFrm, (outs), (ins i32imm:$src),
"add{l}\t{$src, %eax|%eax, $src}", []>;
-}
+} // Constraints = ""
let Uses = [EFLAGS] in {
let isCommutable = 1 in { // X = ADC Y, Z --> X = ADC Z, Y
@@ -2900,7 +2932,7 @@ def ADC32ri8 : Ii8<0x83, MRM2r, (outs GR32:$dst),
"adc{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (adde GR32:$src1, i32immSExt8:$src2))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def ADC8mr : I<0x10, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
"adc{b}\t{$src2, $dst|$dst, $src2}",
[(store (adde (load addr:$dst), GR8:$src2), addr:$dst)]>;
@@ -2935,7 +2967,7 @@ let isTwoAddress = 0 in {
"adc{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def ADC32i32 : Ii32<0x15, RawFrm, (outs), (ins i32imm:$src),
"adc{l}\t{$src, %eax|%eax, $src}", []>;
-}
+} // Constraints = ""
} // Uses = [EFLAGS]
// Register-Register Subtraction
@@ -3007,7 +3039,7 @@ def SUB32ri8 : Ii8<0x83, MRM5r, (outs GR32:$dst),
[(set GR32:$dst, EFLAGS,
(X86sub_flag GR32:$src1, i32immSExt8:$src2))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
// Memory-Register Subtraction
def SUB8mr : I<0x28, MRMDestMem, (outs), (ins i8mem :$dst, GR8 :$src2),
"sub{b}\t{$src2, $dst|$dst, $src2}",
@@ -3052,7 +3084,7 @@ let isTwoAddress = 0 in {
"sub{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def SUB32i32 : Ii32<0x2D, RawFrm, (outs), (ins i32imm:$src),
"sub{l}\t{$src, %eax|%eax, $src}", []>;
-}
+} // Constraints = ""
let Uses = [EFLAGS] in {
def SBB8rr : I<0x18, MRMDestReg, (outs GR8:$dst),
@@ -3068,7 +3100,7 @@ def SBB32rr : I<0x19, MRMDestReg, (outs GR32:$dst),
"sbb{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (sube GR32:$src1, GR32:$src2))]>;
-let isTwoAddress = 0 in {
+let Constraints = "" in {
def SBB8mr : I<0x18, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
"sbb{b}\t{$src2, $dst|$dst, $src2}",
[(store (sube (load addr:$dst), GR8:$src2), addr:$dst)]>;
@@ -3103,7 +3135,7 @@ let isTwoAddress = 0 in {
"sbb{w}\t{$src, %ax|%ax, $src}", []>, OpSize;
def SBB32i32 : Ii32<0x1D, RawFrm, (outs), (ins i32imm:$src),
"sbb{l}\t{$src, %eax|%eax, $src}", []>;
-}
+} // Constraints = ""
let isCodeGenOnly = 1 in {
def SBB8rr_REV : I<0x1A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
@@ -3811,6 +3843,7 @@ def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
// Thread Local Storage Instructions
//
+// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
@@ -3819,12 +3852,24 @@ let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
Uses = [ESP] in
-def TLS_addr32 : I<0, Pseudo, (outs), (ins lea32mem:$sym),
+def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
"leal\t$sym, %eax; "
"call\t___tls_get_addr@PLT",
[(X86tlsaddr tls32addr:$sym)]>,
Requires<[In32BitMode]>;
+// Darwin TLS Support
+// For i386, the address of the thunk is passed on the stack; on return, the
+// address of the variable is in %eax. %ecx is trashed during the function
+// call. All other registers are preserved.
+let Defs = [EAX, ECX],
+ Uses = [ESP],
+ usesCustomInserter = 1 in
+def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
+ "# TLSCall_32",
+ [(X86TLSCall addr:$sym)]>,
+ Requires<[In32BitMode]>;
+
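// Roughly, the custom inserter is expected to expand this pseudo into a
// sequence of the following shape (a sketch; the exact syntax is assumed):
//   <push the address of the TLS thunk>   ; argument passed on the stack
//   <call>                                ; &variable returned in %eax,
//                                         ; %ecx trashed, others preserved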
let AddedComplexity = 5, isCodeGenOnly = 1 in
def GS_MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"movl\t%gs:$src, $dst",
@@ -4783,14 +4828,14 @@ def : Pat<(X86smul_flag GR32:$src1, 2), (ADD32rr GR32:$src1, GR32:$src1)>;
// Patterns for nodes that do not produce flags, for instructions that do.
// Increment reg.
-def : Pat<(add GR8:$src , 1), (INC8r GR8:$src)>;
-def : Pat<(add GR16:$src, 1), (INC16r GR16:$src)>, Requires<[In32BitMode]>;
-def : Pat<(add GR32:$src, 1), (INC32r GR32:$src)>, Requires<[In32BitMode]>;
+def : Pat<(add GR8:$src1 , 1), (INC8r GR8:$src1)>;
+def : Pat<(add GR16:$src1, 1), (INC16r GR16:$src1)>, Requires<[In32BitMode]>;
+def : Pat<(add GR32:$src1, 1), (INC32r GR32:$src1)>, Requires<[In32BitMode]>;
// Decrement reg.
-def : Pat<(add GR8:$src , -1), (DEC8r GR8:$src)>;
-def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>, Requires<[In32BitMode]>;
-def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>, Requires<[In32BitMode]>;
+def : Pat<(add GR8:$src1 , -1), (DEC8r GR8:$src1)>;
+def : Pat<(add GR16:$src1, -1), (DEC16r GR16:$src1)>, Requires<[In32BitMode]>;
+def : Pat<(add GR32:$src1, -1), (DEC32r GR32:$src1)>, Requires<[In32BitMode]>;
// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr GR8 :$src1, GR8 :$src2)>;
diff --git a/contrib/llvm/lib/Target/X86/X86InstrMMX.td b/contrib/llvm/lib/Target/X86/X86InstrMMX.td
index 0952fc8..6cf7ac8 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrMMX.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrMMX.td
@@ -513,30 +513,20 @@ def : Pat<(store (v4i16 VR64:$src), addr:$dst),
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v2i32 VR64:$src), addr:$dst),
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
-def : Pat<(store (v2f32 VR64:$src), addr:$dst),
- (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v1i64 VR64:$src), addr:$dst),
(MMX_MOVQ64mr addr:$dst, VR64:$src)>;
// Bit convert.
def : Pat<(v8i8 (bitconvert (v1i64 VR64:$src))), (v8i8 VR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2i32 VR64:$src))), (v8i8 VR64:$src)>;
-def : Pat<(v8i8 (bitconvert (v2f32 VR64:$src))), (v8i8 VR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4i16 VR64:$src))), (v8i8 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1i64 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 VR64:$src))), (v4i16 VR64:$src)>;
-def : Pat<(v4i16 (bitconvert (v2f32 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1i64 VR64:$src))), (v2i32 VR64:$src)>;
-def : Pat<(v2i32 (bitconvert (v2f32 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8 VR64:$src))), (v2i32 VR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v1i64 VR64:$src))), (v2f32 VR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v2i32 VR64:$src))), (v2f32 VR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v4i16 VR64:$src))), (v2f32 VR64:$src)>;
-def : Pat<(v2f32 (bitconvert (v8i8 VR64:$src))), (v2f32 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2i32 VR64:$src))), (v1i64 VR64:$src)>;
-def : Pat<(v1i64 (bitconvert (v2f32 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8 VR64:$src))), (v1i64 VR64:$src)>;
@@ -545,8 +535,6 @@ def : Pat<(v1i64 (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v2i32 (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
-def : Pat<(v2f32 (bitconvert (i64 GR64:$src))),
- (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v4i16 (bitconvert (i64 GR64:$src))),
(MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v8i8 (bitconvert (i64 GR64:$src))),
@@ -555,8 +543,6 @@ def : Pat<(i64 (bitconvert (v1i64 VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v2i32 VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>;
-def : Pat<(i64 (bitconvert (v2f32 VR64:$src))),
- (MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v4i16 VR64:$src))),
(MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v8i8 VR64:$src))),
diff --git a/contrib/llvm/lib/Target/X86/X86InstrSSE.td b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
index 5580ba7..ebe161b 100644
--- a/contrib/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/contrib/llvm/lib/Target/X86/X86InstrSSE.td
@@ -15,322 +15,6 @@
//===----------------------------------------------------------------------===//
-// SSE specific DAG Nodes.
-//===----------------------------------------------------------------------===//
-
-def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
- SDTCisFP<0>, SDTCisInt<2> ]>;
-def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
- SDTCisFP<1>, SDTCisVT<3, i8>]>;
-
-def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
-def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
-def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
- [SDNPCommutative, SDNPAssociative]>;
-def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
- [SDNPCommutative, SDNPAssociative]>;
-def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
- [SDNPCommutative, SDNPAssociative]>;
-def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
-def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
-def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
-def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
-def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
-def X86pshufb : SDNode<"X86ISD::PSHUFB",
- SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
- SDTCisSameAs<0,2>]>>;
-def X86pextrb : SDNode<"X86ISD::PEXTRB",
- SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
-def X86pextrw : SDNode<"X86ISD::PEXTRW",
- SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
-def X86pinsrb : SDNode<"X86ISD::PINSRB",
- SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
- SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
-def X86pinsrw : SDNode<"X86ISD::PINSRW",
- SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
- SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
-def X86insrtps : SDNode<"X86ISD::INSERTPS",
- SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
- SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
-def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
- SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
-def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
- [SDNPHasChain, SDNPMayLoad]>;
-def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
-def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
-def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
-def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
-def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
-def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
-def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
-def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
-def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
-def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
-def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
-def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
-
-def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
- SDTCisVT<1, v4f32>,
- SDTCisVT<2, v4f32>]>;
-def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
-
-//===----------------------------------------------------------------------===//
-// SSE Complex Patterns
-//===----------------------------------------------------------------------===//
-
-// These are 'extloads' from a scalar to the low element of a vector, zeroing
-// the top elements. These are used for the SSE 'ss' and 'sd' instruction
-// forms.
-def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
- [SDNPHasChain, SDNPMayLoad]>;
-def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
- [SDNPHasChain, SDNPMayLoad]>;
-
-def ssmem : Operand<v4f32> {
- let PrintMethod = "printf32mem";
- let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
- let ParserMatchClass = X86MemAsmOperand;
-}
-def sdmem : Operand<v2f64> {
- let PrintMethod = "printf64mem";
- let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
- let ParserMatchClass = X86MemAsmOperand;
-}
-
-//===----------------------------------------------------------------------===//
-// SSE pattern fragments
-//===----------------------------------------------------------------------===//
-
-def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
-def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
-def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
-def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
-
-// Like 'store', but always requires vector alignment.
-def alignedstore : PatFrag<(ops node:$val, node:$ptr),
- (store node:$val, node:$ptr), [{
- return cast<StoreSDNode>(N)->getAlignment() >= 16;
-}]>;
-
-// Like 'load', but always requires vector alignment.
-def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return cast<LoadSDNode>(N)->getAlignment() >= 16;
-}]>;
-
-def alignedloadfsf32 : PatFrag<(ops node:$ptr),
- (f32 (alignedload node:$ptr))>;
-def alignedloadfsf64 : PatFrag<(ops node:$ptr),
- (f64 (alignedload node:$ptr))>;
-def alignedloadv4f32 : PatFrag<(ops node:$ptr),
- (v4f32 (alignedload node:$ptr))>;
-def alignedloadv2f64 : PatFrag<(ops node:$ptr),
- (v2f64 (alignedload node:$ptr))>;
-def alignedloadv4i32 : PatFrag<(ops node:$ptr),
- (v4i32 (alignedload node:$ptr))>;
-def alignedloadv2i64 : PatFrag<(ops node:$ptr),
- (v2i64 (alignedload node:$ptr))>;
-
-// Like 'load', but uses special alignment checks suitable for use in
-// memory operands in most SSE instructions, which are required to
-// be naturally aligned on some targets but not on others. If the subtarget
-// allows unaligned accesses, match any load, though this may require
-// setting a feature bit in the processor (on startup, for example).
-// Opteron 10h and later implement such a feature.
-def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return Subtarget->hasVectorUAMem()
- || cast<LoadSDNode>(N)->getAlignment() >= 16;
-}]>;
-
-def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
-def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
-def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
-def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
-def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
-def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
-def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
-
-// SSSE3 uses MMX registers for some instructions, so their memory operands
-// aren't required to be aligned on a 16-byte boundary.
-// FIXME: 8 byte alignment for mmx reads is not required
-def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
- return cast<LoadSDNode>(N)->getAlignment() >= 8;
-}]>;
-
-def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
-def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
-def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
-def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
-
-// MOVNT Support
-// Like 'store', but requires the non-temporal bit to be set
-def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
- (st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isNonTemporal();
- return false;
-}]>;
-
-def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
- (st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isNonTemporal() && !ST->isTruncatingStore() &&
- ST->getAddressingMode() == ISD::UNINDEXED &&
- ST->getAlignment() >= 16;
- return false;
-}]>;
-
-def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
- (st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isNonTemporal() &&
- ST->getAlignment() < 16;
- return false;
-}]>;
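
The three fragments above split stores into regular, aligned non-temporal, and
unaligned non-temporal. A hedged sketch of the same classification (struct and
function names are illustrative, not LLVM API):

    struct StoreInfo {
      bool nonTemporal, truncating, indexed;
      unsigned alignment;
    };
    // alignednontemporalstore: eligible for MOVNTPS/MOVNTDQ.
    bool matchesAlignedNT(const StoreInfo &s) {
      return s.nonTemporal && !s.truncating && !s.indexed && s.alignment >= 16;
    }
    // unalignednontemporalstore: must be lowered some other way.
    bool matchesUnalignedNT(const StoreInfo &s) {
      return s.nonTemporal && s.alignment < 16;
    }
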
-
-def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
-def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
-def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
-def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
-def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
-def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
-
-def vzmovl_v2i64 : PatFrag<(ops node:$src),
- (bitconvert (v2i64 (X86vzmovl
- (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
-def vzmovl_v4i32 : PatFrag<(ops node:$src),
- (bitconvert (v4i32 (X86vzmovl
- (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;
-
-def vzload_v2i64 : PatFrag<(ops node:$src),
- (bitconvert (v2i64 (X86vzload node:$src)))>;
-
-
-def fp32imm0 : PatLeaf<(f32 fpimm), [{
- return N->isExactlyValue(+0.0);
-}]>;
-
-// BYTE_imm - Transform bit immediates into byte immediates.
-def BYTE_imm : SDNodeXForm<imm, [{
- // Transformation function: imm >> 3
- return getI32Imm(N->getZExtValue() >> 3);
-}]>;
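
This transform exists because some patterns express a 128-bit shift in bits
while instructions such as PSLLDQ/PSRLDQ take a byte count; the xform is a
plain bit-to-byte conversion:

    // Sketch of the BYTE_imm transformation.
    unsigned bitsToByteImmediate(unsigned bitCount) {
      return bitCount >> 3;   // e.g. a 64-bit shift becomes an 8-byte PSRLDQ
    }
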
-
-// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
-// SHUFP* etc. imm.
-def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShuffleSHUFImmediate(N));
-}]>;
-
-// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
-// PSHUFHW imm.
-def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
-}]>;
-
-// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
-// PSHUFLW imm.
-def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
-}]>;
-
-// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
-// a PALIGNR imm.
-def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
- return getI8Imm(X86::getShufflePALIGNRImmediate(N));
-}]>;
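
All four xforms pack a shuffle mask into an 8-bit immediate. For the
PSHUFD/SHUFPS case the encoding gives destination lane i a 2-bit source index
in bits [2*i+1:2*i]; a standalone sketch of that encoding, assuming a
fully-specified 4-element mask (undef-lane handling omitted):

    // Pack a 4-element shuffle mask into the PSHUFD/SHUFPS immediate.
    unsigned char shufImmediate(const int mask[4]) {
      unsigned char imm = 0;
      for (int i = 3; i >= 0; --i)
        imm = (unsigned char)((imm << 2) | (mask[i] & 0x3));
      return imm;   // identity mask {0,1,2,3} -> 0xE4
    }
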
-
-def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
- return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
-}]>;
-
-def movddup : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movlp : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movl : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
-}]>;
-
-def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_shuf_imm>;
-
-def shufp : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_shuf_imm>;
-
-def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_pshufhw_imm>;
-
-def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_pshuflw_imm>;
-
-def palign : PatFrag<(ops node:$lhs, node:$rhs),
- (vector_shuffle node:$lhs, node:$rhs), [{
- return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
-}], SHUFFLE_get_palign_imm>;
-
-//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//
@@ -368,857 +52,645 @@ let Uses = [EFLAGS], usesCustomInserter = 1 in {
}
//===----------------------------------------------------------------------===//
-// SSE1 Instructions
+// SSE 1 & 2 Instruction Classes
//===----------------------------------------------------------------------===//
-// Move Instructions. Register-to-register movss is not used for FR32
-// register copies because it's a partial register update; FsMOVAPSrr is
-// used instead. Register-to-register movss is not modeled as an INSERT_SUBREG
-// because INSERT_SUBREG requires that the insert be implementable in terms of
-// a copy, and, as just mentioned, we don't use movss for copies.
-let Constraints = "$src1 = $dst" in
-def MOVSSrr : SSI<0x10, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
- "movss\t{$src2, $dst|$dst, $src2}",
- [(set (v4f32 VR128:$dst),
- (movl VR128:$src1, (scalar_to_vector FR32:$src2)))]>;
+/// sse12_fp_scalar - SSE 1 & 2 scalar instruction class
+multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ RegisterClass RC, X86MemOperand x86memop,
+ bit Is2Addr = 1> {
+ let isCommutable = 1 in {
+ def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
+ }
+ def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
+}
+
+/// sse12_fp_scalar_int - SSE 1 & 2 scalar instruction intrinsics class
+multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ string asm, string SSEVer, string FPSizeStr,
+ Operand memopr, ComplexPattern mem_cpat,
+ bit Is2Addr = 1> {
+ def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
+ !strconcat(SSEVer, !strconcat("_",
+ !strconcat(OpcodeStr, FPSizeStr))))
+ RC:$src1, RC:$src2))]>;
+ def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
+ !strconcat(SSEVer, !strconcat("_",
+ !strconcat(OpcodeStr, FPSizeStr))))
+ RC:$src1, mem_cpat:$src2))]>;
+}
+
+/// sse12_fp_packed - SSE 1 & 2 packed instruction class
+multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ RegisterClass RC, ValueType vt,
+ X86MemOperand x86memop, PatFrag mem_frag,
+ Domain d, bit Is2Addr = 1> {
+ let isCommutable = 1 in
+ def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
+ let mayLoad = 1 in
+ def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
+}
+
+/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed logical instruction class
+multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
+ string OpcodeStr, X86MemOperand x86memop,
+ list<dag> pat_rr, list<dag> pat_rm,
+ bit Is2Addr = 1> {
+ let isCommutable = 1 in
+ def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ pat_rr, d>;
+ def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ pat_rm, d>;
+}
+
+/// sse12_fp_packed_int - SSE 1 & 2 packed instruction intrinsics class
+multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
+ string asm, string SSEVer, string FPSizeStr,
+ X86MemOperand x86memop, PatFrag mem_frag,
+ Domain d, bit Is2Addr = 1> {
+ def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
+ !strconcat(SSEVer, !strconcat("_",
+ !strconcat(OpcodeStr, FPSizeStr))))
+ RC:$src1, RC:$src2))], d>;
+ def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
+ !strconcat(SSEVer, !strconcat("_",
+ !strconcat(OpcodeStr, FPSizeStr))))
+ RC:$src1, (mem_frag addr:$src2)))], d>;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Move Instructions
+//===----------------------------------------------------------------------===//
+
+class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
+ SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
+ [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
+
+// Loading from memory automatically zeroes the upper bits.
+class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
+ PatFrag mem_pat, string OpcodeStr> :
+ SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (mem_pat addr:$src))]>;
+
+// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
+// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
+// is used instead. Register-to-register movss/movsd is not modeled as an
+// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
+// in terms of a copy, and, as just mentioned, we don't use movss/movsd for copies.
+let isAsmParserOnly = 1 in {
+ def VMOVSSrr : sse12_move_rr<FR32, v4f32,
+ "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
+ def VMOVSDrr : sse12_move_rr<FR64, v2f64,
+ "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
+ let canFoldAsLoad = 1, isReMaterializable = 1 in {
+ def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
+
+ let AddedComplexity = 20 in
+ def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
+ }
+}
+
+let Constraints = "$src1 = $dst" in {
+ def MOVSSrr : sse12_move_rr<FR32, v4f32,
+ "movss\t{$src2, $dst|$dst, $src2}">, XS;
+ def MOVSDrr : sse12_move_rr<FR64, v2f64,
+ "movsd\t{$src2, $dst|$dst, $src2}">, XD;
+}
+
+let canFoldAsLoad = 1, isReMaterializable = 1 in {
+ def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
+
+ let AddedComplexity = 20 in
+ def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
+}
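
The partial-register-update problem described above is visible at the intrinsic
level: a register-to-register movss merges lane 0 of the source into the
destination and preserves the destination's upper lanes, so it is not a full
copy. An illustrative C++ sketch:

    #include <xmmintrin.h>
    #include <cstdio>

    int main() {
      __m128 a = _mm_set_ps(4.f, 3.f, 2.f, 1.f);   // a = {1,2,3,4}, low lane first
      __m128 b = _mm_set_ps(8.f, 7.f, 6.f, 5.f);   // b = {5,6,7,8}
      __m128 r = _mm_move_ss(a, b);                // r = {5,2,3,4}: lanes 1-3 kept
      float out[4];
      _mm_storeu_ps(out, r);
      std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
    }
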
+
+let AddedComplexity = 15 in {
// Extract the low 32-bit value from one vector and insert it into another.
-let AddedComplexity = 15 in
def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
(MOVSSrr (v4f32 VR128:$src1),
(EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
+// Extract the low 64-bit value from one vector and insert it into another.
+def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
+ (MOVSDrr (v2f64 VR128:$src1),
+ (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
+}
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
(INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
+// Implicitly promote a 64-bit scalar to a vector.
+def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
+ (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
-// Loading from memory automatically zeroes the upper bits.
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
- "movss\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (loadf32 addr:$src))]>;
-
+let AddedComplexity = 20 in {
// MOVSSrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
-let AddedComplexity = 20 in {
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
(SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
(SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
(SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
+// MOVSDrm zeros the high parts of the register; represent this
+// with SUBREG_TO_REG.
+def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+def : Pat<(v2f64 (X86vzload addr:$src)),
+ (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
}
// Store scalar value to memory.
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
"movss\t{$src, $dst|$dst, $src}",
[(store FR32:$src, addr:$dst)]>;
+def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
+ "movsd\t{$src, $dst|$dst, $src}",
+ [(store FR64:$src, addr:$dst)]>;
+
+let isAsmParserOnly = 1 in {
+def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
+ "movss\t{$src, $dst|$dst, $src}",
+ [(store FR32:$src, addr:$dst)]>, XS, VEX_4V;
+def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
+ "movsd\t{$src, $dst|$dst, $src}",
+ [(store FR64:$src, addr:$dst)]>, XD, VEX_4V;
+}
// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
addr:$dst),
(MOVSSmr addr:$dst,
(EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
+def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
+ addr:$dst),
+ (MOVSDmr addr:$dst,
+ (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
-// Conversion instructions
-def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
- "cvttss2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
-def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
- "cvttss2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
-def CVTSI2SSrr : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
- "cvtsi2ss\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
-def CVTSI2SSrm : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
- "cvtsi2ss\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
-
-// Match intrinsics which expect XMM operand(s).
-def CVTSS2SIrr: SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
- "cvtss2si{l}\t{$src, $dst|$dst, $src}", []>;
-def CVTSS2SIrm: SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
- "cvtss2si{l}\t{$src, $dst|$dst, $src}", []>;
-
-def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "cvtss2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
-def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
- "cvtss2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse_cvtss2si
- (load addr:$src)))]>;
-
-// Match intrinsics which expect MM and XMM operand(s).
-def Int_CVTPS2PIrr : PSI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "cvtps2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvtps2pi VR128:$src))]>;
-def Int_CVTPS2PIrm : PSI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
- "cvtps2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvtps2pi
- (load addr:$src)))]>;
-def Int_CVTTPS2PIrr: PSI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "cvttps2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvttps2pi VR128:$src))]>;
-def Int_CVTTPS2PIrm: PSI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
- "cvttps2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvttps2pi
- (load addr:$src)))]>;
-let Constraints = "$src1 = $dst" in {
- def Int_CVTPI2PSrr : PSI<0x2A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR64:$src2),
- "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
- VR64:$src2))]>;
- def Int_CVTPI2PSrm : PSI<0x2A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
- "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
- (load addr:$src2)))]>;
-}
-
-// Aliases for intrinsics
-def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "cvttss2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst,
- (int_x86_sse_cvttss2si VR128:$src))]>;
-def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
- "cvttss2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst,
- (int_x86_sse_cvttss2si(load addr:$src)))]>;
-
-let Constraints = "$src1 = $dst" in {
- def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
- "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
- GR32:$src2))]>;
- def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
- "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
- (loadi32 addr:$src2)))]>;
-}
-
-// Comparison instructions
-let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
- def CMPSSrr : SSIi8<0xC2, MRMSrcReg,
- (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
- "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
-let mayLoad = 1 in
- def CMPSSrm : SSIi8<0xC2, MRMSrcMem,
- (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
- "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
-
- // Accept explicit immediate argument form instead of comparison code.
-let isAsmParserOnly = 1 in {
- def CMPSSrr_alt : SSIi8<0xC2, MRMSrcReg,
- (outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
- "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>;
-let mayLoad = 1 in
- def CMPSSrm_alt : SSIi8<0xC2, MRMSrcMem,
- (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
- "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>;
-}
-}
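
The SSECC operand above is the standard 3-bit SSE compare predicate carried in
the trailing immediate; the _alt defs only exist so the assembler also accepts
the raw immediate form. The encoding, as a sketch:

    // Standard cmpss/cmpps predicate encoding (imm8 bits 2:0).
    enum SSECond : unsigned char {
      CMP_EQ = 0, CMP_LT = 1, CMP_LE = 2, CMP_UNORD = 3,
      CMP_NEQ = 4, CMP_NLT = 5, CMP_NLE = 6, CMP_ORD = 7
    };
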
-
-let Defs = [EFLAGS] in {
-def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
- "ucomiss\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp FR32:$src1, FR32:$src2))]>;
-def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
- "ucomiss\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp FR32:$src1, (loadf32 addr:$src2)))]>;
-
-def COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "comiss\t{$src2, $src1|$src1, $src2}", []>;
-def COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
- "comiss\t{$src2, $src1|$src1, $src2}", []>;
-
-} // Defs = [EFLAGS]
-
-// Aliases to match intrinsics which expect XMM operand(s).
-let Constraints = "$src1 = $dst" in {
- def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg,
- (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src, SSECC:$cc),
- "cmp${cc}ss\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_cmp_ss
- VR128:$src1,
- VR128:$src, imm:$cc))]>;
- def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem,
- (outs VR128:$dst),
- (ins VR128:$src1, f32mem:$src, SSECC:$cc),
- "cmp${cc}ss\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
- (load addr:$src), imm:$cc))]>;
-}
-
-let Defs = [EFLAGS] in {
-def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "ucomiss\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86ucomi (v4f32 VR128:$src1),
- VR128:$src2))]>;
-def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
- "ucomiss\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86ucomi (v4f32 VR128:$src1),
- (load addr:$src2)))]>;
-
-def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "comiss\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86comi (v4f32 VR128:$src1),
- VR128:$src2))]>;
-def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
- "comiss\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86comi (v4f32 VR128:$src1),
- (load addr:$src2)))]>;
-} // Defs = [EFLAGS]
-
-// Aliases of packed SSE1 instructions for scalar use. These all have names
-// that start with 'Fs'.
-
-// Alias instructions that map fld0 to pxor for sse.
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
- canFoldAsLoad = 1 in
- // FIXME: Set encoding to pseudo!
-def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
- [(set FR32:$dst, fp32imm0)]>,
- Requires<[HasSSE1]>, TB, OpSize;
-
-// Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are
-// disregarded.
+// Move Aligned/Unaligned floating point values
+multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
+ X86MemOperand x86memop, PatFrag ld_frag,
+ string asm, Domain d,
+ bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
-def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
- "movaps\t{$src, $dst|$dst, $src}", []>;
-
-// Alias instruction to load FR32 from f128mem using movaps. Upper bits are
-// disregarded.
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
- "movaps\t{$src, $dst|$dst, $src}",
- [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
-
-// Alias bitwise logical operations using SSE logical ops on packed FP values.
-let Constraints = "$src1 = $dst" in {
-let isCommutable = 1 in {
- def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst),
- (ins FR32:$src1, FR32:$src2),
- "andps\t{$src2, $dst|$dst, $src2}",
- [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
- def FsORPSrr : PSI<0x56, MRMSrcReg, (outs FR32:$dst),
- (ins FR32:$src1, FR32:$src2),
- "orps\t{$src2, $dst|$dst, $src2}",
- [(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>;
- def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst),
- (ins FR32:$src1, FR32:$src2),
- "xorps\t{$src2, $dst|$dst, $src2}",
- [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
-}
-
-def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1, f128mem:$src2),
- "andps\t{$src2, $dst|$dst, $src2}",
- [(set FR32:$dst, (X86fand FR32:$src1,
- (memopfsf32 addr:$src2)))]>;
-def FsORPSrm : PSI<0x56, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1, f128mem:$src2),
- "orps\t{$src2, $dst|$dst, $src2}",
- [(set FR32:$dst, (X86for FR32:$src1,
- (memopfsf32 addr:$src2)))]>;
-def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1, f128mem:$src2),
- "xorps\t{$src2, $dst|$dst, $src2}",
- [(set FR32:$dst, (X86fxor FR32:$src1,
- (memopfsf32 addr:$src2)))]>;
-
-let neverHasSideEffects = 1 in {
-def FsANDNPSrr : PSI<0x55, MRMSrcReg,
- (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
- "andnps\t{$src2, $dst|$dst, $src2}", []>;
-let mayLoad = 1 in
-def FsANDNPSrm : PSI<0x55, MRMSrcMem,
- (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
- "andnps\t{$src2, $dst|$dst, $src2}", []>;
-}
+ def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
+let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
+ def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(set RC:$dst, (ld_frag addr:$src))], d>;
}
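
The ld_frag parameter (alignedloadv4f32 vs. loadv4f32, and so on) is what
distinguishes the faulting aligned forms from the unaligned ones; at the
intrinsic level the same split looks like this (a sketch, assuming an
SSE-capable compiler):

    #include <xmmintrin.h>

    __m128 load_packed(const float *p, bool known16ByteAligned) {
      // _mm_load_ps lowers to movaps and faults on a misaligned address;
      // _mm_loadu_ps lowers to movups and accepts any address.
      return known16ByteAligned ? _mm_load_ps(p) : _mm_loadu_ps(p);
    }
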
-/// basic_sse1_fp_binop_rm - SSE1 binops come in both scalar and vector forms.
-///
-/// In addition, we also have a special variant of the scalar form here to
-/// represent the associated intrinsic operation. This form is unlike the
-/// plain scalar form, in that it takes an entire vector (instead of a scalar)
-/// and leaves the top elements unmodified (therefore these cannot be commuted).
-///
-/// These three forms can each be reg+reg or reg+mem, so there are a total of
-/// six "instructions".
-///
-let Constraints = "$src1 = $dst" in {
-multiclass basic_sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
- SDNode OpNode, Intrinsic F32Int,
- bit Commutable = 0> {
- // Scalar operation, reg+reg.
- def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, reg+mem.
- def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1, f32mem:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;
-
- // Vector operation, reg+reg.
- def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
- let isCommutable = Commutable;
- }
-
- // Vector operation, reg+mem.
- def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;
-
- // Intrinsic operation, reg+reg.
- def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]>;
-
- // Intrinsic operation, reg+mem.
- def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, ssmem:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F32Int VR128:$src1,
- sse_load_f32:$src2))]>;
-}
-}
-
-// Arithmetic instructions
-defm ADD : basic_sse1_fp_binop_rm<0x58, "add", fadd, int_x86_sse_add_ss, 1>;
-defm MUL : basic_sse1_fp_binop_rm<0x59, "mul", fmul, int_x86_sse_mul_ss, 1>;
-defm SUB : basic_sse1_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse_sub_ss>;
-defm DIV : basic_sse1_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse_div_ss>;
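
The doc comment above notes that the intrinsic scalar form takes whole vectors
and leaves the top elements of the first operand unmodified, which is why it
cannot be commuted. In intrinsic terms:

    #include <xmmintrin.h>

    // addss replaces lane 0 only and keeps lanes 1-3 of the *first*
    // operand, so swapping the operands changes the result.
    __m128 scalar_add(__m128 a, __m128 b) { return _mm_add_ss(a, b); }
    // addps is lane-wise and genuinely commutative.
    __m128 packed_add(__m128 a, __m128 b) { return _mm_add_ps(a, b); }
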
-
-/// sse1_fp_binop_rm - Other SSE1 binops
-///
-/// This multiclass is like basic_sse1_fp_binop_rm, with the addition of
-/// instructions for a full-vector intrinsic form. Operations that map
-/// onto C operators don't use this form since they just use the plain
-/// vector form instead of having a separate vector intrinsic form.
-///
-/// This provides a total of eight "instructions".
-///
-let Constraints = "$src1 = $dst" in {
-multiclass sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
- SDNode OpNode,
- Intrinsic F32Int,
- Intrinsic V4F32Int,
- bit Commutable = 0> {
-
- // Scalar operation, reg+reg.
- def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, reg+mem.
- def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1, f32mem:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;
-
- // Vector operation, reg+reg.
- def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
- let isCommutable = Commutable;
- }
-
- // Vector operation, reg+mem.
- def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;
-
- // Intrinsic operation, reg+reg.
- def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Intrinsic operation, reg+mem.
- def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, ssmem:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F32Int VR128:$src1,
- sse_load_f32:$src2))]>;
-
- // Vector intrinsic operation, reg+reg.
- def PSrr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (V4F32Int VR128:$src1, VR128:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Vector intrinsic operation, reg+mem.
- def PSrm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (V4F32Int VR128:$src1, (memopv4f32 addr:$src2)))]>;
-}
+let isAsmParserOnly = 1 in {
+defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
+ "movaps", SSEPackedSingle>, VEX;
+defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
+ "movapd", SSEPackedDouble>, OpSize, VEX;
+defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
+ "movups", SSEPackedSingle>, VEX;
+defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
+ "movupd", SSEPackedDouble, 0>, OpSize, VEX;
+
+defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
+ "movaps", SSEPackedSingle>, VEX;
+defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
+ "movapd", SSEPackedDouble>, OpSize, VEX;
+defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
+ "movups", SSEPackedSingle>, VEX;
+defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
+ "movupd", SSEPackedDouble, 0>, OpSize, VEX;
}
+defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
+ "movaps", SSEPackedSingle>, TB;
+defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
+ "movapd", SSEPackedDouble>, TB, OpSize;
+defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
+ "movups", SSEPackedSingle>, TB;
+defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
+ "movupd", SSEPackedDouble, 0>, TB, OpSize;
-defm MAX : sse1_fp_binop_rm<0x5F, "max", X86fmax,
- int_x86_sse_max_ss, int_x86_sse_max_ps>;
-defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin,
- int_x86_sse_min_ss, int_x86_sse_min_ps>;
-
-//===----------------------------------------------------------------------===//
-// SSE packed FP Instructions
-
-// Move Instructions
-let neverHasSideEffects = 1 in
-def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movaps\t{$src, $dst|$dst, $src}", []>;
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+let isAsmParserOnly = 1 in {
+def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movaps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;
-
+ [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
+def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movapd\t{$src, $dst|$dst, $src}",
+ [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
+def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movups\t{$src, $dst|$dst, $src}",
+ [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
+def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
+def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
+ "movaps\t{$src, $dst|$dst, $src}",
+ [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
+def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
+ "movapd\t{$src, $dst|$dst, $src}",
+ [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
+def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
+ "movups\t{$src, $dst|$dst, $src}",
+ [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
+def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
+}
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movaps\t{$src, $dst|$dst, $src}",
[(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
-
-let neverHasSideEffects = 1 in
-def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movups\t{$src, $dst|$dst, $src}", []>;
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movups\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (loadv4f32 addr:$src))]>;
+def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movapd\t{$src, $dst|$dst, $src}",
+ [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
[(store (v4f32 VR128:$src), addr:$dst)]>;
+def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(store (v2f64 VR128:$src), addr:$dst)]>;
-// Intrinsic forms of MOVUPS load and store
+// Intrinsic forms of MOVUPS/D load and store
+let isAsmParserOnly = 1 in {
+ let canFoldAsLoad = 1, isReMaterializable = 1 in
+ def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
+ (ins f128mem:$src),
+ "movups\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
+ def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
+ (ins f128mem:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
+ def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movups\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
+ def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
+}
let canFoldAsLoad = 1, isReMaterializable = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"movups\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
+def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
+
def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
"movups\t{$src, $dst|$dst, $src}",
[(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
+def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movupd\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
-let Constraints = "$src1 = $dst" in {
- let AddedComplexity = 20 in {
- def MOVLPSrm : PSI<0x12, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
- "movlps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (movlp VR128:$src1,
- (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))]>;
- def MOVHPSrm : PSI<0x16, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
- "movhps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (movlhps VR128:$src1,
- (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))]>;
- } // AddedComplexity
-} // Constraints = "$src1 = $dst"
-
+// Move Low/High packed floating point values
+multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
+ PatFrag mov_frag, string base_opc,
+ string asm_opr> {
+ def PSrm : PI<opc, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
+ !strconcat(!strconcat(base_opc,"s"), asm_opr),
+ [(set RC:$dst,
+ (mov_frag RC:$src1,
+ (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
+ SSEPackedSingle>, TB;
+
+ def PDrm : PI<opc, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
+ !strconcat(!strconcat(base_opc,"d"), asm_opr),
+ [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
+ (scalar_to_vector (loadf64 addr:$src2)))))],
+ SSEPackedDouble>, TB, OpSize;
+}
-def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
- (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
+let isAsmParserOnly = 1, AddedComplexity = 20 in {
+ defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
+ defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
+}
+let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
+ defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
+ "\t{$src2, $dst|$dst, $src2}">;
+ defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
+ "\t{$src2, $dst|$dst, $src2}">;
+}
+let isAsmParserOnly = 1 in {
+def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movlps\t{$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
+ (iPTR 0))), addr:$dst)]>, VEX;
+def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movlpd\t{$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract (v2f64 VR128:$src),
+ (iPTR 0))), addr:$dst)]>, VEX;
+}
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movlps\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
(iPTR 0))), addr:$dst)]>;
+def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movlpd\t{$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract (v2f64 VR128:$src),
+ (iPTR 0))), addr:$dst)]>;
// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
+let isAsmParserOnly = 1 in {
+def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movhps\t{$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract
+ (unpckh (bc_v2f64 (v4f32 VR128:$src)),
+ (undef)), (iPTR 0))), addr:$dst)]>,
+ VEX;
+def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movhpd\t{$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract
+ (v2f64 (unpckh VR128:$src, (undef))),
+ (iPTR 0))), addr:$dst)]>,
+ VEX;
+}
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
"movhps\t{$src, $dst|$dst, $src}",
[(store (f64 (vector_extract
(unpckh (bc_v2f64 (v4f32 VR128:$src)),
(undef)), (iPTR 0))), addr:$dst)]>;
+def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
+ "movhpd\t{$src, $dst|$dst, $src}",
+ [(store (f64 (vector_extract
+ (v2f64 (unpckh VR128:$src, (undef))),
+ (iPTR 0))), addr:$dst)]>;
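
The custom lowering mentioned above (unpack high to low, then extract element
0) has a direct intrinsic analogue; a sketch, assuming SSE2:

    #include <emmintrin.h>

    // Store the high double of a v2f64 the way the comment describes.
    void store_high_f64(double *p, __m128d v) {
      __m128d hi = _mm_unpackhi_pd(v, v);   // { v1, v1 }
      _mm_store_sd(p, hi);                  // same effect as movhpd [p], v
    }
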
-let Constraints = "$src1 = $dst" in {
-let AddedComplexity = 20 in {
-def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- "movlhps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
-
-def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- "movhlps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
-} // AddedComplexity
-} // Constraints = "$src1 = $dst"
+let isAsmParserOnly = 1, AddedComplexity = 20 in {
+ def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
+ VEX_4V;
+ def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
+ VEX_4V;
+}
+let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
+ def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ "movlhps\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
+ def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ "movhlps\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst,
+ (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
+}
+def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
+ (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
let AddedComplexity = 20 in {
-def : Pat<(v4f32 (movddup VR128:$src, (undef))),
- (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
-def : Pat<(v2i64 (movddup VR128:$src, (undef))),
- (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
+ def : Pat<(v4f32 (movddup VR128:$src, (undef))),
+ (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
+ def : Pat<(v2i64 (movddup VR128:$src, (undef))),
+ (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
}
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Conversion Instructions
+//===----------------------------------------------------------------------===//
+multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
+ string asm> {
+ def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
+ [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
+ [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
+}
-// Arithmetic
-
-/// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
-///
-/// In addition, we also have a special variant of the scalar form here to
-/// represent the associated intrinsic operation. This form is unlike the
-/// plain scalar form, in that it takes an entire vector (instead of a
-/// scalar) and leaves the top elements undefined.
-///
-/// And, we have a special variant form for a full-vector intrinsic form.
-///
-/// These four forms can each have a reg or a mem operand, so there are a
-/// total of eight "instructions".
-///
-multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr,
- SDNode OpNode,
- Intrinsic F32Int,
- Intrinsic V4F32Int,
- bit Commutable = 0> {
- // Scalar operation, reg.
- def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set FR32:$dst, (OpNode FR32:$src))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, mem.
- def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
- Requires<[HasSSE1, OptForSize]>;
+multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
+ string asm, Domain d> {
+ def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
+ [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
+ def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
+ [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
+}
- // Vector operation, reg.
- def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]> {
- let isCommutable = Commutable;
- }
+multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
+ string asm> {
+ def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
+ asm, []>;
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
+ (ins DstRC:$src1, x86memop:$src), asm, []>;
+}
- // Vector operation, mem.
- def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
+let isAsmParserOnly = 1 in {
+defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
+ "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
+defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
+ "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
+defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
+ "cvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}">, XS,
+ VEX_4V;
+defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
+ "cvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}">, XD,
+ VEX_4V;
+}
- // Intrinsic operation, reg.
- def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F32Int VR128:$src))]> {
- let isCommutable = Commutable;
- }
+defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
+ "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
+defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
+ "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
+defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
+ "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
+defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
+ "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
+
+// Conversion Instruction Intrinsics - Match intrinsics which expect MM
+// and/or XMM operand(s).
+multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
+ string asm, Domain d> {
+ def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
+ [(set DstRC:$dst, (Int SrcRC:$src))], d>;
+ def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
+ [(set DstRC:$dst, (Int (ld_frag addr:$src)))], d>;
+}
- // Intrinsic operation, mem.
- def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
- !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
+multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
+ Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
+ string asm> {
+ def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
+ [(set DstRC:$dst, (Int SrcRC:$src))]>;
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
+ [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
+}
- // Vector intrinsic operation, reg
- def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (V4F32Int VR128:$src))]> {
- let isCommutable = Commutable;
- }
+multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC,
+ RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
+ PatFrag ld_frag, string asm, Domain d> {
+ def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
+ asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))], d>;
+ def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst),
+ (ins DstRC:$src1, x86memop:$src2), asm,
+ [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))], d>;
+}
- // Vector intrinsic operation, mem
- def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
+multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
+ RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
+ PatFrag ld_frag, string asm> {
+ def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
+ asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
+ def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
+ (ins DstRC:$src1, x86memop:$src2), asm,
+ [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
}
-// Square root.
-defm SQRT : sse1_fp_unop_rm<0x51, "sqrt", fsqrt,
- int_x86_sse_sqrt_ss, int_x86_sse_sqrt_ps>;
+let isAsmParserOnly = 1 in {
+ defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
+ f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS,
+ VEX;
+ defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
+ f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD,
+ VEX;
+}
+defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
+ f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS;
+defm Int_CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
+ f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD;
-// Reciprocal approximations. Note that these typically require refinement
-// in order to obtain suitable precision.
-defm RSQRT : sse1_fp_unop_rm<0x52, "rsqrt", X86frsqrt,
- int_x86_sse_rsqrt_ss, int_x86_sse_rsqrt_ps>;
-defm RCP : sse1_fp_unop_rm<0x53, "rcp", X86frcp,
- int_x86_sse_rcp_ss, int_x86_sse_rcp_ps>;
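
The refinement the comment refers to is typically one Newton-Raphson step,
which roughly doubles the ~12 accurate bits of the hardware estimate; a sketch
for the reciprocal case:

    #include <xmmintrin.h>

    // x1 = x0 * (2 - d * x0), starting from the rcpps estimate x0.
    __m128 refined_recip(__m128 d) {
      __m128 x0 = _mm_rcp_ps(d);
      return _mm_mul_ps(x0, _mm_sub_ps(_mm_set1_ps(2.0f),
                                       _mm_mul_ps(d, x0)));
    }
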
-// Logical
let Constraints = "$src1 = $dst" in {
- let isCommutable = 1 in {
- def ANDPSrr : PSI<0x54, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "andps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64
- (and VR128:$src1, VR128:$src2)))]>;
- def ORPSrr : PSI<0x56, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "orps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64
- (or VR128:$src1, VR128:$src2)))]>;
- def XORPSrr : PSI<0x57, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "xorps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64
- (xor VR128:$src1, VR128:$src2)))]>;
- }
-
- def ANDPSrm : PSI<0x54, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "andps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (and (bc_v2i64 (v4f32 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>;
- def ORPSrm : PSI<0x56, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "orps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (or (bc_v2i64 (v4f32 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>;
- def XORPSrm : PSI<0x57, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "xorps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (xor (bc_v2i64 (v4f32 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>;
- def ANDNPSrr : PSI<0x55, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "andnps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2i64 (and (xor VR128:$src1,
- (bc_v2i64 (v4i32 immAllOnesV))),
- VR128:$src2)))]>;
- def ANDNPSrm : PSI<0x55, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
- "andnps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
- (bc_v2i64 (v4i32 immAllOnesV))),
- (memopv2i64 addr:$src2))))]>;
+ defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+ int_x86_sse_cvtsi2ss, i32mem, loadi32,
+ "cvtsi2ss\t{$src2, $dst|$dst, $src2}">, XS;
+ defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+ int_x86_sse2_cvtsi2sd, i32mem, loadi32,
+ "cvtsi2ss\t{$src2, $dst|$dst, $src2}">, XD;
}
+// Instructions below don't have an AVX form.
+defm Int_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
+ f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle>, TB;
+defm Int_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
+ f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
+ SSEPackedDouble>, TB, OpSize;
+defm Int_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
+ f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle>, TB;
+defm Int_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
+ f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
+ SSEPackedDouble>, TB, OpSize;
+defm Int_CVTPI2PD : sse12_cvt_pint<0x2A, VR64, VR128, int_x86_sse_cvtpi2pd,
+ i64mem, load, "cvtpi2pd\t{$src, $dst|$dst, $src}",
+ SSEPackedDouble>, TB, OpSize;
let Constraints = "$src1 = $dst" in {
- def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
- "cmp${cc}ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
- VR128:$src, imm:$cc))]>;
- def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
- "cmp${cc}ps\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
- (memop addr:$src), imm:$cc))]>;
-
- // Accept explicit immediate argument form instead of comparison code.
-let isAsmParserOnly = 1 in {
- def CMPPSrri_alt : PSIi8<0xC2, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src, i8imm:$src2),
- "cmpps\t{$src2, $src, $dst|$dst, $src, $src}", []>;
- def CMPPSrmi_alt : PSIi8<0xC2, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, i8imm:$src2),
- "cmpps\t{$src2, $src, $dst|$dst, $src, $src}", []>;
-}
+ defm Int_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
+ int_x86_sse_cvtpi2ps,
+ i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
+ SSEPackedSingle>, TB;
}
-def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
- (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
-def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
- (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
-
-// Shuffle and unpack instructions
-let Constraints = "$src1 = $dst" in {
- let isConvertibleToThreeAddress = 1 in // Convert to pshufd
- def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1,
- VR128:$src2, i8imm:$src3),
- "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR128:$dst,
- (v4f32 (shufp:$src3 VR128:$src1, VR128:$src2)))]>;
- def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1,
- f128mem:$src2, i8imm:$src3),
- "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR128:$dst,
- (v4f32 (shufp:$src3
- VR128:$src1, (memopv4f32 addr:$src2))))]>;
-
- let AddedComplexity = 10 in {
- def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "unpckhps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v4f32 (unpckh VR128:$src1, VR128:$src2)))]>;
- def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "unpckhps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v4f32 (unpckh VR128:$src1,
- (memopv4f32 addr:$src2))))]>;
-
- def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "unpcklps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v4f32 (unpckl VR128:$src1, VR128:$src2)))]>;
- def UNPCKLPSrm : PSI<0x14, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "unpcklps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (unpckl VR128:$src1, (memopv4f32 addr:$src2)))]>;
- } // AddedComplexity
-} // Constraints = "$src1 = $dst"
-
-// Mask creation
-def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "movmskps\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
-def MOVMSKPDrr : PDI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "movmskpd\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;
-
-// Prefetch intrinsic.
-def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
- "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
-def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
- "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
-def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
- "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
-def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
- "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
-
-// Non-temporal stores
-def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
- "movntps\t{$src, $dst|$dst, $src}",
- [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
-let AddedComplexity = 400 in { // Prefer non-temporal versions
-def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movntps\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
-
-def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movntdq\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
+/// SSE 1 Only
-def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
- "movnti\t{$src, $dst|$dst, $src}",
- [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
- TB, Requires<[HasSSE2]>;
-
-def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
- "movnti\t{$src, $dst|$dst, $src}",
- [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
- TB, Requires<[HasSSE2]>;
+// Aliases for intrinsics
+let isAsmParserOnly = 1, Pattern = []<dag> in {
+defm Int_VCVTTSS2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
+ int_x86_sse_cvttss2si, f32mem, load,
+ "cvttss2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS;
+defm Int_VCVTTSD2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
+ int_x86_sse2_cvttsd2si, f128mem, load,
+ "cvttss2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD;
}
-
-// Load, store, and memory fence
-def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
- TB, Requires<[HasSSE1]>;
-
-// MXCSR register
-def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
- "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
-def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
- "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
-
-// Alias instructions that map zero vector to pxor / xorp* for sse.
-// We set canFoldAsLoad because this can be converted to a constant-pool
-// load of an all-zeros value if folding it would be beneficial.
-// FIXME: Change encoding to pseudo!
-let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isCodeGenOnly = 1 in {
-def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
- [(set VR128:$dst, (v4f32 immAllZerosV))]>;
-def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
- [(set VR128:$dst, (v2f64 immAllZerosV))]>;
-let ExeDomain = SSEPackedInt in
-def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
- [(set VR128:$dst, (v4i32 immAllZerosV))]>;
+defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
+ f32mem, load, "cvttss2si\t{$src, $dst|$dst, $src}">,
+ XS;
+defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
+ f128mem, load, "cvttss2si\t{$src, $dst|$dst, $src}">,
+ XD;
+
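For reference: cvttss2si/cvttsd2si truncate toward zero, which is exactly C's float-to-int cast semantics; a minimal C sketch (illustrative names, standard <emmintrin.h> intrinsics):

    #include <emmintrin.h>

    /* Both compile to the truncating forms above; a plain (int)x cast on
       a float or double selects the same opcodes. */
    int trunc_f32(__m128 x)  { return _mm_cvttss_si32(x); }
    int trunc_f64(__m128d x) { return _mm_cvttsd_si32(x); }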
+let isAsmParserOnly = 1, Pattern = []<dag> in {
+defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
+ "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
+defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load,
+ "cvtdq2ps\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle>, TB, VEX;
+defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, f256mem, load,
+ "cvtdq2ps\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle>, TB, VEX;
}
-
-def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
-def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
-def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
-
-def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
- (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
-
-//===---------------------------------------------------------------------===//
-// SSE2 Instructions
-//===---------------------------------------------------------------------===//
-
-// Move Instructions. Register-to-register movsd is not used for FR64
-// register copies because it's a partial register update; FsMOVAPDrr is
-// used instead. Register-to-register movsd is not modeled as an INSERT_SUBREG
-// because INSERT_SUBREG requires that the insert be implementable in terms of
-// a copy, and just mentioned, we don't use movsd for copies.
-let Constraints = "$src1 = $dst" in
-def MOVSDrr : SDI<0x10, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
- "movsd\t{$src2, $dst|$dst, $src2}",
- [(set (v2f64 VR128:$dst),
- (movl VR128:$src1, (scalar_to_vector FR64:$src2)))]>;
-
-// Extract the low 64-bit value from one vector and insert it into another.
-let AddedComplexity = 15 in
-def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
- (MOVSDrr (v2f64 VR128:$src1),
- (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
-
-// Implicitly promote a 64-bit scalar to a vector.
-def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
- (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
-
-// Loading from memory automatically zeroing upper bits.
-let canFoldAsLoad = 1, isReMaterializable = 1, AddedComplexity = 20 in
-def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
- "movsd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (loadf64 addr:$src))]>;
-
-// MOVSDrm zeros the high parts of the register; represent this
-// with SUBREG_TO_REG.
-let AddedComplexity = 20 in {
-def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
-def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
-def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
-def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
-def : Pat<(v2f64 (X86vzload addr:$src)),
- (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
+let Pattern = []<dag> in {
+defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
+ "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
+defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load /*dummy*/,
+ "cvtdq2ps\t{$src, $dst|$dst, $src}",
+ SSEPackedSingle>, TB; /* PD SSE3 form is available */
}
-// Store scalar value to memory.
-def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
- "movsd\t{$src, $dst|$dst, $src}",
- [(store FR64:$src, addr:$dst)]>;
-
-// Extract and store.
-def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
- addr:$dst),
- (MOVSDmr addr:$dst,
- (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
+/// SSE 2 Only
-// Conversion instructions
-def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
- "cvttsd2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
-def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src),
- "cvttsd2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
+// Convert scalar double to scalar single
+let isAsmParserOnly = 1 in {
+def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
+ (ins FR64:$src1, FR64:$src2),
+ "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ VEX_4V;
+def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
+ (ins FR64:$src1, f64mem:$src2),
+ "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
+}
def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
"cvtsd2ss\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (fround FR64:$src))]>;
@@ -1226,35 +698,28 @@ def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
"cvtsd2ss\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
Requires<[HasSSE2, OptForSize]>;
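For reference, the (fround FR64:$src) pattern above is what a plain C narrowing cast selects; a minimal sketch (illustrative names, standard <emmintrin.h> intrinsics):

    #include <emmintrin.h>

    float narrow_scalar(double d) { return (float)d; }   /* cvtsd2ss */

    /* Intrinsic form: low float converted from b, upper three from a. */
    __m128 narrow_low(__m128 a, __m128d b) {
      return _mm_cvtsd_ss(a, b);
    }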
-def CVTSI2SDrr : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src),
- "cvtsi2sd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
-def CVTSI2SDrm : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src),
- "cvtsi2sd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
-def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
-def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
-def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
-def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
-def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
-def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
-def CVTDQ2PSrr : PSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtdq2ps\t{$src, $dst|$dst, $src}", []>;
-def CVTDQ2PSrm : PSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtdq2ps\t{$src, $dst|$dst, $src}", []>;
-def COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "comisd\t{$src2, $src1|$src1, $src2}", []>;
-def COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
- "comisd\t{$src2, $src1|$src1, $src2}", []>;
-
-// SSE2 instructions with XS prefix
+let isAsmParserOnly = 1 in
+defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
+ int_x86_sse2_cvtsd2ss, f64mem, load,
+ "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}">,
+ XD, VEX_4V;
+let Constraints = "$src1 = $dst" in
+defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
+ int_x86_sse2_cvtsd2ss, f64mem, load,
+ "cvtsd2ss\t{$src2, $dst|$dst, $src2}">, XS;
+
+// Convert scalar single to scalar double
+let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
+def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
+ (ins FR32:$src1, FR32:$src2),
+ "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, XS, Requires<[HasAVX]>, VEX_4V;
+def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
+ (ins FR32:$src1, f32mem:$src2),
+ "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
+}
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
"cvtss2sd\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (fextend FR32:$src))]>, XS,
@@ -1264,394 +729,51 @@ def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
[(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
Requires<[HasSSE2, OptForSize]>;
-def : Pat<(extloadf32 addr:$src),
- (CVTSS2SDrr (MOVSSrm addr:$src))>,
- Requires<[HasSSE2, OptForSpeed]>;
-
-// Match intrinsics which expect XMM operand(s).
-def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "cvtsd2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
-def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
- "cvtsd2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_cvtsd2si
- (load addr:$src)))]>;
-
-// Match intrinsics which expect MM and XMM operand(s).
-def Int_CVTPD2PIrr : PDI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "cvtpd2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvtpd2pi VR128:$src))]>;
-def Int_CVTPD2PIrm : PDI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
- "cvtpd2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvtpd2pi
- (memop addr:$src)))]>;
-def Int_CVTTPD2PIrr: PDI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
- "cvttpd2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvttpd2pi VR128:$src))]>;
-def Int_CVTTPD2PIrm: PDI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
- "cvttpd2pi\t{$src, $dst|$dst, $src}",
- [(set VR64:$dst, (int_x86_sse_cvttpd2pi
- (memop addr:$src)))]>;
-def Int_CVTPI2PDrr : PDI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
- "cvtpi2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_cvtpi2pd VR64:$src))]>;
-def Int_CVTPI2PDrm : PDI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
- "cvtpi2pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse_cvtpi2pd
- (load addr:$src)))]>;
-
-// Aliases for intrinsics
-def Int_CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "cvttsd2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst,
- (int_x86_sse2_cvttsd2si VR128:$src))]>;
-def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
- "cvttsd2si\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_cvttsd2si
- (load addr:$src)))]>;
-
-// Comparison instructions
-let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
- def CMPSDrr : SDIi8<0xC2, MRMSrcReg,
- (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc),
- "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
-let mayLoad = 1 in
- def CMPSDrm : SDIi8<0xC2, MRMSrcMem,
- (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc),
- "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
-
- // Accept explicit immediate argument form instead of comparison code.
let isAsmParserOnly = 1 in {
- def CMPSDrr_alt : SDIi8<0xC2, MRMSrcReg,
- (outs FR64:$dst), (ins FR64:$src1, FR64:$src, i8imm:$src2),
- "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>;
-let mayLoad = 1 in
- def CMPSDrm_alt : SDIi8<0xC2, MRMSrcMem,
- (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, i8imm:$src2),
- "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>;
-}
-}
-
-let Defs = [EFLAGS] in {
-def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
- "ucomisd\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp FR64:$src1, FR64:$src2))]>;
-def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
- "ucomisd\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86cmp FR64:$src1, (loadf64 addr:$src2)))]>;
-} // Defs = [EFLAGS]
-
-// Aliases to match intrinsics which expect XMM operand(s).
-let Constraints = "$src1 = $dst" in {
- def Int_CMPSDrr : SDIi8<0xC2, MRMSrcReg,
- (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src, SSECC:$cc),
- "cmp${cc}sd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
- VR128:$src, imm:$cc))]>;
- def Int_CMPSDrm : SDIi8<0xC2, MRMSrcMem,
- (outs VR128:$dst),
- (ins VR128:$src1, f64mem:$src, SSECC:$cc),
- "cmp${cc}sd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
- (load addr:$src), imm:$cc))]>;
-}
-
-let Defs = [EFLAGS] in {
-def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "ucomisd\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86ucomi (v2f64 VR128:$src1),
- VR128:$src2))]>;
-def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
- "ucomisd\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86ucomi (v2f64 VR128:$src1),
- (load addr:$src2)))]>;
-
-def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
- "comisd\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86comi (v2f64 VR128:$src1),
- VR128:$src2))]>;
-def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
- "comisd\t{$src2, $src1|$src1, $src2}",
- [(set EFLAGS, (X86comi (v2f64 VR128:$src1),
- (load addr:$src2)))]>;
-} // Defs = [EFLAGS]
-
-// Aliases of packed SSE2 instructions for scalar use. These all have names
-// that start with 'Fs'.
-
-// Alias instructions that map fld0 to pxor for sse.
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
- canFoldAsLoad = 1 in
-def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
- [(set FR64:$dst, fpimm0)]>,
- Requires<[HasSSE2]>, TB, OpSize;
-
-// Alias instruction to do FR64 reg-to-reg copy using movapd. Upper bits are
-// disregarded.
-let neverHasSideEffects = 1 in
-def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
- "movapd\t{$src, $dst|$dst, $src}", []>;
-
-// Alias instruction to load FR64 from f128mem using movapd. Upper bits are
-// disregarded.
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
- "movapd\t{$src, $dst|$dst, $src}",
- [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
-
-// Alias bitwise logical operations using SSE logical ops on packed FP values.
-let Constraints = "$src1 = $dst" in {
-let isCommutable = 1 in {
- def FsANDPDrr : PDI<0x54, MRMSrcReg, (outs FR64:$dst),
- (ins FR64:$src1, FR64:$src2),
- "andpd\t{$src2, $dst|$dst, $src2}",
- [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
- def FsORPDrr : PDI<0x56, MRMSrcReg, (outs FR64:$dst),
- (ins FR64:$src1, FR64:$src2),
- "orpd\t{$src2, $dst|$dst, $src2}",
- [(set FR64:$dst, (X86for FR64:$src1, FR64:$src2))]>;
- def FsXORPDrr : PDI<0x57, MRMSrcReg, (outs FR64:$dst),
- (ins FR64:$src1, FR64:$src2),
- "xorpd\t{$src2, $dst|$dst, $src2}",
- [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
-}
-
-def FsANDPDrm : PDI<0x54, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1, f128mem:$src2),
- "andpd\t{$src2, $dst|$dst, $src2}",
- [(set FR64:$dst, (X86fand FR64:$src1,
- (memopfsf64 addr:$src2)))]>;
-def FsORPDrm : PDI<0x56, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1, f128mem:$src2),
- "orpd\t{$src2, $dst|$dst, $src2}",
- [(set FR64:$dst, (X86for FR64:$src1,
- (memopfsf64 addr:$src2)))]>;
-def FsXORPDrm : PDI<0x57, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1, f128mem:$src2),
- "xorpd\t{$src2, $dst|$dst, $src2}",
- [(set FR64:$dst, (X86fxor FR64:$src1,
- (memopfsf64 addr:$src2)))]>;
-
-let neverHasSideEffects = 1 in {
-def FsANDNPDrr : PDI<0x55, MRMSrcReg,
- (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
- "andnpd\t{$src2, $dst|$dst, $src2}", []>;
-let mayLoad = 1 in
-def FsANDNPDrm : PDI<0x55, MRMSrcMem,
- (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
- "andnpd\t{$src2, $dst|$dst, $src2}", []>;
-}
-}
-
-/// basic_sse2_fp_binop_rm - SSE2 binops come in both scalar and vector forms.
-///
-/// In addition, we also have a special variant of the scalar form here to
-/// represent the associated intrinsic operation. This form is unlike the
-/// plain scalar form, in that it takes an entire vector (instead of a scalar)
-/// and leaves the top elements unmodified (therefore these cannot be commuted).
-///
-/// These three forms can each be reg+reg or reg+mem, so there are a total of
-/// six "instructions".
-///
-let Constraints = "$src1 = $dst" in {
-multiclass basic_sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
- SDNode OpNode, Intrinsic F64Int,
- bit Commutable = 0> {
- // Scalar operation, reg+reg.
- def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, reg+mem.
- def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1, f64mem:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
-
- // Vector operation, reg+reg.
- def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
- let isCommutable = Commutable;
- }
-
- // Vector operation, reg+mem.
- def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
-
- // Intrinsic operation, reg+reg.
- def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]>;
-
- // Intrinsic operation, reg+mem.
- def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, sdmem:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F64Int VR128:$src1,
- sse_load_f64:$src2))]>;
+def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
+ VR128:$src2))]>, XS, VEX_4V,
+ Requires<[HasAVX]>;
+def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
+ "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
+ (load addr:$src2)))]>, XS, VEX_4V,
+ Requires<[HasAVX]>;
}
+let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
+def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "cvtss2sd\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
+ VR128:$src2))]>, XS,
+ Requires<[HasSSE2]>;
+def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
+ "cvtss2sd\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
+ (load addr:$src2)))]>, XS,
+ Requires<[HasSSE2]>;
}
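For reference, the tied $src1 operand exists because the intrinsic form passes the upper element through; a minimal C sketch (illustrative name, standard <emmintrin.h> intrinsic):

    #include <emmintrin.h>

    /* Result: low double = (double)b[0]; high double copied from a[1]. */
    __m128d widen_low(__m128d a, __m128 b) {
      return _mm_cvtss_sd(a, b);
    }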
-// Arithmetic instructions
-defm ADD : basic_sse2_fp_binop_rm<0x58, "add", fadd, int_x86_sse2_add_sd, 1>;
-defm MUL : basic_sse2_fp_binop_rm<0x59, "mul", fmul, int_x86_sse2_mul_sd, 1>;
-defm SUB : basic_sse2_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse2_sub_sd>;
-defm DIV : basic_sse2_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse2_div_sd>;
-
-/// sse2_fp_binop_rm - Other SSE2 binops
-///
-/// This multiclass is like basic_sse2_fp_binop_rm, with the addition of
-/// instructions for a full-vector intrinsic form. Operations that map
-/// onto C operators don't use this form since they just use the plain
-/// vector form instead of having a separate vector intrinsic form.
-///
-/// This provides a total of eight "instructions".
-///
-let Constraints = "$src1 = $dst" in {
-multiclass sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
- SDNode OpNode,
- Intrinsic F64Int,
- Intrinsic V2F64Int,
- bit Commutable = 0> {
-
- // Scalar operation, reg+reg.
- def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, reg+mem.
- def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1, f64mem:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
-
- // Vector operation, reg+reg.
- def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
- let isCommutable = Commutable;
- }
-
- // Vector operation, reg+mem.
- def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
-
- // Intrinsic operation, reg+reg.
- def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Intrinsic operation, reg+mem.
- def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, sdmem:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (F64Int VR128:$src1,
- sse_load_f64:$src2))]>;
-
- // Vector intrinsic operation, reg+reg.
- def PDrr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (V2F64Int VR128:$src1, VR128:$src2))]> {
- let isCommutable = Commutable;
- }
+def : Pat<(extloadf32 addr:$src),
+ (CVTSS2SDrr (MOVSSrm addr:$src))>,
+ Requires<[HasSSE2, OptForSpeed]>;
- // Vector intrinsic operation, reg+mem.
- def PDrm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (V2F64Int VR128:$src1,
- (memopv2f64 addr:$src2)))]>;
-}
+// Convert doubleword to packed single/double fp
+let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
+def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtdq2ps\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
+ TB, VEX, Requires<[HasAVX]>;
+def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "vcvtdq2ps\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
+ (bitconvert (memopv2i64 addr:$src))))]>,
+ TB, VEX, Requires<[HasAVX]>;
}
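For reference, CVTDQ2PS is the plain lane-wise signed i32 -> float conversion; sketch (illustrative name, standard <emmintrin.h> intrinsic):

    #include <emmintrin.h>

    __m128 i32_to_f32(__m128i x) { return _mm_cvtepi32_ps(x); }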
-
-defm MAX : sse2_fp_binop_rm<0x5F, "max", X86fmax,
- int_x86_sse2_max_sd, int_x86_sse2_max_pd>;
-defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin,
- int_x86_sse2_min_sd, int_x86_sse2_min_pd>;
-
-//===---------------------------------------------------------------------===//
-// SSE packed FP Instructions
-
-// Move Instructions
-let neverHasSideEffects = 1 in
-def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movapd\t{$src, $dst|$dst, $src}", []>;
-let canFoldAsLoad = 1, isReMaterializable = 1 in
-def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movapd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;
-
-def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movapd\t{$src, $dst|$dst, $src}",
- [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
-
-let neverHasSideEffects = 1 in
-def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movupd\t{$src, $dst|$dst, $src}", []>;
-let canFoldAsLoad = 1 in
-def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movupd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (loadv2f64 addr:$src))]>;
-def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movupd\t{$src, $dst|$dst, $src}",
- [(store (v2f64 VR128:$src), addr:$dst)]>;
-
-// Intrinsic forms of MOVUPD load and store
-def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movupd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
-def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movupd\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
-
-let Constraints = "$src1 = $dst" in {
- let AddedComplexity = 20 in {
- def MOVLPDrm : PDI<0x12, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
- "movlpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2f64 (movlp VR128:$src1,
- (scalar_to_vector (loadf64 addr:$src2)))))]>;
- def MOVHPDrm : PDI<0x16, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
- "movhpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2f64 (movlhps VR128:$src1,
- (scalar_to_vector (loadf64 addr:$src2)))))]>;
- } // AddedComplexity
-} // Constraints = "$src1 = $dst"
-
-def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
- "movlpd\t{$src, $dst|$dst, $src}",
- [(store (f64 (vector_extract (v2f64 VR128:$src),
- (iPTR 0))), addr:$dst)]>;
-
-// v2f64 extract element 1 is always custom lowered to unpack high to low
-// and extract element 0 so the non-store version isn't too horrible.
-def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
- "movhpd\t{$src, $dst|$dst, $src}",
- [(store (f64 (vector_extract
- (v2f64 (unpckh VR128:$src, (undef))),
- (iPTR 0))), addr:$dst)]>;
-
-// SSE2 instructions without OpSize prefix
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtdq2ps\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
@@ -1662,7 +784,18 @@ def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
(bitconvert (memopv2i64 addr:$src))))]>,
TB, Requires<[HasSSE2]>;
-// SSE2 instructions with XS prefix
+// FIXME: why is the non-intrinsic version described as SSE3?
+let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
+def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
+ XS, VEX, Requires<[HasAVX]>;
+def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
+ (bitconvert (memopv2i64 addr:$src))))]>,
+ XS, VEX, Requires<[HasAVX]>;
+}
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtdq2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
@@ -1673,6 +806,33 @@ def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
(bitconvert (memopv2i64 addr:$src))))]>,
XS, Requires<[HasSSE2]>;
+// Convert packed single/double fp to doubleword
+let isAsmParserOnly = 1 in {
+def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+}
+def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
+def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
+
+let isAsmParserOnly = 1 in {
+def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
+ VEX;
+def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
+ (ins f128mem:$src),
+ "cvtps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtps2dq
+ (memop addr:$src)))]>, VEX;
+}
def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
@@ -1680,12 +840,58 @@ def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2dq
(memop addr:$src)))]>;
-// SSE2 packed instructions with XS prefix
+
+let isAsmParserOnly = 1 in { // SSE2 packed instructions with XD prefix
+def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
+ XD, VEX, Requires<[HasAVX]>;
+def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "vcvtpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
+ (memop addr:$src)))]>,
+ XD, VEX, Requires<[HasAVX]>;
+}
+def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
+ XD, Requires<[HasSSE2]>;
+def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
+ (memop addr:$src)))]>,
+ XD, Requires<[HasSSE2]>;
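For reference, the rounding and truncating packed forms differ exactly as their C intrinsics do; a minimal sketch (illustrative names, standard <emmintrin.h> intrinsics):

    #include <emmintrin.h>

    /* Both place two i32 results in the low 64 bits, upper half zeroed. */
    __m128i round_pd(__m128d x) { return _mm_cvtpd_epi32(x);  } /* cvtpd2dq: MXCSR rounding */
    __m128i trunc_pd(__m128d x) { return _mm_cvttpd_epi32(x); } /* cvttpd2dq: truncate */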
+
+
+// Convert packed single/double fp to doubleword, with truncation
+let isAsmParserOnly = 1 in { // SSE2 packed instructions with XS prefix
+def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+}
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}", []>;
+
+let isAsmParserOnly = 1 in {
+def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (int_x86_sse2_cvttps2dq VR128:$src))]>,
+ XS, VEX, Requires<[HasAVX]>;
+def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "vcvttps2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttps2dq
+ (memop addr:$src)))]>,
+ XS, VEX, Requires<[HasAVX]>;
+}
def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttps2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -1697,17 +903,18 @@ def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
(memop addr:$src)))]>,
XS, Requires<[HasSSE2]>;
-// SSE2 packed instructions with XD prefix
-def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
- XD, Requires<[HasSSE2]>;
-def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "cvtpd2dq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
- (memop addr:$src)))]>,
- XD, Requires<[HasSSE2]>;
-
+let isAsmParserOnly = 1 in {
+def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src),
+ "cvttpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
+ VEX;
+def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
+ (ins f128mem:$src),
+ "cvttpd2dq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
+ (memop addr:$src)))]>, VEX;
+}
def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvttpd2dq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
@@ -1716,12 +923,56 @@ def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
[(set VR128:$dst, (int_x86_sse2_cvttpd2dq
(memop addr:$src)))]>;
-// SSE2 instructions without OpSize prefix
+let isAsmParserOnly = 1 in {
+// The assembler can recognize rr 256-bit instructions by seeing a ymm
+// register, but the same isn't true when using memory operands instead.
+// Provide other assembly rr and rm forms to address this explicitly.
+def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+
+// XMM only
+def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
+
+// YMM only
+def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
+ "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
+}
+
+// Convert packed single to packed double
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ // SSE2 instructions without OpSize prefix
+def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
+ "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
+ "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
+ "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+}
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
+let isAsmParserOnly = 1 in {
+def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtps2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
+ VEX, Requires<[HasAVX]>;
+def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
+ "cvtps2pd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtps2pd
+ (load addr:$src)))]>,
+ VEX, Requires<[HasAVX]>;
+}
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
@@ -1732,12 +983,44 @@ def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
(load addr:$src)))]>,
TB, Requires<[HasSSE2]>;
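For reference, CVTPS2PD widens only the low two floats, which is why the memory form above reads just 64 bits (f64mem); sketch (illustrative name, standard <emmintrin.h> intrinsic):

    #include <emmintrin.h>

    __m128d f32_to_f64(__m128 x) { return _mm_cvtps_pd(x); }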
+// Convert packed double to packed single
+let isAsmParserOnly = 1 in {
+// The assembler can recognize rr 256-bit instructions by seeing a ymm
+// register, but the same isn't true when using memory operands instead.
+// Provide other assembly rr and rm forms to address this explicitly.
+def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
+
+// XMM only
+def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
+
+// YMM only
+def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
+ "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
+}
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
+let isAsmParserOnly = 1 in {
+def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtpd2ps\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>, VEX;
+def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
+ (ins f128mem:$src),
+ "cvtpd2ps\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
+ (memop addr:$src)))]>, VEX;
+}
def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtpd2ps\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
@@ -1746,269 +1029,1049 @@ def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
[(set VR128:$dst, (int_x86_sse2_cvtpd2ps
(memop addr:$src)))]>;
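For reference, CVTPD2PS narrows two doubles into the low two floats and zeroes the upper half of the destination; sketch (illustrative name, standard <emmintrin.h> intrinsic):

    #include <emmintrin.h>

    __m128 f64_to_f32(__m128d x) { return _mm_cvtpd_ps(x); }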
-// Match intrinsics which expect XMM operand(s).
-// Aliases for intrinsics
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Compare Instructions
+//===----------------------------------------------------------------------===//
+
+// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
+multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
+ string asm, string asm_alt> {
+ def rr : SIi8<0xC2, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
+ asm, []>;
+ let mayLoad = 1 in
+ def rm : SIi8<0xC2, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
+ asm, []>;
+ // Accept explicit immediate argument form instead of comparison code.
+ let isAsmParserOnly = 1 in {
+ def rr_alt : SIi8<0xC2, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
+ asm_alt, []>;
+ let mayLoad = 1 in
+ def rm_alt : SIi8<0xC2, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
+ asm_alt, []>;
+ }
+}
+
+let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
+ defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
+ "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
+ "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
+ XS, VEX_4V;
+ defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
+ "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
+ "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
+ XD, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
+ defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
+ "cmp${cc}ss\t{$src, $dst|$dst, $src}",
+ "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
+ defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
+ "cmp${cc}sd\t{$src, $dst|$dst, $src}",
+ "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
+}
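For reference, the cc operand is the 3-bit predicate baked into the mnemonic; cmpltss is cc = 1, and the low element becomes an all-ones/all-zeros mask. A minimal sketch (illustrative name, standard <emmintrin.h> intrinsic):

    #include <emmintrin.h>

    /* Low element: a[0] < b[0] ? ~0 : 0; upper three elements of 'a'
       pass through, matching the tied $src1 = $dst constraint. */
    __m128 lt_mask(__m128 a, __m128 b) { return _mm_cmplt_ss(a, b); }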
+
+multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
+ Intrinsic Int, string asm> {
+ def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
+ [(set VR128:$dst, (Int VR128:$src1,
+ VR128:$src, imm:$cc))]>;
+ def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, x86memop:$src, SSECC:$cc), asm,
+ [(set VR128:$dst, (Int VR128:$src1,
+ (load addr:$src), imm:$cc))]>;
+}
+
+// Aliases to match intrinsics which expect XMM operand(s).
+let isAsmParserOnly = 1 in {
+ defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
+ "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
+ XS, VEX_4V;
+ defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
+ "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
+ XD, VEX_4V;
+}
let Constraints = "$src1 = $dst" in {
-def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
- "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
- GR32:$src2))]>;
-def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
- "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
- (loadi32 addr:$src2)))]>;
-def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
- VR128:$src2))]>;
-def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
- "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
- (load addr:$src2)))]>;
-def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "cvtss2sd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
- VR128:$src2))]>, XS,
- Requires<[HasSSE2]>;
-def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
- "cvtss2sd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
- (load addr:$src2)))]>, XS,
- Requires<[HasSSE2]>;
+ defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
+ "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
+ defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
+ "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
+}
+
+
+// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
+multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
+ ValueType vt, X86MemOperand x86memop,
+ PatFrag ld_frag, string OpcodeStr, Domain d> {
+ def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
+ [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
+ def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
+ !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
+ [(set EFLAGS, (OpNode (vt RC:$src1),
+ (ld_frag addr:$src2)))], d>;
+}
+
+let Defs = [EFLAGS] in {
+ let isAsmParserOnly = 1 in {
+ defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
+ "ucomiss", SSEPackedSingle>, VEX;
+ defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
+ "ucomisd", SSEPackedDouble>, OpSize, VEX;
+ let Pattern = []<dag> in {
+ defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
+ "comiss", SSEPackedSingle>, VEX;
+ defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
+ "comisd", SSEPackedDouble>, OpSize, VEX;
+ }
+
+ defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
+ load, "ucomiss", SSEPackedSingle>, VEX;
+ defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
+ load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
+
+ defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
+ load, "comiss", SSEPackedSingle>, VEX;
+ defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
+ load, "comisd", SSEPackedDouble>, OpSize, VEX;
+ }
+ defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
+ "ucomiss", SSEPackedSingle>, TB;
+ defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
+ "ucomisd", SSEPackedDouble>, TB, OpSize;
+
+ let Pattern = []<dag> in {
+ defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
+ "comiss", SSEPackedSingle>, TB;
+ defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
+ "comisd", SSEPackedDouble>, TB, OpSize;
+ }
+
+ defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
+ load, "ucomiss", SSEPackedSingle>, TB;
+ defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
+ load, "ucomisd", SSEPackedDouble>, TB, OpSize;
+
+ defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
+ "comiss", SSEPackedSingle>, TB;
+ defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
+ "comisd", SSEPackedDouble>, TB, OpSize;
+} // Defs = [EFLAGS]
+
+// sse12_cmp_packed - sse 1 & 2 compare packed instructions
+multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
+ Intrinsic Int, string asm, string asm_alt,
+ Domain d> {
+ def rri : PIi8<0xC2, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
+ [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
+ def rmi : PIi8<0xC2, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc), asm,
+ [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
+ // Accept explicit immediate argument form instead of comparison code.
+ let isAsmParserOnly = 1 in {
+ def rri_alt : PIi8<0xC2, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
+ asm_alt, [], d>;
+ def rmi_alt : PIi8<0xC2, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
+ asm_alt, [], d>;
+ }
+}
+
+let isAsmParserOnly = 1 in {
+ defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
+ "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
+ "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
+ SSEPackedSingle>, VEX_4V;
+ defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
+ "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
+ "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
+ SSEPackedDouble>, OpSize, VEX_4V;
+ let Pattern = []<dag> in {
+ defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_sse_cmp_ps,
+ "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
+ "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
+ SSEPackedSingle>, VEX_4V;
+ defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_sse2_cmp_pd,
+ "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
+ "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
+ SSEPackedDouble>, OpSize, VEX_4V;
+ }
+}
+let Constraints = "$src1 = $dst" in {
+ defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
+ "cmp${cc}ps\t{$src, $dst|$dst, $src}",
+ "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
+ SSEPackedSingle>, TB;
+ defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
+ "cmp${cc}pd\t{$src, $dst|$dst, $src}",
+ "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
+ SSEPackedDouble>, TB, OpSize;
+}
+
+def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
+ (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
+def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
+ (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
+def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
+ (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
+def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
+ (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
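For reference, the packed compares yield per-lane masks that the packed logical ops consume for branchless selects; a minimal sketch (illustrative name, standard <emmintrin.h> intrinsics):

    #include <emmintrin.h>

    /* cmpltps (cc = 1) produces the mask; the and/andnot/or chain
       implements lane-wise a < b ? a : b. */
    __m128 min_ps(__m128 a, __m128 b) {
      __m128 m = _mm_cmplt_ps(a, b);
      return _mm_or_ps(_mm_and_ps(m, a), _mm_andnot_ps(m, b));
    }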
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Shuffle Instructions
+//===----------------------------------------------------------------------===//
+
+/// sse12_shuffle - sse 1 & 2 shuffle instructions
+multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
+ ValueType vt, string asm, PatFrag mem_frag,
+ Domain d, bit IsConvertibleToThreeAddress = 0> {
+ def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
+ (ins RC:$src1, x86memop:$src2, i8imm:$src3), asm,
+ [(set RC:$dst, (vt (shufp:$src3
+ RC:$src1, (mem_frag addr:$src2))))], d>;
+ let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
+ def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
+ (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
+ [(set RC:$dst,
+ (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
+}
+
+let isAsmParserOnly = 1 in {
+ defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
+ "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
+ memopv4f32, SSEPackedSingle>, VEX_4V;
+ defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
+ "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
+ memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
+ defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
+ "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
+ TB;
+ defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
+ "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ memopv2f64, SSEPackedDouble>, TB, OpSize;
+}
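For reference, the shufps immediate is four 2-bit lane selectors: the low two pick from $src1, the high two from $src2; with both sources equal this degenerates to a pshufd-style permute, which is what isConvertibleToThreeAddress exploits. Sketch (illustrative name, standard <emmintrin.h> intrinsics):

    #include <emmintrin.h>

    /* _MM_SHUFFLE(1, 0, 3, 2) = 0x4E; result = { a[2], a[3], b[0], b[1] }. */
    __m128 cross(__m128 a, __m128 b) {
      return _mm_shuffle_ps(a, b, _MM_SHUFFLE(1, 0, 3, 2));
    }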
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Unpack Instructions
+//===----------------------------------------------------------------------===//
+
+/// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
+multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
+ PatFrag mem_frag, RegisterClass RC,
+ X86MemOperand x86memop, string asm,
+ Domain d> {
+ def rr : PI<opc, MRMSrcReg,
+ (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ asm, [(set RC:$dst,
+ (vt (OpNode RC:$src1, RC:$src2)))], d>;
+ def rm : PI<opc, MRMSrcMem,
+ (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
+ asm, [(set RC:$dst,
+ (vt (OpNode RC:$src1,
+ (mem_frag addr:$src2))))], d>;
+}
+
+let AddedComplexity = 10 in {
+ let isAsmParserOnly = 1 in {
+ defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
+ VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, VEX_4V;
+ defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
+ VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, OpSize, VEX_4V;
+ defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
+ VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, VEX_4V;
+ defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
+ VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, OpSize, VEX_4V;
+
+ defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
+ VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, VEX_4V;
+ defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
+ VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, OpSize, VEX_4V;
+ defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
+ VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedSingle>, VEX_4V;
+ defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
+ VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ SSEPackedDouble>, OpSize, VEX_4V;
+ }
+
+ let Constraints = "$src1 = $dst" in {
+ defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
+ VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
+ SSEPackedSingle>, TB;
+ defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
+ VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
+ SSEPackedDouble>, TB, OpSize;
+ defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
+ VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
+ SSEPackedSingle>, TB;
+ defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
+ VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
+ SSEPackedDouble>, TB, OpSize;
+ } // Constraints = "$src1 = $dst"
+} // AddedComplexity
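For reference, the unpack instructions interleave the low or high halves of the two sources; sketch (illustrative names, standard <emmintrin.h> intrinsics):

    #include <emmintrin.h>

    __m128 lo(__m128 a, __m128 b) { return _mm_unpacklo_ps(a, b); } /* {a0,b0,a1,b1} */
    __m128 hi(__m128 a, __m128 b) { return _mm_unpackhi_ps(a, b); } /* {a2,b2,a3,b3} */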
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Extract Floating-Point Sign mask
+//===----------------------------------------------------------------------===//
+
+/// sse12_extr_sign_mask - sse 1 & 2 sign mask extraction
+multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
+ Domain d> {
+ def rr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
+ !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+ [(set GR32:$dst, (Int RC:$src))], d>;
+}
+
+// Mask creation
+defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
+ SSEPackedSingle>, TB;
+defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
+ SSEPackedDouble>, TB, OpSize;
+
+let isAsmParserOnly = 1 in {
+ defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
+ "movmskps", SSEPackedSingle>, VEX;
+ defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
+ "movmskpd", SSEPackedDouble>, OpSize,
+ VEX;
+ // FIXME: merge with multiclass above when the intrinsics come.
+ def VMOVMSKPSYrr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
+ "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
+ def VMOVMSKPDYrr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src),
+ "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
+ VEX;
+}
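For reference, MOVMSKPS gathers the four sign bits into a GPR so a packed compare can feed an ordinary branch; sketch (illustrative name, standard <emmintrin.h> intrinsics):

    #include <emmintrin.h>

    int any_lt(__m128 a, __m128 b) {
      return _mm_movemask_ps(_mm_cmplt_ps(a, b)) != 0;
    }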
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
+//===----------------------------------------------------------------------===//
+
+// Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
+// names that start with 'Fs'.
+
+// Alias instructions that map fld0 to pxor for SSE.
+let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
+ canFoldAsLoad = 1 in {
+ // FIXME: Set encoding to pseudo!
+def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
+ [(set FR32:$dst, fp32imm0)]>,
+ Requires<[HasSSE1]>, TB, OpSize;
+def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
+ [(set FR64:$dst, fpimm0)]>,
+ Requires<[HasSSE2]>, TB, OpSize;
}
-// Arithmetic
+// Alias instructions to do FR32 or FR64 reg-to-reg copies using movaps or
+// movapd. The upper bits of the register are disregarded.
+let neverHasSideEffects = 1 in {
+def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
+ "movaps\t{$src, $dst|$dst, $src}", []>;
+def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
+ "movapd\t{$src, $dst|$dst, $src}", []>;
+}
-/// sse2_fp_unop_rm - SSE2 unops come in both scalar and vector forms.
+// Alias instructions to load FR32 or FR64 from f128mem using movaps or
+// movapd. The upper bits of the register are disregarded.
+let canFoldAsLoad = 1, isReMaterializable = 1 in {
+def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
+ "movaps\t{$src, $dst|$dst, $src}",
+ [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
+def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
+ "movapd\t{$src, $dst|$dst, $src}",
+ [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Logical Instructions
+//===----------------------------------------------------------------------===//
+
+/// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
///
+multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
+ SDNode OpNode> {
+ let isAsmParserOnly = 1 in {
+ defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
+ FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
+
+ defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
+ FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
+ }
+
+ let Constraints = "$src1 = $dst" in {
+ defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
+ f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
+
+ defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
+ f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
+ }
+}
+
+// Alias bitwise logical operations using SSE logical ops on packed FP values.
+let mayLoad = 0 in {
+ defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
+ defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
+ defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
+}
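Why scalar FP logic is aliased onto the packed ops: bitwise masking of float values is how branch-free fabs/negate are usually written. A hedged C sketch (standard intrinsics assumed):

    #include <emmintrin.h>
    /* Clearing the sign bit with andps gives a branch-free fabsf. */
    __m128 fabs_ps(__m128 x) {
      const __m128 sign_clear = _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff));
      return _mm_and_ps(x, sign_clear);
    }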
+
+let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
+ defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
+
+/// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
+///
+multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, int HasPat = 0,
+ list<list<dag>> Pattern = []> {
+ let isAsmParserOnly = 1, Pattern = []<dag> in {
+ defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
+ !strconcat(OpcodeStr, "ps"), f128mem,
+ !if(HasPat, Pattern[0], // rr
+ [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
+ VR128:$src2)))]),
+ !if(HasPat, Pattern[2], // rm
+ [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
+ (memopv2i64 addr:$src2)))]), 0>,
+ VEX_4V;
+
+ defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
+ !strconcat(OpcodeStr, "pd"), f128mem,
+ !if(HasPat, Pattern[1], // rr
+ [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
+ (bc_v2i64 (v2f64
+ VR128:$src2))))]),
+ !if(HasPat, Pattern[3], // rm
+ [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
+ (memopv2i64 addr:$src2)))]), 0>,
+ OpSize, VEX_4V;
+ }
+ let Constraints = "$src1 = $dst" in {
+ defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
+ !strconcat(OpcodeStr, "ps"), f128mem,
+ !if(HasPat, Pattern[0], // rr
+ [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
+ VR128:$src2)))]),
+ !if(HasPat, Pattern[2], // rm
+ [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
+ (memopv2i64 addr:$src2)))])>, TB;
+
+ defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
+ !strconcat(OpcodeStr, "pd"), f128mem,
+ !if(HasPat, Pattern[1], // rr
+ [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
+ (bc_v2i64 (v2f64
+ VR128:$src2))))]),
+ !if(HasPat, Pattern[3], // rm
+ [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
+ (memopv2i64 addr:$src2)))])>,
+ TB, OpSize;
+ }
+}
+
+/// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical op forms
+///
+let isAsmParserOnly = 1 in {
+multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr> {
+ defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
+ !strconcat(OpcodeStr, "ps"), f256mem, [], [], 0>, VEX_4V;
+
+ defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
+ !strconcat(OpcodeStr, "pd"), f256mem, [], [], 0>, OpSize, VEX_4V;
+}
+}
+
+// AVX 256-bit packed logical op forms
+defm VAND : sse12_fp_packed_logical_y<0x54, "and">;
+defm VOR : sse12_fp_packed_logical_y<0x56, "or">;
+defm VXOR : sse12_fp_packed_logical_y<0x57, "xor">;
+let isCommutable = 0 in
+ defm VANDN : sse12_fp_packed_logical_y<0x55, "andn">;
+
+defm AND : sse12_fp_packed_logical<0x54, "and", and>;
+defm OR : sse12_fp_packed_logical<0x56, "or", or>;
+defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
+let isCommutable = 0 in
+ defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
+ // single r+r
+ [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
+ (bc_v2i64 (v4i32 immAllOnesV))),
+ VR128:$src2)))],
+ // double r+r
+ [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
+ (bc_v2i64 (v2f64 VR128:$src2))))],
+ // single r+m
+ [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
+ (bc_v2i64 (v4i32 immAllOnesV))),
+ (memopv2i64 addr:$src2))))],
+ // double r+m
+ [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
+ (memopv2i64 addr:$src2)))]]>;
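The ANDN patterns above implement (~a) & b; combined with AND and OR that is the classic branch-free select. A sketch in C (intrinsics assumed):

    #include <xmmintrin.h>
    /* (mask & t) | (~mask & f): andnps computes the ~mask & f half. */
    __m128 select_ps(__m128 mask, __m128 t, __m128 f) {
      return _mm_or_ps(_mm_and_ps(mask, t), _mm_andnot_ps(mask, f));
    }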
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Arithmetic Instructions
+//===----------------------------------------------------------------------===//
+
+/// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
+/// vector forms.
+///
+/// In addition, we also have a special variant of the scalar form here to
+/// represent the associated intrinsic operation. This form is unlike the
+/// plain scalar form, in that it takes an entire vector (instead of a scalar)
+/// and leaves the top elements unmodified (therefore these cannot be commuted).
+///
+/// These three forms can each be reg+reg or reg+mem.
+///
+multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ bit Is2Addr = 1> {
+ defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
+ OpNode, FR32, f32mem, Is2Addr>, XS;
+ defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
+ OpNode, FR64, f64mem, Is2Addr>, XD;
+}
+
+multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ bit Is2Addr = 1> {
+ let mayLoad = 0 in {
+ defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
+ v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
+ defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
+ v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
+ }
+}
+
+multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
+ SDNode OpNode> {
+ let mayLoad = 0 in {
+ defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
+ v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
+ defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
+ v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
+ }
+}
+
+multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
+ bit Is2Addr = 1> {
+ defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
+ !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
+ defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
+ !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
+}
+
+multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
+ bit Is2Addr = 1> {
+ defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
+ !strconcat(OpcodeStr, "ps"), "", "_ps", f128mem, memopv4f32,
+ SSEPackedSingle, Is2Addr>, TB;
+
+ defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
+ !strconcat(OpcodeStr, "pd"), "2", "_pd", f128mem, memopv2f64,
+ SSEPackedDouble, Is2Addr>, TB, OpSize;
+}
+
+// Binary Arithmetic instructions
+let isAsmParserOnly = 1 in {
+ defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
+ basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
+ basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
+ defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
+ basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
+ basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
+
+ let isCommutable = 0 in {
+ defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
+ basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
+ basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
+ defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
+ basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
+ basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
+ defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
+ basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
+ basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>, VEX_4V;
+ defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
+ basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
+ basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
+ }
+}
+
+let Constraints = "$src1 = $dst" in {
+ defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
+ basic_sse12_fp_binop_p<0x58, "add", fadd>,
+ basic_sse12_fp_binop_s_int<0x58, "add">;
+ defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
+ basic_sse12_fp_binop_p<0x59, "mul", fmul>,
+ basic_sse12_fp_binop_s_int<0x59, "mul">;
+
+ let isCommutable = 0 in {
+ defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
+ basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
+ basic_sse12_fp_binop_s_int<0x5C, "sub">;
+ defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
+ basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
+ basic_sse12_fp_binop_s_int<0x5E, "div">;
+ defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
+ basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
+ basic_sse12_fp_binop_s_int<0x5F, "max">,
+ basic_sse12_fp_binop_p_int<0x5F, "max">;
+ defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
+ basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
+ basic_sse12_fp_binop_s_int<0x5D, "min">,
+ basic_sse12_fp_binop_p_int<0x5D, "min">;
+ }
+}
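As context for the scalar/packed split above, a small C sketch (standard intrinsics assumed): ADDSS only replaces element 0 and passes the upper lanes through, while ADDPS operates on all four lanes.

    #include <xmmintrin.h>
    void add_forms(__m128 a, __m128 b, __m128 *s, __m128 *p) {
      *s = _mm_add_ss(a, b);   /* {a0+b0, a1, a2, a3}          */
      *p = _mm_add_ps(a, b);   /* {a0+b0, a1+b1, a2+b2, a3+b3} */
    }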
+
+/// Unop Arithmetic
+/// SSE 1 & 2 unops come in both scalar and vector forms.
+///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
-///
-/// These four forms can each have a reg or a mem operand, so there are a
-/// total of eight "instructions".
-///
-multiclass sse2_fp_unop_rm<bits<8> opc, string OpcodeStr,
- SDNode OpNode,
- Intrinsic F64Int,
- Intrinsic V2F64Int,
- bit Commutable = 0> {
- // Scalar operation, reg.
+
+/// sse1_fp_unop_s - SSE1 unops in scalar form.
+multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, Intrinsic F32Int> {
+ def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
+ !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
+ [(set FR32:$dst, (OpNode FR32:$src))]>;
+  // For scalar unary operations, fold a load into the operation only in
+  // OptForSize mode: folding eliminates an instruction, but it also
+  // eliminates the whole-register write that the separate load would have
+  // performed, so it introduces a partial-register-update stall hazard.
+ def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
+ !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
+ [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
+ Requires<[HasSSE1, OptForSize]>;
+ def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (F32Int VR128:$src))]>;
+ def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
+ !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
+}
+
+/// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
+multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, Intrinsic F32Int> {
+ def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
+ !strconcat(!strconcat("v", OpcodeStr),
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
+ !strconcat(!strconcat("v", OpcodeStr),
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, XS, Requires<[HasAVX, OptForSize]>;
+ def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(!strconcat("v", OpcodeStr),
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, ssmem:$src2),
+ !strconcat(!strconcat("v", OpcodeStr),
+ "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+}
+
+/// sse1_fp_unop_p - SSE1 unops in packed form.
+multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
+ def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
+ def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
+}
+
+/// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
+multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
+ def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
+ def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
+}
+
+/// sse1_fp_unop_p_int - SSE1 intrinsic unops in packed forms.
+multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
+ Intrinsic V4F32Int> {
+ def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (V4F32Int VR128:$src))]>;
+ def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
+}
+
+
+/// sse2_fp_unop_s - SSE2 unops in scalar form.
+multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, Intrinsic F64Int> {
def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set FR64:$dst, (OpNode FR64:$src))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, mem.
- def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
+ [(set FR64:$dst, (OpNode FR64:$src))]>;
+ // See the comments in sse1_fp_unop_s for why this is OptForSize.
+ def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set FR64:$dst, (OpNode (load addr:$src)))]>;
+ [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
+ Requires<[HasSSE2, OptForSize]>;
+ def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (F64Int VR128:$src))]>;
+ def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
+ !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
+}
+
+/// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
+multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, Intrinsic F64Int> {
+ def SDr : VSDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
+ !strconcat(OpcodeStr,
+ "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ def SDm : VSDI<opc, MRMSrcMem, (outs FR64:$dst),
+ (ins FR64:$src1, f64mem:$src2),
+ !strconcat(OpcodeStr,
+ "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
+ def SDr_Int : VSDI<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>;
+ def SDm_Int : VSDI<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, sdmem:$src2),
+ !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>;
+}
- // Vector operation, reg.
+/// sse2_fp_unop_p - SSE2 unops in vector forms.
+multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
+ SDNode OpNode> {
def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]> {
- let isCommutable = Commutable;
- }
-
- // Vector operation, mem.
+ [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
+}
- // Intrinsic operation, reg.
- def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F64Int VR128:$src))]> {
- let isCommutable = Commutable;
- }
-
- // Intrinsic operation, mem.
- def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
- !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
+/// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
+multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
+ def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
+ !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
+ def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
+ !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
+ [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
+}
- // Vector intrinsic operation, reg
+/// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
+multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
+ Intrinsic V2F64Int> {
def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (V2F64Int VR128:$src))]> {
- let isCommutable = Commutable;
- }
-
- // Vector intrinsic operation, mem
+ [(set VR128:$dst, (V2F64Int VR128:$src))]>;
def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
!strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ // Square root.
+ defm VSQRT : sse1_fp_unop_s_avx<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
+ sse2_fp_unop_s_avx<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
+ VEX_4V;
+
+ defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
+ sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
+ sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
+ sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
+ VEX;
+
+ // Reciprocal approximations. Note that these typically require refinement
+ // in order to obtain suitable precision.
+ defm VRSQRT : sse1_fp_unop_s_avx<0x52, "rsqrt", X86frsqrt,
+ int_x86_sse_rsqrt_ss>, VEX_4V;
+ defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
+ sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>, VEX;
+
+ defm VRCP : sse1_fp_unop_s_avx<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
+ VEX_4V;
+ defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
+ sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>, VEX;
+}
+
// Square root.
-defm SQRT : sse2_fp_unop_rm<0x51, "sqrt", fsqrt,
- int_x86_sse2_sqrt_sd, int_x86_sse2_sqrt_pd>;
+defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
+ sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
+ sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
+ sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
+ sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
+ sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
+
+// Reciprocal approximations. Note that these typically require refinement
+// in order to obtain suitable precision.
+defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
+ sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
+ sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
+defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
+ sse1_fp_unop_p<0x53, "rcp", X86frcp>,
+ sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
// There is no f64 version of the reciprocal approximation instructions.
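One-step Newton-Raphson refinement, as the comment above suggests; a C sketch (intrinsics assumed), since rsqrtps alone only provides about 12 bits of precision:

    #include <xmmintrin.h>
    /* x1 = x0 * (1.5 - 0.5 * a * x0 * x0) recovers near-single precision. */
    __m128 rsqrt_nr_ps(__m128 a) {
      __m128 x0   = _mm_rsqrt_ps(a);
      __m128 half = _mm_mul_ps(_mm_set1_ps(0.5f), a);
      __m128 t    = _mm_mul_ps(_mm_mul_ps(half, x0), x0);
      return _mm_mul_ps(x0, _mm_sub_ps(_mm_set1_ps(1.5f), t));
    }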
-// Logical
-let Constraints = "$src1 = $dst" in {
- let isCommutable = 1 in {
- def ANDPDrr : PDI<0x54, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "andpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (and (bc_v2i64 (v2f64 VR128:$src1)),
- (bc_v2i64 (v2f64 VR128:$src2))))]>;
- def ORPDrr : PDI<0x56, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "orpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (or (bc_v2i64 (v2f64 VR128:$src1)),
- (bc_v2i64 (v2f64 VR128:$src2))))]>;
- def XORPDrr : PDI<0x57, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "xorpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (xor (bc_v2i64 (v2f64 VR128:$src1)),
- (bc_v2i64 (v2f64 VR128:$src2))))]>;
- }
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Non-temporal stores
+//===----------------------------------------------------------------------===//
- def ANDPDrm : PDI<0x54, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "andpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (and (bc_v2i64 (v2f64 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>;
- def ORPDrm : PDI<0x56, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "orpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (or (bc_v2i64 (v2f64 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>;
- def XORPDrm : PDI<0x57, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "xorpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (xor (bc_v2i64 (v2f64 VR128:$src1)),
- (memopv2i64 addr:$src2)))]>;
- def ANDNPDrr : PDI<0x55, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "andnpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
- (bc_v2i64 (v2f64 VR128:$src2))))]>;
- def ANDNPDrm : PDI<0x55, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
- "andnpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
- (memopv2i64 addr:$src2)))]>;
+let isAsmParserOnly = 1 in {
+ def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
+ (ins i128mem:$dst, VR128:$src),
+ "movntps\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
+ def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
+ (ins i128mem:$dst, VR128:$src),
+ "movntpd\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
+
+ let ExeDomain = SSEPackedInt in
+ def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
+
+ let AddedComplexity = 400 in { // Prefer non-temporal versions
+ def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movntps\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v4f32 VR128:$src),
+ addr:$dst)]>, VEX;
+ def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movntpd\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v2f64 VR128:$src),
+ addr:$dst)]>, VEX;
+ def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v2f64 VR128:$src),
+ addr:$dst)]>, VEX;
+ let ExeDomain = SSEPackedInt in
+ def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
+ (ins f128mem:$dst, VR128:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v4f32 VR128:$src),
+ addr:$dst)]>, VEX;
+
+ def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
+ (ins f256mem:$dst, VR256:$src),
+ "movntps\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v8f32 VR256:$src),
+ addr:$dst)]>, VEX;
+ def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
+ (ins f256mem:$dst, VR256:$src),
+ "movntpd\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v4f64 VR256:$src),
+ addr:$dst)]>, VEX;
+ def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
+ (ins f256mem:$dst, VR256:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v4f64 VR256:$src),
+ addr:$dst)]>, VEX;
+ let ExeDomain = SSEPackedInt in
+ def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
+ (ins f256mem:$dst, VR256:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v8f32 VR256:$src),
+ addr:$dst)]>, VEX;
+ }
}
-let Constraints = "$src1 = $dst" in {
- def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
- "cmp${cc}pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
- VR128:$src, imm:$cc))]>;
- def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
- "cmp${cc}pd\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
- (memop addr:$src), imm:$cc))]>;
+def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
+ "movntps\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
+def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
+ "movntpd\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
+
+let ExeDomain = SSEPackedInt in
+def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
+
+let AddedComplexity = 400 in { // Prefer non-temporal versions
+def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movntps\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
+def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movntpd\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
+
+def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
+
+let ExeDomain = SSEPackedInt in
+def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
+ "movntdq\t{$src, $dst|$dst, $src}",
+ [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
+
+// There is no AVX form for instructions below this point.
+def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
+ "movnti\t{$src, $dst|$dst, $src}",
+ [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
+ TB, Requires<[HasSSE2]>;
+
+def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
+ "movnti\t{$src, $dst|$dst, $src}",
+ [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
+ TB, Requires<[HasSSE2]>;
- // Accept explicit immediate argument form instead of comparison code.
-let isAsmParserOnly = 1 in {
- def CMPPDrri_alt : PDIi8<0xC2, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src, i8imm:$src2),
- "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}", []>;
- def CMPPDrmi_alt : PDIi8<0xC2, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, i8imm:$src2),
- "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}", []>;
}
+def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
+ "movnti\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
+ TB, Requires<[HasSSE2]>;
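Usage sketch (C, standard intrinsics assumed): non-temporal stores write around the cache and are weakly ordered, so a fence is needed before they may be relied upon to be visible:

    #include <emmintrin.h>
    void fill_stream(float *dst /* 16-byte aligned */, __m128 v, int n) {
      for (int i = 0; i < n; i += 4)   /* n assumed a multiple of 4 */
        _mm_stream_ps(dst + i, v);     /* movntps */
      _mm_sfence();                    /* order the streaming stores */
    }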
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Misc Instructions (No AVX form)
+//===----------------------------------------------------------------------===//
+
+// Prefetch intrinsic.
+def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
+ "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
+def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
+ "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
+def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
+ "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
+def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
+ "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
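The four prefetch forms map onto the locality hints exposed in C (sketch, intrinsics assumed):

    #include <xmmintrin.h>
    void warm(const char *p) {
      _mm_prefetch(p, _MM_HINT_T0);    /* prefetcht0  */
      _mm_prefetch(p, _MM_HINT_T1);    /* prefetcht1  */
      _mm_prefetch(p, _MM_HINT_T2);    /* prefetcht2  */
      _mm_prefetch(p, _MM_HINT_NTA);   /* prefetchnta */
    }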
+
+// Load, store, and memory fence
+def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
+ TB, Requires<[HasSSE1]>;
+
+// Alias instructions that map zero vector to pxor / xorp* for SSE.
+// We set canFoldAsLoad because this can be converted to a constant-pool
+// load of an all-zeros value if folding it would be beneficial.
+// FIXME: Change encoding to pseudo!
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
+ isCodeGenOnly = 1 in {
+def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v4f32 immAllZerosV))]>;
+def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v2f64 immAllZerosV))]>;
+let ExeDomain = SSEPackedInt in
+def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v4i32 immAllZerosV))]>;
}
-def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
- (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
-def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
- (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
-// Shuffle and unpack instructions
-let Constraints = "$src1 = $dst" in {
- def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR128:$dst,
- (v2f64 (shufp:$src3 VR128:$src1, VR128:$src2)))]>;
- def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1,
- f128mem:$src2, i8imm:$src3),
- "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR128:$dst,
- (v2f64 (shufp:$src3
- VR128:$src1, (memopv2f64 addr:$src2))))]>;
+def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
+def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
+def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
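For context, a minimal C sketch (intrinsics assumed) of the zero idiom these aliases expand to, which is also dependency-breaking on common microarchitectures:

    #include <emmintrin.h>
    __m128i zero128(void) {
      return _mm_setzero_si128();   /* typically lowers to pxor xmm, xmm */
    }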
- let AddedComplexity = 10 in {
- def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "unpckhpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2f64 (unpckh VR128:$src1, VR128:$src2)))]>;
- def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "unpckhpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2f64 (unpckh VR128:$src1,
- (memopv2f64 addr:$src2))))]>;
-
- def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "unpcklpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v2f64 (unpckl VR128:$src1, VR128:$src2)))]>;
- def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "unpcklpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (unpckl VR128:$src1, (memopv2f64 addr:$src2)))]>;
- } // AddedComplexity
-} // Constraints = "$src1 = $dst"
+def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
+ (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
+
+//===----------------------------------------------------------------------===//
+// SSE 1 & 2 - Load/Store MXCSR register
+//===----------------------------------------------------------------------===//
+let isAsmParserOnly = 1 in {
+ def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
+ "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
+ def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
+ "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
+}
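A common use of LDMXCSR/STMXCSR from C, sketched with the standard control-register intrinsics (the bit positions follow the Intel manuals and are assumptions here, not part of this patch):

    #include <xmmintrin.h>
    void enable_ftz_daz(void) {
      /* FTZ is MXCSR bit 15, DAZ is bit 6. */
      _mm_setcsr(_mm_getcsr() | 0x8040);   /* stmxcsr / ldmxcsr pair */
    }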
+
+def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
+ "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
+def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
+ "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
//===---------------------------------------------------------------------===//
-// SSE integer instructions
-let ExeDomain = SSEPackedInt in {
+// SSE2 - Move Aligned/Unaligned Packed Integer Instructions
+//===---------------------------------------------------------------------===//
+let ExeDomain = SSEPackedInt in { // SSE integer instructions
+
+let isAsmParserOnly = 1 in {
+ let neverHasSideEffects = 1 in
+ def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
+ def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
+
+ let canFoldAsLoad = 1, mayLoad = 1 in {
+ def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "movdqa\t{$src, $dst|$dst, $src}",
+ [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>,
+ VEX;
+ def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "vmovdqu\t{$src, $dst|$dst, $src}",
+ [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
+ XS, VEX, Requires<[HasAVX]>;
+ }
+
+ let mayStore = 1 in {
+ def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
+ (ins i128mem:$dst, VR128:$src),
+ "movdqa\t{$src, $dst|$dst, $src}",
+ [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>, VEX;
+ def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
+ "vmovdqu\t{$src, $dst|$dst, $src}",
+ [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
+ XS, VEX, Requires<[HasAVX]>;
+ }
+}
-// Move Instructions
let neverHasSideEffects = 1 in
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movdqa\t{$src, $dst|$dst, $src}", []>;
-let canFoldAsLoad = 1, mayLoad = 1 in
+
+let canFoldAsLoad = 1, mayLoad = 1 in {
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqa\t{$src, $dst|$dst, $src}",
[/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
-let mayStore = 1 in
-def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
- "movdqa\t{$src, $dst|$dst, $src}",
- [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
-let canFoldAsLoad = 1, mayLoad = 1 in
def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqu\t{$src, $dst|$dst, $src}",
[/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
XS, Requires<[HasSSE2]>;
-let mayStore = 1 in
+}
+
+let mayStore = 1 in {
+def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
+ "movdqa\t{$src, $dst|$dst, $src}",
+ [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
"movdqu\t{$src, $dst|$dst, $src}",
[/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
XS, Requires<[HasSSE2]>;
+}
// Intrinsic forms of MOVDQU load and store
+let isAsmParserOnly = 1 in {
+let canFoldAsLoad = 1 in
+def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "vmovdqu\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
+ XS, VEX, Requires<[HasAVX]>;
+def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
+ "vmovdqu\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
+ XS, VEX, Requires<[HasAVX]>;
+}
+
let canFoldAsLoad = 1 in
def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movdqu\t{$src, $dst|$dst, $src}",
@@ -2019,55 +2082,72 @@ def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
[(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
XS, Requires<[HasSSE2]>;
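Aligned vs. unaligned integer moves as seen from C (sketch, intrinsics assumed): movdqa faults on a misaligned address, movdqu does not.

    #include <emmintrin.h>
    __m128i load_aligned(const void *p)     /* requires p % 16 == 0 */
    { return _mm_load_si128((const __m128i *)p); }    /* movdqa */
    __m128i load_unaligned(const void *p)
    { return _mm_loadu_si128((const __m128i *)p); }   /* movdqu */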
-let Constraints = "$src1 = $dst" in {
+} // ExeDomain = SSEPackedInt
+
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Arithmetic Instructions
+//===---------------------------------------------------------------------===//
+
+let ExeDomain = SSEPackedInt in { // SSE integer instructions
multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
- bit Commutable = 0> {
+ bit IsCommutable = 0, bit Is2Addr = 1> {
+ let isCommutable = IsCommutable in
def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> {
- let isCommutable = Commutable;
- }
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId VR128:$src1,
- (bitconvert (memopv2i64
- addr:$src2))))]>;
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId VR128:$src1,
+ (bitconvert (memopv2i64 addr:$src2))))]>;
}
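The Is2Addr switch above selects between the destructive two-operand SSE syntax and the non-destructive three-operand AVX syntax; the same C source picks up either form (sketch, intrinsics assumed):

    #include <emmintrin.h>
    /* Compiles to the two-address paddb under SSE2, and to the
       three-address vpaddb (no input clobbered) under AVX. */
    __m128i add_bytes(__m128i a, __m128i b) { return _mm_add_epi8(a, b); }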
multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
- string OpcodeStr,
- Intrinsic IntId, Intrinsic IntId2> {
+ string OpcodeStr, Intrinsic IntId,
+ Intrinsic IntId2, bit Is2Addr = 1> {
def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId VR128:$src1,
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId VR128:$src1,
(bitconvert (memopv2i64 addr:$src2))))]>;
def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
- (ins VR128:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
+ (ins VR128:$src1, i32i8imm:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
}
/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
- ValueType OpVT, bit Commutable = 0> {
+ ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
+ let isCommutable = IsCommutable in
def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> {
- let isCommutable = Commutable;
- }
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
(bitconvert (memopv2i64 addr:$src2)))))]>;
}
@@ -2077,64 +2157,177 @@ multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
/// to collapse (bitconvert VT to VT) into its operand.
///
multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
- bit Commutable = 0> {
+ bit IsCommutable = 0, bit Is2Addr = 1> {
+ let isCommutable = IsCommutable in
def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> {
- let isCommutable = Commutable;
- }
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode VR128:$src1,
- (memopv2i64 addr:$src2)))]>;
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
}
-} // Constraints = "$src1 = $dst"
} // ExeDomain = SSEPackedInt
// 128-bit Integer Arithmetic
-defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
-defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
-defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
-defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
-
-defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
-defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
-defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
-defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
+defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
+defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
+defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
+defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
+defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
+defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
+defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
+defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
+
+// Intrinsic forms
+defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
+ VEX_4V;
+defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
+ VEX_4V;
+defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
+ VEX_4V;
+defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
+ VEX_4V;
+defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
+ VEX_4V;
+defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
+ VEX_4V;
+defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
+ VEX_4V;
+defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
+ VEX_4V;
+defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
+ VEX_4V;
+defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
+ VEX_4V;
+defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
+ VEX_4V;
+defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
+ VEX_4V;
+defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
+ VEX_4V;
+defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
+ VEX_4V;
+defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
+ VEX_4V;
+defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
+ VEX_4V;
+defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
+ VEX_4V;
+defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
+ VEX_4V;
+defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
+ VEX_4V;
+}
+let Constraints = "$src1 = $dst" in {
+defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
+defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
+defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
+defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
+defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
+// Intrinsic forms
defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
-
-defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
-
+defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
+defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
+defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
+defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
-defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w , 1>;
+defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
-
defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
+defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
+defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
+defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
+defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
+defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
+defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
+defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
-defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
-defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
+} // Constraints = "$src1 = $dst"
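Wrapping vs. saturating byte arithmetic, sketched in C (intrinsics assumed): paddb wraps modulo 256 while paddusb clamps at 255, which is usually what pixel math wants.

    #include <emmintrin.h>
    __m128i brighten(__m128i pixels, __m128i delta) {
      return _mm_adds_epu8(pixels, delta);   /* paddusb: clamps at 255 */
    }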
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Logical Instructions
+//===---------------------------------------------------------------------===//
-defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
-defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
-defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
-defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
-defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
+ int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
+ VEX_4V;
+defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
+ int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
+ VEX_4V;
+defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
+ int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
+ VEX_4V;
+
+defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
+ int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
+ VEX_4V;
+defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
+ int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
+ VEX_4V;
+defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
+ int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
+ VEX_4V;
+
+defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
+ int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
+ VEX_4V;
+defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
+ int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
+ VEX_4V;
+
+defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
+defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
+defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
+let ExeDomain = SSEPackedInt in {
+ let neverHasSideEffects = 1 in {
+ // 128-bit logical shifts.
+ def VPSLLDQri : PDIi8<0x73, MRM7r,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ VEX_4V;
+ def VPSRLDQri : PDIi8<0x73, MRM3r,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
+ VEX_4V;
+ // PSRADQri doesn't exist in SSE[1-3].
+ }
+ def VPANDNrr : PDI<0xDF, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
+ VR128:$src2)))]>, VEX_4V;
+
+ def VPANDNrm : PDI<0xDF, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
+ "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
+ (memopv2i64 addr:$src2))))]>,
+ VEX_4V;
+}
+}
+let Constraints = "$src1 = $dst" in {
defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
@@ -2154,17 +2347,34 @@ defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
-// 128-bit logical shifts.
-let Constraints = "$src1 = $dst", neverHasSideEffects = 1,
- ExeDomain = SSEPackedInt in {
- def PSLLDQri : PDIi8<0x73, MRM7r,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- "pslldq\t{$src2, $dst|$dst, $src2}", []>;
- def PSRLDQri : PDIi8<0x73, MRM3r,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- "psrldq\t{$src2, $dst|$dst, $src2}", []>;
- // PSRADQri doesn't exist in SSE[1-3].
+defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
+defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
+defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
+
+let ExeDomain = SSEPackedInt in {
+ let neverHasSideEffects = 1 in {
+ // 128-bit logical shifts.
+ def PSLLDQri : PDIi8<0x73, MRM7r,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "pslldq\t{$src2, $dst|$dst, $src2}", []>;
+ def PSRLDQri : PDIi8<0x73, MRM3r,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "psrldq\t{$src2, $dst|$dst, $src2}", []>;
+ // PSRADQri doesn't exist in SSE[1-3].
+ }
+ def PANDNrr : PDI<0xDF, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "pandn\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
+ VR128:$src2)))]>;
+
+ def PANDNrm : PDI<0xDF, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
+ "pandn\t{$src2, $dst|$dst, $src2}",
+ [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
+ (memopv2i64 addr:$src2))))]>;
}
+} // Constraints = "$src1 = $dst"
let Predicates = [HasSSE2] in {
def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
@@ -2185,32 +2395,33 @@ let Predicates = [HasSSE2] in {
(v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
}
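Note the two shift granularities involved here (C sketch, intrinsics assumed): pslldq shifts the whole 128-bit register by bytes, while pslld shifts each 32-bit lane by bits.

    #include <emmintrin.h>
    void shifts(__m128i v, __m128i *by_bytes, __m128i *by_bits) {
      *by_bytes = _mm_slli_si128(v, 4);   /* pslldq: 4 bytes */
      *by_bits  = _mm_slli_epi32(v, 4);   /* pslld:  4 bits  */
    }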
-// Logical
-defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
-defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or , 1>;
-defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
-
-let Constraints = "$src1 = $dst", ExeDomain = SSEPackedInt in {
- def PANDNrr : PDI<0xDF, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "pandn\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
- VR128:$src2)))]>;
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Comparison Instructions
+//===---------------------------------------------------------------------===//
- def PANDNrm : PDI<0xDF, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "pandn\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
- (memopv2i64 addr:$src2))))]>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
+ 0>, VEX_4V;
+ defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
+ 0>, VEX_4V;
+ defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
+ 0>, VEX_4V;
+ defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
+ 0>, VEX_4V;
+ defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
+ 0>, VEX_4V;
+ defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
+ 0>, VEX_4V;
}
-// SSE2 Integer comparison
-defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b>;
-defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w>;
-defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d>;
-defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
-defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
-defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
+let Constraints = "$src1 = $dst" in {
+ defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
+ defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
+ defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
+ defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
+ defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
+ defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
+} // Constraints = "$src1 = $dst"
def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
(PCMPEQBrr VR128:$src1, VR128:$src2)>;
@@ -2238,94 +2449,147 @@ def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
(PCMPGTDrm VR128:$src1, addr:$src2)>;
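A classic consumer of pcmpeqb is the memchr/strlen inner loop, where the compare mask is reduced with pmovmskb; a hedged C sketch (GCC/Clang __builtin_ctz assumed):

    #include <emmintrin.h>
    int find_byte(__m128i chunk, char c) {
      __m128i eq = _mm_cmpeq_epi8(chunk, _mm_set1_epi8(c));   /* pcmpeqb  */
      int mask = _mm_movemask_epi8(eq);                       /* pmovmskb */
      return mask ? __builtin_ctz(mask) : -1;   /* index of first match */
    }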
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Pack Instructions
+//===---------------------------------------------------------------------===//
-// Pack instructions
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
+ 0, 0>, VEX_4V;
+defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
+ 0, 0>, VEX_4V;
+defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
+ 0, 0>, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
+} // Constraints = "$src1 = $dst"
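The pack instructions narrow with saturation rather than truncation; a brief sketch (intrinsics assumed):

    #include <emmintrin.h>
    /* Narrow two vectors of 8 x i16 to one vector of 16 x i8,
       clamping each value to [-128, 127]. */
    __m128i narrow16to8(__m128i lo, __m128i hi) {
      return _mm_packs_epi16(lo, hi);   /* packsswb */
    }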
+
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Shuffle Instructions
+//===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt in {
+multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
+ PatFrag bc_frag> {
+def ri : Ii8<0x70, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
+ (undef))))]>;
+def mi : Ii8<0x70, MRMSrcMem,
+ (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (vt (pshuf_frag:$src2
+ (bc_frag (memopv2i64 addr:$src1)),
+ (undef))))]>;
+}
+} // ExeDomain = SSEPackedInt
-// Shuffle and unpack instructions
-let AddedComplexity = 5 in {
-def PSHUFDri : PDIi8<0x70, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
- "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v4i32 (pshufd:$src2
- VR128:$src1, (undef))))]>;
-def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
- (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
- "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v4i32 (pshufd:$src2
- (bc_v4i32 (memopv2i64 addr:$src1)),
- (undef))))]>;
-}
-
-// SSE2 with ImmT == Imm8 and XS prefix.
-def PSHUFHWri : Ii8<0x70, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
- "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v8i16 (pshufhw:$src2 VR128:$src1,
- (undef))))]>,
- XS, Requires<[HasSSE2]>;
-def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
- (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
- "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v8i16 (pshufhw:$src2
- (bc_v8i16 (memopv2i64 addr:$src1)),
- (undef))))]>,
- XS, Requires<[HasSSE2]>;
-
-// SSE2 with ImmT == Imm8 and XD prefix.
-def PSHUFLWri : Ii8<0x70, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
- "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v8i16 (pshuflw:$src2 VR128:$src1,
- (undef))))]>,
- XD, Requires<[HasSSE2]>;
-def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
- (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
- "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- [(set VR128:$dst, (v8i16 (pshuflw:$src2
- (bc_v8i16 (memopv2i64 addr:$src1)),
- (undef))))]>,
- XD, Requires<[HasSSE2]>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ let AddedComplexity = 5 in
+ defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
+ VEX;
+ // SSE2 with ImmT == Imm8 and XS prefix.
+ defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
+ VEX;
-let Constraints = "$src1 = $dst" in {
- def PUNPCKLBWrr : PDI<0x60, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpcklbw\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v16i8 (unpckl VR128:$src1, VR128:$src2)))]>;
- def PUNPCKLBWrm : PDI<0x60, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpcklbw\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (unpckl VR128:$src1,
- (bc_v16i8 (memopv2i64 addr:$src2))))]>;
- def PUNPCKLWDrr : PDI<0x61, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpcklwd\t{$src2, $dst|$dst, $src2}",
+ // SSE2 with ImmT == Imm8 and XD prefix.
+ defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
+ VEX;
+}
+
+let Predicates = [HasSSE2] in {
+ let AddedComplexity = 5 in
+ defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
+
+ // SSE2 with ImmT == Imm8 and XS prefix.
+ defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
+
+ // SSE2 with ImmT == Imm8 and XD prefix.
+ defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
+}
+
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Unpack Instructions
+//===---------------------------------------------------------------------===//
+
+let ExeDomain = SSEPackedInt in {
+multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
+ PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
+ def rr : PDI<opc, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
+ def rm : PDI<opc, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (unp_frag VR128:$src1,
+ (bc_frag (memopv2i64
+ addr:$src2))))]>;
+}
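+// (Editorial sketch of the Is2Addr mechanism used throughout this patch:
+// Is2Addr = 1 makes the !if above select the two-address SSE syntax, e.g.
+//   punpcklbw {$src2, $dst|$dst, $src2}
+// while Is2Addr = 0 selects the three-address AVX syntax, e.g.
+//   vpunpcklbw {$src2, $src1, $dst|$dst, $src1, $src2}.)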
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
+ 0>, VEX_4V;
+ defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
+ 0>, VEX_4V;
+ defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
+ 0>, VEX_4V;
+
+ /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
+ /// knew to collapse (bitconvert VT to VT) into its operand.
+ def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v8i16 (unpckl VR128:$src1, VR128:$src2)))]>;
- def PUNPCKLWDrm : PDI<0x61, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpcklwd\t{$src2, $dst|$dst, $src2}",
+ (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
+ def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
+ "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (unpckl VR128:$src1,
- (bc_v8i16 (memopv2i64 addr:$src2))))]>;
- def PUNPCKLDQrr : PDI<0x62, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpckldq\t{$src2, $dst|$dst, $src2}",
+ (v2i64 (unpckl VR128:$src1,
+ (memopv2i64 addr:$src2))))]>, VEX_4V;
+
+ defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
+ 0>, VEX_4V;
+ defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
+ 0>, VEX_4V;
+ defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
+ 0>, VEX_4V;
+
+ /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
+ /// knew to collapse (bitconvert VT to VT) into its operand.
+ def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (v4i32 (unpckl VR128:$src1, VR128:$src2)))]>;
- def PUNPCKLDQrm : PDI<0x62, MRMSrcMem,
+ (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
+ def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
(outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpckldq\t{$src2, $dst|$dst, $src2}",
+ "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set VR128:$dst,
- (unpckl VR128:$src1,
- (bc_v4i32 (memopv2i64 addr:$src2))))]>;
+ (v2i64 (unpckh VR128:$src1,
+ (memopv2i64 addr:$src2))))]>, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
+ defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
+ defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
+ defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
+
+ /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
+ /// knew to collapse (bitconvert VT to VT) into its operand.
def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"punpcklqdq\t{$src2, $dst|$dst, $src2}",
@@ -2338,39 +2602,12 @@ let Constraints = "$src1 = $dst" in {
(v2i64 (unpckl VR128:$src1,
(memopv2i64 addr:$src2))))]>;
- def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpckhbw\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v16i8 (unpckh VR128:$src1, VR128:$src2)))]>;
- def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpckhbw\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (unpckh VR128:$src1,
- (bc_v16i8 (memopv2i64 addr:$src2))))]>;
- def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpckhwd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v8i16 (unpckh VR128:$src1, VR128:$src2)))]>;
- def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpckhwd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (unpckh VR128:$src1,
- (bc_v8i16 (memopv2i64 addr:$src2))))]>;
- def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "punpckhdq\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (v4i32 (unpckh VR128:$src1, VR128:$src2)))]>;
- def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
- "punpckhdq\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
- (unpckh VR128:$src1,
- (bc_v4i32 (memopv2i64 addr:$src2))))]>;
+ defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
+ defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
+ defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
+
+ /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
+ /// knew to collapse (bitconvert VT to VT) into its operand.
def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"punpckhqdq\t{$src2, $dst|$dst, $src2}",
@@ -2384,102 +2621,117 @@ let Constraints = "$src1 = $dst" in {
(memopv2i64 addr:$src2))))]>;
}
-// Extract / Insert
+} // ExeDomain = SSEPackedInt
+
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Integer Extract and Insert
+//===---------------------------------------------------------------------===//
+
+let ExeDomain = SSEPackedInt in {
+multiclass sse2_pinsrw<bit Is2Addr = 1> {
+ def rri : Ii8<0xC4, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1,
+ GR32:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
+ def rmi : Ii8<0xC4, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1,
+ i16mem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
+ "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [(set VR128:$dst,
+ (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
+ imm:$src3))]>;
+}
+
+// Extract
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
+ (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
+ imm:$src2))]>, OpSize, VEX;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
(outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
"pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
imm:$src2))]>;
-let Constraints = "$src1 = $dst" in {
- def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1,
- GR32:$src2, i32i8imm:$src3),
- "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR128:$dst,
- (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
- def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1,
- i16mem:$src2, i32i8imm:$src3),
- "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- [(set VR128:$dst,
- (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
- imm:$src3))]>;
-}
-// Mask creation
-def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
- "pmovmskb\t{$src, $dst|$dst, $src}",
- [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
+// Insert
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+  defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
-// Conditional store
-let Uses = [EDI] in
-def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
- "maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
-
-let Uses = [RDI] in
-def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
- "maskmovdqu\t{$mask, $src|$src, $mask}",
- [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
+let Constraints = "$src1 = $dst" in
+  defm PINSRW : sse2_pinsrw, TB, OpSize;
} // ExeDomain = SSEPackedInt
-// Non-temporal stores
-def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
- "movntpd\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
-let ExeDomain = SSEPackedInt in
-def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movntdq\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
-def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
- "movnti\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
- TB, Requires<[HasSSE2]>;
+//===---------------------------------------------------------------------===//
+// SSE2 - Packed Mask Creation
+//===---------------------------------------------------------------------===//
-let AddedComplexity = 400 in { // Prefer non-temporal versions
-def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movntpd\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
+let ExeDomain = SSEPackedInt in {
-let ExeDomain = SSEPackedInt in
-def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
- "movntdq\t{$src, $dst|$dst, $src}",
- [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
-}
+let isAsmParserOnly = 1 in
+def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
+ "pmovmskb\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
+def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
+ "pmovmskb\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
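+// For reference: pmovmskb gathers the most significant bit of each source
+// byte into the low 16 bits of the GR32 destination and zeroes the rest,
+// e.g. "pmovmskb %xmm0, %eax" sets eax bit i to bit 7 of xmm0 byte i.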
-// Flush cache
-def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
- "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
- TB, Requires<[HasSSE2]>;
+} // ExeDomain = SSEPackedInt
-// Load, store, and memory fence
-def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
- "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
-def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
- "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
+//===---------------------------------------------------------------------===//
+// SSE2 - Conditional Store
+//===---------------------------------------------------------------------===//
-// Pause. This "instruction" is encoded as "rep; nop", so even though it
-// was introduced with SSE2, it's backward compatible.
-def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
+let ExeDomain = SSEPackedInt in {
-//TODO: custom lower this so as to never even generate the noop
-def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
- (i8 0)), (NOOP)>;
-def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
-def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
-def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
- (i8 1)), (MFENCE)>;
+let isAsmParserOnly = 1 in {
+let Uses = [EDI] in
+def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
+ (ins VR128:$src, VR128:$mask),
+ "maskmovdqu\t{$mask, $src|$src, $mask}",
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
+let Uses = [RDI] in
+def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
+ (ins VR128:$src, VR128:$mask),
+ "maskmovdqu\t{$mask, $src|$src, $mask}",
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
+}
-// Alias instructions that map zero vector to pxor / xorp* for sse.
-// We set canFoldAsLoad because this can be converted to a constant-pool
-// load of an all-ones value if folding it would be beneficial.
-let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
- isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
- // FIXME: Change encoding to pseudo.
- def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
- [(set VR128:$dst, (v4i32 immAllOnesV))]>;
+let Uses = [EDI] in
+def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
+ "maskmovdqu\t{$mask, $src|$src, $mask}",
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
+let Uses = [RDI] in
+def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
+ "maskmovdqu\t{$mask, $src|$src, $mask}",
+ [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
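+// For reference: maskmovdqu stores only the bytes of $src whose matching
+// mask byte has its high bit set, to the address in EDI/RDI (hence the
+// Uses above); the store carries a non-temporal hint.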
+
+} // ExeDomain = SSEPackedInt
+
+//===---------------------------------------------------------------------===//
+// SSE2 - Move Doubleword
+//===---------------------------------------------------------------------===//
+// Move Int Doubleword to Packed Double Int
+let isAsmParserOnly = 1 in {
+def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
+def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
+ VEX;
+}
def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -2489,6 +2741,18 @@ def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
[(set VR128:$dst,
(v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
+
+// Move Int Doubleword to Single Scalar
+let isAsmParserOnly = 1 in {
+def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
+
+def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
+ VEX;
+}
def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (bitconvert GR32:$src))]>;
@@ -2497,20 +2761,18 @@ def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
-// SSE2 instructions with XS prefix
-def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,
- (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
- Requires<[HasSSE2]>;
-def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(store (i64 (vector_extract (v2i64 VR128:$src),
- (iPTR 0))), addr:$dst)]>;
-
-def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
- (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
-
+// Move Packed Doubleword Int to Doubleword Int
+let isAsmParserOnly = 1 in {
+def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
+ (iPTR 0)))]>, VEX;
+def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
+ (ins i32mem:$dst, VR128:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(store (i32 (vector_extract (v4i32 VR128:$src),
+ (iPTR 0))), addr:$dst)]>, VEX;
+}
def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
@@ -2520,6 +2782,15 @@ def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
[(store (i32 (vector_extract (v4i32 VR128:$src),
(iPTR 0))), addr:$dst)]>;
+// Move Scalar Single to Doubleword Int
+let isAsmParserOnly = 1 in {
+def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
+def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
+}
def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (bitconvert FR32:$src))]>;
@@ -2527,25 +2798,38 @@ def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
-// Store / copy lower 64-bits of a XMM register.
-def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
- "movq\t{$src, $dst|$dst, $src}",
- [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
-
// movd / movq to XMM register zero-extends
+let AddedComplexity = 15, isAsmParserOnly = 1 in {
+def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (v4i32 (X86vzmovl
+ (v4i32 (scalar_to_vector GR32:$src)))))]>,
+ VEX;
+def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
+ "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
+ [(set VR128:$dst, (v2i64 (X86vzmovl
+ (v2i64 (scalar_to_vector GR64:$src)))))]>,
+ VEX, VEX_W;
+}
let AddedComplexity = 15 in {
def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v4i32 (X86vzmovl
(v4i32 (scalar_to_vector GR32:$src)))))]>;
-// This is X86-64 only.
def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
+ "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
[(set VR128:$dst, (v2i64 (X86vzmovl
(v2i64 (scalar_to_vector GR64:$src)))))]>;
}
let AddedComplexity = 20 in {
+let isAsmParserOnly = 1 in
+def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
+ "movd\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
+ (loadi32 addr:$src))))))]>,
+ VEX;
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
@@ -2558,13 +2842,63 @@ def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
(MOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
(MOVZDI2PDIrm addr:$src)>;
+}
+//===---------------------------------------------------------------------===//
+// SSE2 - Move Quadword
+//===---------------------------------------------------------------------===//
+
+// Move Quadword Int to Packed Quadword Int
+let isAsmParserOnly = 1 in
+def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
+ VEX, Requires<[HasAVX]>;
+def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
+ Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
+
+// Move Packed Quadword Int to Quadword Int
+let isAsmParserOnly = 1 in
+def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(store (i64 (vector_extract (v2i64 VR128:$src),
+ (iPTR 0))), addr:$dst)]>, VEX;
+def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(store (i64 (vector_extract (v2i64 VR128:$src),
+ (iPTR 0))), addr:$dst)]>;
+
+def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
+ (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
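+// i.e. extracting element 0 of a v2f64 needs no instruction at all; it is
+// modeled as a plain subregister copy (sub_sd).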
+
+// Store / copy the lower 64 bits of an XMM register.
+let isAsmParserOnly = 1 in
+def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
+def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}",
+ [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
+
+let AddedComplexity = 20, isAsmParserOnly = 1 in
+def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst,
+ (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
+ (loadi64 addr:$src))))))]>,
+ XS, VEX, Requires<[HasAVX]>;
+
+let AddedComplexity = 20 in {
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (X86vzmovl (v2i64 (scalar_to_vector
- (loadi64 addr:$src))))))]>, XS,
- Requires<[HasSSE2]>;
+ (loadi64 addr:$src))))))]>,
+ XS, Requires<[HasSSE2]>;
def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
(MOVZQI2PQIrm addr:$src)>;
@@ -2575,12 +2909,23 @@ def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
// Moving from XMM to XMM while clearing the upper 64 bits. Note: there is a
// bug in the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
+let isAsmParserOnly = 1, AddedComplexity = 15 in
+def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
+ XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
XS, Requires<[HasSSE2]>;
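+// e.g. after "movq %xmm1, %xmm0": xmm0[63:0] = xmm1[63:0] and
+// xmm0[127:64] = 0, which is exactly the X86vzmovl pattern above.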
+let AddedComplexity = 20, isAsmParserOnly = 1 in
+def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "vmovq\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (v2i64 (X86vzmovl
+ (loadv2i64 addr:$src))))]>,
+ XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 20 in {
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movq\t{$src, $dst|$dst, $src}",
@@ -2592,49 +2937,163 @@ def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
(MOVZPQILo2PQIrm addr:$src)>;
}
+// Instructions to match in the assembler
+let isAsmParserOnly = 1 in {
+// These instructions are in fact aliases of movd with a 64-bit operand
+def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
+ "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
+def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
+ "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
+}
+
// Instructions for the disassembler
// xr = XMM register
// xm = mem64
+let isAsmParserOnly = 1 in
+def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"movq\t{$src, $dst|$dst, $src}", []>, XS;
//===---------------------------------------------------------------------===//
-// SSE3 Instructions
+// SSE2 - Misc Instructions
//===---------------------------------------------------------------------===//
-// Move Instructions
-def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movshdup\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v4f32 (movshdup
- VR128:$src, (undef))))]>;
-def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movshdup\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (movshdup
- (memopv4f32 addr:$src), (undef)))]>;
+// Flush cache
+def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
+ "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
+ TB, Requires<[HasSSE2]>;
+
+// Load, store, and memory fence
+def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
+ "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
+def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
+ "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
+
+// Pause. This "instruction" is encoded as "rep; nop", so even though it
+// was introduced with SSE2, it's backward compatible.
+def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
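+// (pause encodes as the byte sequence F3 90; pre-SSE2 processors simply
+// execute it as a plain nop.)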
+
+//TODO: custom lower this so as to never even generate the noop
+def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
+ (i8 0)), (NOOP)>;
+def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
+def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
+def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm),
+ (i8 1)), (MFENCE)>;
+
+// Alias instruction that maps an all-ones vector to pcmpeqd for SSE.
+// We set canFoldAsLoad because this can be converted to a constant-pool
+// load of an all-ones value if folding it would be beneficial.
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
+ isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
+ // FIXME: Change encoding to pseudo.
+ def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
+ [(set VR128:$dst, (v4i32 immAllOnesV))]>;
-def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movsldup\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (v4f32 (movsldup
+//===---------------------------------------------------------------------===//
+// SSE3 - Conversion Instructions
+//===---------------------------------------------------------------------===//
+
+// Convert Packed Double FP to Packed DW Integers
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+// The assembler can recognize rr 256-bit instructions by seeing a ymm
+// register, but it cannot do the same when a memory operand is used.
+// Provide explicitly suffixed rr and rm assembly forms to disambiguate.
+def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
+
+// XMM only
+def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
+
+// YMM only
+def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
+ "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
+ "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
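+// For example, "vcvtpd2dq %ymm0, %xmm0" is unambiguous, but with a memory
+// source, "vcvtpd2dq (%rax), %xmm0", the assembler cannot tell a 128-bit
+// load from a 256-bit one, hence the suffixed forms above.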
+}
+
+def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
+def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
+
+// Convert Packed DW Integers to Packed Double FP
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
+ "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
+}
+
+def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
+def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
+
+//===---------------------------------------------------------------------===//
+// SSE3 - Move Instructions
+//===---------------------------------------------------------------------===//
+
+// Replicate Single FP
+multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
+def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (v4f32 (rep_frag
VR128:$src, (undef))))]>;
-def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
- "movsldup\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (movsldup
+def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (rep_frag
(memopv4f32 addr:$src), (undef)))]>;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
+defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
+}
+defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
+defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
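+// movshdup duplicates the odd lanes (dst = src[1,1,3,3]) and movsldup the
+// even lanes (dst = src[0,0,2,2]); see the shuffle patterns further below.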
-def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
- "movddup\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
-def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
- "movddup\t{$src, $dst|$dst, $src}",
+// Replicate Double FP
+multiclass sse3_replicate_dfp<string OpcodeStr> {
+def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
+def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
(v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
(undef))))]>;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
+defm MOVDDUP : sse3_replicate_dfp<"movddup">;
+
+// Move Unaligned Integer
+let isAsmParserOnly = 1 in
+ def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "vlddqu\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
+def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "lddqu\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
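+// lddqu is an unaligned 128-bit load that may internally use wider aligned
+// accesses, which can make it cheaper than movdqu for loads that split a
+// cache line, at least on some microarchitectures.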
def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
(undef)),
(MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
+// Additional patterns for the SSE3 move and duplicate instructions
let AddedComplexity = 5 in {
def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
(MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
@@ -2646,52 +3105,98 @@ def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
(MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
}
-// Arithmetic
-let Constraints = "$src1 = $dst" in {
- def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "addsubps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
- VR128:$src2))]>;
- def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "addsubps\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
- (memop addr:$src2)))]>;
- def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- "addsubpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
- VR128:$src2))]>;
- def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
- (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- "addsubpd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
- (memop addr:$src2)))]>;
+// vector_shuffle v1, <undef> <1, 1, 3, 3>
+let AddedComplexity = 15 in
+def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
+ (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
+let AddedComplexity = 20 in
+def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
+ (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
+
+// vector_shuffle v1, <undef> <0, 0, 2, 2>
+let AddedComplexity = 15 in
+ def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
+ (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
+let AddedComplexity = 20 in
+ def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
+ (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
+
+//===---------------------------------------------------------------------===//
+// SSE3 - Arithmetic
+//===---------------------------------------------------------------------===//
+
+multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, bit Is2Addr = 1> {
+ def rr : I<0xD0, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (Int VR128:$src1,
+ VR128:$src2))]>;
+ def rm : I<0xD0, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (Int VR128:$src1,
+ (memop addr:$src2)))]>;
+
}
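+// addsub alternates per lane: even lanes subtract and odd lanes add, so
+// for addsubps dst = [a0-b0, a1+b1, a2-b2, a3+b3].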
-def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
- "lddqu\t{$src, $dst|$dst, $src}",
- [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
+let isAsmParserOnly = 1, Predicates = [HasAVX],
+ ExeDomain = SSEPackedDouble in {
+ defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", 0>, XD,
+ VEX_4V;
+ defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", 0>, OpSize,
+ VEX_4V;
+}
+let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
+ ExeDomain = SSEPackedDouble in {
+ defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps">, XD;
+ defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd">, TB, OpSize;
+}
+
+//===---------------------------------------------------------------------===//
+// SSE3 - Horizontal Arithmetic Instructions
+//===---------------------------------------------------------------------===//
// Horizontal ops
-class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
+class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
: S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
-class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
+class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
: S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
+ !if(Is2Addr,
!strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst, (v4f32 (IntId VR128:$src1, (memop addr:$src2))))]>;
-class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
+class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
: S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
-class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
+class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1>
: S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
[(set VR128:$dst, (v2f64 (IntId VR128:$src1, (memopv2f64 addr:$src2))))]>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ def VHADDPSrr : S3D_Intrr<0x7C, "vhaddps", int_x86_sse3_hadd_ps, 0>, VEX_4V;
+ def VHADDPSrm : S3D_Intrm<0x7C, "vhaddps", int_x86_sse3_hadd_ps, 0>, VEX_4V;
+ def VHADDPDrr : S3_Intrr <0x7C, "vhaddpd", int_x86_sse3_hadd_pd, 0>, VEX_4V;
+ def VHADDPDrm : S3_Intrm <0x7C, "vhaddpd", int_x86_sse3_hadd_pd, 0>, VEX_4V;
+ def VHSUBPSrr : S3D_Intrr<0x7D, "vhsubps", int_x86_sse3_hsub_ps, 0>, VEX_4V;
+ def VHSUBPSrm : S3D_Intrm<0x7D, "vhsubps", int_x86_sse3_hsub_ps, 0>, VEX_4V;
+ def VHSUBPDrr : S3_Intrr <0x7D, "vhsubpd", int_x86_sse3_hsub_pd, 0>, VEX_4V;
+ def VHSUBPDrm : S3_Intrm <0x7D, "vhsubpd", int_x86_sse3_hsub_pd, 0>, VEX_4V;
+}
+
let Constraints = "$src1 = $dst" in {
def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
@@ -2703,35 +3208,14 @@ let Constraints = "$src1 = $dst" in {
def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
}
-// Thread synchronization
-def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
- [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
-def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
- [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
-
-// vector_shuffle v1, <undef> <1, 1, 3, 3>
-let AddedComplexity = 15 in
-def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
- (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
-let AddedComplexity = 20 in
-def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
- (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
-
-// vector_shuffle v1, <undef> <0, 0, 2, 2>
-let AddedComplexity = 15 in
- def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
- (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
-let AddedComplexity = 20 in
- def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
- (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
-
//===---------------------------------------------------------------------===//
-// SSSE3 Instructions
+// SSSE3 - Packed Absolute Instructions
//===---------------------------------------------------------------------===//
-/// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8.
-multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64, Intrinsic IntId128> {
+/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
+multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
+ PatFrag mem_frag64, PatFrag mem_frag128,
+ Intrinsic IntId64, Intrinsic IntId128> {
def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR64:$dst, (IntId64 VR64:$src))]>;
@@ -2739,7 +3223,7 @@ multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR64:$dst,
- (IntId64 (bitconvert (memopv8i8 addr:$src))))]>;
+ (IntId64 (bitconvert (mem_frag64 addr:$src))))]>;
def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src),
@@ -2752,240 +3236,203 @@ multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst,
(IntId128
- (bitconvert (memopv16i8 addr:$src))))]>, OpSize;
+ (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
}
-/// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16.
-multiclass SS3I_unop_rm_int_16<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64, Intrinsic IntId128> {
- def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR64:$dst, (IntId64 VR64:$src))]>;
-
- def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
- (ins i64mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR64:$dst,
- (IntId64
- (bitconvert (memopv4i16 addr:$src))))]>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv8i8, memopv16i8,
+ int_x86_ssse3_pabs_b,
+ int_x86_ssse3_pabs_b_128>, VEX;
+ defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv4i16, memopv8i16,
+ int_x86_ssse3_pabs_w,
+ int_x86_ssse3_pabs_w_128>, VEX;
+ defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv2i32, memopv4i32,
+ int_x86_ssse3_pabs_d,
+ int_x86_ssse3_pabs_d_128>, VEX;
+}
- def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId128 VR128:$src))]>,
- OpSize;
+defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv8i8, memopv16i8,
+ int_x86_ssse3_pabs_b,
+ int_x86_ssse3_pabs_b_128>;
+defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv4i16, memopv8i16,
+ int_x86_ssse3_pabs_w,
+ int_x86_ssse3_pabs_w_128>;
+defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv2i32, memopv4i32,
+ int_x86_ssse3_pabs_d,
+ int_x86_ssse3_pabs_d_128>;
- def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins i128mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst,
- (IntId128
- (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
-}
+//===---------------------------------------------------------------------===//
+// SSSE3 - Packed Binary Operator Instructions
+//===---------------------------------------------------------------------===//
-/// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32.
-multiclass SS3I_unop_rm_int_32<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64, Intrinsic IntId128> {
+/// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
+multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
+ PatFrag mem_frag64, PatFrag mem_frag128,
+ Intrinsic IntId64, Intrinsic IntId128,
+ bit Is2Addr = 1> {
+ let isCommutable = 1 in
def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR64:$dst, (IntId64 VR64:$src))]>;
-
+ (ins VR64:$src1, VR64:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]>;
def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
- (ins i64mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR64:$dst,
- (IntId64
- (bitconvert (memopv2i32 addr:$src))))]>;
+ (ins VR64:$src1, i64mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR64:$dst,
+ (IntId64 VR64:$src1,
+ (bitconvert (memopv8i8 addr:$src2))))]>;
+ let isCommutable = 1 in
def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId128 VR128:$src))]>,
- OpSize;
-
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
+ OpSize;
def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins i128mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst,
- (IntId128
- (bitconvert (memopv4i32 addr:$src))))]>, OpSize;
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst,
+ (IntId128 VR128:$src1,
+ (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
-defm PABSB : SS3I_unop_rm_int_8 <0x1C, "pabsb",
- int_x86_ssse3_pabs_b,
- int_x86_ssse3_pabs_b_128>;
-defm PABSW : SS3I_unop_rm_int_16<0x1D, "pabsw",
- int_x86_ssse3_pabs_w,
- int_x86_ssse3_pabs_w_128>;
-defm PABSD : SS3I_unop_rm_int_32<0x1E, "pabsd",
- int_x86_ssse3_pabs_d,
- int_x86_ssse3_pabs_d_128>;
-
-/// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8.
-let Constraints = "$src1 = $dst" in {
- multiclass SS3I_binop_rm_int_8<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64, Intrinsic IntId128,
- bit Commutable = 0> {
- def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
- let isCommutable = Commutable;
- }
- def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst,
- (IntId64 VR64:$src1,
- (bitconvert (memopv8i8 addr:$src2))))]>;
-
- def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
- }
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+let isCommutable = 0 in {
+ defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv4i16, memopv8i16,
+ int_x86_ssse3_phadd_w,
+ int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
+ defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv2i32, memopv4i32,
+ int_x86_ssse3_phadd_d,
+ int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
+ defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv4i16, memopv8i16,
+ int_x86_ssse3_phadd_sw,
+ int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
+ defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv4i16, memopv8i16,
+ int_x86_ssse3_phsub_w,
+ int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
+ defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv2i32, memopv4i32,
+ int_x86_ssse3_phsub_d,
+ int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
+ defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv4i16, memopv8i16,
+ int_x86_ssse3_phsub_sw,
+ int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
+ defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv8i8, memopv16i8,
+ int_x86_ssse3_pmadd_ub_sw,
+ int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
+ defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv8i8, memopv16i8,
+ int_x86_ssse3_pshuf_b,
+ int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
+ defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv8i8, memopv16i8,
+ int_x86_ssse3_psign_b,
+ int_x86_ssse3_psign_b_128, 0>, VEX_4V;
+ defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv4i16, memopv8i16,
+ int_x86_ssse3_psign_w,
+ int_x86_ssse3_psign_w_128, 0>, VEX_4V;
+ defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv2i32, memopv4i32,
+ int_x86_ssse3_psign_d,
+ int_x86_ssse3_psign_d_128, 0>, VEX_4V;
}
-
-/// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16.
-let Constraints = "$src1 = $dst" in {
- multiclass SS3I_binop_rm_int_16<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64, Intrinsic IntId128,
- bit Commutable = 0> {
- def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
- let isCommutable = Commutable;
- }
- def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst,
- (IntId64 VR64:$src1,
- (bitconvert (memopv4i16 addr:$src2))))]>;
-
- def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv8i16 addr:$src2))))]>, OpSize;
- }
+defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv4i16, memopv8i16,
+ int_x86_ssse3_pmul_hr_sw,
+ int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
}
-/// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32.
-let Constraints = "$src1 = $dst" in {
- multiclass SS3I_binop_rm_int_32<bits<8> opc, string OpcodeStr,
- Intrinsic IntId64, Intrinsic IntId128,
- bit Commutable = 0> {
- def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
- let isCommutable = Commutable;
- }
- def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR64:$dst,
- (IntId64 VR64:$src1,
- (bitconvert (memopv2i32 addr:$src2))))]>;
-
- def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv4i32 addr:$src2))))]>, OpSize;
- }
+// None of these have i8 immediate fields.
+let ImmT = NoImm, Constraints = "$src1 = $dst" in {
+let isCommutable = 0 in {
+ defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv4i16, memopv8i16,
+ int_x86_ssse3_phadd_w,
+ int_x86_ssse3_phadd_w_128>;
+ defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv2i32, memopv4i32,
+ int_x86_ssse3_phadd_d,
+ int_x86_ssse3_phadd_d_128>;
+ defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv4i16, memopv8i16,
+ int_x86_ssse3_phadd_sw,
+ int_x86_ssse3_phadd_sw_128>;
+ defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv4i16, memopv8i16,
+ int_x86_ssse3_phsub_w,
+ int_x86_ssse3_phsub_w_128>;
+ defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv2i32, memopv4i32,
+ int_x86_ssse3_phsub_d,
+ int_x86_ssse3_phsub_d_128>;
+ defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv4i16, memopv8i16,
+ int_x86_ssse3_phsub_sw,
+ int_x86_ssse3_phsub_sw_128>;
+ defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv8i8, memopv16i8,
+ int_x86_ssse3_pmadd_ub_sw,
+ int_x86_ssse3_pmadd_ub_sw_128>;
+ defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv8i8, memopv16i8,
+ int_x86_ssse3_pshuf_b,
+ int_x86_ssse3_pshuf_b_128>;
+ defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv8i8, memopv16i8,
+ int_x86_ssse3_psign_b,
+ int_x86_ssse3_psign_b_128>;
+ defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv4i16, memopv8i16,
+ int_x86_ssse3_psign_w,
+ int_x86_ssse3_psign_w_128>;
+ defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv2i32, memopv4i32,
+ int_x86_ssse3_psign_d,
+ int_x86_ssse3_psign_d_128>;
+}
+defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv4i16, memopv8i16,
+ int_x86_ssse3_pmul_hr_sw,
+ int_x86_ssse3_pmul_hr_sw_128>;
}
-let ImmT = NoImm in { // None of these have i8 immediate fields.
-defm PHADDW : SS3I_binop_rm_int_16<0x01, "phaddw",
- int_x86_ssse3_phadd_w,
- int_x86_ssse3_phadd_w_128>;
-defm PHADDD : SS3I_binop_rm_int_32<0x02, "phaddd",
- int_x86_ssse3_phadd_d,
- int_x86_ssse3_phadd_d_128>;
-defm PHADDSW : SS3I_binop_rm_int_16<0x03, "phaddsw",
- int_x86_ssse3_phadd_sw,
- int_x86_ssse3_phadd_sw_128>;
-defm PHSUBW : SS3I_binop_rm_int_16<0x05, "phsubw",
- int_x86_ssse3_phsub_w,
- int_x86_ssse3_phsub_w_128>;
-defm PHSUBD : SS3I_binop_rm_int_32<0x06, "phsubd",
- int_x86_ssse3_phsub_d,
- int_x86_ssse3_phsub_d_128>;
-defm PHSUBSW : SS3I_binop_rm_int_16<0x07, "phsubsw",
- int_x86_ssse3_phsub_sw,
- int_x86_ssse3_phsub_sw_128>;
-defm PMADDUBSW : SS3I_binop_rm_int_8 <0x04, "pmaddubsw",
- int_x86_ssse3_pmadd_ub_sw,
- int_x86_ssse3_pmadd_ub_sw_128>;
-defm PMULHRSW : SS3I_binop_rm_int_16<0x0B, "pmulhrsw",
- int_x86_ssse3_pmul_hr_sw,
- int_x86_ssse3_pmul_hr_sw_128, 1>;
-
-defm PSHUFB : SS3I_binop_rm_int_8 <0x00, "pshufb",
- int_x86_ssse3_pshuf_b,
- int_x86_ssse3_pshuf_b_128>;
-defm PSIGNB : SS3I_binop_rm_int_8 <0x08, "psignb",
- int_x86_ssse3_psign_b,
- int_x86_ssse3_psign_b_128>;
-defm PSIGNW : SS3I_binop_rm_int_16<0x09, "psignw",
- int_x86_ssse3_psign_w,
- int_x86_ssse3_psign_w_128>;
-defm PSIGND : SS3I_binop_rm_int_32<0x0A, "psignd",
- int_x86_ssse3_psign_d,
- int_x86_ssse3_psign_d_128>;
-}
-
-// palignr patterns.
-let Constraints = "$src1 = $dst" in {
- def PALIGNR64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
- (ins VR64:$src1, VR64:$src2, i8imm:$src3),
- "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- []>;
- def PALIGNR64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
- (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
- "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- []>;
-
- def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- []>, OpSize;
- def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
- []>, OpSize;
+def : Pat<(X86pshufb VR128:$src, VR128:$mask),
+ (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
+def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
+ (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
+
+//===---------------------------------------------------------------------===//
+// SSSE3 - Packed Align Instruction Patterns
+//===---------------------------------------------------------------------===//
+
+multiclass sse3_palign<string asm, bit Is2Addr = 1> {
+ def R64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
+ (ins VR64:$src1, VR64:$src2, i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ []>;
+ def R64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
+ (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ []>;
+
+ def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ []>, OpSize;
+ def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ []>, OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPALIGN : sse3_palign<"vpalignr", 0>, VEX_4V;
+let Constraints = "$src1 = $dst" in
+ defm PALIGN : sse3_palign<"palignr">;
+
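+// palignr concatenates $src1:$src2 into a double-width intermediate and
+// shifts it right by $src3 bytes, which is why the shuffle patterns below
+// pass the operands swapped.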
let AddedComplexity = 5 in {
def : Pat<(v1i64 (palign:$src3 VR64:$src1, VR64:$src2)),
@@ -2996,10 +3443,6 @@ def : Pat<(v2i32 (palign:$src3 VR64:$src1, VR64:$src2)),
(PALIGNR64rr VR64:$src2, VR64:$src1,
(SHUFFLE_get_palign_imm VR64:$src3))>,
Requires<[HasSSSE3]>;
-def : Pat<(v2f32 (palign:$src3 VR64:$src1, VR64:$src2)),
- (PALIGNR64rr VR64:$src2, VR64:$src1,
- (SHUFFLE_get_palign_imm VR64:$src3))>,
- Requires<[HasSSSE3]>;
def : Pat<(v4i16 (palign:$src3 VR64:$src1, VR64:$src2)),
(PALIGNR64rr VR64:$src2, VR64:$src1,
(SHUFFLE_get_palign_imm VR64:$src3))>,
@@ -3027,10 +3470,15 @@ def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
Requires<[HasSSSE3]>;
}
-def : Pat<(X86pshufb VR128:$src, VR128:$mask),
- (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
-def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
- (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
+//===---------------------------------------------------------------------===//
+// SSSE3 Misc Instructions
+//===---------------------------------------------------------------------===//
+
+// Thread synchronization
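+// MONITOR takes the address in EAX, extensions in ECX and hints in EDX;
+// MWAIT takes extensions in ECX and hints in EAX, matching the operand
+// order of the intrinsics below.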
+def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
+ [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
+def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
+ [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
//===---------------------------------------------------------------------===//
// Non-Instruction Patterns
@@ -3311,287 +3759,9 @@ def : Pat<(store (v16i8 VR128:$src), addr:$dst),
(MOVUPSmr addr:$dst, VR128:$src)>;
//===----------------------------------------------------------------------===//
-// SSE4.1 Instructions
+// SSE4.1 - Packed Move with Sign/Zero Extend
//===----------------------------------------------------------------------===//
-multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd,
- string OpcodeStr,
- Intrinsic V4F32Int,
- Intrinsic V2F64Int> {
- // Intrinsic operation, reg.
- // Vector intrinsic operation, reg
- def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
- OpSize;
-
- // Vector intrinsic operation, mem
- def PSm_Int : Ii8<opcps, MRMSrcMem,
- (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst,
- (V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>,
- TA, OpSize,
- Requires<[HasSSE41]>;
-
- // Vector intrinsic operation, reg
- def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
- (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
- OpSize;
-
- // Vector intrinsic operation, mem
- def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
- (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
- !strconcat(OpcodeStr,
- "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- [(set VR128:$dst,
- (V2F64Int (memopv2f64 addr:$src1),imm:$src2))]>,
- OpSize;
-}
-
-let Constraints = "$src1 = $dst" in {
-multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
- string OpcodeStr,
- Intrinsic F32Int,
- Intrinsic F64Int> {
- // Intrinsic operation, reg.
- def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
- (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
- OpSize;
-
- // Intrinsic operation, mem.
- def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
- (outs VR128:$dst),
- (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
- OpSize;
-
- // Intrinsic operation, reg.
- def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
- (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
- OpSize;
-
- // Intrinsic operation, mem.
- def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
- (outs VR128:$dst),
- (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
- OpSize;
-}
-}
-
-// FP round - roundss, roundps, roundsd, roundpd
-defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round",
- int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
-defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
- int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
-
-// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
-multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128> {
- def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
- def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins i128mem:$src),
- !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
- [(set VR128:$dst,
- (IntId128
- (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
-}
-
-defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
- int_x86_sse41_phminposuw>;
-
-/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
-let Constraints = "$src1 = $dst" in {
- multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128, bit Commutable = 0> {
- def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
- }
-}
-
-defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq",
- int_x86_sse41_pcmpeqq, 1>;
-defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw",
- int_x86_sse41_packusdw, 0>;
-defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb",
- int_x86_sse41_pminsb, 1>;
-defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd",
- int_x86_sse41_pminsd, 1>;
-defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud",
- int_x86_sse41_pminud, 1>;
-defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw",
- int_x86_sse41_pminuw, 1>;
-defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb",
- int_x86_sse41_pmaxsb, 1>;
-defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd",
- int_x86_sse41_pmaxsd, 1>;
-defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud",
- int_x86_sse41_pmaxud, 1>;
-defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw",
- int_x86_sse41_pmaxuw, 1>;
-
-defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq, 1>;
-
-def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
- (PCMPEQQrr VR128:$src1, VR128:$src2)>;
-def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
- (PCMPEQQrm VR128:$src1, addr:$src2)>;
-
-/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
-let Constraints = "$src1 = $dst" in {
- multiclass SS41I_binop_patint<bits<8> opc, string OpcodeStr, ValueType OpVT,
- SDNode OpNode, Intrinsic IntId128,
- bit Commutable = 0> {
- def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode (OpVT VR128:$src1),
- VR128:$src2))]>, OpSize {
- let isCommutable = Commutable;
- }
- def rr_int : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (OpVT (OpNode VR128:$src1, (memop addr:$src2))))]>, OpSize;
- def rm_int : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1, (memop addr:$src2)))]>,
- OpSize;
- }
-}
-
-/// SS48I_binop_rm - Simple SSE41 binary operator.
-let Constraints = "$src1 = $dst" in {
-multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
- ValueType OpVT, bit Commutable = 0> {
- def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (OpNode VR128:$src1,
- (bc_v4i32 (memopv2i64 addr:$src2))))]>,
- OpSize;
-}
-}
-
-defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32, 1>;
-
-/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
-let Constraints = "$src1 = $dst" in {
- multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128, bit Commutable = 0> {
- def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
- OpSize;
- }
-}
-
-defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps",
- int_x86_sse41_blendps, 0>;
-defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd",
- int_x86_sse41_blendpd, 0>;
-defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw",
- int_x86_sse41_pblendw, 0>;
-defm DPPS : SS41I_binop_rmi_int<0x40, "dpps",
- int_x86_sse41_dpps, 1>;
-defm DPPD : SS41I_binop_rmi_int<0x41, "dppd",
- int_x86_sse41_dppd, 1>;
-defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw",
- int_x86_sse41_mpsadbw, 0>;
-
-
-/// SS41I_ternary_int - SSE 4.1 ternary operator
-let Uses = [XMM0], Constraints = "$src1 = $dst" in {
- multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
- def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr,
- "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
- [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
- OpSize;
-
- def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr,
- "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
- [(set VR128:$dst,
- (IntId VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
- }
-}
-
-defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
-defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
-defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
-
-
multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
@@ -3604,6 +3774,21 @@ multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
+ VEX;
+defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
+ VEX;
+defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
+ VEX;
+defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
+ VEX;
+defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
+ VEX;
+defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
+ VEX;
+}
+
defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
@@ -3655,6 +3840,17 @@ multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
+ VEX;
+defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
+ VEX;
+defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
+ VEX;
+defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
+ VEX;
+}
+
defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
@@ -3685,6 +3881,12 @@ multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
+ VEX;
+defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
+ VEX;
+}
defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
@@ -3699,6 +3901,9 @@ def : Pat<(int_x86_sse41_pmovzxbq
(v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
(PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
+//===----------------------------------------------------------------------===//
+// SSE4.1 - Extract Instructions
+//===----------------------------------------------------------------------===//
/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
@@ -3718,6 +3923,9 @@ multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
+
defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
@@ -3733,6 +3941,9 @@ multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
// (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
+
defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
@@ -3752,8 +3963,31 @@ multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
addr:$dst)]>, OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
+
defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
+/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
+multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
+ def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
+ (ins VR128:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set GR64:$dst,
+ (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
+ def mr : SS4AIi8<opc, MRMDestMem, (outs),
+ (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
+ addr:$dst)]>, OpSize, REX_W;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
+
+defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
/// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to int reg or memory
/// destination
@@ -3773,6 +4007,8 @@ multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
addr:$dst)]>, OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
@@ -3782,78 +4018,530 @@ def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
(EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
Requires<[HasSSE41]>;
-let Constraints = "$src1 = $dst" in {
- multiclass SS41I_insert8<bits<8> opc, string OpcodeStr> {
- def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
- def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
- imm:$src3))]>, OpSize;
- }
+//===----------------------------------------------------------------------===//
+// SSE4.1 - Insert Instructions
+//===----------------------------------------------------------------------===//
+
+multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
+ def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
+ def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
+ imm:$src3))]>, OpSize;
}
-defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
+let Constraints = "$src1 = $dst" in
+ defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
+
+multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
+ def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
+ OpSize;
+ def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
+ imm:$src3)))]>, OpSize;
+}
-let Constraints = "$src1 = $dst" in {
- multiclass SS41I_insert32<bits<8> opc, string OpcodeStr> {
- def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
- OpSize;
- def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
- imm:$src3)))]>, OpSize;
- }
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
+let Constraints = "$src1 = $dst" in
+ defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
+
+multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
+ def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
+ OpSize;
+ def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
+ imm:$src3)))]>, OpSize;
}
-defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
+let Constraints = "$src1 = $dst" in
+ defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
// insertps has a few different modes; the first two below are optimized
// inserts that won't zero arbitrary elements in the destination vector. The
// next one matches the intrinsic and may zero arbitrary elements in the
// target vector.
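+// The insertps immediate encodes the source element in bits 7-6 (register
+// form only), the destination element in bits 5-4, and a zero mask over the
+// destination elements in bits 3-0.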
-let Constraints = "$src1 = $dst" in {
- multiclass SS41I_insertf32<bits<8> opc, string OpcodeStr> {
- def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
+multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
+ def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
OpSize;
- def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set VR128:$dst,
- (X86insrtps VR128:$src1,
- (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
- imm:$src3))]>, OpSize;
- }
+ def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(asm,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (X86insrtps VR128:$src1,
+ (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
+ imm:$src3))]>, OpSize;
}
-defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
+let Constraints = "$src1 = $dst" in
+ defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
(INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>;
+//===----------------------------------------------------------------------===//
+// SSE4.1 - Round Instructions
+//===----------------------------------------------------------------------===//
+
+multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd,
+ string OpcodeStr,
+ Intrinsic V4F32Int,
+ Intrinsic V2F64Int> {
+  // Vector intrinsic operation, reg
+ def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
+ OpSize;
+
+ // Vector intrinsic operation, mem
+ def PSm_Int : Ii8<opcps, MRMSrcMem,
+ (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst,
+ (V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>,
+ TA, OpSize,
+ Requires<[HasSSE41]>;
+
+ // Vector intrinsic operation, reg
+ def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
+ OpSize;
+
+ // Vector intrinsic operation, mem
+ def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
+ (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set VR128:$dst,
+ (V2F64Int (memopv2f64 addr:$src1),imm:$src2))]>,
+ OpSize;
+}
+
+multiclass sse41_fp_unop_rm_avx<bits<8> opcps, bits<8> opcpd,
+ string OpcodeStr> {
+  // Vector intrinsic operation, reg
+ def PSr : SS4AIi8<opcps, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, OpSize;
+
+ // Vector intrinsic operation, mem
+ def PSm : Ii8<opcps, MRMSrcMem,
+ (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, TA, OpSize, Requires<[HasSSE41]>;
+
+ // Vector intrinsic operation, reg
+ def PDr : SS4AIi8<opcpd, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, OpSize;
+
+ // Vector intrinsic operation, mem
+ def PDm : SS4AIi8<opcpd, MRMSrcMem,
+ (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
+ !strconcat(OpcodeStr,
+ "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []>, OpSize;
+}
+
+multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
+ string OpcodeStr,
+ Intrinsic F32Int,
+ Intrinsic F64Int, bit Is2Addr = 1> {
+ // Intrinsic operation, reg.
+ def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
+ OpSize;
+
+ // Intrinsic operation, mem.
+ def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
+ OpSize;
+
+ // Intrinsic operation, reg.
+ def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
+ OpSize;
+
+ // Intrinsic operation, mem.
+ def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
+ OpSize;
+}
+
+multiclass sse41_fp_binop_rm_avx<bits<8> opcss, bits<8> opcsd,
+ string OpcodeStr> {
+ // Intrinsic operation, reg.
+ def SSr : SS4AIi8<opcss, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, OpSize;
+
+ // Intrinsic operation, mem.
+ def SSm : SS4AIi8<opcss, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, OpSize;
+
+ // Intrinsic operation, reg.
+ def SDr : SS4AIi8<opcsd, MRMSrcReg,
+ (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, OpSize;
+
+ // Intrinsic operation, mem.
+ def SDm : SS4AIi8<opcsd, MRMSrcMem,
+ (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
+ !strconcat(OpcodeStr,
+ "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ []>, OpSize;
+}
+
+// FP round - roundss, roundps, roundsd, roundpd
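+// The round immediate selects the rounding mode in bits 1-0, defers to
+// MXCSR.RC when bit 2 is set, and suppresses precision exceptions when
+// bit 3 is set.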
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ // Intrinsic form
+ defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround",
+ int_x86_sse41_round_ps, int_x86_sse41_round_pd>,
+ VEX;
+ defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
+ int_x86_sse41_round_ss, int_x86_sse41_round_sd,
+ 0>, VEX_4V;
+ // Instructions for the assembler
+ defm VROUND : sse41_fp_unop_rm_avx<0x08, 0x09, "vround">, VEX;
+ defm VROUND : sse41_fp_binop_rm_avx<0x0A, 0x0B, "vround">, VEX_4V;
+}
+
+defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round",
+ int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
+let Constraints = "$src1 = $dst" in
+defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
+ int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
+
+//===----------------------------------------------------------------------===//
+// SSE4.1 - Misc Instructions
+//===----------------------------------------------------------------------===//
+
+// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
+multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId128> {
+ def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
+ def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins i128mem:$src),
+ !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
+ [(set VR128:$dst,
+ (IntId128
+ (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
+ int_x86_sse41_phminposuw>, VEX;
+defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
+ int_x86_sse41_phminposuw>;
+
+/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
+multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId128, bit Is2Addr = 1> {
+ let isCommutable = 1 in
+ def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
+ def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst,
+ (IntId128 VR128:$src1,
+ (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ let isCommutable = 0 in
+ defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
+ 0>, VEX_4V;
+ defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
+ 0>, VEX_4V;
+ defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
+ 0>, VEX_4V;
+ defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
+ 0>, VEX_4V;
+ defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
+ 0>, VEX_4V;
+ defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
+ 0>, VEX_4V;
+ defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
+ 0>, VEX_4V;
+ defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
+ 0>, VEX_4V;
+ defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
+ 0>, VEX_4V;
+ defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
+ 0>, VEX_4V;
+ defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
+ 0>, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
+ let isCommutable = 0 in
+ defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
+ defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
+ defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
+ defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
+ defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
+ defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
+ defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
+ defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
+ defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
+ defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
+ defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
+}
+
+def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
+ (PCMPEQQrr VR128:$src1, VR128:$src2)>;
+def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
+ (PCMPEQQrm VR128:$src1, addr:$src2)>;
+
+/// SS48I_binop_rm - Simple SSE41 binary operator.
+multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ ValueType OpVT, bit Is2Addr = 1> {
+ let isCommutable = 1 in
+ def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
+ OpSize;
+ def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (OpNode VR128:$src1,
+ (bc_v4i32 (memopv2i64 addr:$src2))))]>,
+ OpSize;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
+let Constraints = "$src1 = $dst" in
+ defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
+
+/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
+multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId128, bit Is2Addr = 1> {
+ let isCommutable = 1 in
+ def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
+ OpSize;
+ def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
+ [(set VR128:$dst,
+ (IntId128 VR128:$src1,
+ (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
+ OpSize;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ let isCommutable = 0 in {
+ defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
+ 0>, VEX_4V;
+ defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
+ 0>, VEX_4V;
+ defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
+ 0>, VEX_4V;
+ defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
+ 0>, VEX_4V;
+ }
+ defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
+ 0>, VEX_4V;
+ defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
+ 0>, VEX_4V;
+}
+
+let Constraints = "$src1 = $dst" in {
+ let isCommutable = 0 in {
+ defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps>;
+ defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd>;
+ defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw>;
+ defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw>;
+ }
+ defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps>;
+ defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd>;
+}
+
+/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+ multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr> {
+ def rr : I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [], SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
+
+ def rm : I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, VR128:$src3),
+ !strconcat(OpcodeStr,
+ "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
+ [], SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
+ }
+}
+
+defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd">;
+defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps">;
+defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb">;
+
+/// SS41I_ternary_int - SSE 4.1 ternary operator
+let Uses = [XMM0], Constraints = "$src1 = $dst" in {
+ multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
+ def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr,
+ "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
+ [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
+ OpSize;
+
+ def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !strconcat(OpcodeStr,
+ "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
+ [(set VR128:$dst,
+ (IntId VR128:$src1,
+ (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
+ }
+}
+
+defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
+defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
+defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
+
// We lower to the ptest instruction in X86ISelLowering, primarily from the
// Intel intrinsic that corresponds to it.
+let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
+def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
+ "vptest\t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS, (X86ptest VR128:$src1, VR128:$src2))]>,
+ OpSize, VEX;
+def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
+ "vptest\t{$src2, $src1|$src1, $src2}",
+ [(set EFLAGS, (X86ptest VR128:$src1, (load addr:$src2)))]>,
+ OpSize, VEX;
+}
+
let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
"ptest \t{$src2, $src1|$src1, $src2}",
@@ -3865,43 +4553,207 @@ def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
OpSize;
}
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+ "vmovntdqa\t{$src, $dst|$dst, $src}",
+ [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
+ OpSize, VEX;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
"movntdqa\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
OpSize;
-
//===----------------------------------------------------------------------===//
-// SSE4.2 Instructions
+// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//
/// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
-let Constraints = "$src1 = $dst" in {
- multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128, bit Commutable = 0> {
- def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
- }
+multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId128, bit Is2Addr = 1> {
+ def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
+ OpSize;
+ def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst,
+ (IntId128 VR128:$src1,
+ (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
-defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
+let isAsmParserOnly = 1, Predicates = [HasAVX] in
+ defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
+ 0>, VEX_4V;
+let Constraints = "$src1 = $dst" in
+ defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
(PCMPGTQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
(PCMPGTQrm VR128:$src1, addr:$src2)>;
+//===----------------------------------------------------------------------===//
+// SSE4.2 - String/text Processing Instructions
+//===----------------------------------------------------------------------===//
+
+// Packed Compare Implicit Length Strings, Return Mask
+let Defs = [EFLAGS], usesCustomInserter = 1 in {
+ def PCMPISTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ "#PCMPISTRM128rr PSEUDO!",
+ [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
+ imm:$src3))]>, OpSize;
+ def PCMPISTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ "#PCMPISTRM128rm PSEUDO!",
+ [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
+ VR128:$src1, (load addr:$src2), imm:$src3))]>, OpSize;
+}
+
+let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1,
+ Predicates = [HasAVX] in {
+ def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
+ def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
+}
+
+let Defs = [XMM0, EFLAGS] in {
+ def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
+ def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
+}
+
+// Packed Compare Explicit Length Strings, Return Mask
+let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
+ def PCMPESTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ "#PCMPESTRM128rr PSEUDO!",
+ [(set VR128:$dst,
+ (int_x86_sse42_pcmpestrm128
+ VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>, OpSize;
+
+ def PCMPESTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ "#PCMPESTRM128rm PSEUDO!",
+ [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
+ VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>,
+ OpSize;
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX],
+ Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
+ def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
+ def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
+}
+
+let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
+ def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
+ def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
+}
+
+// Packed Compare Implicit Length Strings, Return Index
+let Defs = [ECX, EFLAGS] in {
+ multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
+ def rr : SS42AI<0x63, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src2, i8imm:$src3),
+ !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
+ [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
+ (implicit EFLAGS)]>, OpSize;
+ def rm : SS42AI<0x63, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
+ !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
+ [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
+ (implicit EFLAGS)]>, OpSize;
+ }
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
+ VEX;
+defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
+ VEX;
+defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
+ VEX;
+defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
+ VEX;
+defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
+ VEX;
+defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
+ VEX;
+}
+
+defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
+defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
+defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
+defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
+defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
+defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
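+
+// The A/C/O/S/Z variants correspond to the _mm_cmpistr{a,c,o,s,z} intrinsics,
+// which report the above, carry, overflow, sign and zero conditions of the
+// compare instead of the ECX index.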
+
+// Packed Compare Explicit Length Strings, Return Index
+let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
+ multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
+ def rr : SS42AI<0x61, MRMSrcReg, (outs),
+ (ins VR128:$src1, VR128:$src3, i8imm:$src5),
+ !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
+ [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
+ (implicit EFLAGS)]>, OpSize;
+ def rm : SS42AI<0x61, MRMSrcMem, (outs),
+ (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
+ !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
+ [(set ECX,
+ (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
+ (implicit EFLAGS)]>, OpSize;
+ }
+}
+
+let isAsmParserOnly = 1, Predicates = [HasAVX] in {
+defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
+ VEX;
+defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
+ VEX;
+defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
+ VEX;
+defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
+ VEX;
+defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
+ VEX;
+defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
+ VEX;
+}
+
+defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
+defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
+defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
+defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
+defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
+defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
+
+//===----------------------------------------------------------------------===//
+// SSE4.2 - CRC Instructions
+//===----------------------------------------------------------------------===//
+
+// No CRC instructions have AVX equivalents
+
// crc intrinsic instruction
// This set of instructions is rm-only; the only difference is the size
// of r and m.
@@ -3969,133 +4821,52 @@ let Constraints = "$src1 = $dst" in {
REX_W;
}
-// String/text processing instructions.
-let Defs = [EFLAGS], usesCustomInserter = 1 in {
-def PCMPISTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "#PCMPISTRM128rr PSEUDO!",
- [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
- imm:$src3))]>, OpSize;
-def PCMPISTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- "#PCMPISTRM128rm PSEUDO!",
- [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, (load addr:$src2),
- imm:$src3))]>, OpSize;
-}
-
-let Defs = [XMM0, EFLAGS] in {
-def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
-def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
-}
-
-let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
-def PCMPESTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5),
- "#PCMPESTRM128rr PSEUDO!",
- [(set VR128:$dst,
- (int_x86_sse42_pcmpestrm128
- VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>, OpSize;
-
-def PCMPESTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
- "#PCMPESTRM128rm PSEUDO!",
- [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
- VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>,
- OpSize;
-}
-
-let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
-def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5),
- "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
-def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
- "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
-}
+//===----------------------------------------------------------------------===//
+// AES-NI Instructions
+//===----------------------------------------------------------------------===//
-let Defs = [ECX, EFLAGS] in {
- multiclass SS42AI_pcmpistri<Intrinsic IntId128> {
- def rr : SS42AI<0x63, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src2, i8imm:$src3),
- "pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}",
- [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
- (implicit EFLAGS)]>, OpSize;
- def rm : SS42AI<0x63, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
- "pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}",
- [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
- (implicit EFLAGS)]>, OpSize;
- }
+multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
+ Intrinsic IntId128, bit Is2Addr = 1> {
+ def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
+ OpSize;
+ def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, i128mem:$src2),
+ !if(Is2Addr,
+ !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
+ !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+ [(set VR128:$dst,
+ (IntId128 VR128:$src1,
+ (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
-defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
-defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
-defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
-defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
-defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
-defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
-
-let Defs = [ECX, EFLAGS] in {
-let Uses = [EAX, EDX] in {
- multiclass SS42AI_pcmpestri<Intrinsic IntId128> {
- def rr : SS42AI<0x61, MRMSrcReg, (outs),
- (ins VR128:$src1, VR128:$src3, i8imm:$src5),
- "pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}",
- [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
- (implicit EFLAGS)]>, OpSize;
- def rm : SS42AI<0x61, MRMSrcMem, (outs),
- (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
- "pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}",
- [(set ECX,
- (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
- (implicit EFLAGS)]>, OpSize;
- }
-}
+// Perform One Round of an AES Encryption/Decryption Flow
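+// (The *LAST forms implement the final round, which omits the
+// MixColumns/InvMixColumns step.)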
+let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
+ defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
+ int_x86_aesni_aesenc, 0>, VEX_4V;
+ defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
+ int_x86_aesni_aesenclast, 0>, VEX_4V;
+ defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
+ int_x86_aesni_aesdec, 0>, VEX_4V;
+ defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
+ int_x86_aesni_aesdeclast, 0>, VEX_4V;
}
-defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
-defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
-defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
-defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
-defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
-defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
-
-//===----------------------------------------------------------------------===//
-// AES-NI Instructions
-//===----------------------------------------------------------------------===//
-
let Constraints = "$src1 = $dst" in {
- multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
- Intrinsic IntId128, bit Commutable = 0> {
- def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
- (ins VR128:$src1, VR128:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
- OpSize {
- let isCommutable = Commutable;
- }
- def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
- (ins VR128:$src1, i128mem:$src2),
- !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
- [(set VR128:$dst,
- (IntId128 VR128:$src1,
- (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
- }
+ defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
+ int_x86_aesni_aesenc>;
+ defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
+ int_x86_aesni_aesenclast>;
+ defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
+ int_x86_aesni_aesdec>;
+ defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
+ int_x86_aesni_aesdeclast>;
}
-defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
- int_x86_aesni_aesenc>;
-defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
- int_x86_aesni_aesenclast>;
-defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
- int_x86_aesni_aesdec>;
-defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
- int_x86_aesni_aesdeclast>;
-
def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
(AESENCrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
@@ -4113,13 +4884,27 @@ def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
(AESDECLASTrm VR128:$src1, addr:$src2)>;
+// Perform the AES InvMixColumn Transformation
+let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
+ def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1),
+ "vaesimc\t{$src1, $dst|$dst, $src1}",
+ [(set VR128:$dst,
+ (int_x86_aesni_aesimc VR128:$src1))]>,
+ OpSize, VEX;
+ def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
+ (ins i128mem:$src1),
+ "vaesimc\t{$src1, $dst|$dst, $src1}",
+ [(set VR128:$dst,
+ (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
+ OpSize, VEX;
+}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1),
"aesimc\t{$src1, $dst|$dst, $src1}",
[(set VR128:$dst,
(int_x86_aesni_aesimc VR128:$src1))]>,
OpSize;
-
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
(ins i128mem:$src1),
"aesimc\t{$src1, $dst|$dst, $src1}",
@@ -4127,6 +4912,22 @@ def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
(int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
OpSize;
+// AES Round Key Generation Assist
+let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
+ def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, i8imm:$src2),
+ "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
+ OpSize, VEX;
+ def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
+ (ins i128mem:$src1, i8imm:$src2),
+ "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
+ [(set VR128:$dst,
+ (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
+ imm:$src2))]>,
+ OpSize, VEX;
+}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, i8imm:$src2),
"aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
diff --git a/contrib/llvm/lib/Target/X86/X86MCCodeEmitter.cpp b/contrib/llvm/lib/Target/X86/X86MCCodeEmitter.cpp
index a9681e6..23b0666 100644
--- a/contrib/llvm/lib/Target/X86/X86MCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/X86MCCodeEmitter.cpp
@@ -30,7 +30,7 @@ class X86MCCodeEmitter : public MCCodeEmitter {
MCContext &Ctx;
bool Is64BitMode;
public:
- X86MCCodeEmitter(TargetMachine &tm, MCContext &ctx, bool is64Bit)
+ X86MCCodeEmitter(TargetMachine &tm, MCContext &ctx, bool is64Bit)
: TM(tm), TII(*TM.getInstrInfo()), Ctx(ctx) {
Is64BitMode = is64Bit;
}
@@ -38,17 +38,18 @@ public:
~X86MCCodeEmitter() {}
unsigned getNumFixupKinds() const {
- return 4;
+ return 5;
}
const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
const static MCFixupKindInfo Infos[] = {
{ "reloc_pcrel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
{ "reloc_pcrel_1byte", 0, 1 * 8, MCFixupKindInfo::FKF_IsPCRel },
+ { "reloc_pcrel_2byte", 0, 2 * 8, MCFixupKindInfo::FKF_IsPCRel },
{ "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
{ "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel }
};
-
+
if (Kind < FirstTargetFixupKind)
return MCCodeEmitter::getFixupKindInfo(Kind);
@@ -56,16 +57,38 @@ public:
"Invalid kind!");
return Infos[Kind - FirstTargetFixupKind];
}
-
+
static unsigned GetX86RegNum(const MCOperand &MO) {
return X86RegisterInfo::getX86RegNum(MO.getReg());
}
-
+
+ // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
+ // 0-7 and the difference between the 2 groups is given by the REX prefix.
+ // In the VEX prefix, registers are seen sequentially from 0-15 and encoded
+ // in 1's complement form, example:
+ //
+ // ModRM field => XMM9 => 1
+ // VEX.VVVV => XMM9 => ~9
+ //
+ // See table 4-35 of Intel AVX Programming Reference for details.
+ static unsigned char getVEXRegisterEncoding(const MCInst &MI,
+ unsigned OpNum) {
+ unsigned SrcReg = MI.getOperand(OpNum).getReg();
+ unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
+ if ((SrcReg >= X86::XMM8 && SrcReg <= X86::XMM15) ||
+ (SrcReg >= X86::YMM8 && SrcReg <= X86::YMM15))
+ SrcRegNum += 8;
+
+ // The registers represented through VEX_VVVV should
+ // be encoded in 1's complement form.
+ return (~SrcRegNum) & 0xf;
+ }
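
A minimal standalone sketch of the 1's complement VEX.VVVV encoding described above (the register numbers are illustrative; only the bit manipulation mirrors the patch):

#include <cassert>

// Encode a sequential register number 0-15 into the VEX.VVVV field.
static unsigned char encodeVEX_VVVV(unsigned RegNum) {
  assert(RegNum < 16 && "VEX.VVVV can only encode 16 registers");
  return (~RegNum) & 0xf; // 1's complement, low 4 bits only
}

int main() {
  assert(encodeVEX_VVVV(9) == 0x6);  // XMM9 -> ~9 & 0xf == 0b0110
  assert(encodeVEX_VVVV(0) == 0xf);  // XMM0 -> 0b1111 (also the "unused" value)
  return 0;
}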
+
void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
OS << (char)C;
++CurByte;
}
-
+
void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
raw_ostream &OS) const {
// Output the constant in little endian byte order.
@@ -75,38 +98,49 @@ public:
}
}
- void EmitImmediate(const MCOperand &Disp,
+ void EmitImmediate(const MCOperand &Disp,
unsigned ImmSize, MCFixupKind FixupKind,
unsigned &CurByte, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
int ImmOffset = 0) const;
-
+
inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
unsigned RM) {
assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
return RM | (RegOpcode << 3) | (Mod << 6);
}
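
A quick worked example of the ModRM packing above (the mod/reg/rm values are made up for illustration):

#include <cassert>

// ModRM layout: mod in bits 7-6, reg/opcode in bits 5-3, r/m in bits 2-0.
static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
  assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM fields out of range!");
  return RM | (RegOpcode << 3) | (Mod << 6);
}

int main() {
  // mod=3 (register-direct), reg=2, rm=1 -> 0b11'010'001 == 0xD1
  assert(ModRMByte(3, 2, 1) == 0xD1);
  return 0;
}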
-
+
void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
unsigned &CurByte, raw_ostream &OS) const {
EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
}
-
+
void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
unsigned &CurByte, raw_ostream &OS) const {
// SIB byte is in the same format as the ModRMByte.
EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
}
-
-
+
+
void EmitMemModRMByte(const MCInst &MI, unsigned Op,
- unsigned RegOpcodeField,
- unsigned TSFlags, unsigned &CurByte, raw_ostream &OS,
+ unsigned RegOpcodeField,
+ uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const;
-
+
void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const;
-
+
+ void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
+ const MCInst &MI, const TargetInstrDesc &Desc,
+ raw_ostream &OS) const;
+
+ void EmitSegmentOverridePrefix(uint64_t TSFlags, unsigned &CurByte,
+ int MemOperand, const MCInst &MI,
+ raw_ostream &OS) const;
+
+ void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
+ const MCInst &MI, const TargetInstrDesc &Desc,
+ raw_ostream &OS) const;
};
} // end anonymous namespace
@@ -124,24 +158,23 @@ MCCodeEmitter *llvm::createX86_64MCCodeEmitter(const Target &,
return new X86MCCodeEmitter(TM, Ctx, true);
}
-
-/// isDisp8 - Return true if this signed displacement fits in a 8-bit
-/// sign-extended field.
+/// isDisp8 - Return true if this signed displacement fits in an 8-bit
+/// sign-extended field.
static bool isDisp8(int Value) {
return Value == (signed char)Value;
}
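
The disp8 check is just a round trip through a signed char; a standalone sketch of its boundary behavior:

#include <cassert>

static bool isDisp8(int Value) {
  // Truncate to 8 bits, sign-extend back, and compare: lossless iff in range.
  return Value == (signed char)Value;
}

int main() {
  assert(isDisp8(127) && isDisp8(-128));   // the 8-bit signed boundaries fit
  assert(!isDisp8(128) && !isDisp8(-129)); // one past either end does not
  return 0;
}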
/// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
/// in an instruction with the specified TSFlags.
-static MCFixupKind getImmFixupKind(unsigned TSFlags) {
+static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
unsigned Size = X86II::getSizeOfImm(TSFlags);
bool isPCRel = X86II::isImmPCRel(TSFlags);
-
+
switch (Size) {
default: assert(0 && "Unknown immediate size");
case 1: return isPCRel ? MCFixupKind(X86::reloc_pcrel_1byte) : FK_Data_1;
+ case 2: return isPCRel ? MCFixupKind(X86::reloc_pcrel_2byte) : FK_Data_2;
case 4: return isPCRel ? MCFixupKind(X86::reloc_pcrel_4byte) : FK_Data_4;
- case 2: assert(!isPCRel); return FK_Data_2;
case 8: assert(!isPCRel); return FK_Data_8;
}
}
@@ -162,29 +195,30 @@ EmitImmediate(const MCOperand &DispOp, unsigned Size, MCFixupKind FixupKind,
// If we have an immoffset, add it to the expression.
const MCExpr *Expr = DispOp.getExpr();
-
+
// If the fixup is pc-relative, we need to bias the value to be relative to
// the start of the field, not the end of the field.
if (FixupKind == MCFixupKind(X86::reloc_pcrel_4byte) ||
FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load))
ImmOffset -= 4;
+ if (FixupKind == MCFixupKind(X86::reloc_pcrel_2byte))
+ ImmOffset -= 2;
if (FixupKind == MCFixupKind(X86::reloc_pcrel_1byte))
ImmOffset -= 1;
-
+
if (ImmOffset)
Expr = MCBinaryExpr::CreateAdd(Expr, MCConstantExpr::Create(ImmOffset, Ctx),
Ctx);
-
+
// Emit a symbolic constant as a fixup and 4 zeros.
Fixups.push_back(MCFixup::Create(CurByte, Expr, FixupKind));
EmitConstant(0, Size, CurByte, OS);
}
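
A standalone sketch of the pc-relative bias above: the CPU resolves the target relative to the end of the displacement field, while the fixup is applied at its start, so the emitter pre-subtracts the field size (the addresses are illustrative):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t FieldStart = 0x10; // where the 4-byte pc-rel field is written
  uint64_t Target     = 0x40; // address the relocation resolves to
  int64_t Encoded = (int64_t)(Target - FieldStart) - 4; // bias by field size
  // At run time the CPU adds the end-of-field address back:
  assert((int64_t)(FieldStart + 4) + Encoded == (int64_t)Target);
  return 0;
}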
-
void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
unsigned RegOpcodeField,
- unsigned TSFlags, unsigned &CurByte,
+ uint64_t TSFlags, unsigned &CurByte,
raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const{
const MCOperand &Disp = MI.getOperand(Op+3);
@@ -192,43 +226,43 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
const MCOperand &Scale = MI.getOperand(Op+1);
const MCOperand &IndexReg = MI.getOperand(Op+2);
unsigned BaseReg = Base.getReg();
-
+
// Handle %rip relative addressing.
if (BaseReg == X86::RIP) { // [disp32+RIP] in X86-64 mode
- assert(IndexReg.getReg() == 0 && Is64BitMode &&
- "Invalid rip-relative address");
+ assert(Is64BitMode && "Rip-relative addressing requires 64-bit mode");
+ assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
-
+
unsigned FixupKind = X86::reloc_riprel_4byte;
-
+
// movq loads are handled with a special relocation form which allows the
// linker to eliminate some loads for GOT references which end up in the
// same linkage unit.
if (MI.getOpcode() == X86::MOV64rm ||
MI.getOpcode() == X86::MOV64rm_TC)
FixupKind = X86::reloc_riprel_4byte_movq_load;
-
+
// rip-relative addressing is actually relative to the *next* instruction.
// Since an immediate can follow the mod/rm byte for an instruction, this
// means that we need to bias the immediate field of the instruction with
// the size of the immediate field. If we have this case, add it into the
// expression to emit.
int ImmSize = X86II::hasImm(TSFlags) ? X86II::getSizeOfImm(TSFlags) : 0;
-
+
EmitImmediate(Disp, 4, MCFixupKind(FixupKind),
CurByte, OS, Fixups, -ImmSize);
return;
}
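
A standalone sketch of the extra rip-relative bias just described, with a hypothetical 1-byte trailing immediate:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t FixupPos = 0x10;  // start of the disp32 field
  int ImmSize = 1;           // a 1-byte immediate follows the displacement
  uint64_t NextInstr = FixupPos + 4 + ImmSize; // what rip points at
  uint64_t Target = 0x40;
  // EmitImmediate applies the -4 pc-rel bias; the caller adds -ImmSize:
  int64_t Encoded = (int64_t)(Target - FixupPos) - 4 - ImmSize;
  // The CPU computes Target as rip (NextInstr) plus the encoded disp:
  assert((int64_t)NextInstr + Encoded == (int64_t)Target);
  return 0;
}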
-
+
unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;
-
+
// Determine whether a SIB byte is needed.
- // If no BaseReg, issue a RIP relative instruction only if the MCE can
+ // If no BaseReg, issue a RIP relative instruction only if the MCE can
// resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
// 2-7) and absolute references.
if (// The SIB byte must be used if there is an index register.
- IndexReg.getReg() == 0 &&
+ IndexReg.getReg() == 0 &&
// The SIB byte must be used if the base is ESP/RSP/R12, all of which
// encode to an R/M value of 4, which indicates that a SIB byte is
// present.
@@ -242,7 +276,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
EmitImmediate(Disp, 4, FK_Data_4, CurByte, OS, Fixups);
return;
}
-
+
// If the base is not EBP/ESP and there is no displacement, use simple
// indirect register encoding, this handles addresses like [EAX]. The
// encoding for [EBP] with no displacement means [disp32] so we handle it
@@ -251,24 +285,24 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
return;
}
-
+
// Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
if (Disp.isImm() && isDisp8(Disp.getImm())) {
EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
EmitImmediate(Disp, 1, FK_Data_1, CurByte, OS, Fixups);
return;
}
-
+
// Otherwise, emit the most general non-SIB encoding: [REG+disp32]
EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
EmitImmediate(Disp, 4, FK_Data_4, CurByte, OS, Fixups);
return;
}
-
+
// We need a SIB byte, so start by outputting the ModR/M byte first
assert(IndexReg.getReg() != X86::ESP &&
IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
-
+
bool ForceDisp32 = false;
bool ForceDisp8 = false;
if (BaseReg == 0) {
@@ -294,13 +328,13 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
// Emit the normal disp32 encoding.
EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
}
-
+
// Calculate what the SS field value should be...
static const unsigned SSTable[] = { ~0, 0, 1, ~0, 2, ~0, ~0, ~0, 3 };
unsigned SS = SSTable[Scale.getImm()];
-
+
if (BaseReg == 0) {
- // Handle the SIB byte for the case where there is no base, see Intel
+ // Handle the SIB byte for the case where there is no base, see Intel
// Manual 2A, table 2-7. The displacement has already been output.
unsigned IndexRegNo;
if (IndexReg.getReg())
@@ -316,7 +350,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
}
-
+
// Do we need to output a displacement?
if (ForceDisp8)
EmitImmediate(Disp, 1, FK_Data_1, CurByte, OS, Fixups);
@@ -324,26 +358,219 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
EmitImmediate(Disp, 4, FK_Data_4, CurByte, OS, Fixups);
}
+/// EmitVEXOpcodePrefix - AVX instructions are encoded using an opcode prefix
+/// called VEX.
+void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
+ int MemOperand, const MCInst &MI,
+ const TargetInstrDesc &Desc,
+ raw_ostream &OS) const {
+ bool HasVEX_4V = false;
+ if (TSFlags & X86II::VEX_4V)
+ HasVEX_4V = true;
+
+ // VEX_R: opcode extension equivalent to REX.R in
+ // 1's complement (inverted) form
+ //
+ // 1: Same as REX_R=0 (must be 1 in 32-bit mode)
+ // 0: Same as REX_R=1 (64 bit mode only)
+ //
+ unsigned char VEX_R = 0x1;
+
+ // VEX_X: equivalent to REX.X, only used when a
+ // register is used for index in SIB Byte.
+ //
+ // 1: Same as REX.X=0 (must be 1 in 32-bit mode)
+ // 0: Same as REX.X=1 (64-bit mode only)
+ unsigned char VEX_X = 0x1;
+
+ // VEX_B:
+ //
+ // 1: Same as REX_B=0 (ignored in 32-bit mode)
+ // 0: Same as REX_B=1 (64 bit mode only)
+ //
+ unsigned char VEX_B = 0x1;
+
+ // VEX_W: opcode specific (use like REX.W, or used for
+ // opcode extension, or ignored, depending on the opcode byte)
+ unsigned char VEX_W = 0;
+
+ // VEX_5M (VEX m-mmmmm field):
+ //
+ // 0b00000: Reserved for future use
+ // 0b00001: implied 0F leading opcode
+ // 0b00010: implied 0F 38 leading opcode bytes
+ // 0b00011: implied 0F 3A leading opcode bytes
+ // 0b00100-0b11111: Reserved for future use
+ //
+ unsigned char VEX_5M = 0x1;
+
+ // VEX_4V (VEX vvvv field): a register specifier
+ // (in 1's complement form) or 1111 if unused.
+ unsigned char VEX_4V = 0xf;
+
+ // VEX_L (Vector Length):
+ //
+ // 0: scalar or 128-bit vector
+ // 1: 256-bit vector
+ //
+ unsigned char VEX_L = 0;
+
+ // VEX_PP: opcode extension providing equivalent
+ // functionality of a SIMD prefix
+ //
+ // 0b00: None
+ // 0b01: 66
+ // 0b10: F3
+ // 0b11: F2
+ //
+ unsigned char VEX_PP = 0;
+
+ // Encode the operand size opcode prefix as needed.
+ if (TSFlags & X86II::OpSize)
+ VEX_PP = 0x01;
+
+ if (TSFlags & X86II::VEX_W)
+ VEX_W = 1;
+
+ if (TSFlags & X86II::VEX_L)
+ VEX_L = 1;
+
+ switch (TSFlags & X86II::Op0Mask) {
+ default: assert(0 && "Invalid prefix!");
+ case X86II::T8: // 0F 38
+ VEX_5M = 0x2;
+ break;
+ case X86II::TA: // 0F 3A
+ VEX_5M = 0x3;
+ break;
+ case X86II::TF: // F2 0F 38
+ VEX_PP = 0x3;
+ VEX_5M = 0x2;
+ break;
+ case X86II::XS: // F3 0F
+ VEX_PP = 0x2;
+ break;
+ case X86II::XD: // F2 0F
+ VEX_PP = 0x3;
+ break;
+ case X86II::TB: // Bypass: Not used by VEX
+ case 0:
+ break; // No prefix!
+ }
+
+ // Set the vector length to 256-bit if any of YMM0-YMM15 is used
+ for (unsigned i = 0; i != MI.getNumOperands(); ++i) {
+ if (!MI.getOperand(i).isReg())
+ continue;
+ unsigned SrcReg = MI.getOperand(i).getReg();
+ if (SrcReg >= X86::YMM0 && SrcReg <= X86::YMM15)
+ VEX_L = 1;
+ }
+
+ unsigned NumOps = MI.getNumOperands();
+ unsigned CurOp = 0;
+
+ switch (TSFlags & X86II::FormMask) {
+ case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!");
+ case X86II::MRM0m: case X86II::MRM1m:
+ case X86II::MRM2m: case X86II::MRM3m:
+ case X86II::MRM4m: case X86II::MRM5m:
+ case X86II::MRM6m: case X86II::MRM7m:
+ case X86II::MRMDestMem:
+ NumOps = CurOp = X86::AddrNumOperands;
+ case X86II::MRMSrcMem:
+ case X86II::MRMSrcReg:
+ if (MI.getNumOperands() > CurOp && MI.getOperand(CurOp).isReg() &&
+ X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
+ VEX_R = 0x0;
+
+ // CurOp and NumOps are equal when VEX_R represents a register used
+ // to index a memory destination (which is the last operand)
+ CurOp = (CurOp == NumOps) ? 0 : CurOp+1;
+
+ if (HasVEX_4V) {
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+ CurOp++;
+ }
+
+ // If the last register should be encoded in the immediate field,
+ // do not use any bits of the VEX prefix for it; ignore it here.
+ if (TSFlags & X86II::VEX_I8IMM)
+ NumOps--;
+
+ for (; CurOp != NumOps; ++CurOp) {
+ const MCOperand &MO = MI.getOperand(CurOp);
+ if (MO.isReg() && X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
+ VEX_B = 0x0;
+ if (!VEX_B && MO.isReg() &&
+ ((TSFlags & X86II::FormMask) == X86II::MRMSrcMem) &&
+ X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
+ VEX_X = 0x0;
+ }
+ break;
+ default: // MRMDestReg, MRM0r-MRM7r
+ if (MI.getOperand(CurOp).isReg() &&
+ X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
+ VEX_B = 0;
+
+ if (HasVEX_4V)
+ VEX_4V = getVEXRegisterEncoding(MI, CurOp);
+
+ CurOp++;
+ for (; CurOp != NumOps; ++CurOp) {
+ const MCOperand &MO = MI.getOperand(CurOp);
+ if (MO.isReg() && !HasVEX_4V &&
+ X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
+ VEX_R = 0x0;
+ }
+ break;
+ assert(0 && "Not implemented!");
+ }
+
+ // Emit segment override opcode prefix as needed.
+ EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);
+
+ // VEX opcode prefix can have 2 or 3 bytes
+ //
+ // 3 bytes:
+ // +-----+ +--------------+ +-------------------+
+ // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
+ // +-----+ +--------------+ +-------------------+
+ // 2 bytes:
+ // +-----+ +-------------------+
+ // | C5h | | R | vvvv | L | pp |
+ // +-----+ +-------------------+
+ //
+ unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
+
+ if (VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) { // 2 byte VEX prefix
+ EmitByte(0xC5, CurByte, OS);
+ EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
+ return;
+ }
+
+ // 3 byte VEX prefix
+ EmitByte(0xC4, CurByte, OS);
+ EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS);
+ EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
+}
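
To make the byte layout drawn above concrete, here is a standalone sketch assembling the short VEX form from field values that satisfy the 2-byte conditions (the values are illustrative, not taken from a real instruction):

#include <cassert>

int main() {
  unsigned char VEX_R = 1, VEX_X = 1, VEX_B = 1, VEX_W = 0;
  unsigned char VEX_5M = 1;              // implied 0F leading opcode
  unsigned char VEX_4V = 0xf, VEX_L = 0; // vvvv unused, 128-bit vector
  unsigned char VEX_PP = 1;              // equivalent of a 66 prefix

  unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
  // X, B, W, and m-mmmmm all take their defaults, so 2 bytes suffice:
  assert(VEX_B && VEX_X && !VEX_W && VEX_5M == 1);
  unsigned char Byte0 = 0xC5;
  unsigned char Byte1 = LastByte | (VEX_R << 7); // R | vvvv | L | pp
  assert(Byte0 == 0xC5 && Byte1 == 0xF9);
  return 0;
}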
+
/// DetermineREXPrefix - Determine if the MCInst has to be encoded with a X86-64
/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
/// size, and 3) use of X86-64 extended registers.
-static unsigned DetermineREXPrefix(const MCInst &MI, unsigned TSFlags,
+static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
const TargetInstrDesc &Desc) {
- // Pseudo instructions never have a rex byte.
- if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
- return 0;
-
unsigned REX = 0;
if (TSFlags & X86II::REX_W)
- REX |= 1 << 3;
-
+ REX |= 1 << 3; // set REX.W
+
if (MI.getNumOperands() == 0) return REX;
-
+
unsigned NumOps = MI.getNumOperands();
// FIXME: MCInst should explicitize the two-addrness.
bool isTwoAddr = NumOps > 1 &&
Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;
-
+
// If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
unsigned i = isTwoAddr ? 1 : 0;
for (; i != NumOps; ++i) {
@@ -353,34 +580,34 @@ static unsigned DetermineREXPrefix(const MCInst &MI, unsigned TSFlags,
if (!X86InstrInfo::isX86_64NonExtLowByteReg(Reg)) continue;
// FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
// that returns non-zero.
- REX |= 0x40;
+ REX |= 0x40; // REX fixed encoding prefix
break;
}
-
+
switch (TSFlags & X86II::FormMask) {
case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!");
case X86II::MRMSrcReg:
if (MI.getOperand(0).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 2;
+ REX |= 1 << 2; // set REX.R
i = isTwoAddr ? 2 : 1;
for (; i != NumOps; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg() && X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << 0;
+ REX |= 1 << 0; // set REX.B
}
break;
case X86II::MRMSrcMem: {
if (MI.getOperand(0).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 2;
+ REX |= 1 << 2; // set REX.R
unsigned Bit = 0;
i = isTwoAddr ? 2 : 1;
for (; i != NumOps; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg()) {
if (X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << Bit;
+ REX |= 1 << Bit; // set REX.B (Bit=0) and REX.X (Bit=1)
Bit++;
}
}
@@ -391,17 +618,17 @@ static unsigned DetermineREXPrefix(const MCInst &MI, unsigned TSFlags,
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m:
case X86II::MRMDestMem: {
- unsigned e = (isTwoAddr ? X86AddrNumOperands+1 : X86AddrNumOperands);
+ unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
i = isTwoAddr ? 1 : 0;
if (NumOps > e && MI.getOperand(e).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
- REX |= 1 << 2;
+ REX |= 1 << 2; // set REX.R
unsigned Bit = 0;
for (; i != e; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg()) {
if (X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << Bit;
+ REX |= 1 << Bit; // REX.B (Bit=0) and REX.X (Bit=1)
Bit++;
}
}
@@ -410,39 +637,40 @@ static unsigned DetermineREXPrefix(const MCInst &MI, unsigned TSFlags,
default:
if (MI.getOperand(0).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 0;
+ REX |= 1 << 0; // set REX.B
i = isTwoAddr ? 2 : 1;
for (unsigned e = NumOps; i != e; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg() && X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << 2;
+ REX |= 1 << 2; // set REX.R
}
break;
}
return REX;
}
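
For reference, a standalone sketch of the REX bit positions set above (REX is 0100WRXB):

#include <cassert>

int main() {
  unsigned REX = 0;
  REX |= 1 << 3;                // REX.W: 64-bit operand size
  REX |= 1 << 2;                // REX.R: ModRM.reg names an extended register
  REX |= 1 << 0;                // REX.B: ModRM.r/m (or base) is extended
  assert((0x40 | REX) == 0x4D); // the prefix byte the emitter would write
  return 0;
}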
-void X86MCCodeEmitter::
-EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
- unsigned Opcode = MI.getOpcode();
- const TargetInstrDesc &Desc = TII.get(Opcode);
- unsigned TSFlags = Desc.TSFlags;
-
- // Keep track of the current byte being emitted.
- unsigned CurByte = 0;
-
- // FIXME: We should emit the prefixes in exactly the same order as GAS does,
- // in order to provide diffability.
-
- // Emit the lock opcode prefix as needed.
- if (TSFlags & X86II::LOCK)
- EmitByte(0xF0, CurByte, OS);
-
- // Emit segment override opcode prefix as needed.
+/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
+void X86MCCodeEmitter::EmitSegmentOverridePrefix(uint64_t TSFlags,
+ unsigned &CurByte, int MemOperand,
+ const MCInst &MI,
+ raw_ostream &OS) const {
switch (TSFlags & X86II::SegOvrMask) {
default: assert(0 && "Invalid segment!");
- case 0: break; // No segment override!
+ case 0:
+ // No segment override, check for explicit one on memory operand.
+ if (MemOperand != -1) { // If the instruction has a memory operand.
+ switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
+ default: assert(0 && "Unknown segment register!");
+ case 0: break;
+ case X86::CS: EmitByte(0x2E, CurByte, OS); break;
+ case X86::SS: EmitByte(0x36, CurByte, OS); break;
+ case X86::DS: EmitByte(0x3E, CurByte, OS); break;
+ case X86::ES: EmitByte(0x26, CurByte, OS); break;
+ case X86::FS: EmitByte(0x64, CurByte, OS); break;
+ case X86::GS: EmitByte(0x65, CurByte, OS); break;
+ }
+ }
+ break;
case X86II::FS:
EmitByte(0x64, CurByte, OS);
break;
@@ -450,19 +678,36 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(0x65, CurByte, OS);
break;
}
-
+}
+
+/// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
+///
+/// MemOperand is the operand # of the start of a memory operand, if present.
+/// If not present, it is -1.
+void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
+ int MemOperand, const MCInst &MI,
+ const TargetInstrDesc &Desc,
+ raw_ostream &OS) const {
+
+ // Emit the lock opcode prefix as needed.
+ if (TSFlags & X86II::LOCK)
+ EmitByte(0xF0, CurByte, OS);
+
+ // Emit segment override opcode prefix as needed.
+ EmitSegmentOverridePrefix(TSFlags, CurByte, MemOperand, MI, OS);
+
// Emit the repeat opcode prefix as needed.
if ((TSFlags & X86II::Op0Mask) == X86II::REP)
EmitByte(0xF3, CurByte, OS);
-
+
// Emit the operand size opcode prefix as needed.
if (TSFlags & X86II::OpSize)
EmitByte(0x66, CurByte, OS);
-
+
// Emit the address size opcode prefix as needed.
if (TSFlags & X86II::AdSize)
EmitByte(0x67, CurByte, OS);
-
+
bool Need0FPrefix = false;
switch (TSFlags & X86II::Op0Mask) {
default: assert(0 && "Invalid prefix!");
@@ -494,18 +739,18 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::DE: EmitByte(0xDE, CurByte, OS); break;
case X86II::DF: EmitByte(0xDF, CurByte, OS); break;
}
-
+
// Handle REX prefix.
// FIXME: Can this come before F2 etc to simplify emission?
if (Is64BitMode) {
if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
EmitByte(0x40 | REX, CurByte, OS);
}
-
+
// 0x0F escape code must be emitted just before the opcode.
if (Need0FPrefix)
EmitByte(0x0F, CurByte, OS);
-
+
// FIXME: Pull this up into previous switch if REX can be moved earlier.
switch (TSFlags & X86II::Op0Mask) {
case X86II::TF: // F2 0F 38
@@ -516,8 +761,21 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(0x3A, CurByte, OS);
break;
}
-
+}
+
+void X86MCCodeEmitter::
+EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ unsigned Opcode = MI.getOpcode();
+ const TargetInstrDesc &Desc = TII.get(Opcode);
+ uint64_t TSFlags = Desc.TSFlags;
+
+ // Pseudo instructions don't get encoded.
+ if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
+ return;
+
// If this is a two-address instruction, skip one of the register operands.
+ // FIXME: This should be handled during MCInst lowering.
unsigned NumOps = Desc.getNumOperands();
unsigned CurOp = 0;
if (NumOps > 1 && Desc.getOperandConstraint(1, TOI::TIED_TO) != -1)
@@ -525,56 +783,85 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
else if (NumOps > 2 && Desc.getOperandConstraint(NumOps-1, TOI::TIED_TO)== 0)
// Skip the last source operand that is tied_to the dest reg. e.g. LXADD32
--NumOps;
-
+
+ // Keep track of the current byte being emitted.
+ unsigned CurByte = 0;
+
+ // Is this instruction encoded using the AVX VEX prefix?
+ bool HasVEXPrefix = false;
+
+ // Does it use the VEX.VVVV field?
+ bool HasVEX_4V = false;
+
+ if (TSFlags & X86II::VEX)
+ HasVEXPrefix = true;
+ if (TSFlags & X86II::VEX_4V)
+ HasVEX_4V = true;
+
+ // Determine where the memory operand starts, if present.
+ int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
+ if (MemoryOperand != -1) MemoryOperand += CurOp;
+
+ if (!HasVEXPrefix)
+ EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
+ else
+ EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
+
unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
+ unsigned SrcRegNum = 0;
switch (TSFlags & X86II::FormMask) {
case X86II::MRMInitReg:
assert(0 && "FIXME: Remove this form when the JIT moves to MCCodeEmitter!");
default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
assert(0 && "Unknown FormMask value in X86MCCodeEmitter!");
- case X86II::Pseudo: return; // Pseudo instructions encode to nothing.
+ case X86II::Pseudo:
+ assert(0 && "Pseudo instruction shouldn't be emitted");
case X86II::RawFrm:
EmitByte(BaseOpcode, CurByte, OS);
break;
-
+
case X86II::AddRegFrm:
EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
break;
-
+
case X86II::MRMDestReg:
EmitByte(BaseOpcode, CurByte, OS);
EmitRegModRMByte(MI.getOperand(CurOp),
GetX86RegNum(MI.getOperand(CurOp+1)), CurByte, OS);
CurOp += 2;
break;
-
+
case X86II::MRMDestMem:
EmitByte(BaseOpcode, CurByte, OS);
EmitMemModRMByte(MI, CurOp,
- GetX86RegNum(MI.getOperand(CurOp + X86AddrNumOperands)),
+ GetX86RegNum(MI.getOperand(CurOp + X86::AddrNumOperands)),
TSFlags, CurByte, OS, Fixups);
- CurOp += X86AddrNumOperands + 1;
+ CurOp += X86::AddrNumOperands + 1;
break;
-
+
case X86II::MRMSrcReg:
EmitByte(BaseOpcode, CurByte, OS);
- EmitRegModRMByte(MI.getOperand(CurOp+1), GetX86RegNum(MI.getOperand(CurOp)),
- CurByte, OS);
- CurOp += 2;
+ SrcRegNum = CurOp + 1;
+
+ if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
+ SrcRegNum++;
+
+ EmitRegModRMByte(MI.getOperand(SrcRegNum),
+ GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
+ CurOp = SrcRegNum + 1;
break;
-
+
case X86II::MRMSrcMem: {
+ int AddrOperands = X86::AddrNumOperands;
+ unsigned FirstMemOp = CurOp+1;
+ if (HasVEX_4V) {
+ ++AddrOperands;
+ ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
+ }
+
EmitByte(BaseOpcode, CurByte, OS);
- // FIXME: Maybe lea should have its own form? This is a horrible hack.
- int AddrOperands;
- if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
- Opcode == X86::LEA16r || Opcode == X86::LEA32r)
- AddrOperands = X86AddrNumOperands - 1; // No segment register
- else
- AddrOperands = X86AddrNumOperands;
-
- EmitMemModRMByte(MI, CurOp+1, GetX86RegNum(MI.getOperand(CurOp)),
+ EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
TSFlags, CurByte, OS, Fixups);
CurOp += AddrOperands + 1;
break;
@@ -584,6 +871,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::MRM2r: case X86II::MRM3r:
case X86II::MRM4r: case X86II::MRM5r:
case X86II::MRM6r: case X86II::MRM7r:
+ if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
+ CurOp++;
EmitByte(BaseOpcode, CurByte, OS);
EmitRegModRMByte(MI.getOperand(CurOp++),
(TSFlags & X86II::FormMask)-X86II::MRM0r,
@@ -596,7 +885,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(BaseOpcode, CurByte, OS);
EmitMemModRMByte(MI, CurOp, (TSFlags & X86II::FormMask)-X86II::MRM0m,
TSFlags, CurByte, OS, Fixups);
- CurOp += X86AddrNumOperands;
+ CurOp += X86::AddrNumOperands;
break;
case X86II::MRM_C1:
EmitByte(BaseOpcode, CurByte, OS);
@@ -639,14 +928,27 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(0xF9, CurByte, OS);
break;
}
-
+
// If there is a remaining operand, it must be a trailing immediate. Emit it
// according to the right size for the instruction.
- if (CurOp != NumOps)
- EmitImmediate(MI.getOperand(CurOp++),
- X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
- CurByte, OS, Fixups);
-
+ if (CurOp != NumOps) {
+ // The last source register of a 4-operand instruction in AVX is encoded
+ // in bits[7:4] of an immediate byte, and bits[3:0] are ignored.
+ if (TSFlags & X86II::VEX_I8IMM) {
+ const MCOperand &MO = MI.getOperand(CurOp++);
+ bool IsExtReg =
+ X86InstrInfo::isX86_64ExtendedReg(MO.getReg());
+ unsigned RegNum = (IsExtReg ? (1 << 7) : 0);
+ RegNum |= GetX86RegNum(MO) << 4;
+ EmitImmediate(MCOperand::CreateImm(RegNum), 1, FK_Data_1, CurByte, OS,
+ Fixups);
+ } else
+ EmitImmediate(MI.getOperand(CurOp++),
+ X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
+ CurByte, OS, Fixups);
+ }
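
A standalone sketch of the VEX_I8IMM encoding just emitted: the 4th register operand rides in the top nibble of the trailing immediate byte (the register number is illustrative):

#include <cassert>

int main() {
  unsigned RegNum = 9;              // e.g. XMM9
  bool IsExtReg = RegNum >= 8;      // needs the "extended register" bit
  unsigned LowThree = RegNum & 0x7; // base encoding, 0-7
  unsigned char Imm = (IsExtReg ? (1u << 7) : 0u) | (LowThree << 4);
  assert(Imm == 0x90);              // 0b1001'0000; bits[3:0] are ignored
  return 0;
}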
+
+
#ifndef NDEBUG
// FIXME: Verify.
if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp b/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
index 98975ea..5f31e00 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -127,21 +127,29 @@ unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
return RegNo-X86::ST0;
- case X86::XMM0: case X86::XMM8: case X86::MM0:
+ case X86::XMM0: case X86::XMM8:
+ case X86::YMM0: case X86::YMM8: case X86::MM0:
return 0;
- case X86::XMM1: case X86::XMM9: case X86::MM1:
+ case X86::XMM1: case X86::XMM9:
+ case X86::YMM1: case X86::YMM9: case X86::MM1:
return 1;
- case X86::XMM2: case X86::XMM10: case X86::MM2:
+ case X86::XMM2: case X86::XMM10:
+ case X86::YMM2: case X86::YMM10: case X86::MM2:
return 2;
- case X86::XMM3: case X86::XMM11: case X86::MM3:
+ case X86::XMM3: case X86::XMM11:
+ case X86::YMM3: case X86::YMM11: case X86::MM3:
return 3;
- case X86::XMM4: case X86::XMM12: case X86::MM4:
+ case X86::XMM4: case X86::XMM12:
+ case X86::YMM4: case X86::YMM12: case X86::MM4:
return 4;
- case X86::XMM5: case X86::XMM13: case X86::MM5:
+ case X86::XMM5: case X86::XMM13:
+ case X86::YMM5: case X86::YMM13: case X86::MM5:
return 5;
- case X86::XMM6: case X86::XMM14: case X86::MM6:
+ case X86::XMM6: case X86::XMM14:
+ case X86::YMM6: case X86::YMM14: case X86::MM6:
return 6;
- case X86::XMM7: case X86::XMM15: case X86::MM7:
+ case X86::XMM7: case X86::XMM15:
+ case X86::YMM7: case X86::YMM15: case X86::MM7:
return 7;
case X86::ES:
@@ -157,6 +165,34 @@ unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
case X86::GS:
return 5;
+ case X86::CR0:
+ return 0;
+ case X86::CR1:
+ return 1;
+ case X86::CR2:
+ return 2;
+ case X86::CR3:
+ return 3;
+ case X86::CR4:
+ return 4;
+
+ case X86::DR0:
+ return 0;
+ case X86::DR1:
+ return 1;
+ case X86::DR2:
+ return 2;
+ case X86::DR3:
+ return 3;
+ case X86::DR4:
+ return 4;
+ case X86::DR5:
+ return 5;
+ case X86::DR6:
+ return 6;
+ case X86::DR7:
+ return 7;
+
default:
assert(isVirtualRegister(RegNo) && "Unknown physical register!");
llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
@@ -357,56 +393,6 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
}
}
-const TargetRegisterClass* const*
-X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- bool callsEHReturn = false;
- if (MF)
- callsEHReturn = MF->getMMI().callsEHReturn();
-
- static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
- &X86::GR32RegClass, &X86::GR32RegClass,
- &X86::GR32RegClass, &X86::GR32RegClass, 0
- };
- static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
- &X86::GR32RegClass, &X86::GR32RegClass,
- &X86::GR32RegClass, &X86::GR32RegClass,
- &X86::GR32RegClass, &X86::GR32RegClass, 0
- };
- static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass, 0
- };
- static const TargetRegisterClass * const CalleeSavedRegClasses64EHRet[] = {
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass, 0
- };
- static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::GR64RegClass, &X86::GR64RegClass,
- &X86::VR128RegClass, &X86::VR128RegClass,
- &X86::VR128RegClass, &X86::VR128RegClass,
- &X86::VR128RegClass, &X86::VR128RegClass,
- &X86::VR128RegClass, &X86::VR128RegClass,
- &X86::VR128RegClass, &X86::VR128RegClass, 0
- };
-
- if (Is64Bit) {
- if (IsWin64)
- return CalleeSavedRegClassesWin64;
- else
- return (callsEHReturn ?
- CalleeSavedRegClasses64EHRet : CalleeSavedRegClasses64Bit);
- } else {
- return (callsEHReturn ?
- CalleeSavedRegClasses32EHRet : CalleeSavedRegClasses32Bit);
- }
-}
-
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
// Set the stack-pointer register and its aliases as reserved.
@@ -696,8 +682,7 @@ X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// }
// [EBP]
MFI->CreateFixedObject(-TailCallReturnAddrDelta,
- (-1U*SlotSize)+TailCallReturnAddrDelta,
- true, false);
+ (-1U*SlotSize)+TailCallReturnAddrDelta, true);
}
if (hasFP(MF)) {
@@ -710,7 +695,7 @@ X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
-(int)SlotSize +
TFI.getOffsetOfLocalArea() +
TailCallReturnAddrDelta,
- true, false);
+ true);
assert(FrameIdx == MFI->getObjectIndexBegin() &&
"Slot for EBP register must be last in order to be found!");
FrameIdx = 0;
@@ -1240,8 +1225,8 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
if (CSSize) {
unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
MachineInstr *MI =
- addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
- FramePtr, false, -CSSize);
+ addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
+ FramePtr, false, -CSSize);
MBB.insert(MBBI, MI);
} else {
BuildMI(MBB, MBBI, DL,
@@ -1301,9 +1286,11 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
for (unsigned i = 0; i != 5; ++i)
MIB.addOperand(MBBI->getOperand(i));
} else if (RetOpcode == X86::TCRETURNri64) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
} else {
- BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
}
MachineInstr *NewMI = prior(MBBI);
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
index d0b82e2..d852bcd 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.h
@@ -105,12 +105,6 @@ public:
/// callee-save registers on this target.
const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
- /// getCalleeSavedRegClasses - Return a null-terminated list of the preferred
- /// register classes to spill each callee-saved register with. The order and
- /// length of this list match the getCalleeSavedRegs() list.
- const TargetRegisterClass* const*
- getCalleeSavedRegClasses(const MachineFunction *MF = 0) const;
-
/// getReservedRegs - Returns a bitset indexed by physical register number
/// indicating if a register is a special register that has particular uses and
/// should be considered unavailable at all times, e.g. SP, RA. This is used by
diff --git a/contrib/llvm/lib/Target/X86/X86RegisterInfo.td b/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
index 91cfaa9..9f0382e 100644
--- a/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
+++ b/contrib/llvm/lib/Target/X86/X86RegisterInfo.td
@@ -147,7 +147,7 @@ let Namespace = "X86" in {
def MM5 : Register<"mm5">, DwarfRegNum<[46, 34, 34]>;
def MM6 : Register<"mm6">, DwarfRegNum<[47, 35, 35]>;
def MM7 : Register<"mm7">, DwarfRegNum<[48, 36, 36]>;
-
+
// Pseudo Floating Point registers
def FP0 : Register<"fp0">;
def FP1 : Register<"fp1">;
@@ -155,7 +155,7 @@ let Namespace = "X86" in {
def FP3 : Register<"fp3">;
def FP4 : Register<"fp4">;
def FP5 : Register<"fp5">;
- def FP6 : Register<"fp6">;
+ def FP6 : Register<"fp6">;
// XMM Registers, used by the various SSE instruction set extensions.
// The sub_ss and sub_sd subregs are the same registers with another regclass.
@@ -357,7 +357,7 @@ def GR16 : RegisterClass<"X86", [i16], 16,
}];
}
-def GR32 : RegisterClass<"X86", [i32], 32,
+def GR32 : RegisterClass<"X86", [i32], 32,
[EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP,
R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D]> {
let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi), (GR16 sub_16bit)];
@@ -412,7 +412,7 @@ def GR32 : RegisterClass<"X86", [i32], 32,
// GR64 - 64-bit GPRs. This oddly includes RIP, which isn't accurate, since
// RIP isn't really a register and it can't be used anywhere except in an
// address, but it doesn't cause trouble.
-def GR64 : RegisterClass<"X86", [i64], 64,
+def GR64 : RegisterClass<"X86", [i64], 64,
[RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
RBX, R14, R15, R12, R13, RBP, RSP, RIP]> {
let SubRegClasses = [(GR8 sub_8bit, sub_8bit_hi),
@@ -446,7 +446,7 @@ def SEGMENT_REG : RegisterClass<"X86", [i16], 16, [CS, DS, SS, ES, FS, GS]> {
}
// Debug registers.
-def DEBUG_REG : RegisterClass<"X86", [i32], 32,
+def DEBUG_REG : RegisterClass<"X86", [i32], 32,
[DR0, DR1, DR2, DR3, DR4, DR5, DR6, DR7]> {
}
@@ -780,14 +780,14 @@ def RST : RegisterClass<"X86", [f80, f64, f32], 32,
}
// Generic vector registers: VR64 and VR128.
-def VR64 : RegisterClass<"X86", [v8i8, v4i16, v2i32, v1i64, v2f32], 64,
+def VR64 : RegisterClass<"X86", [v8i8, v4i16, v2i32, v1i64], 64,
[MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7]>;
def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],128,
[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11,
XMM12, XMM13, XMM14, XMM15]> {
let SubRegClasses = [(FR32 sub_ss), (FR64 sub_sd)];
-
+
let MethodProtos = [{
iterator allocation_order_end(const MachineFunction &MF) const;
}];
@@ -803,11 +803,27 @@ def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],128,
}
}];
}
-def VR256 : RegisterClass<"X86", [ v8i32, v4i64, v8f32, v4f64],256,
+
+def VR256 : RegisterClass<"X86", [v8i32, v4i64, v8f32, v4f64], 256,
[YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
YMM8, YMM9, YMM10, YMM11,
YMM12, YMM13, YMM14, YMM15]> {
let SubRegClasses = [(FR32 sub_ss), (FR64 sub_sd), (VR128 sub_xmm)];
+
+ let MethodProtos = [{
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ VR256Class::iterator
+ VR256Class::allocation_order_end(const MachineFunction &MF) const {
+ const TargetMachine &TM = MF.getTarget();
+ const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+ if (!Subtarget.is64Bit())
+ return end()-8; // Only YMM0 to YMM7 are available in 32-bit mode.
+ else
+ return end();
+ }
+ }];
}
// Status flags registers.
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
index 09a2685..4a10be5 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -53,9 +53,12 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
if (GV->hasDLLImportLinkage())
return X86II::MO_DLLIMPORT;
- // Materializable GVs (in JIT lazy compilation mode) do not require an
- // extra load from stub.
- bool isDecl = GV->isDeclaration() && !GV->isMaterializable();
+ // Determine whether this is a reference to a definition or a declaration.
+ // Materializable GVs (in JIT lazy compilation mode) do not require an extra
+ // load from stub.
+ bool isDecl = GV->hasAvailableExternallyLinkage();
+ if (GV->isDeclaration() && !GV->isMaterializable())
+ isDecl = true;
// X86-64 in PIC mode.
if (isPICStyleRIPRel()) {
@@ -293,12 +296,11 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS,
, IsBTMemSlow(false)
, IsUAMemFast(false)
, HasVectorUAMem(false)
- , DarwinVers(0)
, stackAlignment(8)
// FIXME: this is a known good value for Yonah. How about others?
, MaxInlineSizeThreshold(128)
- , Is64Bit(is64Bit)
- , TargetType(isELF) { // Default to ELF unless otherwise specified.
+ , TargetTriple(TT)
+ , Is64Bit(is64Bit) {
// default to hard float ABI
if (FloatABIType == FloatABI::Default)
@@ -328,47 +330,40 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS,
HasCMov = true;
}
-
DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
<< ", 3DNowLevel " << X863DNowLevel
<< ", 64bit " << HasX86_64 << "\n");
assert((!Is64Bit || HasX86_64) &&
"64-bit code requested on a subtarget that doesn't support it!");
- // Set the boolean corresponding to the current target triple, or the default
- // if one cannot be determined, to true.
- if (TT.length() > 5) {
- size_t Pos;
- if ((Pos = TT.find("-darwin")) != std::string::npos) {
- TargetType = isDarwin;
-
- // Compute the darwin version number.
- if (isdigit(TT[Pos+7]))
- DarwinVers = atoi(&TT[Pos+7]);
- else
- DarwinVers = 8; // Minimum supported darwin is Tiger.
- } else if (TT.find("linux") != std::string::npos) {
- // Linux doesn't imply ELF, but we don't currently support anything else.
- TargetType = isELF;
- } else if (TT.find("cygwin") != std::string::npos) {
- TargetType = isCygwin;
- } else if (TT.find("mingw") != std::string::npos) {
- TargetType = isMingw;
- } else if (TT.find("win32") != std::string::npos) {
- TargetType = isWindows;
- } else if (TT.find("windows") != std::string::npos) {
- TargetType = isWindows;
- } else if (TT.find("-cl") != std::string::npos) {
- TargetType = isDarwin;
- DarwinVers = 9;
- }
- }
-
// Stack alignment is 16 bytes on Darwin (both 32 and 64 bit) and for all 64
// bit targets.
- if (TargetType == isDarwin || Is64Bit)
+ if (isTargetDarwin() || Is64Bit)
stackAlignment = 16;
if (StackAlignment)
stackAlignment = StackAlignment;
}
+
+/// IsCalleePop - Determines whether the callee is required to pop its
+/// own arguments. Callee pop is necessary to support tail calls.
+bool X86Subtarget::IsCalleePop(bool IsVarArg,
+ CallingConv::ID CallingConv) const {
+ if (IsVarArg)
+ return false;
+
+ switch (CallingConv) {
+ default:
+ return false;
+ case CallingConv::X86_StdCall:
+ return !is64Bit();
+ case CallingConv::X86_FastCall:
+ return !is64Bit();
+ case CallingConv::X86_ThisCall:
+ return !is64Bit();
+ case CallingConv::Fast:
+ return GuaranteedTailCallOpt;
+ case CallingConv::GHC:
+ return GuaranteedTailCallOpt;
+ }
+}
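
Viewed as a plain decision table, the callee-pop rules above reduce to the following standalone sketch (the enum and flag are stand-ins for the real LLVM types):

#include <cassert>

enum CC { StdCall, FastCall, ThisCall, Fast, GHC, CDecl };

static bool isCalleePop(bool IsVarArg, CC Conv, bool Is64Bit,
                        bool GuaranteedTailCallOpt) {
  if (IsVarArg)
    return false; // variadic calls are always caller-pop
  switch (Conv) {
  case StdCall:
  case FastCall:
  case ThisCall: return !Is64Bit; // 32-bit-only callee-pop conventions
  case Fast:
  case GHC:      return GuaranteedTailCallOpt;
  default:       return false;
  }
}

int main() {
  assert(isCalleePop(false, StdCall, false, false)); // stdcall, 32-bit
  assert(!isCalleePop(false, StdCall, true, false)); // not in 64-bit mode
  assert(!isCalleePop(true, StdCall, false, false)); // varargs never
  return 0;
}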
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.h b/contrib/llvm/lib/Target/X86/X86Subtarget.h
index 646af91..486dbc4 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.h
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.h
@@ -14,7 +14,9 @@
#ifndef X86SUBTARGET_H
#define X86SUBTARGET_H
+#include "llvm/ADT/Triple.h"
#include "llvm/Target/TargetSubtarget.h"
+#include "llvm/CallingConv.h"
#include <string>
namespace llvm {
@@ -88,10 +90,6 @@ protected:
/// operands. This may require setting a feature bit in the processor.
bool HasVectorUAMem;
- /// DarwinVers - Nonzero if this is a darwin platform: the numeric
- /// version of the platform, e.g. 8 = 10.4 (Tiger), 9 = 10.5 (Leopard), etc.
- unsigned char DarwinVers; // Is any darwin-x86 platform.
-
/// stackAlignment - The minimum alignment known to hold of the stack frame on
/// entry to the function and which must be maintained by every function.
unsigned stackAlignment;
@@ -99,6 +97,9 @@ protected:
/// Max. memset / memcpy size that is turned into rep/movs, rep/stos ops.
///
unsigned MaxInlineSizeThreshold;
+
+ /// TargetTriple - What processor and OS we're targeting.
+ Triple TargetTriple;
private:
/// Is64Bit - True if the processor supports 64-bit instructions and
@@ -106,9 +107,6 @@ private:
bool Is64Bit;
public:
- enum {
- isELF, isCygwin, isDarwin, isWindows, isMingw
- } TargetType;
/// This constructor initializes the data members to match that
/// of the specified triple.
@@ -157,24 +155,31 @@ public:
bool isUnalignedMemAccessFast() const { return IsUAMemFast; }
bool hasVectorUAMem() const { return HasVectorUAMem; }
- bool isTargetDarwin() const { return TargetType == isDarwin; }
- bool isTargetELF() const { return TargetType == isELF; }
+ bool isTargetDarwin() const { return TargetTriple.getOS() == Triple::Darwin; }
+
+ // ELF is a reasonably sane default and the only other X86 targets we
+ // support are Darwin and Windows. Just use "not those".
+ bool isTargetELF() const {
+ return !isTargetDarwin() && !isTargetWindows() && !isTargetCygMing();
+ }
+ bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; }
- bool isTargetWindows() const { return TargetType == isWindows; }
- bool isTargetMingw() const { return TargetType == isMingw; }
- bool isTargetCygwin() const { return TargetType == isCygwin; }
+ bool isTargetWindows() const { return TargetTriple.getOS() == Triple::Win32; }
+ bool isTargetMingw() const {
+ return TargetTriple.getOS() == Triple::MinGW32 ||
+ TargetTriple.getOS() == Triple::MinGW64; }
+ bool isTargetCygwin() const { return TargetTriple.getOS() == Triple::Cygwin; }
bool isTargetCygMing() const {
- return TargetType == isMingw || TargetType == isCygwin;
+ return isTargetMingw() || isTargetCygwin();
}
-
+
/// isTargetCOFF - Return true if this is any COFF/Windows target variant.
bool isTargetCOFF() const {
- return TargetType == isMingw || TargetType == isCygwin ||
- TargetType == isWindows;
+ return isTargetMingw() || isTargetCygwin() || isTargetWindows();
}
bool isTargetWin64() const {
- return Is64Bit && (TargetType == isMingw || TargetType == isWindows);
+ return Is64Bit && (isTargetMingw() || isTargetWindows());
}
std::string getDataLayout() const {
@@ -208,7 +213,10 @@ public:
/// getDarwinVers - Return the darwin version number, 8 = Tiger, 9 = Leopard,
/// 10 = Snow Leopard, etc.
- unsigned getDarwinVers() const { return DarwinVers; }
+ unsigned getDarwinVers() const {
+ if (isTargetDarwin()) return TargetTriple.getDarwinMajorNumber();
+ return 0;
+ }
/// ClassifyGlobalReference - Classify a global variable reference for the
/// current subtarget according to how we should reference it in a non-pcrel
@@ -237,6 +245,9 @@ public:
/// indicating the number of scheduling cycles of backscheduling that
/// should be attempted.
unsigned getSpecialAddressLatency() const;
+
+ /// IsCalleePop - Test whether a function should pop its own arguments.
+ bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
};
} // End llvm namespace
diff --git a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
index f2c5058..df00d3f 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -173,14 +173,18 @@ bool X86TargetMachine::addInstSelector(PassManagerBase &PM,
// Install an instruction selector.
PM.add(createX86ISelDag(*this, OptLevel));
- // Install a pass to insert x87 FP_REG_KILL instructions, as needed.
- PM.add(createX87FPRegKillInserterPass());
+ // For 32-bit, prepend instructions to set the "global base reg" for PIC.
+ if (!Subtarget.is64Bit())
+ PM.add(createGlobalBaseRegPass());
return false;
}
bool X86TargetMachine::addPreRegAlloc(PassManagerBase &PM,
CodeGenOpt::Level OptLevel) {
+ // Install a pass to insert x87 FP_REG_KILL instructions, as needed.
+ PM.add(createX87FPRegKillInserterPass());
+
PM.add(createX86MaxStackAlignmentHeuristicPass());
return false; // -print-machineinstr shouldn't print after this.
}
diff --git a/contrib/llvm/lib/Target/XCore/AsmPrinter/XCoreAsmPrinter.cpp b/contrib/llvm/lib/Target/XCore/AsmPrinter/XCoreAsmPrinter.cpp
index c100c59..6656bdc 100644
--- a/contrib/llvm/lib/Target/XCore/AsmPrinter/XCoreAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/XCore/AsmPrinter/XCoreAsmPrinter.cpp
@@ -138,7 +138,6 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
// FALL THROUGH
case GlobalValue::InternalLinkage:
case GlobalValue::PrivateLinkage:
- case GlobalValue::LinkerPrivateLinkage:
break;
case GlobalValue::DLLImportLinkage:
llvm_unreachable("DLLImport linkage is not supported by this target!");
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index b230572..abe7b2f 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -245,7 +245,7 @@ SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
+ SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), MVT::i32);
// If it's a debug information descriptor, don't mess with it.
if (DAG.isVerifiedDebugInfoDesc(Op))
return GA;
@@ -269,7 +269,7 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
DebugLoc dl = Op.getDebugLoc();
// transform to label + getid() * size
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
- SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
+ SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32);
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
if (!GVar) {
// If GV is an alias then use the aliasee to determine size
@@ -454,12 +454,12 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const
if (LD->getAlignment() == 2) {
int SVOffset = LD->getSrcValueOffset();
- SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,
+ SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, MVT::i32, dl, Chain,
BasePtr, LD->getSrcValue(), SVOffset, MVT::i16,
LD->isVolatile(), LD->isNonTemporal(), 2);
SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
DAG.getConstant(2, MVT::i32));
- SDValue High = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::i32, Chain,
+ SDValue High = DAG.getExtLoad(ISD::EXTLOAD, MVT::i32, dl, Chain,
HighAddr, LD->getSrcValue(), SVOffset + 2,
MVT::i16, LD->isVolatile(),
LD->isNonTemporal(), 2);
@@ -812,6 +812,7 @@ XCoreTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -826,7 +827,7 @@ XCoreTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
case CallingConv::Fast:
case CallingConv::C:
return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
- Outs, Ins, dl, DAG, InVals);
+ Outs, OutVals, Ins, dl, DAG, InVals);
}
}
@@ -839,6 +840,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
@@ -866,7 +868,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- SDValue Arg = Outs[i].Val;
+ SDValue Arg = OutVals[i];
// Promote the value if needed.
switch (VA.getLocInfo()) {
@@ -919,7 +921,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
@@ -1072,7 +1074,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// Create the frame index object for this incoming parameter...
int FI = MFI->CreateFixedObject(ObjSize,
LRSaveSize + VA.getLocMemOffset(),
- true, false);
+ true);
// Create the SelectionDAG nodes corresponding to a load
//from this parameter
@@ -1097,7 +1099,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// address
for (unsigned i = array_lengthof(ArgRegs) - 1; i >= FirstVAReg; --i) {
// Create a stack slot
- int FI = MFI->CreateFixedObject(4, offset, true, false);
+ int FI = MFI->CreateFixedObject(4, offset, true);
if (i == FirstVAReg) {
XFI->setVarArgsFrameIndex(FI);
}
@@ -1120,7 +1122,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// This will point to the next argument passed via stack.
XFI->setVarArgsFrameIndex(
MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
- true, false));
+ true));
}
}
@@ -1133,19 +1135,19 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
- SelectionDAG &DAG) const {
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
- RVLocs, *DAG.getContext());
- return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_XCore);
+ RVLocs, Context);
+ return CCInfo.CheckReturn(Outs, RetCC_XCore);
}
SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of
@@ -1175,7 +1177,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
- Outs[i].Val, Flag);
+ OutVals[i], Flag);
// guarantee that all emitted copies are
// stuck together, avoiding something bad
@@ -1221,23 +1223,22 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineFunction *F = BB->getParent();
MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
- BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
- .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
- // Update machine-CFG edges by first adding all successors of the current
- // block to the new block which will contain the Phi node for the select.
- for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
- E = BB->succ_end(); I != E; ++I)
- sinkMBB->addSuccessor(*I);
- // Next, remove all successors of the current block, and add the true
- // and fallthrough blocks as its successors.
- while (!BB->succ_empty())
- BB->removeSuccessor(BB->succ_begin());
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), BB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
+
// Next, add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
+ BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
+ .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
+
// copy0MBB:
// %FalseValue = ...
// # fallthrough to sinkMBB
@@ -1250,11 +1251,12 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
BB = sinkMBB;
- BuildMI(BB, dl, TII.get(XCore::PHI), MI->getOperand(0).getReg())
+ BuildMI(*BB, BB->begin(), dl,
+ TII.get(XCore::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
- F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
+ MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
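
The reordered select lowering above is the canonical post-splice custom-inserter shape: split the block, hand the tail and the successor edges to sinkMBB, and only then emit the branch and the PHI. A minimal sketch of that shape, assuming the 2.8-era MachineBasicBlock API and a hypothetical conditional-branch opcode BranchOpc:

    // Sketch only: MI is a SELECT-style pseudo with operands
    // (dst, cond, trueval, falseval); BranchOpc is a stand-in opcode.
    MachineBasicBlock *
    lowerSelectPseudo(MachineInstr *MI, MachineBasicBlock *BB,
                      const TargetInstrInfo &TII, unsigned BranchOpc) {
      DebugLoc dl = MI->getDebugLoc();
      const BasicBlock *LLVM_BB = BB->getBasicBlock();
      MachineFunction::iterator It = BB;
      ++It;
      MachineFunction *F = BB->getParent();
      MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
      MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
      F->insert(It, copy0MBB);
      F->insert(It, sinkMBB);

      // Move everything after MI into sinkMBB; it inherits BB's successor
      // edges, and PHIs in those successors are retargeted automatically.
      sinkMBB->splice(sinkMBB->begin(), BB,
                      llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
      sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

      BB->addSuccessor(copy0MBB);
      BB->addSuccessor(sinkMBB);
      // Emit the branch only after the CFG has been rewritten.
      BuildMI(BB, dl, TII.get(BranchOpc))
        .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
      copy0MBB->addSuccessor(sinkMBB);

      // The PHI must sit at the top of sinkMBB, ahead of the spliced tail.
      BuildMI(*sinkMBB, sinkMBB->begin(), dl,
              TII.get(TargetOpcode::PHI), MI->getOperand(0).getReg())
        .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
        .addReg(MI->getOperand(2).getReg()).addMBB(BB);

      MI->eraseFromParent();
      return sinkMBB;
    }
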
@@ -1379,7 +1381,6 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
SDValue Mul0, Mul1, Addend0, Addend1;
if (N->getValueType(0) == MVT::i32 &&
isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
- SDValue Zero = DAG.getConstant(0, MVT::i32);
SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
DAG.getVTList(MVT::i32, MVT::i32), Mul0,
Mul1, Addend0, Addend1);
diff --git a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
index d8d2a3a..febc198 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreISelLowering.h
@@ -120,6 +120,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -178,6 +179,7 @@ namespace llvm {
CallingConv::ID CallConv, bool isVarArg,
bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
@@ -186,13 +188,13 @@ namespace llvm {
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
DebugLoc dl, SelectionDAG &DAG) const;
virtual bool
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
- SelectionDAG &DAG) const;
+ const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
+ LLVMContext &Context) const;
};
}
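
Across these hooks the new pattern is uniform: Outs keeps the per-value flags and types, the actual SDValues travel in the parallel OutVals vector, and CanLowerReturn needs only an LLVMContext rather than a whole SelectionDAG. A hedged sketch of a LowerReturn written against the new signatures, where MyTargetISD::RET and RetCC_MyTarget are placeholders:

    SDValue
    MyTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                  bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  const SmallVectorImpl<SDValue> &OutVals,
                                  DebugLoc dl, SelectionDAG &DAG) const {
      SmallVector<CCValAssign, 16> RVLocs;
      CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                     RVLocs, *DAG.getContext());
      CCInfo.AnalyzeReturn(Outs, RetCC_MyTarget);  // placeholder CC function

      SDValue Flag;
      for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
        // Flags live in Outs[i]; the value itself now comes from OutVals[i].
        Chain = DAG.getCopyToReg(Chain, dl, RVLocs[i].getLocReg(),
                                 OutVals[i], Flag);
        Flag = Chain.getValue(1);
      }
      if (Flag.getNode())
        return DAG.getNode(MyTargetISD::RET, dl, MVT::Other, Chain, Flag);
      return DAG.getNode(MyTargetISD::RET, dl, MVT::Other, Chain);
    }
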
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
index 5260258..dd90ea9 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -299,9 +299,8 @@ XCoreInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
unsigned
XCoreInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond)const{
- // FIXME there should probably be a DebugLoc argument here
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL)const{
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
@@ -310,11 +309,11 @@ XCoreInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
if (FBB == 0) { // One way branch.
if (Cond.empty()) {
// Unconditional branch
- BuildMI(&MBB, dl, get(XCore::BRFU_lu6)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(XCore::BRFU_lu6)).addMBB(TBB);
} else {
// Conditional branch.
unsigned Opc = GetCondBranchFromCond((XCore::CondCode)Cond[0].getImm());
- BuildMI(&MBB, dl, get(Opc)).addReg(Cond[1].getReg())
+ BuildMI(&MBB, DL, get(Opc)).addReg(Cond[1].getReg())
.addMBB(TBB);
}
return 1;
@@ -323,9 +322,9 @@ XCoreInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
// Two-way Conditional branch.
assert(Cond.size() == 2 && "Unexpected number of components!");
unsigned Opc = GetCondBranchFromCond((XCore::CondCode)Cond[0].getImm());
- BuildMI(&MBB, dl, get(Opc)).addReg(Cond[1].getReg())
+ BuildMI(&MBB, DL, get(Opc)).addReg(Cond[1].getReg())
.addMBB(TBB);
- BuildMI(&MBB, dl, get(XCore::BRFU_lu6)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(XCore::BRFU_lu6)).addMBB(FBB);
return 2;
}
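
InsertBranch now takes the DebugLoc from its caller instead of fabricating an empty one, so rewritten branches can keep their line information. A caller-side sketch, assuming the usual analyze/remove/insert cycle:

    // Reuse the location of the branch being rewritten, or none at all.
    DebugLoc DL = MBB.empty() ? DebugLoc() : MBB.back().getDebugLoc();
    TII.RemoveBranch(MBB);
    TII.InsertBranch(MBB, TBB, FBB, Cond, DL);
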
@@ -357,37 +356,31 @@ XCoreInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
return 2;
}
-bool XCoreInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const {
-
- if (DestRC == SrcRC) {
- if (DestRC == XCore::GRRegsRegisterClass) {
- BuildMI(MBB, I, DL, get(XCore::ADD_2rus), DestReg)
- .addReg(SrcReg)
- .addImm(0);
- return true;
- } else {
- return false;
- }
+void XCoreInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ bool GRDest = XCore::GRRegsRegClass.contains(DestReg);
+ bool GRSrc = XCore::GRRegsRegClass.contains(SrcReg);
+
+ if (GRDest && GRSrc) {
+ BuildMI(MBB, I, DL, get(XCore::ADD_2rus), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addImm(0);
+ return;
}
- if (SrcRC == XCore::RRegsRegisterClass && SrcReg == XCore::SP &&
- DestRC == XCore::GRRegsRegisterClass) {
- BuildMI(MBB, I, DL, get(XCore::LDAWSP_ru6), DestReg)
- .addImm(0);
- return true;
+ if (GRDest && SrcReg == XCore::SP) {
+ BuildMI(MBB, I, DL, get(XCore::LDAWSP_ru6), DestReg).addImm(0);
+ return;
}
- if (DestRC == XCore::RRegsRegisterClass && DestReg == XCore::SP &&
- SrcRC == XCore::GRRegsRegisterClass) {
+
+ if (DestReg == XCore::SP && GRSrc) {
BuildMI(MBB, I, DL, get(XCore::SETSP_1r))
- .addReg(SrcReg);
- return true;
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
}
- return false;
+ llvm_unreachable("Impossible reg-to-reg copy");
}
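
The copyRegToReg-to-copyPhysReg swap also changes the contract: the old hook could decline by returning false, while the new one must either emit the copy or hit llvm_unreachable, and it takes an explicit kill flag in place of the two register-class pointers. Caller-side, that reduces to:

    // No failure path to check anymore: an impossible physreg copy is a
    // backend bug, reported inside copyPhysReg itself.
    TII.copyPhysReg(MBB, InsertPt, DL, DstReg, SrcReg, /*KillSrc=*/true);

Inside the hook, getKillRegState(KillSrc) turns the flag into the kill marker on the source operand, so liveness no longer has to be patched up afterwards.
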
void XCoreInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -438,8 +431,10 @@ bool XCoreInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(it->getReg());
- storeRegToStackSlot(MBB, MI, it->getReg(), true,
- it->getFrameIdx(), it->getRegClass(), &RI);
+ unsigned Reg = it->getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ storeRegToStackSlot(MBB, MI, Reg, true,
+ it->getFrameIdx(), RC, &RI);
if (emitFrameMoves) {
MCSymbol *SaveLabel = MF->getContext().CreateTempSymbol();
BuildMI(MBB, MI, DL, get(XCore::DBG_LABEL)).addSym(SaveLabel);
@@ -460,10 +455,11 @@ bool XCoreInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
--BeforeI;
for (std::vector<CalleeSavedInfo>::const_iterator it = CSI.begin();
it != CSI.end(); ++it) {
-
+ unsigned Reg = it->getReg();
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
loadRegFromStackSlot(MBB, MI, it->getReg(),
it->getFrameIdx(),
- it->getRegClass(), &RI);
+ RC, &RI);
assert(MI != MBB.begin() &&
"loadRegFromStackSlot didn't insert any code!");
// Insert in reverse order. loadRegFromStackSlot can insert multiple
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h
index 9035ea9..e5b0171 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.h
@@ -58,17 +58,16 @@ public:
bool AllowModify) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
- virtual bool copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- DebugLoc DL) const;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
diff --git a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
index dd3cbc1..19b9b1f 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
+++ b/contrib/llvm/lib/Target/XCore/XCoreInstrInfo.td
@@ -733,7 +733,7 @@ def NEG : _F2R<(outs GRRegs:$dst), (ins GRRegs:$b),
// TODO setd, eet, eef, getts, setpt, outct, inct, chkct, outt, intt, out,
// in, outshr, inshr, testct, testwct, tinitpc, tinitdp, tinitsp, tinitcp,
// tsetmr, sext (reg), zext (reg)
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let neverHasSideEffects = 1 in
def SEXT_rus : _FRUS<(outs GRRegs:$dst), (ins GRRegs:$src1, i32imm:$src2),
"sext $dst, $src2",
diff --git a/contrib/llvm/lib/Target/XCore/XCoreMCAsmInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreMCAsmInfo.cpp
index 5f6feae..42ab1b3 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreMCAsmInfo.cpp
@@ -10,7 +10,7 @@
#include "XCoreMCAsmInfo.h"
using namespace llvm;
-XCoreMCAsmInfo::XCoreMCAsmInfo(const Target &T, const StringRef &TT) {
+XCoreMCAsmInfo::XCoreMCAsmInfo(const Target &T, StringRef TT) {
SupportsDebugInformation = true;
Data16bitsDirective = "\t.short\t";
Data32bitsDirective = "\t.long\t";
diff --git a/contrib/llvm/lib/Target/XCore/XCoreMCAsmInfo.h b/contrib/llvm/lib/Target/XCore/XCoreMCAsmInfo.h
index 01f8e48..8403922 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreMCAsmInfo.h
@@ -14,14 +14,15 @@
#ifndef XCORETARGETASMINFO_H
#define XCORETARGETASMINFO_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
class Target;
- class StringRef;
+
class XCoreMCAsmInfo : public MCAsmInfo {
public:
- explicit XCoreMCAsmInfo(const Target &T, const StringRef &TT);
+ explicit XCoreMCAsmInfo(const Target &T, StringRef TT);
};
} // namespace llvm
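
Dropping const StringRef& in favor of by-value StringRef follows the usual guidance for this type: it is just a pointer/length pair, so copying costs the same as passing a reference and binds directly to literals and std::string temporaries. For instance:

    #include "llvm/ADT/StringRef.h"
    using llvm::StringRef;

    // By-value StringRef: no extra indirection, implicit conversion
    // from string literals.
    static bool isXCoreTriple(StringRef TT) {
      return TT.startswith("xcore");
    }

The header now includes StringRef.h itself, which is why the forward declaration of StringRef goes away.
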
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
index 0cfb358..2a88342 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.cpp
@@ -82,18 +82,6 @@ const unsigned* XCoreRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF)
return CalleeSavedRegs;
}
-const TargetRegisterClass* const*
-XCoreRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
- static const TargetRegisterClass * const CalleeSavedRegClasses[] = {
- XCore::GRRegsRegisterClass, XCore::GRRegsRegisterClass,
- XCore::GRRegsRegisterClass, XCore::GRRegsRegisterClass,
- XCore::GRRegsRegisterClass, XCore::GRRegsRegisterClass,
- XCore::GRRegsRegisterClass, XCore::RRegsRegisterClass,
- 0
- };
- return CalleeSavedRegClasses;
-}
-
BitVector XCoreRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
Reserved.set(XCore::CP);
@@ -320,7 +308,7 @@ XCoreRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
int FrameIdx;
if (! isVarArg) {
// A fixed offset of 0 allows us to save / restore LR using entsp / retsp.
- FrameIdx = MFI->CreateFixedObject(RC->getSize(), 0, true, false);
+ FrameIdx = MFI->CreateFixedObject(RC->getSize(), 0, true);
} else {
FrameIdx = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(),
false);
diff --git a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
index 5bdd059..66132ba 100644
--- a/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
+++ b/contrib/llvm/lib/Target/XCore/XCoreRegisterInfo.h
@@ -44,9 +44,6 @@ public:
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
- const TargetRegisterClass* const* getCalleeSavedRegClasses(
- const MachineFunction *MF = 0) const;
-
BitVector getReservedRegs(const MachineFunction &MF) const;
bool requiresRegisterScavenging(const MachineFunction &MF) const;
diff --git a/contrib/llvm/lib/Transforms/Hello/Hello.cpp b/contrib/llvm/lib/Transforms/Hello/Hello.cpp
index 37d7a00..abfa514 100644
--- a/contrib/llvm/lib/Transforms/Hello/Hello.cpp
+++ b/contrib/llvm/lib/Transforms/Hello/Hello.cpp
@@ -28,7 +28,7 @@ namespace {
Hello() : FunctionPass(&ID) {}
virtual bool runOnFunction(Function &F) {
- HelloCounter++;
+ ++HelloCounter;
errs() << "Hello: ";
errs().write_escaped(F.getName()) << '\n';
return false;
@@ -46,7 +46,7 @@ namespace {
Hello2() : FunctionPass(&ID) {}
virtual bool runOnFunction(Function &F) {
- HelloCounter++;
+ ++HelloCounter;
errs() << "Hello: ";
errs().write_escaped(F.getName()) << '\n';
return false;
diff --git a/contrib/llvm/lib/Transforms/Hello/Hello.exports b/contrib/llvm/lib/Transforms/Hello/Hello.exports
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/Hello/Hello.exports
diff --git a/contrib/llvm/lib/Transforms/Hello/Makefile b/contrib/llvm/lib/Transforms/Hello/Makefile
index c5e75d4..f1e3148 100644
--- a/contrib/llvm/lib/Transforms/Hello/Makefile
+++ b/contrib/llvm/lib/Transforms/Hello/Makefile
@@ -12,5 +12,13 @@ LIBRARYNAME = LLVMHello
LOADABLE_MODULE = 1
USEDLIBS =
+# If we don't need RTTI or EH, there's no reason to export anything
+# from the hello plugin.
+ifneq ($(REQUIRES_RTTI), 1)
+ifneq ($(REQUIRES_EH), 1)
+EXPORTED_SYMBOL_FILE = $(PROJ_SRC_DIR)/Hello.exports
+endif
+endif
+
include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index 89f213e..28ea079 100644
--- a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -360,19 +360,20 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
IndicesVector Operands;
for (Value::use_iterator UI = Arg->use_begin(), E = Arg->use_end();
UI != E; ++UI) {
+ User *U = *UI;
Operands.clear();
- if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
if (LI->isVolatile()) return false; // Don't hack volatile loads
Loads.push_back(LI);
// Direct loads are equivalent to a GEP with a zero index and then a load.
Operands.push_back(0);
- } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
+ } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
if (GEP->use_empty()) {
// Dead GEP's cause trouble later. Just remove them if we run into
// them.
getAnalysis<AliasAnalysis>().deleteValue(GEP);
GEP->eraseFromParent();
- // TODO: This runs the above loop over and over again for dead GEPS
+ // TODO: This runs the above loop over and over again for dead GEPs
// Couldn't we just increment the UI iterator earlier and erase the
// use?
return isSafeToPromoteArgument(Arg, isByVal);
@@ -452,12 +453,14 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg, bool isByVal) const {
// Now check every path from the entry block to the load for transparency.
// To do this, we perform a depth first search on the inverse CFG from the
// loading block.
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *P = *PI;
for (idf_ext_iterator<BasicBlock*, SmallPtrSet<BasicBlock*, 16> >
- I = idf_ext_begin(*PI, TranspBlocks),
- E = idf_ext_end(*PI, TranspBlocks); I != E; ++I)
+ I = idf_ext_begin(P, TranspBlocks),
+ E = idf_ext_end(P, TranspBlocks); I != E; ++I)
if (AA.canBasicBlockModify(**I, Arg, LoadSize))
return false;
+ }
}
// If the path from the entry of the function to each load is free of
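
This hunk is one instance of a cleanup that recurs throughout the import: Value::use_iterator yields a User*, and the new code dereferences it once into a named local instead of repeating *UI (or, worse, handing the iterator itself to isa<>). Reduced to a standalone sketch:

    for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
         UI != E; ++UI) {
      User *U = *UI;                       // dereference exactly once
      if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
        // ... handle a load of V ...
      } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        // ... handle a store through V ...
      }
    }
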
diff --git a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 692e47d..475eee8 100644
--- a/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -120,9 +120,14 @@ namespace {
typedef SmallVector<RetOrArg, 5> UseVector;
+ protected:
+ // DAH uses this to specify a different ID.
+ explicit DAE(void *ID) : ModulePass(ID) {}
+
public:
static char ID; // Pass identification, replacement for typeid
DAE() : ModulePass(&ID) {}
+
bool runOnModule(Module &M);
virtual bool ShouldHackArguments() const { return false; }
@@ -155,6 +160,8 @@ namespace {
/// by bugpoint.
struct DAH : public DAE {
static char ID;
+ DAH() : DAE(&ID) {}
+
virtual bool ShouldHackArguments() const { return true; }
};
}
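
The protected constructor added to DAE is the standard idiom for giving a derived pass its own identity: each class declares a static char ID, and the subclass forwards the address of its own ID so the PassManager can distinguish the two while sharing the implementation. Schematically, mirroring the void* pass-ID convention of this snapshot:

    struct Base : public ModulePass {
      static char ID;
      Base() : ModulePass(&ID) {}
    protected:
      explicit Base(void *PID) : ModulePass(PID) {}  // for subclasses
    };

    struct Derived : public Base {
      static char ID;               // a distinct identity
      Derived() : Base(&ID) {}
    };
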
diff --git a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index b429213..735a1c4 100644
--- a/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -160,13 +160,12 @@ static bool SafeToDestroyConstant(const Constant *C) {
static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
SmallPtrSet<const PHINode*, 16> &PHIUsers) {
for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
- ++UI)
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(*UI)) {
+ ++UI) {
+ const User *U = *UI;
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
GS.HasNonInstructionUser = true;
-
if (AnalyzeGlobal(CE, GS, PHIUsers)) return true;
-
- } else if (const Instruction *I = dyn_cast<Instruction>(*UI)) {
+ } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
if (!GS.HasMultipleAccessingFunctions) {
const Function *F = I->getParent()->getParent();
if (GS.AccessingFunction == 0)
@@ -221,18 +220,21 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
GS.HasPHIUser = true;
} else if (isa<CmpInst>(I)) {
+ // Nothing to analyse.
} else if (isa<MemTransferInst>(I)) {
- if (I->getOperand(1) == V)
+ const MemTransferInst *MTI = cast<MemTransferInst>(I);
+ if (MTI->getArgOperand(0) == V)
GS.StoredType = GlobalStatus::isStored;
- if (I->getOperand(2) == V)
+ if (MTI->getArgOperand(1) == V)
GS.isLoaded = true;
} else if (isa<MemSetInst>(I)) {
- assert(I->getOperand(1) == V && "Memset only takes one pointer!");
+ assert(cast<MemSetInst>(I)->getArgOperand(0) == V &&
+ "Memset only takes one pointer!");
GS.StoredType = GlobalStatus::isStored;
} else {
return true; // Any other non-load instruction might take address!
}
- } else if (const Constant *C = dyn_cast<Constant>(*UI)) {
+ } else if (const Constant *C = dyn_cast<Constant>(U)) {
GS.HasNonInstructionUser = true;
// We might have a dead and dangling constant hanging off of here.
if (!SafeToDestroyConstant(C))
@@ -242,6 +244,7 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
// Otherwise must be some other user.
return true;
}
+ }
return false;
}
@@ -1304,7 +1307,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
const Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
ConstantInt::get(IntPtrTy, TypeSize),
- NElems,
+ NElems, 0,
CI->getName() + ".f" + Twine(FieldNo));
FieldMallocs.push_back(NMI);
new StoreInst(NMI, NGV, CI);
@@ -1323,8 +1326,8 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
// if (F2) { free(F2); F2 = 0; }
// }
// The malloc can also fail if its argument is too large.
- Constant *ConstantZero = ConstantInt::get(CI->getOperand(1)->getType(), 0);
- Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getOperand(1),
+ Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
+ Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
ConstantZero, "isneg");
for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
@@ -1511,10 +1514,10 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// If this is an allocation of a fixed size array of structs, analyze as a
// variable size array. malloc [100 x struct],1 -> malloc struct, 100
- if (NElems == ConstantInt::get(CI->getOperand(1)->getType(), 1))
+ if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
AllocTy = AT->getElementType();
-
+
const StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
if (!AllocSTy)
return false;
@@ -1533,7 +1536,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
AllocSize, NumElements,
- CI->getName());
+ 0, CI->getName());
Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
CI->replaceAllUsesWith(Cast);
CI->eraseFromParent();
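
Both CreateMalloc call sites gain a 0 ahead of the name because the factory grew an extra Function* parameter in this snapshot, so the Twine now comes last positionally. Sketched against that signature (the parameter naming here is descriptive, not normative):

    // CallInst::CreateMalloc(InsertBefore, IntPtrTy, AllocTy, AllocSize,
    //                        ArraySize, MallocF, Name); passing 0 for
    // MallocF lets it locate or insert the malloc declaration itself.
    Instruction *NewM = CallInst::CreateMalloc(CI, IntPtrTy, AllocTy,
                                               AllocSize, NumElements,
                                               /*MallocF=*/0, "arr");
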
@@ -1597,13 +1600,15 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
GVElType->isFloatingPointTy() ||
GVElType->isPointerTy() || GVElType->isVectorTy())
return false;
-
+
// Walk the use list of the global seeing if all the uses are load or store.
// If there is anything else, bail out.
- for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I)
- if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
+ for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
+ User *U = *I;
+ if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
return false;
-
+ }
+
DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV);
// Create the new global, initializing it to false.
@@ -1641,7 +1646,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
// bool.
Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
- // If we're already replaced the input, StoredVal will be a cast or
+ // If we've already replaced the input, StoredVal will be a cast or
// select instruction. If not, it will be a load of the original
// global.
if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
@@ -2260,8 +2265,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
getVal(Values, CI->getOperand(0)),
CI->getType());
} else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
- InstResult =
- ConstantExpr::getSelect(getVal(Values, SI->getOperand(0)),
+ InstResult = ConstantExpr::getSelect(getVal(Values, SI->getOperand(0)),
getVal(Values, SI->getOperand(1)),
getVal(Values, SI->getOperand(2)));
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
@@ -2302,7 +2306,8 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
if (!Callee) return false; // Cannot resolve.
SmallVector<Constant*, 8> Formals;
- for (User::op_iterator i = CI->op_begin() + 1, e = CI->op_end();
+ CallSite CS(CI);
+ for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i)
Formals.push_back(getVal(Values, *i));
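
The getOperand-to-getArgOperand renumbering recurs through the rest of this import. A CallInst keeps its callee in the operand list, so argument i historically lived at getOperand(i+1); getArgOperand(i) names the argument directly and stays correct even if the callee's slot moves. A hedged sketch for a hypothetical call f(a, b):

    void inspectCall(CallInst *CI) {
      Value *A = CI->getArgOperand(0);       // was getOperand(1)
      Value *B = CI->getArgOperand(1);       // was getOperand(2)
      unsigned N = CI->getNumArgOperands();  // argument count, callee excluded
      (void)A; (void)B; (void)N;
    }
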
diff --git a/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp b/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
index df2456f..e4db235 100644
--- a/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/IPConstantPropagation.cpp
@@ -85,15 +85,16 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
unsigned NumNonconstant = 0;
for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
+ User *U = *UI;
// Ignore blockaddress uses.
- if (isa<BlockAddress>(*UI)) continue;
+ if (isa<BlockAddress>(U)) continue;
// Used by a non-instruction, or not the callee of a function, do not
// transform.
- if (!isa<CallInst>(*UI) && !isa<InvokeInst>(*UI))
+ if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
return false;
- CallSite CS = CallSite::get(cast<Instruction>(*UI));
+ CallSite CS = CallSite::get(cast<Instruction>(U));
if (!CS.isCallee(UI))
return false;
diff --git a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
index b785bb0..9bb01f5 100644
--- a/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -399,7 +399,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// We can only inline direct calls to non-declarations.
if (Callee == 0 || Callee->isDeclaration()) continue;
- // If this call sites was obtained by inlining another function, verify
+ // If this call site was obtained by inlining another function, verify
// that the include path for the function did not include the callee
// itself. If so, we'd be recursively inlining the same function,
// which would provide the same callsites, which would cause us to
@@ -468,7 +468,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// move a call site to a function in this SCC before the
// 'FirstCallInSCC' barrier.
if (SCC.isSingular()) {
- std::swap(CallSites[CSi], CallSites.back());
+ CallSites[CSi] = CallSites.back();
CallSites.pop_back();
} else {
CallSites.erase(CallSites.begin()+CSi);
diff --git a/contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp b/contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp
index 4d61e83..76cfef8 100644
--- a/contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/LowerSetJmp.cpp
@@ -42,6 +42,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -262,8 +263,8 @@ void LowerSetJmp::TransformLongJmpCall(CallInst* Inst)
// char*. It returns "void", so it doesn't need to replace any of
// Inst's uses and doesn't get a name.
CastInst* CI =
- new BitCastInst(Inst->getOperand(1), SBPTy, "LJBuf", Inst);
- Value *Args[] = { CI, Inst->getOperand(2) };
+ new BitCastInst(Inst->getArgOperand(0), SBPTy, "LJBuf", Inst);
+ Value *Args[] = { CI, Inst->getArgOperand(1) };
CallInst::Create(ThrowLongJmp, Args, Args + 2, "", Inst);
SwitchValuePair& SVP = SwitchValMap[Inst->getParent()->getParent()];
@@ -378,7 +379,7 @@ void LowerSetJmp::TransformSetJmpCall(CallInst* Inst)
const Type* SBPTy =
Type::getInt8PtrTy(Inst->getContext());
CastInst* BufPtr =
- new BitCastInst(Inst->getOperand(1), SBPTy, "SBJmpBuf", Inst);
+ new BitCastInst(Inst->getArgOperand(0), SBPTy, "SBJmpBuf", Inst);
Value *Args[] = {
GetSetJmpMap(Func), BufPtr,
ConstantInt::get(Type::getInt32Ty(Inst->getContext()), SetJmpIDMap[Func]++)
@@ -405,12 +406,14 @@ void LowerSetJmp::TransformSetJmpCall(CallInst* Inst)
// Loop over all of the uses of instruction. If any of them are after the
// call, "spill" the value to the stack.
for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
- UI != E; ++UI)
- if (cast<Instruction>(*UI)->getParent() != ABlock ||
- InstrsAfterCall.count(cast<Instruction>(*UI))) {
+ UI != E; ++UI) {
+ User *U = *UI;
+ if (cast<Instruction>(U)->getParent() != ABlock ||
+ InstrsAfterCall.count(cast<Instruction>(U))) {
DemoteRegToStack(*II);
break;
}
+ }
InstrsAfterCall.clear();
// Change the setjmp call into a branch statement. We'll remove the
@@ -473,7 +476,8 @@ void LowerSetJmp::visitCallInst(CallInst& CI)
// Construct the new "invoke" instruction.
TerminatorInst* Term = OldBB->getTerminator();
- std::vector<Value*> Params(CI.op_begin() + 1, CI.op_end());
+ CallSite CS(&CI);
+ std::vector<Value*> Params(CS.arg_begin(), CS.arg_end());
InvokeInst* II =
InvokeInst::Create(CI.getCalledValue(), NewBB, PrelimBBMap[Func],
Params.begin(), Params.end(), CI.getName(), Term);
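
Replacing the op_begin() + 1 arithmetic with CallSite is the same hardening from the other direction: CallSite knows where the arguments live for both CallInst and InvokeInst, so the off-by-one cannot silently break. As a standalone sketch:

    #include "llvm/Support/CallSite.h"

    // Uniform argument harvesting for calls and invokes.
    static void collectArgs(Instruction *I, std::vector<Value*> &Args) {
      CallSite CS(I);                        // asserts I is a call or invoke
      for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
           AI != AE; ++AI)
        Args.push_back(*AI);
    }
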
diff --git a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
index 622a9b5..aeeafe7 100644
--- a/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/MergeFunctions.cpp
@@ -146,7 +146,7 @@ static bool isEquivalentType(const Type *Ty1, const Type *Ty2) {
switch(Ty1->getTypeID()) {
default:
llvm_unreachable("Unknown type!");
- // Fall through in Release-Asserts mode.
+ // Fall through in Release mode.
case Type::IntegerTyID:
case Type::OpaqueTyID:
// Ty1 == Ty2 would have returned true earlier.
@@ -535,6 +535,7 @@ static LinkageCategory categorize(const Function *F) {
case GlobalValue::WeakAnyLinkage:
case GlobalValue::WeakODRLinkage:
case GlobalValue::ExternalWeakLinkage:
+ case GlobalValue::LinkerPrivateWeakLinkage:
return ExternalWeak;
case GlobalValue::ExternalLinkage:
@@ -602,6 +603,10 @@ static void ThunkGToF(Function *F, Function *G) {
}
static void AliasGToF(Function *F, Function *G) {
+ // Darwin will trigger llvm_unreachable if asked to codegen an alias.
+ return ThunkGToF(F, G);
+
+#if 0
if (!G->hasExternalLinkage() && !G->hasLocalLinkage() && !G->hasWeakLinkage())
return ThunkGToF(F, G);
@@ -613,6 +618,7 @@ static void AliasGToF(Function *F, Function *G) {
GA->setVisibility(G->getVisibility());
G->replaceAllUsesWith(GA);
G->eraseFromParent();
+#endif
}
static bool fold(std::vector<Function *> &FnVec, unsigned i, unsigned j) {
diff --git a/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp b/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
index 07525ea..6b9814c 100644
--- a/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PartialInlining.cpp
@@ -66,13 +66,13 @@ Function* PartialInliner::unswitchFunction(Function* F) {
return 0;
// Clone the function, so that we can hack away on it.
- DenseMap<const Value*, Value*> ValueMap;
- Function* duplicateFunction = CloneFunction(F, ValueMap);
+ ValueMap<const Value*, Value*> VMap;
+ Function* duplicateFunction = CloneFunction(F, VMap);
duplicateFunction->setLinkage(GlobalValue::InternalLinkage);
F->getParent()->getFunctionList().push_back(duplicateFunction);
- BasicBlock* newEntryBlock = cast<BasicBlock>(ValueMap[entryBlock]);
- BasicBlock* newReturnBlock = cast<BasicBlock>(ValueMap[returnBlock]);
- BasicBlock* newNonReturnBlock = cast<BasicBlock>(ValueMap[nonReturnBlock]);
+ BasicBlock* newEntryBlock = cast<BasicBlock>(VMap[entryBlock]);
+ BasicBlock* newReturnBlock = cast<BasicBlock>(VMap[returnBlock]);
+ BasicBlock* newNonReturnBlock = cast<BasicBlock>(VMap[nonReturnBlock]);
// Go ahead and update all uses to the duplicate, so that we can just
// use the inliner functionality when we're done hacking.
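
CloneFunction's map type changes from DenseMap to ValueMap here; the practical difference is that ValueMap keeps its keys coherent when values are RAUW'd or deleted during the later surgery, where a DenseMap would silently hold stale pointers. Usage is otherwise drop-in:

    #include "llvm/ADT/ValueMap.h"
    #include "llvm/Transforms/Utils/Cloning.h"

    // Clone F so it can be modified without touching the original.
    static Function *cloneForSurgery(Function *F) {
      ValueMap<const Value*, Value*> VMap;
      Function *Dup = CloneFunction(F, VMap);  // fills VMap old -> new
      Dup->setLinkage(GlobalValue::InternalLinkage);
      F->getParent()->getFunctionList().push_back(Dup);
      return Dup;
    }
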
diff --git a/contrib/llvm/lib/Transforms/IPO/PartialSpecialization.cpp b/contrib/llvm/lib/Transforms/IPO/PartialSpecialization.cpp
index 084b94e..58e1448 100644
--- a/contrib/llvm/lib/Transforms/IPO/PartialSpecialization.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/PartialSpecialization.cpp
@@ -32,6 +32,10 @@
using namespace llvm;
STATISTIC(numSpecialized, "Number of specialized functions created");
+STATISTIC(numReplaced, "Number of callers replaced by specialization");
+
+// Maximum number of arguments we expect to mark as interesting
+static const int MaxInterests = 6;
// Call must be used at least occasionally
static const int CallsMin = 5;
@@ -40,8 +44,9 @@ static const int CallsMin = 5;
static const double ConstValPercent = .1;
namespace {
+ typedef SmallVector<int, MaxInterests> InterestingArgVector;
class PartSpec : public ModulePass {
- void scanForInterest(Function&, SmallVector<int, 6>&);
+ void scanForInterest(Function&, InterestingArgVector&);
int scanDistribution(Function&, int, std::map<Constant*, int>&);
public :
static char ID; // Pass identification, replacement for typeid
@@ -59,13 +64,15 @@ X("partialspecialization", "Partial Specialization");
// a call to the specialized function. Returns the specialized function
static Function*
SpecializeFunction(Function* F,
- DenseMap<const Value*, Value*>& replacements) {
+ ValueMap<const Value*, Value*>& replacements) {
// arg numbers of deleted arguments
- DenseSet<unsigned> deleted;
- for (DenseMap<const Value*, Value*>::iterator
+ DenseMap<unsigned, const Argument*> deleted;
+ for (ValueMap<const Value*, Value*>::iterator
repb = replacements.begin(), repe = replacements.end();
- repb != repe; ++repb)
- deleted.insert(cast<Argument>(repb->first)->getArgNo());
+ repb != repe; ++repb) {
+ Argument const *arg = cast<const Argument>(repb->first);
+ deleted[arg->getArgNo()] = arg;
+ }
Function* NF = CloneFunction(F, replacements);
NF->setLinkage(GlobalValue::InternalLinkage);
@@ -80,9 +87,23 @@ SpecializeFunction(Function* F,
if (CS.getCalledFunction() == F) {
SmallVector<Value*, 6> args;
- for (unsigned x = 0; x < CS.arg_size(); ++x)
- if (!deleted.count(x))
- args.push_back(CS.getArgument(x));
+ // Assemble the non-specialized arguments for the updated callsite.
+ // In the process, make sure that the specialized arguments are
+ // constant and match the specialization. If that's not the case,
+ // this callsite needs to call the original or some other
+ // specialization; don't change it here.
+ CallSite::arg_iterator as = CS.arg_begin(), ae = CS.arg_end();
+ for (CallSite::arg_iterator ai = as; ai != ae; ++ai) {
+ DenseMap<unsigned, const Argument*>::iterator delit = deleted.find(
+ std::distance(as, ai));
+ if (delit == deleted.end())
+ args.push_back(cast<Value>(ai));
+ else {
+ Constant *ci = dyn_cast<Constant>(ai);
+ if (!(ci && ci == replacements[delit->second]))
+ goto next_use;
+ }
+ }
Value* NCall;
if (CallInst *CI = dyn_cast<CallInst>(i)) {
NCall = CallInst::Create(NF, args.begin(), args.end(),
@@ -99,8 +120,11 @@ SpecializeFunction(Function* F,
}
CS.getInstruction()->replaceAllUsesWith(NCall);
CS.getInstruction()->eraseFromParent();
+ ++numReplaced;
}
}
+ next_use:
+ ;
}
return NF;
}
@@ -111,7 +135,7 @@ bool PartSpec::runOnModule(Module &M) {
for (Module::iterator I = M.begin(); I != M.end(); ++I) {
Function &F = *I;
if (F.isDeclaration() || F.mayBeOverridden()) continue;
- SmallVector<int, 6> interestingArgs;
+ InterestingArgVector interestingArgs;
scanForInterest(F, interestingArgs);
// Find the first interesting Argument that we can specialize on
@@ -126,7 +150,7 @@ bool PartSpec::runOnModule(Module &M) {
ee = distribution.end(); ii != ee; ++ii)
if (total > ii->second && ii->first &&
ii->second > total * ConstValPercent) {
- DenseMap<const Value*, Value*> m;
+ ValueMap<const Value*, Value*> m;
Function::arg_iterator arg = F.arg_begin();
for (int y = 0; y < interestingArgs[x]; ++y)
++arg;
@@ -143,7 +167,7 @@ bool PartSpec::runOnModule(Module &M) {
/// scanForInterest - This function decides which arguments would be worth
/// specializing on.
-void PartSpec::scanForInterest(Function& F, SmallVector<int, 6>& args) {
+void PartSpec::scanForInterest(Function& F, InterestingArgVector& args) {
for(Function::arg_iterator ii = F.arg_begin(), ee = F.arg_end();
ii != ee; ++ii) {
for(Value::use_iterator ui = ii->use_begin(), ue = ii->use_end();
diff --git a/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp b/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
index 6bc8e66..12e8db8 100644
--- a/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/StripSymbols.cpp
@@ -73,6 +73,19 @@ namespace {
AU.setPreservesAll();
}
};
+
+ class StripDeadDebugInfo : public ModulePass {
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ explicit StripDeadDebugInfo()
+ : ModulePass(&ID) {}
+
+ virtual bool runOnModule(Module &M);
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+ };
}
char StripSymbols::ID = 0;
@@ -99,6 +112,14 @@ ModulePass *llvm::createStripDebugDeclarePass() {
return new StripDebugDeclare();
}
+char StripDeadDebugInfo::ID = 0;
+static RegisterPass<StripDeadDebugInfo>
+A("strip-dead-debug-info", "Strip debug info for unused symbols");
+
+ModulePass *llvm::createStripDeadDebugInfoPass() {
+ return new StripDeadDebugInfo();
+}
+
/// OnlyUsedBy - Return true if V is only used by Usr.
static bool OnlyUsedBy(Value *V, Value *Usr) {
for(Value::use_iterator I = V->use_begin(), E = V->use_end(); I != E; ++I) {
@@ -223,27 +244,27 @@ static bool StripDebugInfo(Module &M) {
Changed = true;
}
- NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv");
- if (NMD) {
- Changed = true;
- NMD->eraseFromParent();
- }
-
- NMD = M.getNamedMetadata("llvm.dbg.lv");
- if (NMD) {
- Changed = true;
- NMD->eraseFromParent();
+ for (Module::named_metadata_iterator NMI = M.named_metadata_begin(),
+ NME = M.named_metadata_end(); NMI != NME;) {
+ NamedMDNode *NMD = NMI;
+ ++NMI;
+ if (NMD->getName().startswith("llvm.dbg.")) {
+ NMD->eraseFromParent();
+ Changed = true;
+ }
}
-
+
unsigned MDDbgKind = M.getMDKindID("dbg");
- for (Module::iterator MI = M.begin(), ME = M.end(); MI != ME; ++MI)
+ for (Module::iterator MI = M.begin(), ME = M.end(); MI != ME; ++MI)
for (Function::iterator FI = MI->begin(), FE = MI->end(); FI != FE;
++FI)
for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE;
- ++BI)
+ ++BI) {
+ Changed = true; // FIXME: Only set if there was debug metadata.
BI->setMetadata(MDDbgKind, 0);
+ }
- return true;
+ return Changed;
}
bool StripSymbols::runOnModule(Module &M) {
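
The rewritten loop above also shows the safe way to erase named metadata while iterating: take the node, advance the iterator, then erase. Reduced to its core:

    for (Module::named_metadata_iterator I = M.named_metadata_begin(),
         E = M.named_metadata_end(); I != E;) {
      NamedMDNode *NMD = I;
      ++I;                                  // advance before erasing
      if (NMD->getName().startswith("llvm.dbg."))
        NMD->eraseFromParent();
    }
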
@@ -266,8 +287,8 @@ bool StripDebugDeclare::runOnModule(Module &M) {
if (Declare) {
while (!Declare->use_empty()) {
CallInst *CI = cast<CallInst>(Declare->use_back());
- Value *Arg1 = CI->getOperand(1);
- Value *Arg2 = CI->getOperand(2);
+ Value *Arg1 = CI->getArgOperand(0);
+ Value *Arg2 = CI->getArgOperand(1);
assert(CI->use_empty() && "llvm.dbg intrinsic should have void result");
CI->eraseFromParent();
if (Arg1->use_empty()) {
@@ -295,3 +316,83 @@ bool StripDebugDeclare::runOnModule(Module &M) {
return true;
}
+
+/// getRealLinkageName - If the special LLVM prefix that tells the asm
+/// printer not to emit the usual symbol prefix is present, return the
+/// linkage name with that prefix skipped.
+static StringRef getRealLinkageName(StringRef LinkageName) {
+ char One = '\1';
+ if (LinkageName.startswith(StringRef(&One, 1)))
+ return LinkageName.substr(1);
+ return LinkageName;
+}
+
+bool StripDeadDebugInfo::runOnModule(Module &M) {
+ bool Changed = false;
+
+ // Debugging information is encoded in LLVM IR using metadata. This is
+ // designed in such a way that debug info for a symbol is preserved even
+ // if the symbol is optimized away by the optimizer. This special pass
+ // removes debug info for such symbols.
+
+ // llvm.dbg.gv keeps track of debug info for global variables.
+ if (NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv")) {
+ SmallVector<MDNode *, 8> MDs;
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+ if (DIGlobalVariable(NMD->getOperand(i)).Verify())
+ MDs.push_back(NMD->getOperand(i));
+ else
+ Changed = true;
+ NMD->eraseFromParent();
+ NMD = NULL;
+
+ for (SmallVector<MDNode *, 8>::iterator I = MDs.begin(),
+ E = MDs.end(); I != E; ++I) {
+ if (M.getGlobalVariable(DIGlobalVariable(*I).getGlobal()->getName(),
+ true)) {
+ if (!NMD)
+ NMD = M.getOrInsertNamedMetadata("llvm.dbg.gv");
+ NMD->addOperand(*I);
+ }
+ else
+ Changed = true;
+ }
+ }
+
+ // llvm.dbg.sp keeps track of debug info for subprograms.
+ if (NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.sp")) {
+ SmallVector<MDNode *, 8> MDs;
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+ if (DISubprogram(NMD->getOperand(i)).Verify())
+ MDs.push_back(NMD->getOperand(i));
+ else
+ Changed = true;
+ NMD->eraseFromParent();
+ NMD = NULL;
+
+ for (SmallVector<MDNode *, 8>::iterator I = MDs.begin(),
+ E = MDs.end(); I != E; ++I) {
+ bool FnIsLive = false;
+ if (Function *F = DISubprogram(*I).getFunction())
+ if (M.getFunction(F->getName()))
+ FnIsLive = true;
+ if (FnIsLive) {
+ if (!NMD)
+ NMD = M.getOrInsertNamedMetadata("llvm.dbg.sp");
+ NMD->addOperand(*I);
+ } else {
+ // Remove llvm.dbg.lv.fnname named mdnode which may have been used
+ // to hold debug info for a dead function's local variables.
+ StringRef FName = DISubprogram(*I).getLinkageName();
+ if (FName.empty())
+ FName = DISubprogram(*I).getName();
+ if (NamedMDNode *LVNMD =
+ M.getNamedMetadata(Twine("llvm.dbg.lv.",
+ getRealLinkageName(FName))))
+ LVNMD->eraseFromParent();
+ }
+ }
+ }
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Transforms/IPO/StructRetPromotion.cpp b/contrib/llvm/lib/Transforms/IPO/StructRetPromotion.cpp
index 473e83c..a74686f 100644
--- a/contrib/llvm/lib/Transforms/IPO/StructRetPromotion.cpp
+++ b/contrib/llvm/lib/Transforms/IPO/StructRetPromotion.cpp
@@ -107,12 +107,12 @@ CallGraphNode *SRETPromotion::PromoteReturn(CallGraphNode *CGN) {
// Check if it is ok to perform this promotion.
if (isSafeToUpdateAllCallers(F) == false) {
DEBUG(dbgs() << "SretPromotion: Not all callers can be updated\n");
- NumRejectedSRETUses++;
+ ++NumRejectedSRETUses;
return 0;
}
DEBUG(dbgs() << "SretPromotion: sret argument will be promoted\n");
- NumSRET++;
+ ++NumSRET;
// [1] Replace use of sret parameter
AllocaInst *TheAlloca = new AllocaInst(STy, NULL, "mrv",
F->getEntryBlock().begin());
@@ -171,16 +171,16 @@ bool SRETPromotion::isSafeToUpdateAllCallers(Function *F) {
// Check FirstArg's users.
for (Value::use_iterator ArgI = FirstArg->use_begin(),
ArgE = FirstArg->use_end(); ArgI != ArgE; ++ArgI) {
-
+ User *U = *ArgI;
// If FirstArg user is a CallInst that does not correspond to current
// call site then this function F is not suitable for sret promotion.
- if (CallInst *CI = dyn_cast<CallInst>(ArgI)) {
+ if (CallInst *CI = dyn_cast<CallInst>(U)) {
if (CI != Call)
return false;
}
// If FirstArg user is a GEP whose all users are not LoadInst then
// this function F is not suitable for sret promotion.
- else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(ArgI)) {
+ else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
// TODO : Use dom info and insert PHINodes to collect get results
// from multiple call sites for this GEP.
if (GEP->getParent() != Call->getParent())
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h b/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
index c7b04a4..24e0528 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombine.h
@@ -178,7 +178,8 @@ public:
Instruction *visitPHINode(PHINode &PN);
Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
Instruction *visitAllocaInst(AllocaInst &AI);
- Instruction *visitFree(Instruction &FI);
+ Instruction *visitMalloc(Instruction &FI);
+ Instruction *visitFree(CallInst &FI);
Instruction *visitLoadInst(LoadInst &LI);
Instruction *visitStoreInst(StoreInst &SI);
Instruction *visitBranchInst(BranchInst &BI);
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 8586054..5876f40 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -472,6 +472,25 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
Value *NewOr = Builder->CreateOr(Val, Val2);
return Builder->CreateICmp(LHSCC, NewOr, LHSCst);
}
+
+ // (icmp ne (A & C1), 0) & (icmp ne (A & C2), 0) -->
+ // (icmp eq (A & (C1|C2)), (C1|C2))
+ if (LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
+ Instruction *I1 = dyn_cast<Instruction>(Val);
+ Instruction *I2 = dyn_cast<Instruction>(Val2);
+ if (I1 && I1->getOpcode() == Instruction::And &&
+ I2 && I2->getOpcode() == Instruction::And &&
+ I1->getOperand(0) == I2->getOperand(0)) {
+ ConstantInt *CI1 = dyn_cast<ConstantInt>(I1->getOperand(1));
+ ConstantInt *CI2 = dyn_cast<ConstantInt>(I2->getOperand(1));
+ if (CI1 && !CI1->isZero() && CI2 && !CI2->isZero() &&
+ CI1->getValue().operator&(CI2->getValue()) == 0) {
+ Constant *ConstOr = ConstantExpr::getOr(CI1, CI2);
+ Value *NewAnd = Builder->CreateAnd(I1->getOperand(0), ConstOr);
+ return Builder->CreateICmp(ICmpInst::ICMP_EQ, NewAnd, ConstOr);
+ }
+ }
+ }
}
// From here on, we only handle:
@@ -1584,6 +1603,19 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if ((match(A, m_Not(m_Specific(B))) &&
match(D, m_Not(m_Specific(C)))))
return BinaryOperator::CreateXor(C, B);
+
+ // ((A|B)&1)|(B&-2) -> (A&1) | B
+ if (match(A, m_Or(m_Value(V1), m_Specific(B))) ||
+ match(A, m_Or(m_Specific(B), m_Value(V1)))) {
+ Instruction *Ret = FoldOrWithConstants(I, Op1, V1, B, C);
+ if (Ret) return Ret;
+ }
+ // (B&-2)|((A|B)&1) -> (A&1) | B
+ if (match(B, m_Or(m_Specific(A), m_Value(V1))) ||
+ match(B, m_Or(m_Value(V1), m_Specific(A)))) {
+ Instruction *Ret = FoldOrWithConstants(I, Op0, A, V1, D);
+ if (Ret) return Ret;
+ }
}
// (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
@@ -1599,19 +1631,6 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
}
}
- // ((A|B)&1)|(B&-2) -> (A&1) | B
- if (match(Op0, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
- match(Op0, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
- Instruction *Ret = FoldOrWithConstants(I, Op1, A, B, C);
- if (Ret) return Ret;
- }
- // (B&-2)|((A|B)&1) -> (A&1) | B
- if (match(Op1, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
- match(Op1, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
- Instruction *Ret = FoldOrWithConstants(I, Op0, A, B, C);
- if (Ret) return Ret;
- }
-
// (~A | ~B) == (~(A & B)) - De Morgan's Law
if (Value *Op0NotVal = dyn_castNotVal(Op0))
if (Value *Op1NotVal = dyn_castNotVal(Op1))
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 38e7b6e..85251a8 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -112,8 +112,8 @@ unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
- unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
- unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
+ unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(0));
+ unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(1));
unsigned MinAlign = std::min(DstAlign, SrcAlign);
unsigned CopyAlign = MI->getAlignment();
@@ -125,7 +125,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
// load/store.
- ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
+ ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
if (MemOpLength == 0) return 0;
// Source and destination pointer types are always "i8*" for intrinsic. See
@@ -140,9 +140,9 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// Use an integer load+store unless we can find something better.
unsigned SrcAddrSp =
- cast<PointerType>(MI->getOperand(2)->getType())->getAddressSpace();
+ cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
unsigned DstAddrSp =
- cast<PointerType>(MI->getOperand(1)->getType())->getAddressSpace();
+ cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
@@ -154,8 +154,8 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// an i64 load+store, here because this improves the odds that the source or
// dest address will be promotable. See if we can find a better type than the
// integer datatype.
- Value *StrippedDest = MI->getOperand(1)->stripPointerCasts();
- if (StrippedDest != MI->getOperand(1)) {
+ Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
+ if (StrippedDest != MI->getArgOperand(0)) {
const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
->getElementType();
if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
@@ -189,15 +189,15 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
SrcAlign = std::max(SrcAlign, CopyAlign);
DstAlign = std::max(DstAlign, CopyAlign);
- Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewSrcPtrTy);
- Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewDstPtrTy);
+ Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
+ Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
InsertNewInstBefore(L, *MI);
InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
*MI);
// Set the size of the copy to 0, it will be deleted on the next iteration.
- MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
+ MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
return MI;
}
@@ -250,6 +250,8 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (isFreeCall(&CI))
return visitFree(CI);
+ if (isMalloc(&CI))
+ return visitMalloc(CI);
// If the caller function is nounwind, mark the call as nounwind, even if the
// callee isn't.
@@ -261,7 +263,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
if (!II) return visitCallSite(&CI);
-
+
// Intrinsics cannot occur in an invoke, so handle them here instead of in
// visitCallSite.
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
@@ -287,11 +289,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (GVSrc->isConstant()) {
Module *M = CI.getParent()->getParent()->getParent();
Intrinsic::ID MemCpyID = Intrinsic::memcpy;
- const Type *Tys[3] = { CI.getOperand(1)->getType(),
- CI.getOperand(2)->getType(),
- CI.getOperand(3)->getType() };
- CI.setCalledFunction(
- Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
+ const Type *Tys[3] = { CI.getArgOperand(0)->getType(),
+ CI.getArgOperand(1)->getType(),
+ CI.getArgOperand(2)->getType() };
+ CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
Changed = true;
}
}
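
The tidy-up above shows the overloaded-intrinsic API in passing: Intrinsic::getDeclaration takes the list of concrete types that instantiate the intrinsic's overloads, for memcpy the two pointer types plus the length type. Assuming those types are in scope:

    // Materialize (or find) the memcpy declaration for these types.
    const Type *Tys[3] = { DstPtrTy, SrcPtrTy, LenTy };
    Function *MemCpy = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys, 3);
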
@@ -311,7 +312,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (Instruction *I = SimplifyMemSet(MSI))
return I;
}
-
+
if (Changed) return II;
}
@@ -322,10 +323,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (!TD) break;
const Type *ReturnTy = CI.getType();
- bool Min = (cast<ConstantInt>(II->getOperand(2))->getZExtValue() == 1);
+ bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
// Get to the real allocated thing and offset as fast as possible.
- Value *Op1 = II->getOperand(1)->stripPointerCasts();
+ Value *Op1 = II->getArgOperand(0)->stripPointerCasts();
// If we've stripped down to a single global variable that we
// can know the size of then just return that.
@@ -393,7 +394,6 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
return ReplaceInstUsesWith(CI, RetVal);
-
}
// Do not return "I don't know" here. Later optimization passes could
@@ -402,45 +402,45 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
case Intrinsic::bswap:
// bswap(bswap(x)) -> x
- if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
+ if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
if (Operand->getIntrinsicID() == Intrinsic::bswap)
- return ReplaceInstUsesWith(CI, Operand->getOperand(1));
+ return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));
// bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
- if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
+ if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
if (Operand->getIntrinsicID() == Intrinsic::bswap) {
unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
TI->getType()->getPrimitiveSizeInBits();
Value *CV = ConstantInt::get(Operand->getType(), C);
- Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
+ Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
return new TruncInst(V, TI->getType());
}
}
break;
case Intrinsic::powi:
- if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
+ if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
// powi(x, 0) -> 1.0
if (Power->isZero())
return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
// powi(x, 1) -> x
if (Power->isOne())
- return ReplaceInstUsesWith(CI, II->getOperand(1));
+ return ReplaceInstUsesWith(CI, II->getArgOperand(0));
// powi(x, -1) -> 1/x
if (Power->isAllOnesValue())
return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
- II->getOperand(1));
+ II->getArgOperand(0));
}
break;
case Intrinsic::cttz: {
// If all bits below the first known one are known zero,
// this value is constant.
- const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
+ const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
uint32_t BitWidth = IT->getBitWidth();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
+ ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
KnownZero, KnownOne);
unsigned TrailingZeros = KnownOne.countTrailingZeros();
APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
@@ -453,11 +453,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ctlz: {
// If all bits above the first known one are known zero,
// this value is constant.
- const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
+ const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
uint32_t BitWidth = IT->getBitWidth();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
+ ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
KnownZero, KnownOne);
unsigned LeadingZeros = KnownOne.countLeadingZeros();
APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
@@ -468,8 +468,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
break;
case Intrinsic::uadd_with_overflow: {
- Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
- const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
+ Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
+ const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
uint32_t BitWidth = IT->getBitWidth();
APInt Mask = APInt::getSignBit(BitWidth);
APInt LHSKnownZero(BitWidth, 0);
@@ -513,19 +513,19 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// FALL THROUGH uadd into sadd
case Intrinsic::sadd_with_overflow:
// Canonicalize constants into the RHS.
- if (isa<Constant>(II->getOperand(1)) &&
- !isa<Constant>(II->getOperand(2))) {
- Value *LHS = II->getOperand(1);
- II->setOperand(1, II->getOperand(2));
- II->setOperand(2, LHS);
+ if (isa<Constant>(II->getArgOperand(0)) &&
+ !isa<Constant>(II->getArgOperand(1))) {
+ Value *LHS = II->getArgOperand(0);
+ II->setArgOperand(0, II->getArgOperand(1));
+ II->setArgOperand(1, LHS);
return II;
}
// X + undef -> undef
- if (isa<UndefValue>(II->getOperand(2)))
+ if (isa<UndefValue>(II->getArgOperand(1)))
return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
+ if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
// X + 0 -> {X, false}
if (RHS->isZero()) {
Constant *V[] = {
@@ -533,7 +533,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
ConstantInt::getFalse(II->getContext())
};
Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, II->getOperand(1), 0);
+ return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
}
}
break;
@@ -541,38 +541,38 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ssub_with_overflow:
// undef - X -> undef
// X - undef -> undef
- if (isa<UndefValue>(II->getOperand(1)) ||
- isa<UndefValue>(II->getOperand(2)))
+ if (isa<UndefValue>(II->getArgOperand(0)) ||
+ isa<UndefValue>(II->getArgOperand(1)))
return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
- if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
+ if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
// X - 0 -> {X, false}
if (RHS->isZero()) {
Constant *V[] = {
- UndefValue::get(II->getOperand(1)->getType()),
+ UndefValue::get(II->getArgOperand(0)->getType()),
ConstantInt::getFalse(II->getContext())
};
Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, II->getOperand(1), 0);
+ return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
}
}
break;
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
// Canonicalize constants into the RHS.
- if (isa<Constant>(II->getOperand(1)) &&
- !isa<Constant>(II->getOperand(2))) {
- Value *LHS = II->getOperand(1);
- II->setOperand(1, II->getOperand(2));
- II->setOperand(2, LHS);
+ if (isa<Constant>(II->getArgOperand(0)) &&
+ !isa<Constant>(II->getArgOperand(1))) {
+ Value *LHS = II->getArgOperand(0);
+ II->setArgOperand(0, II->getArgOperand(1));
+ II->setArgOperand(1, LHS);
return II;
}
// X * undef -> undef
- if (isa<UndefValue>(II->getOperand(2)))
+ if (isa<UndefValue>(II->getArgOperand(1)))
return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
- if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
+ if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
// X*0 -> {0, false}
if (RHSI->isZero())
return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));
@@ -580,11 +580,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// X * 1 -> {X, false}
if (RHSI->equalsInt(1)) {
Constant *V[] = {
- UndefValue::get(II->getOperand(1)->getType()),
+ UndefValue::get(II->getArgOperand(0)->getType()),
ConstantInt::getFalse(II->getContext())
};
Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, II->getOperand(1), 0);
+ return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
}
}
break;
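
All three with.overflow cases above exploit the same fact: with a trivial constant on the right-hand side, overflow is impossible, so the intrinsic's {value, overflow} aggregate can be materialized directly. A standalone sketch of the semantics in plain C++ (not the LLVM API):

  #include <cstdint>
  #include <utility>

  // llvm.uadd.with.overflow.i32, spelled out as scalar code.
  std::pair<uint32_t, bool> uadd_with_overflow(uint32_t x, uint32_t y) {
    uint32_t r = x + y;   // wraps modulo 2^32
    return {r, r < x};    // overflow bit: set iff the sum wrapped
  }

  // Hence the folds above: x + 0 and x - 0 -> {x, false},
  // x * 0 -> {0, false}, and x * 1 -> {x, false}.
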
@@ -595,8 +595,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_sse2_loadu_dq:
// Turn PPC lvx -> load if the pointer is known aligned.
// Turn X86 loadups -> load if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
- Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
+ if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
+ Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
}
@@ -604,22 +604,22 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
// Turn stvx -> store if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
+ if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
const Type *OpPtrTy =
- PointerType::getUnqual(II->getOperand(1)->getType());
- Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
- return new StoreInst(II->getOperand(1), Ptr);
+ PointerType::getUnqual(II->getArgOperand(0)->getType());
+ Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
+ return new StoreInst(II->getArgOperand(0), Ptr);
}
break;
case Intrinsic::x86_sse_storeu_ps:
case Intrinsic::x86_sse2_storeu_pd:
case Intrinsic::x86_sse2_storeu_dq:
// Turn X86 storeu -> store if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
+ if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
const Type *OpPtrTy =
- PointerType::getUnqual(II->getOperand(2)->getType());
- Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
- return new StoreInst(II->getOperand(2), Ptr);
+ PointerType::getUnqual(II->getArgOperand(1)->getType());
+ Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
+ return new StoreInst(II->getArgOperand(1), Ptr);
}
break;
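
The load and store cases are the same observation: the unaligned-access intrinsics exist only because alignment is unknown at the source level, so once GetOrEnforceKnownAlignment proves 16 bytes the call can become an ordinary load or store that later passes understand. The IR effect, sketched as comments for a 16-byte-aligned %p:

  // Before:  call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %v)
  // After:   %q = bitcast i8* %p to <4 x float>*
  //          store <4 x float> %v, <4 x float>* %q
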
@@ -627,12 +627,12 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// These intrinsics only demand the 0th element of their input vector. If
// we can simplify the input based on that, do so now.
unsigned VWidth =
- cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
+ cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
APInt DemandedElts(VWidth, 1);
APInt UndefElts(VWidth, 0);
- if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
+ if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
UndefElts)) {
- II->setOperand(1, V);
+ II->setArgOperand(0, V);
return II;
}
break;
@@ -640,7 +640,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_vperm:
// Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
- if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
+ if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
// Check that all of the elements are integer constants or undefs.
@@ -655,8 +655,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (AllEltsOk) {
// Cast the input vectors to byte vectors.
- Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
- Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
+ Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0), Mask->getType());
+ Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1), Mask->getType());
Value *Result = UndefValue::get(Op0->getType());
// Only extract each element once.
@@ -689,7 +689,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::stackrestore: {
// If the save is right next to the restore, remove the restore. This can
// happen when variable allocas are DCE'd.
- if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
+ if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
if (SS->getIntrinsicID() == Intrinsic::stacksave) {
BasicBlock::iterator BI = SS;
if (&*++BI == II)
@@ -772,13 +772,13 @@ protected:
NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
}
bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
- if (ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(SizeCIOp))) {
+ if (ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp - CallInst::ArgOffset))) {
if (SizeCI->isAllOnesValue())
return true;
if (isString)
return SizeCI->getZExtValue() >=
- GetStringLength(CI->getOperand(SizeArgOp));
- if (ConstantInt *Arg = dyn_cast<ConstantInt>(CI->getOperand(SizeArgOp)))
+ GetStringLength(CI->getArgOperand(SizeArgOp - CallInst::ArgOffset));
+ if (ConstantInt *Arg = dyn_cast<ConstantInt>(CI->getArgOperand(SizeArgOp - CallInst::ArgOffset)))
return SizeCI->getZExtValue() >= Arg->getZExtValue();
}
return false;
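
isFoldable still receives raw operand indices from its callers, so the patch subtracts CallInst::ArgOffset at the point of use instead of renumbering every call site. The all-ones size is the usual fortified-call sentinel; an illustrative C++ sketch, assuming glibc-style __memcpy_chk semantics:

  #include <cstddef>
  #include <cstdlib>
  #include <cstring>

  // __memcpy_chk(dst, src, n, objsize) aborts when n > objsize.
  // objsize == (size_t)-1 means "object size unknown": the guard can
  // never fire, which is the simplest case isFoldable accepts.
  void *checked_copy(void *dst, const void *src, std::size_t n,
                     std::size_t objsize) {
    if (objsize == (std::size_t)-1 || n <= objsize)
      return std::memcpy(dst, src, n);   // foldable to the plain call
    std::abort();                        // fortified failure path
  }
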
@@ -846,7 +846,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
CS.getInstruction());
- // If CS dues not return void then replaceAllUsesWith undef.
+ // If CS does not return void then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust itself.
if (!CS.getInstruction()->getType()->isVoidTy())
CS.getInstruction()->
@@ -1140,7 +1140,7 @@ Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
IntrinsicInst *Tramp =
cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
- Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
+ Function *NestF = cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
@@ -1181,7 +1181,7 @@ Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
do {
if (Idx == NestIdx) {
// Add the chain argument and attributes.
- Value *NestVal = Tramp->getOperand(3);
+ Value *NestVal = Tramp->getArgOperand(2);
if (NestVal->getType() != NestTy)
NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
NewArgs.push_back(NestVal);
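
For reference, the new indices match the intrinsic's signature at this revision, which the patch leaves unchanged:

  // declare i8* @llvm.init.trampoline(i8* <tramp>, i8* <func>, i8* <nest>)
  // The target function is argument 1 and the nest value argument 2,
  // which is what getArgOperand(1) and getArgOperand(2) now select.
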
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index b0137c4..505a0bf 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -22,19 +22,18 @@ using namespace PatternMatch;
/// X*Scale+Offset.
///
static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
- int &Offset) {
- assert(Val->getType()->isIntegerTy(32) && "Unexpected allocation size type!");
+ uint64_t &Offset) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
Offset = CI->getZExtValue();
Scale = 0;
- return ConstantInt::get(Type::getInt32Ty(Val->getContext()), 0);
+ return ConstantInt::get(Val->getType(), 0);
}
if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
if (I->getOpcode() == Instruction::Shl) {
// This is a value scaled by '1 << the shift amt'.
- Scale = 1U << RHS->getZExtValue();
+ Scale = UINT64_C(1) << RHS->getZExtValue();
Offset = 0;
return I->getOperand(0);
}
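
Two widenings land together here: Scale and Offset grow to 64 bits to match possibly-intptr_t alloca sizes, and the shifted constant grows with them, since 1U << shamt is a 32-bit shift that truncates the scale and is undefined for shamt >= 32. A minimal standalone illustration:

  #include <cstdint>

  uint64_t scale_from_shift(unsigned shamt) {
    // return 1U << shamt;           // pre-patch shape: UB for shamt >= 32
    return UINT64_C(1) << shamt;     // 64-bit shift, valid for shamt < 64
  }
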
@@ -100,7 +99,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
// See if we can satisfy the modulus by pulling a scale out of the array
// size argument.
unsigned ArraySizeScale;
- int ArrayOffset;
+ uint64_t ArrayOffset;
Value *NumElements = // See if the array size is a decomposable linear expr.
DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);
@@ -114,13 +113,13 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
if (Scale == 1) {
Amt = NumElements;
} else {
- Amt = ConstantInt::get(Type::getInt32Ty(CI.getContext()), Scale);
+ Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
// Insert before the alloca, not before the cast.
Amt = AllocaBuilder.CreateMul(Amt, NumElements, "tmp");
}
- if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
- Value *Off = ConstantInt::get(Type::getInt32Ty(CI.getContext()),
+ if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
+ Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
Offset, true);
Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 861cf92..6c00586 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1423,7 +1423,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
switch (II->getIntrinsicID()) {
case Intrinsic::bswap:
Worklist.Add(II);
- ICI.setOperand(0, II->getOperand(1));
+ ICI.setOperand(0, II->getArgOperand(0));
ICI.setOperand(1, ConstantInt::get(II->getContext(), RHSV.byteSwap()));
return &ICI;
case Intrinsic::ctlz:
@@ -1431,7 +1431,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// ctz(A) == bitwidth(a) -> A == 0 and likewise for !=
if (RHSV == RHS->getType()->getBitWidth()) {
Worklist.Add(II);
- ICI.setOperand(0, II->getOperand(1));
+ ICI.setOperand(0, II->getArgOperand(0));
ICI.setOperand(1, ConstantInt::get(RHS->getType(), 0));
return &ICI;
}
@@ -1440,13 +1440,13 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// popcount(A) == 0 -> A == 0 and likewise for !=
if (RHS->isZero()) {
Worklist.Add(II);
- ICI.setOperand(0, II->getOperand(1));
+ ICI.setOperand(0, II->getArgOperand(0));
ICI.setOperand(1, RHS);
return &ICI;
}
break;
default:
- break;
+ break;
}
}
}
@@ -1924,35 +1924,6 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
}
break;
}
- case Instruction::Call:
- // If we have (malloc != null), and if the malloc has a single use, we
- // can assume it is successful and remove the malloc.
- if (isMalloc(LHSI) && LHSI->hasOneUse() &&
- isa<ConstantPointerNull>(RHSC)) {
- // Need to explicitly erase malloc call here, instead of adding it to
- // Worklist, because it won't get DCE'd from the Worklist since
- // isInstructionTriviallyDead() returns false for function calls.
- // It is OK to replace LHSI/MallocCall with Undef because the
- // instruction that uses it will be erased via Worklist.
- if (extractMallocCall(LHSI)) {
- LHSI->replaceAllUsesWith(UndefValue::get(LHSI->getType()));
- EraseInstFromFunction(*LHSI);
- return ReplaceInstUsesWith(I,
- ConstantInt::get(Type::getInt1Ty(I.getContext()),
- !I.isTrueWhenEqual()));
- }
- if (CallInst* MallocCall = extractMallocCallFromBitCast(LHSI))
- if (MallocCall->hasOneUse()) {
- MallocCall->replaceAllUsesWith(
- UndefValue::get(MallocCall->getType()));
- EraseInstFromFunction(*MallocCall);
- Worklist.Add(LHSI); // The malloc's bitcast use.
- return ReplaceInstUsesWith(I,
- ConstantInt::get(Type::getInt1Ty(I.getContext()),
- !I.isTrueWhenEqual()));
- }
- }
- break;
case Instruction::IntToPtr:
// icmp pred inttoptr(X), null -> icmp pred X, 0
if (RHSC->isNullValue() && TD &&
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 0f2a24f..8933a0b 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -13,6 +13,7 @@
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
@@ -22,6 +23,18 @@ using namespace llvm;
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
+ // Ensure that the alloca array size argument has type intptr_t, so that
+ // any casting is exposed early.
+ if (TD) {
+ const Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
+ if (AI.getArraySize()->getType() != IntPtrTy) {
+ Value *V = Builder->CreateIntCast(AI.getArraySize(),
+ IntPtrTy, false);
+ AI.setOperand(0, V);
+ return &AI;
+ }
+ }
+
// Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
if (AI.isArrayAllocation()) { // Check C != 1
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
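
The new preamble canonicalizes the alloca's array-size operand to intptr_t before any other transform inspects it, turning implicit widenings into explicit casts the rest of instcombine can reason about. On an LP64 target the effect is roughly:

  // Before:  %buf = alloca i8, i32 %n        ; size narrower than intptr_t
  // After:   %n.c = zext i32 %n to i64       ; CreateIntCast, unsigned
  //          %buf = alloca i8, i64 %n.c
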
@@ -352,10 +365,11 @@ DbgDeclareInst *InstCombiner::hasOneUsePlusDeclare(Value *V) {
return 0;
for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
UI != E; ++UI) {
- if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI))
+ User *U = *UI;
+ if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(U))
return DI;
- if (isa<BitCastInst>(UI) && UI->hasOneUse()) {
- if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI->use_begin()))
+ if (isa<BitCastInst>(U) && U->hasOneUse()) {
+ if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(U->use_begin()))
return DI;
}
}
@@ -511,17 +525,20 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
// Determine whether Dest has exactly two predecessors and, if so, compute
// the other predecessor.
pred_iterator PI = pred_begin(DestBB);
+ BasicBlock *P = *PI;
BasicBlock *OtherBB = 0;
- if (*PI != StoreBB)
- OtherBB = *PI;
- ++PI;
- if (PI == pred_end(DestBB))
+
+ if (P != StoreBB)
+ OtherBB = P;
+
+ if (++PI == pred_end(DestBB))
return false;
- if (*PI != StoreBB) {
+ P = *PI;
+ if (P != StoreBB) {
if (OtherBB)
return false;
- OtherBB = *PI;
+ OtherBB = P;
}
if (++PI != pred_end(DestBB))
return false;
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 65f0393..f7fc62f 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -230,8 +230,9 @@ static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
bool isAddressTaken = false;
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
UI != E; ++UI) {
- if (isa<LoadInst>(UI)) continue;
- if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
+ User *U = *UI;
+ if (isa<LoadInst>(U)) continue;
+ if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
// If storing TO the alloca, then the address isn't taken.
if (SI->getOperand(1) == AI) continue;
}
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index c958cde..f9ffdb1 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -329,6 +329,37 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
}
}
+ // Transform (X >s -1) ? C1 : C2 --> ((X >>s 31) & (C2 - C1)) + C1
+ // and (X <s 0) ? C2 : C1 --> ((X >>s 31) & (C2 - C1)) + C1
+ // FIXME: Type and constness constraints could be lifted, but we have to
+ // watch code size carefully. We should consider xor instead of
+ // sub/add when we decide to do that.
+ if (const IntegerType *Ty = dyn_cast<IntegerType>(CmpLHS->getType())) {
+ if (TrueVal->getType() == Ty) {
+ if (ConstantInt *Cmp = dyn_cast<ConstantInt>(CmpRHS)) {
+ ConstantInt *C1 = NULL, *C2 = NULL;
+ if (Pred == ICmpInst::ICMP_SGT && Cmp->isAllOnesValue()) {
+ C1 = dyn_cast<ConstantInt>(TrueVal);
+ C2 = dyn_cast<ConstantInt>(FalseVal);
+ } else if (Pred == ICmpInst::ICMP_SLT && Cmp->isNullValue()) {
+ C1 = dyn_cast<ConstantInt>(FalseVal);
+ C2 = dyn_cast<ConstantInt>(TrueVal);
+ }
+ if (C1 && C2) {
+ // This shift results in either -1 or 0.
+ Value *AShr = Builder->CreateAShr(CmpLHS, Ty->getBitWidth()-1);
+
+ // Check if we can express the operation with a single or.
+ if (C2->isAllOnesValue())
+ return ReplaceInstUsesWith(SI, Builder->CreateOr(AShr, C1));
+
+ Value *And = Builder->CreateAnd(AShr, C2->getValue()-C1->getValue());
+ return ReplaceInstUsesWith(SI, Builder->CreateAdd(And, C1));
+ }
+ }
+ }
+ }
+
if (CmpLHS == TrueVal && CmpRHS == FalseVal) {
// Transform (X == Y) ? X : Y -> Y
if (Pred == ICmpInst::ICMP_EQ)
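
The transform added above is the classic branchless select-by-sign idiom. A self-contained i32 sketch (relying on >> of a negative value being an arithmetic shift, as LLVM's ashr guarantees):

  #include <cstdint>

  // (x > -1) ? c1 : c2  ==  ((x >>s 31) & (c2 - c1)) + c1
  int32_t select_by_sign(int32_t x, int32_t c1, int32_t c2) {
    uint32_t mask = uint32_t(x >> 31);  // 0 when x >= 0, ~0u when x < 0
    return int32_t(((uint32_t(c2) - uint32_t(c1)) & mask) + uint32_t(c1));
  }
  // mask == 0   -> c1, the (x > -1) arm;  mask == ~0u -> c2.
  // When c2 is all-ones, (ashr | c1) already yields c1 or -1, which is
  // the single-or special case in the code above.
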
@@ -668,6 +699,34 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
SI.setOperand(2, TrueVal);
return &SI;
}
+
+  // select (A == 0 | B == 0), T, F --> select (A != 0 & B != 0), F, T
+ // Note: This is a canonicalization rather than an optimization, and is used
+ // to expose opportunities to other instcombine transforms.
+ Instruction* CondInst = dyn_cast<Instruction>(CondVal);
+ if (CondInst && CondInst->hasOneUse() &&
+ CondInst->getOpcode() == Instruction::Or) {
+ ICmpInst *LHSCmp = dyn_cast<ICmpInst>(CondInst->getOperand(0));
+ ICmpInst *RHSCmp = dyn_cast<ICmpInst>(CondInst->getOperand(1));
+ if (LHSCmp && LHSCmp->hasOneUse() &&
+ LHSCmp->getPredicate() == ICmpInst::ICMP_EQ &&
+ RHSCmp && RHSCmp->hasOneUse() &&
+ RHSCmp->getPredicate() == ICmpInst::ICMP_EQ) {
+ ConstantInt* C1 = dyn_cast<ConstantInt>(LHSCmp->getOperand(1));
+ ConstantInt* C2 = dyn_cast<ConstantInt>(RHSCmp->getOperand(1));
+ if (C1 && C1->isZero() && C2 && C2->isZero()) {
+ LHSCmp->setPredicate(ICmpInst::ICMP_NE);
+ RHSCmp->setPredicate(ICmpInst::ICMP_NE);
+ Value *And =
+ InsertNewInstBefore(BinaryOperator::CreateAnd(LHSCmp, RHSCmp,
+ "and."+CondVal->getName()), SI);
+ SI.setOperand(0, And);
+ SI.setOperand(1, FalseVal);
+ SI.setOperand(2, TrueVal);
+ return &SI;
+ }
+ }
+ }
return 0;
}
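
This canonicalization is De Morgan's law pushed through the select: (A == 0 || B == 0) is the negation of (A != 0 && B != 0), so flipping both predicates and swapping the arms preserves the result while exposing an and of ne-comparisons to later transforms. Scalar sketch:

  int canonical(int a, int b, int t, int f) {
    // return (a == 0 || b == 0) ? t : f;   // original form
    return (a != 0 && b != 0) ? f : t;      // canonical form, same value
  }
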
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 836bda3..e5ce8a6 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -404,7 +404,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == Op1C->getZExtValue()){
bool isCtPop = II->getIntrinsicID() == Intrinsic::ctpop;
Constant *RHS = ConstantInt::getSigned(Op0->getType(), isCtPop ? -1:0);
- Value *Cmp = Builder->CreateICmpEQ(II->getOperand(1), RHS);
+ Value *Cmp = Builder->CreateICmpEQ(II->getArgOperand(0), RHS);
return new ZExtInst(Cmp, II->getType());
}
}
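
The fold guarded here reads ctpop(x) >> log2(BitWidth) as a disguised equality test: for i32 the shifted popcount is nonzero exactly when all 32 bits are set (and the cttz analogue fires exactly when x == 0). Standalone check, assuming a GCC/Clang-style builtin:

  #include <cstdint>

  uint32_t popcount_shifted(uint32_t x) {
    // return uint32_t(__builtin_popcount(x)) >> 5;  // original shape
    return uint32_t(x == 0xFFFFFFFFu);   // folded: zext(icmp eq x, -1)
  }
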
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index cd41844..adf7a76 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -732,10 +732,10 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// the right place.
Instruction *NewVal;
if (InputBit > ResultBit)
- NewVal = BinaryOperator::CreateLShr(I->getOperand(1),
+ NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
ConstantInt::get(I->getType(), InputBit-ResultBit));
else
- NewVal = BinaryOperator::CreateShl(I->getOperand(1),
+ NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
ConstantInt::get(I->getType(), ResultBit-InputBit));
NewVal->takeName(I);
return InsertNewInstBefore(NewVal, *I);
@@ -1052,12 +1052,12 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
case Intrinsic::x86_sse2_mul_sd:
case Intrinsic::x86_sse2_min_sd:
case Intrinsic::x86_sse2_max_sd:
- TmpV = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
+ TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
UndefElts, Depth+1);
- if (TmpV) { II->setOperand(1, TmpV); MadeChange = true; }
- TmpV = SimplifyDemandedVectorElts(II->getOperand(2), DemandedElts,
+ if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
+ TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
UndefElts2, Depth+1);
- if (TmpV) { II->setOperand(2, TmpV); MadeChange = true; }
+ if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
// If only the low elt is demanded and this is a scalarizable intrinsic,
// scalarize it now.
@@ -1069,8 +1069,8 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
case Intrinsic::x86_sse2_sub_sd:
case Intrinsic::x86_sse2_mul_sd:
// TODO: Lower MIN/MAX/ABS/etc
- Value *LHS = II->getOperand(1);
- Value *RHS = II->getOperand(2);
+ Value *LHS = II->getArgOperand(0);
+ Value *RHS = II->getArgOperand(1);
// Extract the element as scalars.
LHS = InsertNewInstBefore(ExtractElementInst::Create(LHS,
ConstantInt::get(Type::getInt32Ty(I->getContext()), 0U)), *II);
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index af9ec5c..af2958f 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -710,8 +710,55 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
return 0;
}
-Instruction *InstCombiner::visitFree(Instruction &FI) {
- Value *Op = FI.getOperand(1);
+
+
+static bool IsOnlyNullComparedAndFreed(const Value &V) {
+ for (Value::const_use_iterator UI = V.use_begin(), UE = V.use_end();
+ UI != UE; ++UI) {
+ const User *U = *UI;
+ if (isFreeCall(U))
+ continue;
+ if (const ICmpInst *ICI = dyn_cast<ICmpInst>(U))
+ if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1)))
+ continue;
+ return false;
+ }
+ return true;
+}
+
+Instruction *InstCombiner::visitMalloc(Instruction &MI) {
+  // If we have a malloc call which is only used in any number of comparisons
+ // to null and free calls, delete the calls and replace the comparisons with
+ // true or false as appropriate.
+ if (IsOnlyNullComparedAndFreed(MI)) {
+ for (Value::use_iterator UI = MI.use_begin(), UE = MI.use_end();
+ UI != UE;) {
+ // We can assume that every remaining use is a free call or an icmp eq/ne
+ // to null, so the cast is safe.
+ Instruction *I = cast<Instruction>(*UI);
+
+ // Early increment here, as we're about to get rid of the user.
+ ++UI;
+
+ if (isFreeCall(I)) {
+ EraseInstFromFunction(*cast<CallInst>(I));
+ continue;
+ }
+ // Again, the cast is safe.
+ ICmpInst *C = cast<ICmpInst>(I);
+ ReplaceInstUsesWith(*C, ConstantInt::get(Type::getInt1Ty(C->getContext()),
+ C->isFalseWhenEqual()));
+ EraseInstFromFunction(*C);
+ }
+ return EraseInstFromFunction(MI);
+ }
+ return 0;
+}
+
+
+
+Instruction *InstCombiner::visitFree(CallInst &FI) {
+ Value *Op = FI.getArgOperand(0);
// free undef -> unreachable.
if (isa<UndefValue>(Op)) {
@@ -726,23 +773,6 @@ Instruction *InstCombiner::visitFree(Instruction &FI) {
if (isa<ConstantPointerNull>(Op))
return EraseInstFromFunction(FI);
- // If we have a malloc call whose only use is a free call, delete both.
- if (isMalloc(Op)) {
- if (CallInst* CI = extractMallocCallFromBitCast(Op)) {
- if (Op->hasOneUse() && CI->hasOneUse()) {
- EraseInstFromFunction(FI);
- EraseInstFromFunction(*CI);
- return EraseInstFromFunction(*cast<Instruction>(Op));
- }
- } else {
- // Op is a call to malloc
- if (Op->hasOneUse()) {
- EraseInstFromFunction(FI);
- return EraseInstFromFunction(*cast<Instruction>(Op));
- }
- }
- }
-
return 0;
}
@@ -896,7 +926,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
// We're extracting from an intrinsic, see if we're the only user, which
// allows us to simplify multiple result intrinsics to simpler things that
- // just get one value..
+ // just get one value.
if (II->hasOneUse()) {
// Check if we're grabbing the overflow bit or the result of a 'with
// overflow' intrinsic. If it's the latter we can remove the intrinsic
@@ -905,7 +935,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow:
if (*EV.idx_begin() == 0) { // Normal result.
- Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
+ Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
II->replaceAllUsesWith(UndefValue::get(II->getType()));
EraseInstFromFunction(*II);
return BinaryOperator::CreateAdd(LHS, RHS);
@@ -914,7 +944,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow:
if (*EV.idx_begin() == 0) { // Normal result.
- Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
+ Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
II->replaceAllUsesWith(UndefValue::get(II->getType()));
EraseInstFromFunction(*II);
return BinaryOperator::CreateSub(LHS, RHS);
@@ -923,7 +953,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow:
if (*EV.idx_begin() == 0) { // Normal result.
- Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
+ Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
II->replaceAllUsesWith(UndefValue::get(II->getType()));
EraseInstFromFunction(*II);
return BinaryOperator::CreateMul(LHS, RHS);
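
All three cases reduce the same way: when only field 0 of the result is extracted and the intrinsic has no other user, the overflow bit is dead and the call collapses to the plain binary operator. Sketched as IR in comments:

  // %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  // %v = extractvalue { i32, i1 } %s, 0   ; sole use; overflow bit dead
  //   ==>
  // %v = add i32 %a, %b                   ; intrinsic erased
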
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp b/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
index 5650150..41e3a39 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
@@ -143,7 +143,7 @@ bool OptimalEdgeProfiler::runOnModule(Module &M) {
ProfileInfo::Edge edge = ProfileInfo::getEdge(0,entry);
if (!std::binary_search(MST.begin(), MST.end(), edge)) {
printEdgeCounter(edge,entry,i);
- IncrementCounterInBlock(entry, i, Counters); NumEdgesInserted++;
+ IncrementCounterInBlock(entry, i, Counters); ++NumEdgesInserted;
Initializer[i++] = (Zero);
} else{
Initializer[i++] = (Uncounted);
@@ -166,7 +166,7 @@ bool OptimalEdgeProfiler::runOnModule(Module &M) {
ProfileInfo::Edge edge = ProfileInfo::getEdge(BB,0);
if (!std::binary_search(MST.begin(), MST.end(), edge)) {
printEdgeCounter(edge,BB,i);
- IncrementCounterInBlock(BB, i, Counters); NumEdgesInserted++;
+ IncrementCounterInBlock(BB, i, Counters); ++NumEdgesInserted;
Initializer[i++] = (Zero);
} else{
Initializer[i++] = (Uncounted);
@@ -189,11 +189,11 @@ bool OptimalEdgeProfiler::runOnModule(Module &M) {
if (TI->getNumSuccessors() == 1) {
// Insert counter at the start of the block
printEdgeCounter(edge,BB,i);
- IncrementCounterInBlock(BB, i, Counters); NumEdgesInserted++;
+ IncrementCounterInBlock(BB, i, Counters); ++NumEdgesInserted;
} else {
// Insert counter at the start of the block
printEdgeCounter(edge,Succ,i);
- IncrementCounterInBlock(Succ, i, Counters); NumEdgesInserted++;
+ IncrementCounterInBlock(Succ, i, Counters); ++NumEdgesInserted;
}
Initializer[i++] = (Zero);
} else {
diff --git a/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp b/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp
index 8662a82..1a30e9b 100644
--- a/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Instrumentation/ProfilingUtils.cpp
@@ -61,8 +61,8 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
}
Args[3] = ConstantInt::get(Type::getInt32Ty(Context), NumElements);
- Instruction *InitCall = CallInst::Create(InitFn, Args.begin(), Args.end(),
- "newargc", InsertPos);
+ CallInst *InitCall = CallInst::Create(InitFn, Args.begin(), Args.end(),
+ "newargc", InsertPos);
// If argc or argv are not available in main, just pass null values in.
Function::arg_iterator AI;
@@ -73,10 +73,10 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
if (AI->getType() != ArgVTy) {
Instruction::CastOps opcode = CastInst::getCastOpcode(AI, false, ArgVTy,
false);
- InitCall->setOperand(2,
+ InitCall->setArgOperand(1,
CastInst::Create(opcode, AI, ArgVTy, "argv.cast", InitCall));
} else {
- InitCall->setOperand(2, AI);
+ InitCall->setArgOperand(1, AI);
}
/* FALL THROUGH */
@@ -93,12 +93,12 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
}
opcode = CastInst::getCastOpcode(AI, true,
Type::getInt32Ty(Context), true);
- InitCall->setOperand(1,
+ InitCall->setArgOperand(0,
CastInst::Create(opcode, AI, Type::getInt32Ty(Context),
"argc.cast", InitCall));
} else {
AI->replaceAllUsesWith(InitCall);
- InitCall->setOperand(1, AI);
+ InitCall->setArgOperand(0, AI);
}
case 0: break;
diff --git a/contrib/llvm/lib/Transforms/Scalar/ABCD.cpp b/contrib/llvm/lib/Transforms/Scalar/ABCD.cpp
index 6135992..dcf14a6 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ABCD.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ABCD.cpp
@@ -230,7 +230,7 @@ class ABCD : public FunctionPass {
DenseMapIterator<Value*, MemoizedResultChart> begin = map.begin();
DenseMapIterator<Value*, MemoizedResultChart> end = map.end();
for (; begin != end; ++begin) {
- begin->second.clear();
+ begin->second.clear();
}
map.clear();
}
@@ -396,8 +396,8 @@ class ABCD : public FunctionPass {
/// this case the method returns true, otherwise false. It also obtains the
/// Instruction and ConstantInt from the BinaryOperator and returns it.
bool createBinaryOperatorInfo(BinaryOperator *BO, Instruction **I1,
- Instruction **I2, ConstantInt **C1,
- ConstantInt **C2);
+ Instruction **I2, ConstantInt **C1,
+ ConstantInt **C2);
/// This method creates a constraint between a Sigma and an Instruction.
/// These constraints are created as soon as we find a comparator that uses a
diff --git a/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp b/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
index 5a49841..2d19467 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ADCE.cpp
@@ -83,7 +83,7 @@ bool ADCE::runOnFunction(Function& F) {
for (SmallVector<Instruction*, 1024>::iterator I = worklist.begin(),
E = worklist.end(); I != E; ++I) {
- NumRemoved++;
+ ++NumRemoved;
(*I)->eraseFromParent();
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp b/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 93e9bfb..272066c 100644
--- a/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -548,7 +548,8 @@ protected:
CI->eraseFromParent();
}
bool isFoldable(unsigned SizeCIOp, unsigned, bool) const {
- if (ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(SizeCIOp)))
+ if (ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp
+ - CallInst::ArgOffset)))
return SizeCI->isAllOnesValue();
return false;
}
@@ -559,7 +560,7 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
// Lower all uses of llvm.objectsize.*
IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
- bool Min = (cast<ConstantInt>(II->getOperand(2))->getZExtValue() == 1);
+ bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
const Type *ReturnTy = CI->getType();
Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
CI->replaceAllUsesWith(RetVal);
@@ -759,8 +760,7 @@ bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
}
// Compute the constraint code and ConstraintType to use.
- TLI->ComputeConstraintToUse(OpInfo, SDValue(),
- OpInfo.ConstraintType == TargetLowering::C_Memory);
+ TLI->ComputeConstraintToUse(OpInfo, SDValue());
if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
OpInfo.isIndirect) {
diff --git a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 09c01d3..e047e4f 100644
--- a/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -56,7 +56,8 @@ namespace {
}
bool runOnBasicBlock(BasicBlock &BB);
- bool handleFreeWithNonTrivialDependency(Instruction *F, MemDepResult Dep);
+ bool handleFreeWithNonTrivialDependency(const CallInst *F,
+ MemDepResult Dep);
bool handleEndBlock(BasicBlock &BB);
bool RemoveUndeadPointers(Value *Ptr, uint64_t killPointerSize,
BasicBlock::iterator &BBI,
@@ -73,7 +74,6 @@ namespace {
AU.addRequired<AliasAnalysis>();
AU.addRequired<MemoryDependenceAnalysis>();
AU.addPreserved<DominatorTree>();
- AU.addPreserved<AliasAnalysis>();
AU.addPreserved<MemoryDependenceAnalysis>();
}
@@ -123,14 +123,15 @@ static Value *getPointerOperand(Instruction *I) {
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->getPointerOperand();
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
- return MI->getOperand(1);
-
- switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
+ return MI->getArgOperand(0);
+
+ IntrinsicInst *II = cast<IntrinsicInst>(I);
+ switch (II->getIntrinsicID()) {
default: assert(false && "Unexpected intrinsic!");
case Intrinsic::init_trampoline:
- return I->getOperand(1);
+ return II->getArgOperand(0);
case Intrinsic::lifetime_end:
- return I->getOperand(2);
+ return II->getArgOperand(1);
}
}
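
The differing indices follow the intrinsic signatures at this revision: the pointer written is the second argument of lifetime.end (the first is the size) but the first of init.trampoline:

  // declare void @llvm.lifetime.end(i64 <size>, i8* nocapture <ptr>)
  // declare i8*  @llvm.init.trampoline(i8* <tramp>, i8* <func>, i8* <nest>)
  // Hence getArgOperand(1) vs. getArgOperand(0) here, and getArgOperand(0)
  // for the length in getStoreSize below.
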
@@ -147,12 +148,13 @@ static unsigned getStoreSize(Instruction *I, const TargetData *TD) {
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
Len = MI->getLength();
} else {
- switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
+ IntrinsicInst *II = cast<IntrinsicInst>(I);
+ switch (II->getIntrinsicID()) {
default: assert(false && "Unexpected intrinsic!");
case Intrinsic::init_trampoline:
return -1u;
case Intrinsic::lifetime_end:
- Len = I->getOperand(1);
+ Len = II->getArgOperand(0);
break;
}
}
@@ -201,8 +203,8 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
if (InstDep.isNonLocal()) continue;
// Handle frees whose dependencies are non-trivial.
- if (isFreeCall(Inst)) {
- MadeChange |= handleFreeWithNonTrivialDependency(Inst, InstDep);
+ if (const CallInst *F = isFreeCall(Inst)) {
+ MadeChange |= handleFreeWithNonTrivialDependency(F, InstDep);
continue;
}
@@ -218,7 +220,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
isElidable(DepStore)) {
// Delete the store and now-dead instructions that feed it.
DeleteDeadInstruction(DepStore);
- NumFastStores++;
+ ++NumFastStores;
MadeChange = true;
// DeleteDeadInstruction can delete the current instruction in loop
@@ -249,7 +251,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
BBI = BB.begin();
else if (BBI != BB.begin()) // Revisit this instruction if possible.
--BBI;
- NumFastStores++;
+ ++NumFastStores;
MadeChange = true;
continue;
}
@@ -270,7 +272,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
BBI = BB.begin();
else if (BBI != BB.begin()) // Revisit this instruction if possible.
--BBI;
- NumFastStores++;
+ ++NumFastStores;
MadeChange = true;
continue;
}
@@ -287,7 +289,8 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
/// handleFreeWithNonTrivialDependency - Handle frees of entire structures whose
/// dependency is a store to a field of that structure.
-bool DSE::handleFreeWithNonTrivialDependency(Instruction *F, MemDepResult Dep) {
+bool DSE::handleFreeWithNonTrivialDependency(const CallInst *F,
+ MemDepResult Dep) {
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
Instruction *Dependency = Dep.getInst();
@@ -297,13 +300,13 @@ bool DSE::handleFreeWithNonTrivialDependency(Instruction *F, MemDepResult Dep) {
Value *DepPointer = getPointerOperand(Dependency)->getUnderlyingObject();
// Check for aliasing.
- if (AA.alias(F->getOperand(1), 1, DepPointer, 1) !=
+ if (AA.alias(F->getArgOperand(0), 1, DepPointer, 1) !=
AliasAnalysis::MustAlias)
return false;
// DCE instructions only used to calculate that store
DeleteDeadInstruction(Dependency);
- NumFastStores++;
+ ++NumFastStores;
return true;
}
@@ -349,9 +352,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
if (deadPointers.count(pointerOperand)) {
// DCE instructions only used to calculate that store.
Instruction *Dead = BBI;
- BBI++;
+ ++BBI;
DeleteDeadInstruction(Dead, &deadPointers);
- NumFastStores++;
+ ++NumFastStores;
MadeChange = true;
continue;
}
@@ -371,9 +374,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// However, if this load is unused and not volatile, we can go ahead and
// remove it, and not have to worry about it making our pointer undead!
if (L->use_empty() && !L->isVolatile()) {
- BBI++;
+ ++BBI;
DeleteDeadInstruction(L, &deadPointers);
- NumFastOther++;
+ ++NumFastOther;
MadeChange = true;
continue;
}
@@ -391,9 +394,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// Dead alloca's can be DCE'd when we reach them
if (A->use_empty()) {
- BBI++;
+ ++BBI;
DeleteDeadInstruction(A, &deadPointers);
- NumFastOther++;
+ ++NumFastOther;
MadeChange = true;
}
@@ -426,9 +429,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
getPointerSize(*I));
if (A == AliasAnalysis::ModRef)
- modRef++;
+ ++modRef;
else
- other++;
+ ++other;
if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
dead.push_back(*I);
@@ -442,9 +445,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
} else if (isInstructionTriviallyDead(BBI)) {
// For any non-memory-affecting non-terminators, DCE them as we reach them
Instruction *Inst = BBI;
- BBI++;
+ ++BBI;
DeleteDeadInstruction(Inst, &deadPointers);
- NumFastOther++;
+ ++NumFastOther;
MadeChange = true;
continue;
}
@@ -497,7 +500,7 @@ bool DSE::RemoveUndeadPointers(Value *killPointer, uint64_t killPointerSize,
// Remove it!
++BBI;
DeleteDeadInstruction(S, &deadPointers);
- NumFastStores++;
+ ++NumFastStores;
MadeChange = true;
continue;
diff --git a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
index ca8ab49..88b6776 100644
--- a/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -35,6 +35,7 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
@@ -271,7 +272,8 @@ Expression ValueTable::create_expression(CallInst* C) {
e.function = C->getCalledFunction();
e.opcode = Expression::CALL;
- for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
+ CallSite CS(C);
+ for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
I != E; ++I)
e.varargs.push_back(lookup_or_add(*I));
@@ -447,14 +449,14 @@ uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
if (local_dep.isDef()) {
CallInst* local_cdep = cast<CallInst>(local_dep.getInst());
- if (local_cdep->getNumOperands() != C->getNumOperands()) {
+ if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
- for (unsigned i = 1; i < C->getNumOperands(); ++i) {
- uint32_t c_vn = lookup_or_add(C->getOperand(i));
- uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
+ for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
+ uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
+ uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
if (c_vn != cd_vn) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
@@ -504,13 +506,13 @@ uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
return nextValueNumber++;
}
- if (cdep->getNumOperands() != C->getNumOperands()) {
+ if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
- for (unsigned i = 1; i < C->getNumOperands(); ++i) {
- uint32_t c_vn = lookup_or_add(C->getOperand(i));
- uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
+ for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
+ uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
+ uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
if (c_vn != cd_vn) {
valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
@@ -1500,7 +1502,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
MD->invalidateCachedPointerInfo(V);
VN.erase(LI);
toErase.push_back(LI);
- NumGVNLoad++;
+ ++NumGVNLoad;
return true;
}
@@ -1723,7 +1725,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
MD->invalidateCachedPointerInfo(V);
VN.erase(LI);
toErase.push_back(LI);
- NumPRELoad++;
+ ++NumPRELoad;
return true;
}
@@ -1784,7 +1786,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
MD->invalidateCachedPointerInfo(AvailVal);
VN.erase(L);
toErase.push_back(L);
- NumGVNLoad++;
+ ++NumGVNLoad;
return true;
}
@@ -1830,7 +1832,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
MD->invalidateCachedPointerInfo(StoredVal);
VN.erase(L);
toErase.push_back(L);
- NumGVNLoad++;
+ ++NumGVNLoad;
return true;
}
@@ -1860,7 +1862,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
MD->invalidateCachedPointerInfo(DepLI);
VN.erase(L);
toErase.push_back(L);
- NumGVNLoad++;
+ ++NumGVNLoad;
return true;
}
@@ -1871,7 +1873,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
VN.erase(L);
toErase.push_back(L);
- NumGVNLoad++;
+ ++NumGVNLoad;
return true;
}
@@ -1882,7 +1884,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
VN.erase(L);
toErase.push_back(L);
- NumGVNLoad++;
+ ++NumGVNLoad;
return true;
}
}
@@ -2014,7 +2016,7 @@ bool GVN::runOnFunction(Function& F) {
BasicBlock *BB = FI;
++FI;
bool removedBlock = MergeBlockIntoPredecessor(BB, this);
- if (removedBlock) NumGVNBlocks++;
+ if (removedBlock) ++NumGVNBlocks;
Changed |= removedBlock;
}
@@ -2126,27 +2128,28 @@ bool GVN::performPRE(Function &F) {
for (pred_iterator PI = pred_begin(CurrentBlock),
PE = pred_end(CurrentBlock); PI != PE; ++PI) {
+ BasicBlock *P = *PI;
// We're not interested in PRE where the block is its
// own predecessor, or in blocks with predecessors
// that are not reachable.
- if (*PI == CurrentBlock) {
+ if (P == CurrentBlock) {
NumWithout = 2;
break;
- } else if (!localAvail.count(*PI)) {
+ } else if (!localAvail.count(P)) {
NumWithout = 2;
break;
}
DenseMap<uint32_t, Value*>::iterator predV =
- localAvail[*PI]->table.find(ValNo);
- if (predV == localAvail[*PI]->table.end()) {
- PREPred = *PI;
- NumWithout++;
+ localAvail[P]->table.find(ValNo);
+ if (predV == localAvail[P]->table.end()) {
+ PREPred = P;
+ ++NumWithout;
} else if (predV->second == CurInst) {
NumWithout = 2;
} else {
- predMap[*PI] = predV->second;
- NumWith++;
+ predMap[P] = predV->second;
+ ++NumWith;
}
}
@@ -2201,7 +2204,7 @@ bool GVN::performPRE(Function &F) {
PREInstr->setName(CurInst->getName() + ".pre");
predMap[PREPred] = PREInstr;
VN.add(PREInstr, ValNo);
- NumGVNPRE++;
+ ++NumGVNPRE;
// Update the availability map to include the new instruction.
localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
@@ -2211,8 +2214,10 @@ bool GVN::performPRE(Function &F) {
CurInst->getName() + ".pre-phi",
CurrentBlock->begin());
for (pred_iterator PI = pred_begin(CurrentBlock),
- PE = pred_end(CurrentBlock); PI != PE; ++PI)
- Phi->addIncoming(predMap[*PI], *PI);
+ PE = pred_end(CurrentBlock); PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ Phi->addIncoming(predMap[P], P);
+ }
VN.add(Phi, ValNo);
localAvail[CurrentBlock]->table[ValNo] = Phi;
diff --git a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index 36bea67..b5c9dd8 100644
--- a/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -467,6 +467,17 @@ void IndVarSimplify::EliminateIVRemainders() {
}
bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
+ // If LoopSimplify form is not available, stay out of trouble. Some notes:
+ // - LSR currently only supports LoopSimplify-form loops. Indvars'
+ // canonicalization can be a pessimization without LSR to "clean up"
+ // afterwards.
+ // - We depend on having a preheader; in particular,
+ // Loop::getCanonicalInductionVariable only supports loops with preheaders,
+ // and we're in trouble if we can't find the induction variable even when
+ // we've manually inserted one.
+ if (!L->isLoopSimplifyForm())
+ return false;
+
IU = &getAnalysis<IVUsers>();
LI = &getAnalysis<LoopInfo>();
SE = &getAnalysis<ScalarEvolution>();
@@ -760,8 +771,9 @@ void IndVarSimplify::SinkUnusedInvariants(Loop *L) {
bool UsedInLoop = false;
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI) {
- BasicBlock *UseBB = cast<Instruction>(UI)->getParent();
- if (PHINode *P = dyn_cast<PHINode>(UI)) {
+ User *U = *UI;
+ BasicBlock *UseBB = cast<Instruction>(U)->getParent();
+ if (PHINode *P = dyn_cast<PHINode>(U)) {
unsigned i =
PHINode::getIncomingValueNumForOperand(UI.getOperandNo());
UseBB = P->getIncomingBlock(i);
diff --git a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index df05b71..edce14c 100644
--- a/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -18,6 +18,7 @@
#include "llvm/Pass.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
@@ -288,14 +289,15 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
// Perhaps getConstantOnEdge should be smart enough to do this?
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *P = *PI;
// If the value is known by LazyValueInfo to be a constant in a
// predecessor, use that information to try to thread this block.
- Constant *PredCst = LVI->getConstantOnEdge(V, *PI, BB);
+ Constant *PredCst = LVI->getConstantOnEdge(V, P, BB);
if (PredCst == 0 ||
(!isa<ConstantInt>(PredCst) && !isa<UndefValue>(PredCst)))
continue;
- Result.push_back(std::make_pair(dyn_cast<ConstantInt>(PredCst), *PI));
+ Result.push_back(std::make_pair(dyn_cast<ConstantInt>(PredCst), P));
}
return !Result.empty();
@@ -345,8 +347,19 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
}
for (unsigned i = 0, e = RHSVals.size(); i != e; ++i)
if (RHSVals[i].first == InterestingVal || RHSVals[i].first == 0) {
- Result.push_back(RHSVals[i]);
- Result.back().first = InterestingVal;
+ // If we already inferred a value for this block on the LHS, don't
+ // re-add it.
+ bool HasValue = false;
+ for (unsigned r = 0, e = Result.size(); r != e; ++r)
+ if (Result[r].second == RHSVals[i].second) {
+ HasValue = true;
+ break;
+ }
+
+ if (!HasValue) {
+ Result.push_back(RHSVals[i]);
+ Result.back().first = InterestingVal;
+ }
}
return !Result.empty();
}
@@ -409,20 +422,21 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
(!isa<Instruction>(Cmp->getOperand(0)) ||
cast<Instruction>(Cmp->getOperand(0))->getParent() != BB)) {
Constant *RHSCst = cast<Constant>(Cmp->getOperand(1));
-
+
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *P = *PI;
// If the value is known by LazyValueInfo to be a constant in a
// predecessor, use that information to try to thread this block.
LazyValueInfo::Tristate
Res = LVI->getPredicateOnEdge(Cmp->getPredicate(), Cmp->getOperand(0),
- RHSCst, *PI, BB);
+ RHSCst, P, BB);
if (Res == LazyValueInfo::Unknown)
continue;
Constant *ResC = ConstantInt::get(Cmp->getType(), Res);
- Result.push_back(std::make_pair(cast<ConstantInt>(ResC), *PI));
+ Result.push_back(std::make_pair(cast<ConstantInt>(ResC), P));
}
-
+
return !Result.empty();
}
}
@@ -538,18 +552,22 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
(CondInst == 0 || CondInst->getParent() != BB)) { // Non-local definition.
pred_iterator PI = pred_begin(BB), E = pred_end(BB);
if (isa<BranchInst>(BB->getTerminator())) {
- for (; PI != E; ++PI)
- if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
+ for (; PI != E; ++PI) {
+ BasicBlock *P = *PI;
+ if (BranchInst *PBI = dyn_cast<BranchInst>(P->getTerminator()))
if (PBI->isConditional() && PBI->getCondition() == Condition &&
- ProcessBranchOnDuplicateCond(*PI, BB))
+ ProcessBranchOnDuplicateCond(P, BB))
return true;
+ }
} else {
assert(isa<SwitchInst>(BB->getTerminator()) && "Unknown jump terminator");
- for (; PI != E; ++PI)
- if (SwitchInst *PSI = dyn_cast<SwitchInst>((*PI)->getTerminator()))
+ for (; PI != E; ++PI) {
+ BasicBlock *P = *PI;
+ if (SwitchInst *PSI = dyn_cast<SwitchInst>(P->getTerminator()))
if (PSI->getCondition() == Condition &&
- ProcessSwitchOnDuplicateCond(*PI, BB))
+ ProcessSwitchOnDuplicateCond(P, BB))
return true;
+ }
}
}
@@ -569,19 +587,21 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
// If we have a comparison, loop over the predecessors to see if there is
// a condition with a lexically identical value.
pred_iterator PI = pred_begin(BB), E = pred_end(BB);
- for (; PI != E; ++PI)
- if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
- if (PBI->isConditional() && *PI != BB) {
+ for (; PI != E; ++PI) {
+ BasicBlock *P = *PI;
+ if (BranchInst *PBI = dyn_cast<BranchInst>(P->getTerminator()))
+ if (PBI->isConditional() && P != BB) {
if (CmpInst *CI = dyn_cast<CmpInst>(PBI->getCondition())) {
if (CI->getOperand(0) == CondCmp->getOperand(0) &&
CI->getOperand(1) == CondCmp->getOperand(1) &&
CI->getPredicate() == CondCmp->getPredicate()) {
// TODO: Could handle things like (x != 4) --> (x == 17)
- if (ProcessBranchOnDuplicateCond(*PI, BB))
+ if (ProcessBranchOnDuplicateCond(P, BB))
return true;
}
}
}
+ }
}
}
@@ -869,9 +889,15 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// Add all the unavailable predecessors to the PredsToSplit list.
for (pred_iterator PI = pred_begin(LoadBB), PE = pred_end(LoadBB);
- PI != PE; ++PI)
- if (!AvailablePredSet.count(*PI))
- PredsToSplit.push_back(*PI);
+ PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ // If the predecessor is an indirect goto, we can't split the edge.
+ if (isa<IndirectBrInst>(P->getTerminator()))
+ return false;
+
+ if (!AvailablePredSet.count(P))
+ PredsToSplit.push_back(P);
+ }
// Split them out to their own block.
UnavailablePred =
@@ -903,11 +929,12 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// have multiple entries here.
for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB); PI != E;
++PI) {
+ BasicBlock *P = *PI;
AvailablePredsTy::iterator I =
std::lower_bound(AvailablePreds.begin(), AvailablePreds.end(),
- std::make_pair(*PI, (Value*)0));
+ std::make_pair(P, (Value*)0));
- assert(I != AvailablePreds.end() && I->first == *PI &&
+ assert(I != AvailablePreds.end() && I->first == P &&
"Didn't find entry for predecessor!");
PN->addIncoming(I->second, I->first);
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
index 48817ab..e4894e9 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -83,7 +83,7 @@ bool LoopDeletion::IsLoopDead(Loop* L,
if (!L->makeLoopInvariant(I, Changed, Preheader->getTerminator()))
return false;
- BI++;
+ ++BI;
}
// Make sure that no instructions in the block have potential side-effects.
@@ -176,7 +176,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
BasicBlock::iterator BI = exitBlock->begin();
while (PHINode* P = dyn_cast<PHINode>(BI)) {
P->replaceUsesOfWith(exitingBlock, preheader);
- BI++;
+ ++BI;
}
// Update the dominator tree and remove the instructions and blocks that will
@@ -226,7 +226,7 @@ bool LoopDeletion::runOnLoop(Loop* L, LPPassManager& LPM) {
LPM.deleteLoopFromQueue(L);
Changed = true;
- NumDeleted++;
+ ++NumDeleted;
return Changed;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp
index 101ff5b..31058e5 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopIndexSplit.cpp
@@ -649,7 +649,7 @@ bool LoopIndexSplit::updateLoopIterationSpace() {
}
}
}
- NumRestrictBounds++;
+ ++NumRestrictBounds;
return true;
}
@@ -958,11 +958,11 @@ bool LoopIndexSplit::splitLoop() {
continue;
for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
- BI != BE; ++BI) {
+ BI != BE; ++BI) {
Instruction *Inst = BI;
if (!Inst->isSafeToSpeculativelyExecute() && !isa<PHINode>(Inst)
- && !isa<BranchInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst))
+ && !isa<BranchInst>(Inst) && !isa<DbgInfoIntrinsic>(Inst))
return false;
}
}
@@ -1016,13 +1016,13 @@ bool LoopIndexSplit::splitLoop() {
BSV = getMax(BSV, IVStartValue, Sign, PHTerm);
// [*] Clone Loop
- DenseMap<const Value *, Value *> ValueMap;
- Loop *BLoop = CloneLoop(L, LPM, LI, ValueMap, this);
+ ValueMap<const Value *, Value *> VMap;
+ Loop *BLoop = CloneLoop(L, LPM, LI, VMap, this);
Loop *ALoop = L;
// [*] ALoop's exiting edge enters BLoop's header.
// ALoop's original exit block becomes BLoop's exit block.
- PHINode *B_IndVar = cast<PHINode>(ValueMap[IndVar]);
+ PHINode *B_IndVar = cast<PHINode>(VMap[IndVar]);
BasicBlock *A_ExitingBlock = ExitCondition->getParent();
BranchInst *A_ExitInsn =
dyn_cast<BranchInst>(A_ExitingBlock->getTerminator());
@@ -1047,7 +1047,7 @@ bool LoopIndexSplit::splitLoop() {
for (BasicBlock::iterator BI = ALoop->getHeader()->begin(),
BE = ALoop->getHeader()->end(); BI != BE; ++BI) {
if (PHINode *PN = dyn_cast<PHINode>(BI)) {
- PHINode *PNClone = cast<PHINode>(ValueMap[PN]);
+ PHINode *PNClone = cast<PHINode>(VMap[PN]);
InverseMap[PNClone] = PN;
} else
break;
@@ -1085,11 +1085,11 @@ bool LoopIndexSplit::splitLoop() {
// block. Remove incoming PHINode values from ALoop's exiting block.
// Add new incoming values from BLoop's incoming exiting value.
// Update BLoop exit block's dominator info..
- BasicBlock *B_ExitingBlock = cast<BasicBlock>(ValueMap[A_ExitingBlock]);
+ BasicBlock *B_ExitingBlock = cast<BasicBlock>(VMap[A_ExitingBlock]);
for (BasicBlock::iterator BI = B_ExitBlock->begin(), BE = B_ExitBlock->end();
BI != BE; ++BI) {
if (PHINode *PN = dyn_cast<PHINode>(BI)) {
- PN->addIncoming(ValueMap[PN->getIncomingValueForBlock(A_ExitingBlock)],
+ PN->addIncoming(VMap[PN->getIncomingValueForBlock(A_ExitingBlock)],
B_ExitingBlock);
PN->removeIncomingValue(A_ExitingBlock);
} else
@@ -1131,7 +1131,7 @@ bool LoopIndexSplit::splitLoop() {
removeBlocks(A_InactiveBranch, L, A_ActiveBranch);
//[*] Eliminate split condition's inactive branch in from BLoop.
- BasicBlock *B_SplitCondBlock = cast<BasicBlock>(ValueMap[A_SplitCondBlock]);
+ BasicBlock *B_SplitCondBlock = cast<BasicBlock>(VMap[A_SplitCondBlock]);
BranchInst *B_BR = cast<BranchInst>(B_SplitCondBlock->getTerminator());
BasicBlock *B_InactiveBranch = NULL;
BasicBlock *B_ActiveBranch = NULL;
@@ -1146,9 +1146,9 @@ bool LoopIndexSplit::splitLoop() {
//[*] Move exit condition into split condition block to avoid
// executing dead loop iteration.
- ICmpInst *B_ExitCondition = cast<ICmpInst>(ValueMap[ExitCondition]);
- Instruction *B_IndVarIncrement = cast<Instruction>(ValueMap[IVIncrement]);
- ICmpInst *B_SplitCondition = cast<ICmpInst>(ValueMap[SplitCondition]);
+ ICmpInst *B_ExitCondition = cast<ICmpInst>(VMap[ExitCondition]);
+ Instruction *B_IndVarIncrement = cast<Instruction>(VMap[IVIncrement]);
+ ICmpInst *B_SplitCondition = cast<ICmpInst>(VMap[SplitCondition]);
moveExitCondition(A_SplitCondBlock, A_ActiveBranch, A_ExitBlock, ExitCondition,
cast<ICmpInst>(SplitCondition), IndVar, IVIncrement,
@@ -1159,7 +1159,7 @@ bool LoopIndexSplit::splitLoop() {
B_SplitCondition, B_IndVar, B_IndVarIncrement,
BLoop, EVOpNum);
- NumIndexSplit++;
+ ++NumIndexSplit;
return true;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
index 5004483..16c4a15 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopRotation.cpp
@@ -147,7 +147,7 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
continue; // PHI nodes don't count.
if (isa<DbgInfoIntrinsic>(OI))
continue; // Debug intrinsics don't count as size.
- Size++;
+ ++Size;
}
if (Size > MAX_HEADER_SIZE)
@@ -263,7 +263,7 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
preserveCanonicalLoopForm(LPM);
- NumRotated++;
+ ++NumRotated;
return true;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 86ea3eb..1f9b415 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -392,12 +392,13 @@ static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}
-/// isMulSExtable - Return true if the given add can be sign-extended
+/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
-static bool isMulSExtable(const SCEVMulExpr *A, ScalarEvolution &SE) {
+static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
const Type *WideTy =
- IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
- return isa<SCEVMulExpr>(SE.getSignExtendExpr(A, WideTy));
+ IntegerType::get(SE.getContext(),
+ SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
+ return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}
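The widening factor is the substance of this hunk: sign-extending a multiply only round-trips when the wide type holds the exact product, and K signed N-bit factors can need up to N*K bits, so the old N+1-bit probe was far too narrow. A standalone C++ check of the 8-bit case (illustration only, not LLVM code):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Two 8-bit factors: the exact product needs up to 16 bits, not 9.
      int8_t a = -128, b = -128;
      int32_t wide = int32_t(a) * int32_t(b);
      assert(wide == 16384);   // 2^14 fits in 16 signed bits, not in 9
      return 0;
    }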
/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
@@ -413,20 +414,28 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
if (LHS == RHS)
return SE.getConstant(LHS->getType(), 1);
- // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do some
- // folding.
- if (RHS->isAllOnesValue())
- return SE.getMulExpr(LHS, RHS);
+ // Handle a few RHS special cases.
+ const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
+ if (RC) {
+ const APInt &RA = RC->getValue()->getValue();
+ // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
+ // some folding.
+ if (RA.isAllOnesValue())
+ return SE.getMulExpr(LHS, RC);
+ // Handle x /s 1 as x.
+ if (RA == 1)
+ return LHS;
+ }
// Check for a division of a constant by a constant.
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
- const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
if (!RC)
return 0;
- if (C->getValue()->getValue().srem(RC->getValue()->getValue()) != 0)
+ const APInt &LA = C->getValue()->getValue();
+ const APInt &RA = RC->getValue()->getValue();
+ if (LA.srem(RA) != 0)
return 0;
- return SE.getConstant(C->getValue()->getValue()
- .sdiv(RC->getValue()->getValue()));
+ return SE.getConstant(LA.sdiv(RA));
}
// Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
@@ -440,6 +449,7 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
if (!Step) return 0;
return SE.getAddRecExpr(Start, Step, AR->getLoop());
}
+ return 0;
}
// Distribute the sdiv over add operands, if the add doesn't overflow.
@@ -455,10 +465,11 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
}
return SE.getAddExpr(Ops);
}
+ return 0;
}
// Check for a multiply operand that we can pull RHS out of.
- if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS))
+ if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
SmallVector<const SCEV *, 4> Ops;
bool Found = false;
@@ -475,6 +486,8 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
}
return Found ? SE.getMulExpr(Ops) : 0;
}
+ return 0;
+ }
// Otherwise we don't know.
return 0;
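In prose: getExactSDiv now folds x /s -1 and x /s 1 up front, and the added return 0 statements stop a failed addrec/add distribution from falling through into the remaining checks. A minimal standalone sketch of the constant arithmetic (plain C++, ignoring SCEV, and glossing over the INT64_MIN negation corner):

    #include <cassert>
    #include <cstdint>

    // Exact signed division: only succeed when the remainder is zero.
    bool getExactSDiv(int64_t LHS, int64_t RHS, int64_t &Result) {
      if (RHS == -1) { Result = -LHS; return true; }  // x /s -1 -> x * -1
      if (RHS == 1)  { Result = LHS;  return true; }  // x /s 1  -> x
      if (RHS == 0 || LHS % RHS != 0) return false;   // inexact: "don't know"
      Result = LHS / RHS;
      return true;
    }

    int main() {
      int64_t R;
      assert(getExactSDiv(42, 7, R) && R == 6);
      assert(!getExactSDiv(42, 5, R));   // 42 is not a multiple of 5
      return 0;
    }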
@@ -546,7 +559,7 @@ static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
case Intrinsic::x86_sse2_storeu_pd:
case Intrinsic::x86_sse2_storeu_dq:
case Intrinsic::x86_sse2_storel_dq:
- if (II->getOperand(1) == OperandVal)
+ if (II->getArgOperand(0) == OperandVal)
isAddress = true;
break;
}
@@ -568,7 +581,7 @@ static const Type *getAccessType(const Instruction *Inst) {
case Intrinsic::x86_sse2_storeu_pd:
case Intrinsic::x86_sse2_storeu_dq:
case Intrinsic::x86_sse2_storel_dq:
- AccessTy = II->getOperand(1)->getType();
+ AccessTy = II->getArgOperand(0)->getType();
break;
}
}
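A large share of this patch is the mechanical getOperand(N+1) -> getArgOperand(N) migration visible in these two hunks. Under the CallInst operand layout of this release the callee occupies a slot in the same operand list as the arguments, and getArgOperand()/getNumArgOperands() hide that offset (spelled CallInst::ArgOffset in the SROA hunks below) instead of hard-coding it at every call site. A minimal sketch, assuming the LLVM headers of this vintage:

    #include "llvm/Instructions.h"
    using namespace llvm;

    Value *firstArg(CallInst *CI) {
      // Argument indexing, independent of where the callee is stored:
      Value *A = CI->getArgOperand(0);
      // The old spelling baked in the callee's slot: CI->getOperand(1).
      return A;
    }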
@@ -976,6 +989,8 @@ public:
void dump() const;
};
+}
+
/// HasFormula - Test whether this use has a formula which has the same
/// registers as the given formula.
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
@@ -1203,6 +1218,32 @@ static bool isAlwaysFoldable(const SCEV *S,
return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}
+namespace {
+
+/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding
+/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind.
+struct UseMapDenseMapInfo {
+ static std::pair<const SCEV *, LSRUse::KindType> getEmptyKey() {
+ return std::make_pair(reinterpret_cast<const SCEV *>(-1), LSRUse::Basic);
+ }
+
+ static std::pair<const SCEV *, LSRUse::KindType> getTombstoneKey() {
+ return std::make_pair(reinterpret_cast<const SCEV *>(-2), LSRUse::Basic);
+ }
+
+ static unsigned
+ getHashValue(const std::pair<const SCEV *, LSRUse::KindType> &V) {
+ unsigned Result = DenseMapInfo<const SCEV *>::getHashValue(V.first);
+ Result ^= DenseMapInfo<unsigned>::getHashValue(unsigned(V.second));
+ return Result;
+ }
+
+ static bool isEqual(const std::pair<const SCEV *, LSRUse::KindType> &LHS,
+ const std::pair<const SCEV *, LSRUse::KindType> &RHS) {
+ return LHS == RHS;
+ }
+};
+
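DenseMap only accepts a key type for which it can manufacture two reserved sentinel values (for empty and tombstoned buckets) plus a hash and an equality test; pairs have no default traits, so the struct above supplies them, using the impossible pointer values -1 and -2 that can never collide with a real SCEV*. A hedged usage sketch, assuming the surrounding definitions in this file:

    #include "llvm/ADT/DenseMap.h"
    #include <utility>

    // The traits slot in as DenseMap's third template parameter; real keys
    // are (genuine SCEV pointer, kind) pairs, distinct from both sentinels.
    void demo(const llvm::SCEV *S) {
      llvm::DenseMap<std::pair<const llvm::SCEV *, LSRUse::KindType>,
                     size_t, UseMapDenseMapInfo> M;
      M[std::make_pair(S, LSRUse::Basic)] = 0;
    }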
/// FormulaSorter - This class implements an ordering for formulae which sorts
/// the by their standalone cost.
class FormulaSorter {
@@ -1275,7 +1316,9 @@ class LSRInstance {
}
// Support for sharing of LSRUses between LSRFixups.
- typedef DenseMap<const SCEV *, size_t> UseMapTy;
+ typedef DenseMap<std::pair<const SCEV *, LSRUse::KindType>,
+ size_t,
+ UseMapDenseMapInfo> UseMapTy;
UseMapTy UseMap;
bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
@@ -1613,8 +1656,11 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
NewRHS = Sel->getOperand(1);
else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
NewRHS = Sel->getOperand(2);
+ else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
+ NewRHS = SU->getValue();
else
- llvm_unreachable("Max doesn't match expected pattern!");
+ // Max doesn't match expected pattern.
+ return Cond;
// Determine the new comparison opcode. It may be signed or unsigned,
// and the original comparison may be either equality or inequality.
@@ -1805,6 +1851,8 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
NewMaxOffset = NewOffset;
}
// Check for a mismatched access type, and fall back conservatively as needed.
+ // TODO: Be less conservative when the type is similar and can use the same
+ // addressing modes.
if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
NewAccessTy = Type::getVoidTy(AccessTy->getContext());
@@ -1833,7 +1881,7 @@ LSRInstance::getUse(const SCEV *&Expr,
}
std::pair<UseMapTy::iterator, bool> P =
- UseMap.insert(std::make_pair(Expr, 0));
+ UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0));
if (!P.second) {
// A use already existed with this base.
size_t LUIdx = P.first->second;
@@ -1919,7 +1967,7 @@ void LSRInstance::CollectInterestingTypesAndFactors() {
Strides.insert(AR->getStepRecurrence(SE));
Worklist.push_back(AR->getStart());
} else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
- Worklist.insert(Worklist.end(), Add->op_begin(), Add->op_end());
+ Worklist.append(Add->op_begin(), Add->op_end());
}
} while (!Worklist.empty());
}
@@ -2086,7 +2134,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
const SCEV *S = Worklist.pop_back_val();
if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
- Worklist.insert(Worklist.end(), N->op_begin(), N->op_end());
+ Worklist.append(N->op_begin(), N->op_end());
else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
Worklist.push_back(C->getOperand());
else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
@@ -2095,8 +2143,12 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
} else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
if (!Inserted.insert(U)) continue;
const Value *V = U->getValue();
- if (const Instruction *Inst = dyn_cast<Instruction>(V))
+ if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
+ // Look for instructions defined outside the loop.
if (L->contains(Inst)) continue;
+ } else if (isa<UndefValue>(V))
+ // Undef doesn't have a live range, so it doesn't matter.
+ continue;
for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
UI != UE; ++UI) {
const Instruction *UserInst = dyn_cast<Instruction>(*UI);
@@ -2155,20 +2207,23 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
/// separate registers. If C is non-null, multiply each subexpression by C.
static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
SmallVectorImpl<const SCEV *> &Ops,
+ SmallVectorImpl<const SCEV *> &UninterestingOps,
+ const Loop *L,
ScalarEvolution &SE) {
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
// Break out add operands.
for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
I != E; ++I)
- CollectSubexprs(*I, C, Ops, SE);
+ CollectSubexprs(*I, C, Ops, UninterestingOps, L, SE);
return;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
// Split a non-zero base out of an addrec.
if (!AR->getStart()->isZero()) {
CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
AR->getStepRecurrence(SE),
- AR->getLoop()), C, Ops, SE);
- CollectSubexprs(AR->getStart(), C, Ops, SE);
+ AR->getLoop()),
+ C, Ops, UninterestingOps, L, SE);
+ CollectSubexprs(AR->getStart(), C, Ops, UninterestingOps, L, SE);
return;
}
} else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
@@ -2178,13 +2233,17 @@ static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
CollectSubexprs(Mul->getOperand(1),
C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0,
- Ops, SE);
+ Ops, UninterestingOps, L, SE);
return;
}
}
- // Otherwise use the value itself.
- Ops.push_back(C ? SE.getMulExpr(C, S) : S);
+ // Otherwise use the value itself. Loop-variant "unknown" values are
+ // uninteresting; we won't be able to do anything meaningful with them.
+ if (!C && isa<SCEVUnknown>(S) && !S->isLoopInvariant(L))
+ UninterestingOps.push_back(S);
+ else
+ Ops.push_back(C ? SE.getMulExpr(C, S) : S);
}
/// GenerateReassociations - Split out subexpressions from adds and the bases of
@@ -2198,8 +2257,15 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
const SCEV *BaseReg = Base.BaseRegs[i];
- SmallVector<const SCEV *, 8> AddOps;
- CollectSubexprs(BaseReg, 0, AddOps, SE);
+ SmallVector<const SCEV *, 8> AddOps, UninterestingAddOps;
+ CollectSubexprs(BaseReg, 0, AddOps, UninterestingAddOps, L, SE);
+
+ // Add any uninteresting values as one register, as we won't be able to
+ // form any interesting reassociation opportunities with them. They'll
+ // just have to be added inside the loop no matter what we do.
+ if (!UninterestingAddOps.empty())
+ AddOps.push_back(SE.getAddExpr(UninterestingAddOps));
+
if (AddOps.size() == 1) continue;
for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
@@ -2212,11 +2278,10 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
continue;
// Collect all operands except *J.
- SmallVector<const SCEV *, 8> InnerAddOps;
- for (SmallVectorImpl<const SCEV *>::const_iterator K = AddOps.begin(),
- KE = AddOps.end(); K != KE; ++K)
- if (K != J)
- InnerAddOps.push_back(*K);
+ SmallVector<const SCEV *, 8> InnerAddOps
+ ( ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
+ InnerAddOps.append
+ (next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end());
// Don't leave just a constant behind in a register if the constant could
// be folded into an immediate field.
@@ -2297,7 +2362,7 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
Formula Base) {
// TODO: For now, just add the min and max offset, because it usually isn't
// worthwhile looking at everything in between.
- SmallVector<int64_t, 4> Worklist;
+ SmallVector<int64_t, 2> Worklist;
Worklist.push_back(LU.MinOffset);
if (LU.MaxOffset != LU.MinOffset)
Worklist.push_back(LU.MaxOffset);
@@ -2311,7 +2376,14 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
LU.Kind, LU.AccessTy, TLI)) {
- F.BaseRegs[i] = SE.getAddExpr(G, SE.getConstant(G->getType(), *I));
+ // Add the offset to the base register.
+ const SCEV *NewG = SE.getAddExpr(G, SE.getConstant(G->getType(), *I));
+ // If it cancelled out, drop the base register, otherwise update it.
+ if (NewG->isZero()) {
+ std::swap(F.BaseRegs[i], F.BaseRegs.back());
+ F.BaseRegs.pop_back();
+ } else
+ F.BaseRegs[i] = NewG;
(void)InsertFormula(LU, LUIdx, F);
}
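When the folded-in offset exactly cancels the base register, the formula would otherwise carry a useless zero register; the new code drops it with the usual unordered swap-and-pop idiom, O(1) because formula base registers have no meaningful order. The idiom in isolation (standalone C++):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Remove element i in O(1) when element order doesn't matter.
    void swapAndPop(std::vector<int> &V, size_t i) {
      std::swap(V[i], V.back());
      V.pop_back();
    }

    int main() {
      std::vector<int> V;
      V.push_back(10); V.push_back(20); V.push_back(30);
      swapAndPop(V, 0);            // drops 10; 30 moves into slot 0
      assert(V.size() == 2 && V[0] == 30 && V[1] == 20);
      return 0;
    }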
@@ -2350,13 +2422,12 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
for (SmallSetVector<int64_t, 8>::const_iterator
I = Factors.begin(), E = Factors.end(); I != E; ++I) {
int64_t Factor = *I;
- Formula F = Base;
// Check that the multiplication doesn't overflow.
- if (F.AM.BaseOffs == INT64_MIN && Factor == -1)
+ if (Base.AM.BaseOffs == INT64_MIN && Factor == -1)
continue;
- F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
- if (F.AM.BaseOffs / Factor != Base.AM.BaseOffs)
+ int64_t NewBaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
+ if (NewBaseOffs / Factor != Base.AM.BaseOffs)
continue;
// Check that multiplying with the use offset doesn't overflow.
@@ -2367,6 +2438,9 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
if (Offset / Factor != LU.MinOffset)
continue;
+ Formula F = Base;
+ F.AM.BaseOffs = NewBaseOffs;
+
// Check that this scale is legal.
if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
continue;
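The restructuring avoids constructing and mutating a Formula copy for factors that fail the checks: the scaled offset is computed into a local, validated, and only then installed into a fresh copy of Base. The validation is the standard divide-back test for multiplication overflow, sketched standalone:

    #include <cassert>
    #include <cstdint>

    // a*b overflowed int64_t iff dividing the wrapped product by one factor
    // fails to recover the other. The multiply is done in uint64_t, as in
    // the patch, so the wraparound itself is well defined.
    bool mulOverflows(int64_t a, int64_t b) {
      if (a == INT64_MIN && b == -1) return true;   // the lone division trap
      int64_t p = (int64_t)((uint64_t)a * (uint64_t)b);
      return b != 0 && p / b != a;
    }

    int main() {
      assert(!mulOverflows(1000000, 1000000));
      assert(mulOverflows(INT64_MAX, 2));
      return 0;
    }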
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index ae7bf40..0c900ff 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -445,7 +445,7 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
// This is a very ad-hoc heuristic.
if (Metrics.NumInsts > Threshold ||
Metrics.NumBlocks * 5 > Threshold ||
- Metrics.NeverInline) {
+ Metrics.containsIndirectBr || Metrics.isRecursive) {
DEBUG(dbgs() << "NOT unswitching loop %"
<< currentLoop->getHeader()->getName() << ", cost too high: "
<< currentLoop->getBlocks().size() << "\n");
@@ -457,21 +457,21 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
}
// RemapInstruction - Convert the instruction operands from referencing the
-// current values into those specified by ValueMap.
+// current values into those specified by VMap.
//
static inline void RemapInstruction(Instruction *I,
- DenseMap<const Value *, Value*> &ValueMap) {
+ ValueMap<const Value *, Value*> &VMap) {
for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
Value *Op = I->getOperand(op);
- DenseMap<const Value *, Value*>::iterator It = ValueMap.find(Op);
- if (It != ValueMap.end()) Op = It->second;
+ ValueMap<const Value *, Value*>::iterator It = VMap.find(Op);
+ if (It != VMap.end()) Op = It->second;
I->setOperand(op, Op);
}
}
/// CloneLoop - Recursively clone the specified loop and all of its children,
/// mapping the blocks with the specified map.
-static Loop *CloneLoop(Loop *L, Loop *PL, DenseMap<const Value*, Value*> &VM,
+static Loop *CloneLoop(Loop *L, Loop *PL, ValueMap<const Value*, Value*> &VM,
LoopInfo *LI, LPPassManager *LPM) {
Loop *New = new Loop();
LPM->insertLoop(New, PL);
@@ -615,11 +615,11 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
// the loop preheader and exit blocks), keeping track of the mapping between
// the instructions and blocks.
NewBlocks.reserve(LoopBlocks.size());
- DenseMap<const Value*, Value*> ValueMap;
+ ValueMap<const Value*, Value*> VMap;
for (unsigned i = 0, e = LoopBlocks.size(); i != e; ++i) {
- BasicBlock *NewBB = CloneBasicBlock(LoopBlocks[i], ValueMap, ".us", F);
+ BasicBlock *NewBB = CloneBasicBlock(LoopBlocks[i], VMap, ".us", F);
NewBlocks.push_back(NewBB);
- ValueMap[LoopBlocks[i]] = NewBB; // Keep the BB mapping.
+ VMap[LoopBlocks[i]] = NewBB; // Keep the BB mapping.
LPM->cloneBasicBlockSimpleAnalysis(LoopBlocks[i], NewBB, L);
}
@@ -629,7 +629,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
NewBlocks[0], F->end());
// Now we create the new Loop object for the versioned loop.
- Loop *NewLoop = CloneLoop(L, L->getParentLoop(), ValueMap, LI, LPM);
+ Loop *NewLoop = CloneLoop(L, L->getParentLoop(), VMap, LI, LPM);
Loop *ParentLoop = L->getParentLoop();
if (ParentLoop) {
// Make sure to add the cloned preheader and exit blocks to the parent loop
@@ -638,7 +638,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
}
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
- BasicBlock *NewExit = cast<BasicBlock>(ValueMap[ExitBlocks[i]]);
+ BasicBlock *NewExit = cast<BasicBlock>(VMap[ExitBlocks[i]]);
// The new exit block should be in the same loop as the old one.
if (Loop *ExitBBLoop = LI->getLoopFor(ExitBlocks[i]))
ExitBBLoop->addBasicBlockToLoop(NewExit, LI->getBase());
@@ -653,8 +653,8 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
for (BasicBlock::iterator I = ExitSucc->begin(); isa<PHINode>(I); ++I) {
PN = cast<PHINode>(I);
Value *V = PN->getIncomingValueForBlock(ExitBlocks[i]);
- DenseMap<const Value *, Value*>::iterator It = ValueMap.find(V);
- if (It != ValueMap.end()) V = It->second;
+ ValueMap<const Value *, Value*>::iterator It = VMap.find(V);
+ if (It != VMap.end()) V = It->second;
PN->addIncoming(V, NewExit);
}
}
@@ -663,7 +663,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
for (unsigned i = 0, e = NewBlocks.size(); i != e; ++i)
for (BasicBlock::iterator I = NewBlocks[i]->begin(),
E = NewBlocks[i]->end(); I != E; ++I)
- RemapInstruction(I, ValueMap);
+ RemapInstruction(I, VMap);
// Rewrite the original preheader to select between versions of the loop.
BranchInst *OldBR = cast<BranchInst>(loopPreheader->getTerminator());
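The DenseMap-to-ValueMap switch across this file (and in LoopIndexSplit above) is more than a rename. A plain DenseMap keyed on Value* dangles if a mapped value is deleted or RAUW'd while the map is live; ValueMap registers callbacks on its keys so entries are dropped or re-keyed automatically. A hedged sketch, assuming llvm/ADT/ValueMap.h from this tree:

    #include "llvm/ADT/ValueMap.h"
    #include "llvm/Value.h"
    using namespace llvm;

    void remember(ValueMap<const Value *, Value *> &VM,
                  const Value *Orig, Value *Clone) {
      // Insert/lookup match DenseMap; the difference is behind the scenes:
      // deleting Orig erases the entry, and replaceAllUsesWith re-keys it.
      VM[Orig] = Clone;
    }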
diff --git a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 3611b8e..0e566c5 100644
--- a/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -632,7 +632,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
// Remove the memcpy
MD.removeInstruction(cpy);
cpy->eraseFromParent();
- NumMemCpyInstr++;
+ ++NumMemCpyInstr;
return true;
}
@@ -710,7 +710,7 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
if (MD.getDependency(C) == dep) {
MD.removeInstruction(M);
M->eraseFromParent();
- NumMemCpyInstr++;
+ ++NumMemCpyInstr;
return true;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
index 5aca9cdc..98452f5 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -407,13 +407,14 @@ static Value *NegateValue(Value *V, Instruction *BI) {
// Okay, we need to materialize a negated version of V with an instruction.
// Scan the use lists of V to see if we have one already.
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
- if (!BinaryOperator::isNeg(*UI)) continue;
+ User *U = *UI;
+ if (!BinaryOperator::isNeg(U)) continue;
// We found one! Now we have to make sure that the definition dominates
// this use. We do this by moving it to the entry block (if it is a
// non-instruction value) or right after the definition. These negates will
// be zapped by reassociate later, so we don't need much finesse here.
- BinaryOperator *TheNeg = cast<BinaryOperator>(*UI);
+ BinaryOperator *TheNeg = cast<BinaryOperator>(U);
// Verify that the negate is in this function, V might be a constant expr.
if (TheNeg->getParent()->getParent() != BI->getParent()->getParent())
diff --git a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 5ca9ce3..dd445f6 100644
--- a/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -926,7 +926,7 @@ void SROA::DoScalarReplacement(AllocaInst *AI,
DeleteDeadInstructions();
AI->eraseFromParent();
- NumReplaced++;
+ ++NumReplaced;
}
/// DeleteDeadInstructions - Erase instructions on the DeadInstrs list,
@@ -965,11 +965,11 @@ void SROA::isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
isSafeGEP(GEPI, AI, GEPOffset, Info);
if (!Info.isUnsafe)
isSafeForScalarRepl(GEPI, AI, GEPOffset, Info);
- } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
+ } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
if (Length)
isSafeMemAccess(AI, Offset, Length->getZExtValue(), 0,
- UI.getOperandNo() == 1, Info);
+ UI.getOperandNo() == CallInst::ArgOffset, Info);
else
MarkUnsafe(Info);
} else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
@@ -1272,6 +1272,8 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// If there is an other pointer, we want to convert it to the same pointer
// type as AI has, so we can GEP through it safely.
if (OtherPtr) {
+ unsigned AddrSpace =
+ cast<PointerType>(OtherPtr->getType())->getAddressSpace();
// Remove bitcasts and all-zero GEPs from OtherPtr. This is an
// optimization, but it's also required to detect the corner case where
@@ -1279,20 +1281,8 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// OtherPtr may be a bitcast or GEP that is currently being rewritten. (This
// function is only called for mem intrinsics that access the whole
// aggregate, so non-zero GEPs are not an issue here.)
- while (1) {
- if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr)) {
- OtherPtr = BC->getOperand(0);
- continue;
- }
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr)) {
- // All zero GEPs are effectively bitcasts.
- if (GEP->hasAllZeroIndices()) {
- OtherPtr = GEP->getOperand(0);
- continue;
- }
- }
- break;
- }
+ OtherPtr = OtherPtr->stripPointerCasts();
+
// Copying the alloca to itself is a no-op: just delete it.
if (OtherPtr == AI || OtherPtr == NewElts[0]) {
// This code will run twice for a no-op memcpy -- once for each operand.
@@ -1304,15 +1294,13 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
return;
}
- if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
- if (BCE->getOpcode() == Instruction::BitCast)
- OtherPtr = BCE->getOperand(0);
-
// If the pointer is not the right type, insert a bitcast to the right
// type.
- if (OtherPtr->getType() != AI->getType())
- OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
- MI);
+ const Type *NewTy =
+ PointerType::get(AI->getType()->getElementType(), AddrSpace);
+
+ if (OtherPtr->getType() != NewTy)
+ OtherPtr = new BitCastInst(OtherPtr, NewTy, OtherPtr->getName(), MI);
}
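Two things change at once in this hunk: the hand-rolled loop peeling bitcasts and all-zero GEPs (plus the separate ConstantExpr case deleted just below) collapses into Value::stripPointerCasts, and the replacement bitcast now targets a pointer type in OtherPtr's own address space rather than reusing the alloca's type wholesale. A condensed sketch of the resulting logic, assuming the LLVM headers of this era:

    #include "llvm/DerivedTypes.h"
    #include "llvm/Instructions.h"
    using namespace llvm;

    Value *canonicalizeOther(Value *OtherPtr, AllocaInst *AI,
                             Instruction *InsertPt) {
      unsigned AS = cast<PointerType>(OtherPtr->getType())->getAddressSpace();
      OtherPtr = OtherPtr->stripPointerCasts();   // bitcasts, zero GEPs, CEs
      const Type *NewTy =
        PointerType::get(AI->getType()->getElementType(), AS);
      if (OtherPtr->getType() != NewTy)
        OtherPtr = new BitCastInst(OtherPtr, NewTy, OtherPtr->getName(),
                                   InsertPt);
      return OtherPtr;
    }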
// Process each element of the aggregate.
@@ -1373,7 +1361,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// If the stored element is zero (common case), just store a null
// constant.
Constant *StoreVal;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getArgOperand(1))) {
if (CI->isZero()) {
StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0>
} else {
@@ -1436,7 +1424,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
Value *Ops[] = {
SROADest ? EltPtr : OtherElt, // Dest ptr
SROADest ? OtherElt : EltPtr, // Src ptr
- ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
+ ConstantInt::get(MI->getArgOperand(2)->getType(), EltSize), // Size
// Align
ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign),
MI->getVolatileCst()
@@ -1451,8 +1439,8 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
} else {
assert(isa<MemSetInst>(MI));
Value *Ops[] = {
- EltPtr, MI->getOperand(2), // Dest, Value,
- ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
+ EltPtr, MI->getArgOperand(1), // Dest, Value,
+ ConstantInt::get(MI->getArgOperand(2)->getType(), EltSize), // Size
Zero, // Align
ConstantInt::get(Type::getInt1Ty(MI->getContext()), 0) // isVolatile
};
@@ -1655,7 +1643,12 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
}
- ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
+ // Don't create an 'or x, 0' on the first iteration.
+ if (!isa<Constant>(ResultVal) ||
+ !cast<Constant>(ResultVal)->isNullValue())
+ ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
+ else
+ ResultVal = SrcField;
}
// Handle tail padding by truncating the result
@@ -1794,7 +1787,7 @@ static bool isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
if (isOffset) return false;
// If the memintrinsic isn't using the alloca as the dest, reject it.
- if (UI.getOperandNo() != 1) return false;
+ if (UI.getOperandNo() != CallInst::ArgOffset) return false;
// If the source of the memcpy/move is not a constant global, reject it.
if (!PointsToConstantGlobal(MI->getSource()))
diff --git a/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 9744100..49d93a2 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -137,6 +137,9 @@ static bool MarkAliveBlocks(BasicBlock *BB,
// they should be changed to unreachable by passes that can't modify the
// CFG.
if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
+ // Don't touch volatile stores.
+ if (SI->isVolatile()) continue;
+
Value *Ptr = SI->getOperand(1);
if (isa<UndefValue>(Ptr) ||
diff --git a/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index 7414be7..b1c6191 100644
--- a/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -66,6 +66,11 @@ public:
this->TD = TD;
if (CI->getCalledFunction())
Context = &CI->getCalledFunction()->getContext();
+
+ // We never change the calling convention.
+ if (CI->getCallingConv() != llvm::CallingConv::C)
+ return NULL;
+
return CallOptimizer(CI->getCalledFunction(), CI, B);
}
};
@@ -92,6 +97,20 @@ static bool IsOnlyUsedInZeroEqualityComparison(Value *V) {
return true;
}
+/// IsOnlyUsedInEqualityComparison - Return true if it is only used in equality
+/// comparisons with With.
+static bool IsOnlyUsedInEqualityComparison(Value *V, Value *With) {
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
+ UI != E; ++UI) {
+ if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+ if (IC->isEquality() && IC->getOperand(1) == With)
+ continue;
+ // Unknown instruction.
+ return false;
+ }
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// String and Memory LibCall Optimizations
//===----------------------------------------------------------------------===//
@@ -110,8 +129,8 @@ struct StrCatOpt : public LibCallOptimization {
return 0;
// Extract some information from the instruction
- Value *Dst = CI->getOperand(1);
- Value *Src = CI->getOperand(2);
+ Value *Dst = CI->getArgOperand(0);
+ Value *Src = CI->getArgOperand(1);
// See if we can get the length of the input string.
uint64_t Len = GetStringLength(Src);
@@ -162,12 +181,12 @@ struct StrNCatOpt : public StrCatOpt {
return 0;
// Extract some information from the instruction
- Value *Dst = CI->getOperand(1);
- Value *Src = CI->getOperand(2);
+ Value *Dst = CI->getArgOperand(0);
+ Value *Src = CI->getArgOperand(1);
uint64_t Len;
// We don't do anything if length is not constant
- if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getOperand(3)))
+ if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
Len = LengthArg->getZExtValue();
else
return 0;
@@ -207,11 +226,11 @@ struct StrChrOpt : public LibCallOptimization {
FT->getParamType(0) != FT->getReturnType())
return 0;
- Value *SrcStr = CI->getOperand(1);
+ Value *SrcStr = CI->getArgOperand(0);
// If the second operand is non-constant, see if we can compute the length
// of the input string and turn this into memchr.
- ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getOperand(2));
+ ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
if (CharC == 0) {
// These optimizations require TargetData.
if (!TD) return 0;
@@ -220,7 +239,7 @@ struct StrChrOpt : public LibCallOptimization {
if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32.
return 0;
- return EmitMemChr(SrcStr, CI->getOperand(2), // include nul.
+ return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
ConstantInt::get(TD->getIntPtrType(*Context), Len),
B, TD);
}
@@ -260,12 +279,12 @@ struct StrCmpOpt : public LibCallOptimization {
// Verify the "strcmp" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
- !FT->getReturnType()->isIntegerTy(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(*Context))
return 0;
- Value *Str1P = CI->getOperand(1), *Str2P = CI->getOperand(2);
+ Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
if (Str1P == Str2P) // strcmp(x,x) -> 0
return ConstantInt::get(CI->getType(), 0);
@@ -308,19 +327,19 @@ struct StrNCmpOpt : public LibCallOptimization {
// Verify the "strncmp" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 ||
- !FT->getReturnType()->isIntegerTy(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(*Context) ||
!FT->getParamType(2)->isIntegerTy())
return 0;
- Value *Str1P = CI->getOperand(1), *Str2P = CI->getOperand(2);
+ Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
if (Str1P == Str2P) // strncmp(x,x,n) -> 0
return ConstantInt::get(CI->getType(), 0);
// Get the length argument if it is constant.
uint64_t Length;
- if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getOperand(3)))
+ if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
Length = LengthArg->getZExtValue();
else
return 0;
@@ -328,6 +347,9 @@ struct StrNCmpOpt : public LibCallOptimization {
if (Length == 0) // strncmp(x,y,0) -> 0
return ConstantInt::get(CI->getType(), 0);
+ if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
+ return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD);
+
std::string Str1, Str2;
bool HasStr1 = GetConstantStringInfo(Str1P, Str1);
bool HasStr2 = GetConstantStringInfo(Str2P, Str2);
@@ -365,7 +387,7 @@ struct StrCpyOpt : public LibCallOptimization {
FT->getParamType(0) != Type::getInt8PtrTy(*Context))
return 0;
- Value *Dst = CI->getOperand(1), *Src = CI->getOperand(2);
+ Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
if (Dst == Src) // strcpy(x,x) -> x
return Src;
@@ -381,7 +403,7 @@ struct StrCpyOpt : public LibCallOptimization {
if (OptChkCall)
EmitMemCpyChk(Dst, Src,
ConstantInt::get(TD->getIntPtrType(*Context), Len),
- CI->getOperand(3), B, TD);
+ CI->getArgOperand(2), B, TD);
else
EmitMemCpy(Dst, Src,
ConstantInt::get(TD->getIntPtrType(*Context), Len),
@@ -402,9 +424,9 @@ struct StrNCpyOpt : public LibCallOptimization {
!FT->getParamType(2)->isIntegerTy())
return 0;
- Value *Dst = CI->getOperand(1);
- Value *Src = CI->getOperand(2);
- Value *LenOp = CI->getOperand(3);
+ Value *Dst = CI->getArgOperand(0);
+ Value *Src = CI->getArgOperand(1);
+ Value *LenOp = CI->getArgOperand(2);
// See if we can get the length of the input string.
uint64_t SrcLen = GetStringLength(Src);
@@ -452,7 +474,7 @@ struct StrLenOpt : public LibCallOptimization {
!FT->getReturnType()->isIntegerTy())
return 0;
- Value *Src = CI->getOperand(1);
+ Value *Src = CI->getArgOperand(0);
// Constant folding: strlen("xyz") -> 3
if (uint64_t Len = GetStringLength(Src))
@@ -477,7 +499,7 @@ struct StrToOpt : public LibCallOptimization {
!FT->getParamType(1)->isPointerTy())
return 0;
- Value *EndPtr = CI->getOperand(2);
+ Value *EndPtr = CI->getArgOperand(1);
if (isa<ConstantPointerNull>(EndPtr)) {
CI->setOnlyReadsMemory();
CI->addAttribute(1, Attribute::NoCapture);
@@ -500,17 +522,34 @@ struct StrStrOpt : public LibCallOptimization {
return 0;
// fold strstr(x, x) -> x.
- if (CI->getOperand(1) == CI->getOperand(2))
- return B.CreateBitCast(CI->getOperand(1), CI->getType());
+ if (CI->getArgOperand(0) == CI->getArgOperand(1))
+ return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
+
+ // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
+ if (TD && IsOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
+ Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD);
+ Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),
+ StrLen, B, TD);
+ for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end();
+ UI != UE; ) {
+ ICmpInst *Old = cast<ICmpInst>(UI++);
+ Value *Cmp = B.CreateICmp(Old->getPredicate(), StrNCmp,
+ ConstantInt::getNullValue(StrNCmp->getType()),
+ "cmp");
+ Old->replaceAllUsesWith(Cmp);
+ Old->eraseFromParent();
+ }
+ return CI;
+ }
// See if either input string is a constant string.
std::string SearchStr, ToFindStr;
- bool HasStr1 = GetConstantStringInfo(CI->getOperand(1), SearchStr);
- bool HasStr2 = GetConstantStringInfo(CI->getOperand(2), ToFindStr);
+ bool HasStr1 = GetConstantStringInfo(CI->getArgOperand(0), SearchStr);
+ bool HasStr2 = GetConstantStringInfo(CI->getArgOperand(1), ToFindStr);
// fold strstr(x, "") -> x.
if (HasStr2 && ToFindStr.empty())
- return B.CreateBitCast(CI->getOperand(1), CI->getType());
+ return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
// If both strings are known, constant fold it.
if (HasStr1 && HasStr2) {
@@ -520,14 +559,14 @@ struct StrStrOpt : public LibCallOptimization {
return Constant::getNullValue(CI->getType());
// strstr("abcd", "bc") -> gep((char*)"abcd", 1)
- Value *Result = CastToCStr(CI->getOperand(1), B);
+ Value *Result = CastToCStr(CI->getArgOperand(0), B);
Result = B.CreateConstInBoundsGEP1_64(Result, Offset, "strstr");
return B.CreateBitCast(Result, CI->getType());
}
// fold strstr(x, "y") -> strchr(x, 'y').
if (HasStr2 && ToFindStr.size() == 1)
- return B.CreateBitCast(EmitStrChr(CI->getOperand(1), ToFindStr[0], B, TD),
+ return B.CreateBitCast(EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TD),
CI->getType());
return 0;
}
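At the C level, the new strstr fold targets prefix tests: when every use of the strstr result is an equality comparison against the haystack pointer, searching the whole string is wasted work. Illustration (plain C, not the optimizer's code):

    #include <cassert>
    #include <cstring>

    bool hasPrefix(const char *a, const char *b) {
      return strstr(a, b) == a;
      // After the fold, effectively: strncmp(a, b, strlen(b)) == 0,
      // which touches at most strlen(b) bytes of a.
    }

    int main() {
      assert(hasPrefix("abcdef", "abc"));
      assert(!hasPrefix("abcdef", "bcd"));
      return 0;
    }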
@@ -545,13 +584,13 @@ struct MemCmpOpt : public LibCallOptimization {
!FT->getReturnType()->isIntegerTy(32))
return 0;
- Value *LHS = CI->getOperand(1), *RHS = CI->getOperand(2);
+ Value *LHS = CI->getArgOperand(0), *RHS = CI->getArgOperand(1);
if (LHS == RHS) // memcmp(s,s,x) -> 0
return Constant::getNullValue(CI->getType());
// Make sure we have a constant length.
- ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getOperand(3));
+ ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
if (!LenC) return 0;
uint64_t Len = LenC->getZExtValue();
@@ -598,9 +637,9 @@ struct MemCpyOpt : public LibCallOptimization {
return 0;
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
- EmitMemCpy(CI->getOperand(1), CI->getOperand(2),
- CI->getOperand(3), 1, false, B, TD);
- return CI->getOperand(1);
+ EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1, false, B, TD);
+ return CI->getArgOperand(0);
}
};
@@ -620,9 +659,9 @@ struct MemMoveOpt : public LibCallOptimization {
return 0;
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
- EmitMemMove(CI->getOperand(1), CI->getOperand(2),
- CI->getOperand(3), 1, false, B, TD);
- return CI->getOperand(1);
+ EmitMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), 1, false, B, TD);
+ return CI->getArgOperand(0);
}
};
@@ -642,10 +681,10 @@ struct MemSetOpt : public LibCallOptimization {
return 0;
// memset(p, v, n) -> llvm.memset(p, v, n, 1)
- Value *Val = B.CreateIntCast(CI->getOperand(2), Type::getInt8Ty(*Context),
+ Value *Val = B.CreateIntCast(CI->getArgOperand(1), Type::getInt8Ty(*Context),
false);
- EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), false, B, TD);
- return CI->getOperand(1);
+ EmitMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), false, B, TD);
+ return CI->getArgOperand(0);
}
};
@@ -666,7 +705,7 @@ struct PowOpt : public LibCallOptimization {
!FT->getParamType(0)->isFloatingPointTy())
return 0;
- Value *Op1 = CI->getOperand(1), *Op2 = CI->getOperand(2);
+ Value *Op1 = CI->getArgOperand(0), *Op2 = CI->getArgOperand(1);
if (ConstantFP *Op1C = dyn_cast<ConstantFP>(Op1)) {
if (Op1C->isExactlyValue(1.0)) // pow(1.0, x) -> 1.0
return Op1C;
@@ -720,18 +759,18 @@ struct Exp2Opt : public LibCallOptimization {
!FT->getParamType(0)->isFloatingPointTy())
return 0;
- Value *Op = CI->getOperand(1);
+ Value *Op = CI->getArgOperand(0);
// Turn exp2(sitofp(x)) -> ldexp(1.0, sext(x)) if sizeof(x) <= 32
// Turn exp2(uitofp(x)) -> ldexp(1.0, zext(x)) if sizeof(x) < 32
Value *LdExpArg = 0;
if (SIToFPInst *OpC = dyn_cast<SIToFPInst>(Op)) {
if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() <= 32)
LdExpArg = B.CreateSExt(OpC->getOperand(0),
- Type::getInt32Ty(*Context), "tmp");
+ Type::getInt32Ty(*Context), "tmp");
} else if (UIToFPInst *OpC = dyn_cast<UIToFPInst>(Op)) {
if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() < 32)
LdExpArg = B.CreateZExt(OpC->getOperand(0),
- Type::getInt32Ty(*Context), "tmp");
+ Type::getInt32Ty(*Context), "tmp");
}
if (LdExpArg) {
@@ -772,7 +811,7 @@ struct UnaryDoubleFPOpt : public LibCallOptimization {
return 0;
// If this is something like 'floor((double)floatval)', convert to floorf.
- FPExtInst *Cast = dyn_cast<FPExtInst>(CI->getOperand(1));
+ FPExtInst *Cast = dyn_cast<FPExtInst>(CI->getArgOperand(0));
if (Cast == 0 || !Cast->getOperand(0)->getType()->isFloatTy())
return 0;
@@ -797,11 +836,11 @@ struct FFSOpt : public LibCallOptimization {
// Just make sure this takes one integer argument and returns an i32,
// matching the ffs prototype.
if (FT->getNumParams() != 1 ||
- !FT->getReturnType()->isIntegerTy(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
!FT->getParamType(0)->isIntegerTy())
return 0;
- Value *Op = CI->getOperand(1);
+ Value *Op = CI->getArgOperand(0);
// Constant fold.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
@@ -821,7 +860,7 @@ struct FFSOpt : public LibCallOptimization {
Value *Cond = B.CreateICmpNE(Op, Constant::getNullValue(ArgType), "tmp");
return B.CreateSelect(Cond, V,
- ConstantInt::get(Type::getInt32Ty(*Context), 0));
+ ConstantInt::get(Type::getInt32Ty(*Context), 0));
}
};
@@ -837,7 +876,7 @@ struct IsDigitOpt : public LibCallOptimization {
return 0;
// isdigit(c) -> (c-'0') <u 10
- Value *Op = CI->getOperand(1);
+ Value *Op = CI->getArgOperand(0);
Op = B.CreateSub(Op, ConstantInt::get(Type::getInt32Ty(*Context), '0'),
"isdigittmp");
Op = B.CreateICmpULT(Op, ConstantInt::get(Type::getInt32Ty(*Context), 10),
@@ -858,7 +897,7 @@ struct IsAsciiOpt : public LibCallOptimization {
return 0;
// isascii(c) -> c <u 128
- Value *Op = CI->getOperand(1);
+ Value *Op = CI->getArgOperand(0);
Op = B.CreateICmpULT(Op, ConstantInt::get(Type::getInt32Ty(*Context), 128),
"isascii");
return B.CreateZExt(Op, CI->getType());
@@ -877,7 +916,7 @@ struct AbsOpt : public LibCallOptimization {
return 0;
// abs(x) -> x >s -1 ? x : -x
- Value *Op = CI->getOperand(1);
+ Value *Op = CI->getArgOperand(0);
Value *Pos = B.CreateICmpSGT(Op,
Constant::getAllOnesValue(Op->getType()),
"ispos");
@@ -899,7 +938,7 @@ struct ToAsciiOpt : public LibCallOptimization {
return 0;
// toascii(c) -> c & 0x7f
- return B.CreateAnd(CI->getOperand(1),
+ return B.CreateAnd(CI->getArgOperand(0),
ConstantInt::get(CI->getType(),0x7F));
}
};
@@ -922,7 +961,7 @@ struct PrintFOpt : public LibCallOptimization {
// Check for a fixed format string.
std::string FormatStr;
- if (!GetConstantStringInfo(CI->getOperand(1), FormatStr))
+ if (!GetConstantStringInfo(CI->getArgOperand(0), FormatStr))
return 0;
// Empty format string -> noop.
@@ -954,20 +993,20 @@ struct PrintFOpt : public LibCallOptimization {
}
// Optimize specific format strings.
- // printf("%c", chr) --> putchar(*(i8*)dst)
- if (FormatStr == "%c" && CI->getNumOperands() > 2 &&
- CI->getOperand(2)->getType()->isIntegerTy()) {
- Value *Res = EmitPutChar(CI->getOperand(2), B, TD);
+ // printf("%c", chr) --> putchar(chr)
+ if (FormatStr == "%c" && CI->getNumArgOperands() > 1 &&
+ CI->getArgOperand(1)->getType()->isIntegerTy()) {
+ Value *Res = EmitPutChar(CI->getArgOperand(1), B, TD);
if (CI->use_empty()) return CI;
return B.CreateIntCast(Res, CI->getType(), true);
}
// printf("%s\n", str) --> puts(str)
- if (FormatStr == "%s\n" && CI->getNumOperands() > 2 &&
- CI->getOperand(2)->getType()->isPointerTy() &&
+ if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 &&
+ CI->getArgOperand(1)->getType()->isPointerTy() &&
CI->use_empty()) {
- EmitPutS(CI->getOperand(2), B, TD);
+ EmitPutS(CI->getArgOperand(1), B, TD);
return CI;
}
return 0;
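Source-level view of the two printf folds, for reference (both require a fixed format string, and the %s form additionally requires the call result to be unused):

    #include <cstdio>

    int main() {
      printf("%c", 'x');      // rewritten to putchar('x')
      printf("%s\n", "hi");   // rewritten to puts("hi")
      return 0;
    }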
@@ -988,11 +1027,11 @@ struct SPrintFOpt : public LibCallOptimization {
// Check for a fixed format string.
std::string FormatStr;
- if (!GetConstantStringInfo(CI->getOperand(2), FormatStr))
+ if (!GetConstantStringInfo(CI->getArgOperand(1), FormatStr))
return 0;
// If we just have a format string (nothing else crazy) transform it.
- if (CI->getNumOperands() == 3) {
+ if (CI->getNumArgOperands() == 2) {
// Make sure there's no % in the constant array. We could try to handle
// %% -> % in the future if we cared.
for (unsigned i = 0, e = FormatStr.size(); i != e; ++i)
@@ -1003,7 +1042,7 @@ struct SPrintFOpt : public LibCallOptimization {
if (!TD) return 0;
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
- EmitMemCpy(CI->getOperand(1), CI->getOperand(2), // Copy the nul byte.
+ EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(1), // Copy the nul byte.
ConstantInt::get(TD->getIntPtrType(*Context),
FormatStr.size()+1), 1, false, B, TD);
return ConstantInt::get(CI->getType(), FormatStr.size());
@@ -1011,16 +1050,17 @@ struct SPrintFOpt : public LibCallOptimization {
// The remaining optimizations require the format string to be "%s" or "%c"
// and have an extra operand.
- if (FormatStr.size() != 2 || FormatStr[0] != '%' || CI->getNumOperands() <4)
+ if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
+ CI->getNumArgOperands() < 3)
return 0;
// Decode the second character of the format string.
if (FormatStr[1] == 'c') {
// sprintf(dst, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
- if (!CI->getOperand(3)->getType()->isIntegerTy()) return 0;
- Value *V = B.CreateTrunc(CI->getOperand(3),
+ if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;
+ Value *V = B.CreateTrunc(CI->getArgOperand(2),
Type::getInt8Ty(*Context), "char");
- Value *Ptr = CastToCStr(CI->getOperand(1), B);
+ Value *Ptr = CastToCStr(CI->getArgOperand(0), B);
B.CreateStore(V, Ptr);
Ptr = B.CreateGEP(Ptr, ConstantInt::get(Type::getInt32Ty(*Context), 1),
"nul");
@@ -1034,13 +1074,13 @@ struct SPrintFOpt : public LibCallOptimization {
if (!TD) return 0;
// sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1)
- if (!CI->getOperand(3)->getType()->isPointerTy()) return 0;
+ if (!CI->getArgOperand(2)->getType()->isPointerTy()) return 0;
- Value *Len = EmitStrLen(CI->getOperand(3), B, TD);
+ Value *Len = EmitStrLen(CI->getArgOperand(2), B, TD);
Value *IncLen = B.CreateAdd(Len,
ConstantInt::get(Len->getType(), 1),
"leninc");
- EmitMemCpy(CI->getOperand(1), CI->getOperand(3), IncLen, 1, false, B, TD);
+ EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(2), IncLen, 1, false, B, TD);
// The sprintf result is the unincremented number of bytes in the string.
return B.CreateIntCast(Len, CI->getType(), false);
@@ -1064,8 +1104,8 @@ struct FWriteOpt : public LibCallOptimization {
return 0;
// Get the element size and count.
- ConstantInt *SizeC = dyn_cast<ConstantInt>(CI->getOperand(2));
- ConstantInt *CountC = dyn_cast<ConstantInt>(CI->getOperand(3));
+ ConstantInt *SizeC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
+ ConstantInt *CountC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
if (!SizeC || !CountC) return 0;
uint64_t Bytes = SizeC->getZExtValue()*CountC->getZExtValue();
@@ -1075,8 +1115,8 @@ struct FWriteOpt : public LibCallOptimization {
// If this is writing one byte, turn it into fputc.
if (Bytes == 1) { // fwrite(S,1,1,F) -> fputc(S[0],F)
- Value *Char = B.CreateLoad(CastToCStr(CI->getOperand(1), B), "char");
- EmitFPutC(Char, CI->getOperand(4), B, TD);
+ Value *Char = B.CreateLoad(CastToCStr(CI->getArgOperand(0), B), "char");
+ EmitFPutC(Char, CI->getArgOperand(3), B, TD);
return ConstantInt::get(CI->getType(), 1);
}
@@ -1100,11 +1140,11 @@ struct FPutsOpt : public LibCallOptimization {
return 0;
// fputs(s,F) --> fwrite(s,1,strlen(s),F)
- uint64_t Len = GetStringLength(CI->getOperand(1));
+ uint64_t Len = GetStringLength(CI->getArgOperand(0));
if (!Len) return 0;
- EmitFWrite(CI->getOperand(1),
+ EmitFWrite(CI->getArgOperand(0),
ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
- CI->getOperand(2), B, TD);
+ CI->getArgOperand(1), B, TD);
return CI; // Known to have no uses (see above).
}
};
@@ -1123,11 +1163,11 @@ struct FPrintFOpt : public LibCallOptimization {
// All the optimizations depend on the format string.
std::string FormatStr;
- if (!GetConstantStringInfo(CI->getOperand(2), FormatStr))
+ if (!GetConstantStringInfo(CI->getArgOperand(1), FormatStr))
return 0;
// fprintf(F, "foo") --> fwrite("foo", 3, 1, F)
- if (CI->getNumOperands() == 3) {
+ if (CI->getNumArgOperands() == 2) {
for (unsigned i = 0, e = FormatStr.size(); i != e; ++i)
if (FormatStr[i] == '%') // Could handle %% -> % if we cared.
return 0; // We found a format specifier.
@@ -1135,31 +1175,32 @@ struct FPrintFOpt : public LibCallOptimization {
// These optimizations require TargetData.
if (!TD) return 0;
- EmitFWrite(CI->getOperand(2),
+ EmitFWrite(CI->getArgOperand(1),
ConstantInt::get(TD->getIntPtrType(*Context),
FormatStr.size()),
- CI->getOperand(1), B, TD);
+ CI->getArgOperand(0), B, TD);
return ConstantInt::get(CI->getType(), FormatStr.size());
}
// The remaining optimizations require the format string to be "%s" or "%c"
// and have an extra operand.
- if (FormatStr.size() != 2 || FormatStr[0] != '%' || CI->getNumOperands() <4)
+ if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
+ CI->getNumArgOperands() < 3)
return 0;
// Decode the second character of the format string.
if (FormatStr[1] == 'c') {
- // fprintf(F, "%c", chr) --> *(i8*)dst = chr
- if (!CI->getOperand(3)->getType()->isIntegerTy()) return 0;
- EmitFPutC(CI->getOperand(3), CI->getOperand(1), B, TD);
+ // fprintf(F, "%c", chr) --> fputc(chr, F)
+ if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;
+ EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TD);
return ConstantInt::get(CI->getType(), 1);
}
if (FormatStr[1] == 's') {
- // fprintf(F, "%s", str) -> fputs(str, F)
- if (!CI->getOperand(3)->getType()->isPointerTy() || !CI->use_empty())
+ // fprintf(F, "%s", str) --> fputs(str, F)
+ if (!CI->getArgOperand(2)->getType()->isPointerTy() || !CI->use_empty())
return 0;
- EmitFPutS(CI->getOperand(3), CI->getOperand(1), B, TD);
+ EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TD);
return CI;
}
return 0;
diff --git a/contrib/llvm/lib/Transforms/Scalar/TailDuplication.cpp b/contrib/llvm/lib/Transforms/Scalar/TailDuplication.cpp
index 2306a77..9208238 100644
--- a/contrib/llvm/lib/Transforms/Scalar/TailDuplication.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/TailDuplication.cpp
@@ -206,12 +206,13 @@ static BasicBlock *FindObviousSharedDomOf(BasicBlock *SrcBlock,
// there is only one other pred, get it, otherwise we can't handle it.
PI = pred_begin(DstBlock); PE = pred_end(DstBlock);
BasicBlock *DstOtherPred = 0;
- if (*PI == SrcBlock) {
+ BasicBlock *P = *PI;
+ if (P == SrcBlock) {
if (++PI == PE) return 0;
DstOtherPred = *PI;
if (++PI != PE) return 0;
} else {
- DstOtherPred = *PI;
+ DstOtherPred = P;
if (++PI == PE || *PI != SrcBlock || ++PI != PE) return 0;
}
diff --git a/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 5ad5de2..01c8e5d 100644
--- a/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -16,9 +16,9 @@
// transformation from taking place, though currently the analysis cannot
// support moving any really useful instructions (only dead ones).
// 2. This pass transforms functions that are prevented from being tail
-// recursive by an associative expression to use an accumulator variable,
-// thus compiling the typical naive factorial or 'fib' implementation into
-// efficient code.
+// recursive by an associative and commutative expression to use an
+// accumulator variable, thus compiling the typical naive factorial or
+// 'fib' implementation into efficient code.
// 3. TRE is performed if the function returns void, if the return
// returns the result returned by the call, or if the function returns a
// run-time constant on all exits from the function. It is possible, though
@@ -60,6 +60,7 @@
#include "llvm/Pass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Analysis/Loads.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CFG.h"
#include "llvm/ADT/Statistic.h"
@@ -252,7 +253,7 @@ static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) {
// If we are passing this argument into call as the corresponding
// argument operand, then the argument is dynamically constant.
// Otherwise, we cannot transform this function safely.
- if (CI->getOperand(ArgNo+1) == Arg)
+ if (CI->getArgOperand(ArgNo) == Arg)
return true;
}
@@ -269,16 +270,16 @@ static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) {
}
// getCommonReturnValue - Check to see if the function containing the specified
-// return instruction and tail call consistently returns the same
-// runtime-constant value at all exit points. If so, return the returned value.
+// tail call consistently returns the same runtime-constant value at all exit
+// points except for IgnoreRI. If so, return the returned value.
//
-static Value *getCommonReturnValue(ReturnInst *TheRI, CallInst *CI) {
- Function *F = TheRI->getParent()->getParent();
+static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
+ Function *F = CI->getParent()->getParent();
Value *ReturnedValue = 0;
for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ++BBI)
if (ReturnInst *RI = dyn_cast<ReturnInst>(BBI->getTerminator()))
- if (RI != TheRI) {
+ if (RI != IgnoreRI) {
Value *RetOp = RI->getOperand(0);
// We can only perform this transformation if the value returned is
@@ -301,9 +302,9 @@ static Value *getCommonReturnValue(ReturnInst *TheRI, CallInst *CI) {
///
Value *TailCallElim::CanTransformAccumulatorRecursion(Instruction *I,
CallInst *CI) {
- if (!I->isAssociative()) return 0;
+ if (!I->isAssociative() || !I->isCommutative()) return 0;
assert(I->getNumOperands() == 2 &&
- "Associative operations should have 2 args!");
+ "Associative/commutative operations should have 2 args!");
// Exactly one operand should be the result of the call instruction...
if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
@@ -368,11 +369,16 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
return false;
}
- // If we are introducing accumulator recursion to eliminate associative
- // operations after the call instruction, this variable contains the initial
- // value for the accumulator. If this value is set, we actually perform
- // accumulator recursion elimination instead of simple tail recursion
- // elimination.
+ // If we are introducing accumulator recursion to eliminate operations after
+ // the call instruction that are both associative and commutative, the initial
+ // value for the accumulator is placed in this variable. If this value is set
+ // then we actually perform accumulator recursion elimination instead of
+ // simple tail recursion elimination. If the operation is an LLVM instruction
+ // (e.g. "add") then it is recorded in AccumulatorRecursionInstr. If not, then
+ // we are handling the case when the return instruction returns a constant C
+ // which is different to the constant returned by other return instructions
+ // (which is recorded in AccumulatorRecursionEliminationInitVal). This is a
+ // special case of accumulator recursion, the operation being "return C".
Value *AccumulatorRecursionEliminationInitVal = 0;
Instruction *AccumulatorRecursionInstr = 0;
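What the associative-and-commutative requirement buys, shown on the classic example (hand-written equivalent, not compiler output):

    // Not a tail call: the multiply runs after the recursive call returns.
    unsigned fact(unsigned n) {
      if (n == 0) return 1;
      return n * fact(n - 1);
    }

    // What accumulator recursion elimination effectively produces: the
    // pending multiplies are folded into an accumulator seeded with the
    // identity value, turning the recursion into a loop.
    unsigned factAccum(unsigned n) {
      unsigned acc = 1;       // AccumulatorRecursionEliminationInitVal
      for (; n != 0; --n)
        acc *= n;             // the "accumulator.tr" PHI's loop update
      return acc;
    }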
@@ -383,9 +389,9 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
for (BBI = CI, ++BBI; &*BBI != Ret; ++BBI)
if (!CanMoveAboveCall(BBI, CI)) {
// If we can't move the instruction above the call, it might be because it
- // is an associative operation that could be tranformed using accumulator
- // recursion elimination. Check to see if this is the case, and if so,
- // remember the initial accumulator value for later.
+ // is an associative and commutative operation that could be transformed
+ // using accumulator recursion elimination. Check to see if this is the
+ // case, and if so, remember the initial accumulator value for later.
if ((AccumulatorRecursionEliminationInitVal =
CanTransformAccumulatorRecursion(BBI, CI))) {
// Yes, this is accumulator recursion. Remember which instruction
@@ -403,8 +409,18 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
if (Ret->getNumOperands() == 1 && Ret->getReturnValue() != CI &&
!isa<UndefValue>(Ret->getReturnValue()) &&
AccumulatorRecursionEliminationInitVal == 0 &&
- !getCommonReturnValue(Ret, CI))
- return false;
+ !getCommonReturnValue(0, CI)) {
+ // One case remains that we are able to handle: the current return
+ // instruction returns a constant, and all other return instructions
+ // return a different constant.
+ if (!isDynamicConstant(Ret->getReturnValue(), CI, Ret))
+ return false; // Current return instruction does not return a constant.
+ // Check that all other return instructions return a common constant. If
+ // so, record it in AccumulatorRecursionEliminationInitVal.
+ AccumulatorRecursionEliminationInitVal = getCommonReturnValue(Ret, CI);
+ if (!AccumulatorRecursionEliminationInitVal)
+ return false;
+ }
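The newly handled shape, in C (a hypothetical example, assuming this reading of the code): the recursive call's result is discarded, the current return yields one constant, and all other exits agree on a different constant that seeds the accumulator.

    // Returns 1 only when the initial argument is already zero.
    int isInitiallyZero(int x) {
      if (x == 0)
        return 1;             // the common constant at the other exits
      isInitiallyZero(x - 1); // tail call whose result is unused
      return 0;               // this constant feeds the accumulator PHI
    }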
// OK! We can transform this tail call. If this is the first one found,
// create the new entry block, allowing us to branch back to the old entry.
@@ -453,8 +469,8 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
// Ok, now that we know we have a pseudo-entry block WITH all of the
// required PHI nodes, add entries into the PHI node for the actual
// parameters passed into the tail-recursive call.
- for (unsigned i = 0, e = CI->getNumOperands()-1; i != e; ++i)
- ArgumentPHIs[i]->addIncoming(CI->getOperand(i+1), BB);
+ for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
+ ArgumentPHIs[i]->addIncoming(CI->getArgOperand(i), BB);
// If we are introducing an accumulator variable to eliminate the recursion,
// do so now. Note that we _know_ that no subsequent tail recursion
@@ -464,8 +480,9 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
if (AccumulatorRecursionEliminationInitVal) {
Instruction *AccRecInstr = AccumulatorRecursionInstr;
// Start by inserting a new PHI node for the accumulator.
- PHINode *AccPN = PHINode::Create(AccRecInstr->getType(), "accumulator.tr",
- OldEntry->begin());
+ PHINode *AccPN =
+ PHINode::Create(AccumulatorRecursionEliminationInitVal->getType(),
+ "accumulator.tr", OldEntry->begin());
// Loop over all of the predecessors of the tail recursion block. For the
// real entry into the function we seed the PHI with the initial value,
@@ -475,20 +492,27 @@ bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
// it will not show up as a predecessor.
for (pred_iterator PI = pred_begin(OldEntry), PE = pred_end(OldEntry);
PI != PE; ++PI) {
- if (*PI == &F->getEntryBlock())
- AccPN->addIncoming(AccumulatorRecursionEliminationInitVal, *PI);
+ BasicBlock *P = *PI;
+ if (P == &F->getEntryBlock())
+ AccPN->addIncoming(AccumulatorRecursionEliminationInitVal, P);
else
- AccPN->addIncoming(AccPN, *PI);
+ AccPN->addIncoming(AccPN, P);
}
- // Add an incoming argument for the current block, which is computed by our
- // associative accumulator instruction.
- AccPN->addIncoming(AccRecInstr, BB);
-
- // Next, rewrite the accumulator recursion instruction so that it does not
- // use the result of the call anymore, instead, use the PHI node we just
- // inserted.
- AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
+ if (AccRecInstr) {
+ // Add an incoming argument for the current block, which is computed by
+ // our associative and commutative accumulator instruction.
+ AccPN->addIncoming(AccRecInstr, BB);
+
+ // Next, rewrite the accumulator recursion instruction so that it does not
+ // use the result of the call anymore, instead, use the PHI node we just
+ // inserted.
+ AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
+ } else {
+ // Add an incoming argument for the current block, which is just the
+ // constant returned by the current return instruction.
+ AccPN->addIncoming(Ret->getReturnValue(), BB);
+ }
// Finally, rewrite any return instructions in the program to return the PHI
// node instead of the "initval" that they do currently. This loop will
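For readers following the TailCallElim changes above, here is a source-level sketch of the transform being extended; the functions are illustrative and only indicate the shape of what the pass produces, not its exact output.

    // fact() is not tail-recursive as written: a multiply is still pending
    // after the recursive call. Because "mul" is associative and commutative,
    // the pass can carry it in an accumulator instead.
    unsigned fact(unsigned n) {
      if (n == 0) return 1;     // recorded as the accumulator's initial value
      return n * fact(n - 1);   // the "mul" becomes AccumulatorRecursionInstr
    }

    // Roughly what the rewritten function computes: a loop whose accumulator
    // is seeded with the initial value (the "accumulator.tr" PHI above).
    unsigned fact_rewritten(unsigned n) {
      unsigned acc = 1;
      while (n != 0) {
        acc = n * acc;          // safe to reassociate only because the op is
        n = n - 1;              // associative and commutative
      }
      return acc;
    }

The new special case in this hunk treats "return C" the same way: the differing constant becomes the value merged into the accumulator PHI for that return's block.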
diff --git a/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp b/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
index ea9d1c1..4d64c85 100644
--- a/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/AddrModeMatcher.cpp
@@ -381,29 +381,28 @@ static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
const TargetLowering &TLI) {
std::vector<InlineAsm::ConstraintInfo>
Constraints = IA->ParseConstraints();
-
- unsigned ArgNo = 1; // ArgNo - The operand of the CallInst.
+
+ unsigned ArgNo = 0; // The argument of the CallInst.
for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
TargetLowering::AsmOperandInfo OpInfo(Constraints[i]);
-
+
// Compute the value type for each operand.
switch (OpInfo.Type) {
case InlineAsm::isOutput:
if (OpInfo.isIndirect)
- OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
+ OpInfo.CallOperandVal = CI->getArgOperand(ArgNo++);
break;
case InlineAsm::isInput:
- OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
+ OpInfo.CallOperandVal = CI->getArgOperand(ArgNo++);
break;
case InlineAsm::isClobber:
// Nothing to do.
break;
}
-
+
// Compute the constraint code and ConstraintType to use.
- TLI.ComputeConstraintToUse(OpInfo, SDValue(),
- OpInfo.ConstraintType == TargetLowering::C_Memory);
-
+ TLI.ComputeConstraintToUse(OpInfo, SDValue());
+
// If this asm operand is our Value*, and if it isn't an indirect memory
// operand, we can't fold it!
if (OpInfo.CallOperandVal == OpVal &&
@@ -411,7 +410,7 @@ static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
!OpInfo.isIndirect))
return false;
}
-
+
return true;
}
@@ -450,7 +449,7 @@ static bool FindAllMemoryUses(Instruction *I,
if (CallInst *CI = dyn_cast<CallInst>(U)) {
InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
- if (IA == 0) return true;
+ if (!IA) return true;
// If this is a memory operand, we're cool, otherwise bail out.
if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
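The getOperand-to-getArgOperand rewrites here (and the CallInst::ArgOffset arithmetic in the BuildLibCalls hunks below) all encode one invariant of this era's call-instruction operand layout. A minimal sketch, assuming ArgOffset is 1 in this tree, which matches the 4 -> 3 + ArgOffset rewrites later in the patch:

    #include "llvm/Instructions.h"
    #include <cassert>
    using namespace llvm;

    // Arguments occupy a contiguous slice of the operand list, starting
    // ArgOffset slots in; getArgOperand() hides the layout so callers
    // survive a future operand reordering.
    static void checkArgLayout(CallInst *CI) {
      for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
        assert(CI->getArgOperand(i) == CI->getOperand(i + CallInst::ArgOffset)
               && "getArgOperand is just an offset into the operand list");
    }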
diff --git a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index 2f1ae00..ec625b4 100644
--- a/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -558,121 +558,3 @@ void llvm::FindFunctionBackedges(const Function &F,
}
-
-
-
-/// AreEquivalentAddressValues - Test if A and B will obviously have the same
-/// value. This includes recognizing that %t0 and %t1 will have the same
-/// value in code like this:
-/// %t0 = getelementptr @a, 0, 3
-/// store i32 0, i32* %t0
-/// %t1 = getelementptr @a, 0, 3
-/// %t2 = load i32* %t1
-///
-static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
- // Test if the values are trivially equivalent.
- if (A == B) return true;
-
- // Test if the values come from identical arithmetic instructions.
- // Use isIdenticalToWhenDefined instead of isIdenticalTo because
- // this function is only used when one address use dominates the
- // other, which means that they'll always either have the same
- // value or one of them will have an undefined value.
- if (isa<BinaryOperator>(A) || isa<CastInst>(A) ||
- isa<PHINode>(A) || isa<GetElementPtrInst>(A))
- if (const Instruction *BI = dyn_cast<Instruction>(B))
- if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
- return true;
-
- // Otherwise they may not be equivalent.
- return false;
-}
-
-/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at the
-/// instruction before ScanFrom) checking to see if we have the value at the
-/// memory address *Ptr locally available within a small number of instructions.
-/// If the value is available, return it.
-///
-/// If not, return the iterator for the last validated instruction that the
-/// value would be live through. If we scanned the entire block and didn't find
-/// something that invalidates *Ptr or provides it, ScanFrom would be left at
-/// begin() and this returns null. ScanFrom could also be left
-///
-/// MaxInstsToScan specifies the maximum instructions to scan in the block. If
-/// it is set to 0, it will scan the whole block. You can also optionally
-/// specify an alias analysis implementation, which makes this more precise.
-Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
- BasicBlock::iterator &ScanFrom,
- unsigned MaxInstsToScan,
- AliasAnalysis *AA) {
- if (MaxInstsToScan == 0) MaxInstsToScan = ~0U;
-
- // If we're using alias analysis to disambiguate get the size of *Ptr.
- unsigned AccessSize = 0;
- if (AA) {
- const Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
- AccessSize = AA->getTypeStoreSize(AccessTy);
- }
-
- while (ScanFrom != ScanBB->begin()) {
- // We must ignore debug info directives when counting (otherwise they
- // would affect codegen).
- Instruction *Inst = --ScanFrom;
- if (isa<DbgInfoIntrinsic>(Inst))
- continue;
-
- // Restore ScanFrom to expected value in case next test succeeds
- ScanFrom++;
-
- // Don't scan huge blocks.
- if (MaxInstsToScan-- == 0) return 0;
-
- --ScanFrom;
- // If this is a load of Ptr, the loaded value is available.
- if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
- if (AreEquivalentAddressValues(LI->getOperand(0), Ptr))
- return LI;
-
- if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
- // If this is a store through Ptr, the value is available!
- if (AreEquivalentAddressValues(SI->getOperand(1), Ptr))
- return SI->getOperand(0);
-
- // If Ptr is an alloca and this is a store to a different alloca, ignore
- // the store. This is a trivial form of alias analysis that is important
- // for reg2mem'd code.
- if ((isa<AllocaInst>(Ptr) || isa<GlobalVariable>(Ptr)) &&
- (isa<AllocaInst>(SI->getOperand(1)) ||
- isa<GlobalVariable>(SI->getOperand(1))))
- continue;
-
- // If we have alias analysis and it says the store won't modify the loaded
- // value, ignore the store.
- if (AA &&
- (AA->getModRefInfo(SI, Ptr, AccessSize) & AliasAnalysis::Mod) == 0)
- continue;
-
- // Otherwise the store may or may not alias the pointer; bail out.
- ++ScanFrom;
- return 0;
- }
-
- // If this is some other instruction that may clobber Ptr, bail out.
- if (Inst->mayWriteToMemory()) {
- // If alias analysis claims that it really won't modify the load,
- // ignore it.
- if (AA &&
- (AA->getModRefInfo(Inst, Ptr, AccessSize) & AliasAnalysis::Mod) == 0)
- continue;
-
- // May modify the pointer, bail out.
- ++ScanFrom;
- return 0;
- }
- }
-
- // Got to the start of the block, we didn't find it, but are done for this
- // block.
- return 0;
-}
-
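The two helpers deleted above are relocated in this import (to the new Analysis/Loads interface) rather than dropped. As a toy model of what FindAvailableLoadedValue does, kept deliberately independent of the LLVM API (the types and names are invented for illustration):

    #include <cstddef>
    #include <vector>

    struct Op { enum Kind { Load, Store, Clobber } kind; int addr; int val; };

    // Walk backwards from position 'from'; a prior load or store of 'addr'
    // supplies the value, while any other write to memory ends the search.
    // The real code additionally asks alias analysis before giving up on an
    // unrelated store or clobber.
    static bool findAvailableValue(const std::vector<Op> &bb, std::size_t from,
                                   int addr, int &out) {
      while (from != 0) {
        const Op &op = bb[--from];
        if (op.kind == Op::Load && op.addr == addr) { out = op.val; return true; }
        if (op.kind == Op::Store) {
          if (op.addr == addr) { out = op.val; return true; }
          return false;   // without alias analysis, any other store may alias
        }
        if (op.kind == Op::Clobber) return false;
      }
      return false;       // reached the block start without an answer
    }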
diff --git a/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp b/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
index 8c25ad1..26f53c0 100644
--- a/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -106,11 +106,12 @@ bool llvm::isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
// If AllowIdenticalEdges is true, then we allow this edge to be considered
// non-critical iff all preds come from TI's block.
while (I != E) {
- if (*I != FirstPred)
+ const BasicBlock *P = *I;
+ if (P != FirstPred)
return true;
// Note: leave this as is until no one ever compiles with either gcc 4.0.1
// or Xcode 2. This seems to work around the pred_iterator assert in PR 2207
- E = pred_end(*I);
+ E = pred_end(P);
++I;
}
return false;
@@ -277,11 +278,13 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
OtherPreds.push_back(PN->getIncomingBlock(i));
} else {
for (pred_iterator I = pred_begin(DestBB), E = pred_end(DestBB);
- I != E; ++I)
- if (*I != NewBB)
- OtherPreds.push_back(*I);
+ I != E; ++I) {
+ BasicBlock *P = *I;
+ if (P != NewBB)
+ OtherPreds.push_back(P);
+ }
}
-
+
bool NewBBDominatesDestBB = true;
// Should we update DominatorTree information?
@@ -400,11 +403,13 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
bool HasPredOutsideOfLoop = false;
BasicBlock *Exit = ExitBlocks[i];
for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit);
- I != E; ++I)
- if (TIL->contains(*I))
- Preds.push_back(*I);
+ I != E; ++I) {
+ BasicBlock *P = *I;
+ if (TIL->contains(P))
+ Preds.push_back(P);
else
HasPredOutsideOfLoop = true;
+ }
// If there are any preds not in the loop, we'll need to split
// the edges. The Preds.empty() check is needed because a block
// may appear multiple times in the list. We can't use
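One refactor repeats throughout this patch (here, and again in LoopSimplify, Local, and LCSSA below): predecessor loops now hoist *PI into a named local. A sketch of the idiom, on the presumption that it evaluates the iterator dereference once per step and lets the body read in terms of a plain BasicBlock*:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Support/CFG.h"
    using namespace llvm;

    // Illustrative only: collect the predecessors of BB, dereferencing the
    // pred_iterator exactly once per iteration.
    static void collectPreds(BasicBlock *BB,
                             SmallVectorImpl<BasicBlock*> &Preds) {
      for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
           PI != PE; ++PI) {
        BasicBlock *P = *PI;   // cache the dereference, then use P below
        Preds.push_back(P);
      }
    }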
diff --git a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
index 767fa3a..7a9d007 100644
--- a/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -69,6 +69,31 @@ Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
return CI;
}
+/// EmitStrNCmp - Emit a call to the strncmp function to the builder.
+Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
+ IRBuilder<> &B, const TargetData *TD) {
+ Module *M = B.GetInsertBlock()->getParent()->getParent();
+ AttributeWithIndex AWI[3];
+ AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
+ AWI[1] = AttributeWithIndex::get(2, Attribute::NoCapture);
+ AWI[2] = AttributeWithIndex::get(~0u, Attribute::ReadOnly |
+ Attribute::NoUnwind);
+
+ LLVMContext &Context = B.GetInsertBlock()->getContext();
+ Value *StrNCmp = M->getOrInsertFunction("strncmp", AttrListPtr::get(AWI, 3),
+ B.getInt32Ty(),
+ B.getInt8PtrTy(),
+ B.getInt8PtrTy(),
+ TD->getIntPtrType(Context), NULL);
+ CallInst *CI = B.CreateCall3(StrNCmp, CastToCStr(Ptr1, B),
+ CastToCStr(Ptr2, B), Len, "strncmp");
+
+ if (const Function *F = dyn_cast<Function>(StrNCmp->stripPointerCasts()))
+ CI->setCallingConv(F->getCallingConv());
+
+ return CI;
+}
+
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
@@ -112,10 +137,10 @@ Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
Value *llvm::EmitMemCpy(Value *Dst, Value *Src, Value *Len, unsigned Align,
bool isVolatile, IRBuilder<> &B, const TargetData *TD) {
Module *M = B.GetInsertBlock()->getParent()->getParent();
- const Type *ArgTys[3] = { Dst->getType(), Src->getType(), Len->getType() };
- Value *MemCpy = Intrinsic::getDeclaration(M, Intrinsic::memcpy, ArgTys, 3);
Dst = CastToCStr(Dst, B);
Src = CastToCStr(Src, B);
+ const Type *ArgTys[3] = { Dst->getType(), Src->getType(), Len->getType() };
+ Value *MemCpy = Intrinsic::getDeclaration(M, Intrinsic::memcpy, ArgTys, 3);
return B.CreateCall5(MemCpy, Dst, Src, Len,
ConstantInt::get(B.getInt32Ty(), Align),
ConstantInt::get(B.getInt1Ty(), isVolatile));
@@ -395,11 +420,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
FT->getParamType(2) != TD->getIntPtrType(Context) ||
FT->getParamType(3) != TD->getIntPtrType(Context))
return false;
-
- if (isFoldable(4, 3, false)) {
- EmitMemCpy(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
+
+ if (isFoldable(3 + CallInst::ArgOffset, 2 + CallInst::ArgOffset, false)) {
+ EmitMemCpy(CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
1, false, B, TD);
- replaceCall(CI->getOperand(1));
+ replaceCall(CI->getArgOperand(0));
return true;
}
return false;
@@ -418,11 +443,11 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
FT->getParamType(2) != TD->getIntPtrType(Context) ||
FT->getParamType(3) != TD->getIntPtrType(Context))
return false;
-
- if (isFoldable(4, 3, false)) {
- EmitMemMove(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
+
+ if (isFoldable(3 + CallInst::ArgOffset, 2 + CallInst::ArgOffset, false)) {
+ EmitMemMove(CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2),
1, false, B, TD);
- replaceCall(CI->getOperand(1));
+ replaceCall(CI->getArgOperand(0));
return true;
}
return false;
@@ -436,12 +461,12 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
FT->getParamType(2) != TD->getIntPtrType(Context) ||
FT->getParamType(3) != TD->getIntPtrType(Context))
return false;
-
- if (isFoldable(4, 3, false)) {
- Value *Val = B.CreateIntCast(CI->getOperand(2), B.getInt8Ty(),
+
+ if (isFoldable(3 + CallInst::ArgOffset, 2 + CallInst::ArgOffset, false)) {
+ Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(),
false);
- EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), false, B, TD);
- replaceCall(CI->getOperand(1));
+ EmitMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), false, B, TD);
+ replaceCall(CI->getArgOperand(0));
return true;
}
return false;
@@ -462,8 +487,8 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
// st[rp]cpy_chk call which may fail at runtime if the size is too long.
// TODO: It might be nice to get a maximum length out of the possible
// string lengths for varying.
- if (isFoldable(3, 2, true)) {
- Value *Ret = EmitStrCpy(CI->getOperand(1), CI->getOperand(2), B, TD,
+ if (isFoldable(2 + CallInst::ArgOffset, 1 + CallInst::ArgOffset, true)) {
+ Value *Ret = EmitStrCpy(CI->getArgOperand(0), CI->getArgOperand(1), B, TD,
Name.substr(2, 6));
replaceCall(Ret);
return true;
@@ -479,10 +504,10 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD) {
!FT->getParamType(2)->isIntegerTy() ||
FT->getParamType(3) != TD->getIntPtrType(Context))
return false;
-
- if (isFoldable(4, 3, false)) {
- Value *Ret = EmitStrNCpy(CI->getOperand(1), CI->getOperand(2),
- CI->getOperand(3), B, TD, Name.substr(2, 7));
+
+ if (isFoldable(3 + CallInst::ArgOffset, 2 + CallInst::ArgOffset, false)) {
+ Value *Ret = EmitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), B, TD, Name.substr(2, 7));
replaceCall(Ret);
return true;
}
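A hedged usage sketch for the EmitStrNCmp helper added above, following the signature in this hunk; the fixed length and the wrapper function are illustrative, and TD must be non-null because the helper uses it for the length's intptr type:

    #include "llvm/Constants.h"
    #include "llvm/Support/IRBuilder.h"
    #include "llvm/Target/TargetData.h"
    #include "llvm/Transforms/Utils/BuildLibCalls.h"
    using namespace llvm;

    // Emit "strncmp(S1, S2, 8)" at B's current insertion point.
    static Value *emitFixedStrNCmp(Value *S1, Value *S2, IRBuilder<> &B,
                                   const TargetData *TD) {
      LLVMContext &Ctx = B.GetInsertBlock()->getContext();
      Value *Len = ConstantInt::get(TD->getIntPtrType(Ctx), 8);
      return EmitStrNCmp(S1, S2, Len, B, TD);
    }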
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
index 6d4fe4b..1dcfd57 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -32,7 +32,7 @@ using namespace llvm;
// CloneBasicBlock - See comments in Cloning.h
BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueToValueMapTy &VMap,
const Twine &NameSuffix, Function *F,
ClonedCodeInfo *CodeInfo) {
BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "", F);
@@ -47,7 +47,7 @@ BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB,
if (II->hasName())
NewInst->setName(II->getName()+NameSuffix);
NewBB->getInstList().push_back(NewInst);
- ValueMap[II] = NewInst; // Add instruction map to value.
+ VMap[II] = NewInst; // Add instruction map to value.
hasCalls |= (isa<CallInst>(II) && !isa<DbgInfoIntrinsic>(II));
if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
@@ -72,7 +72,7 @@ BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB,
// ArgMap values.
//
void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueToValueMapTy &VMap,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix, ClonedCodeInfo *CodeInfo) {
assert(NameSuffix && "NameSuffix cannot be null!");
@@ -80,17 +80,17 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
#ifndef NDEBUG
for (Function::const_arg_iterator I = OldFunc->arg_begin(),
E = OldFunc->arg_end(); I != E; ++I)
- assert(ValueMap.count(I) && "No mapping from source argument specified!");
+ assert(VMap.count(I) && "No mapping from source argument specified!");
#endif
// Clone any attributes.
if (NewFunc->arg_size() == OldFunc->arg_size())
NewFunc->copyAttributesFrom(OldFunc);
else {
- //Some arguments were deleted with the ValueMap. Copy arguments one by one
+ // Some arguments were deleted with the VMap. Copy arguments one by one.
for (Function::const_arg_iterator I = OldFunc->arg_begin(),
E = OldFunc->arg_end(); I != E; ++I)
- if (Argument* Anew = dyn_cast<Argument>(ValueMap[I]))
+ if (Argument* Anew = dyn_cast<Argument>(VMap[I]))
Anew->addAttr( OldFunc->getAttributes()
.getParamAttributes(I->getArgNo() + 1));
NewFunc->setAttributes(NewFunc->getAttributes()
@@ -111,43 +111,43 @@ void llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
const BasicBlock &BB = *BI;
// Create a new basic block and copy instructions into it!
- BasicBlock *CBB = CloneBasicBlock(&BB, ValueMap, NameSuffix, NewFunc,
+ BasicBlock *CBB = CloneBasicBlock(&BB, VMap, NameSuffix, NewFunc,
CodeInfo);
- ValueMap[&BB] = CBB; // Add basic block mapping.
+ VMap[&BB] = CBB; // Add basic block mapping.
if (ReturnInst *RI = dyn_cast<ReturnInst>(CBB->getTerminator()))
Returns.push_back(RI);
}
// Loop over all of the instructions in the function, fixing up operand
- // references as we go. This uses ValueMap to do all the hard work.
+ // references as we go. This uses VMap to do all the hard work.
//
- for (Function::iterator BB = cast<BasicBlock>(ValueMap[OldFunc->begin()]),
+ for (Function::iterator BB = cast<BasicBlock>(VMap[OldFunc->begin()]),
BE = NewFunc->end(); BB != BE; ++BB)
// Loop over all instructions, fixing each one as we find it...
for (BasicBlock::iterator II = BB->begin(); II != BB->end(); ++II)
- RemapInstruction(II, ValueMap);
+ RemapInstruction(II, VMap);
}
/// CloneFunction - Return a copy of the specified function, but without
/// embedding the function into another module. Also, any references specified
-/// in the ValueMap are changed to refer to their mapped value instead of the
-/// original one. If any of the arguments to the function are in the ValueMap,
-/// the arguments are deleted from the resultant function. The ValueMap is
+/// in the VMap are changed to refer to their mapped value instead of the
+/// original one. If any of the arguments to the function are in the VMap,
+/// the arguments are deleted from the resultant function. The VMap is
/// updated to include mappings from all of the instructions and basicblocks in
/// the function from their old to new values.
///
Function *llvm::CloneFunction(const Function *F,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueToValueMapTy &VMap,
ClonedCodeInfo *CodeInfo) {
std::vector<const Type*> ArgTypes;
// The user might be deleting arguments to the function by specifying them in
- // the ValueMap. If so, we need to not add the arguments to the arg ty vector
+ // the VMap. If so, we need to not add the arguments to the arg ty vector
//
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I)
- if (ValueMap.count(I) == 0) // Haven't mapped the argument to anything yet?
+ if (VMap.count(I) == 0) // Haven't mapped the argument to anything yet?
ArgTypes.push_back(I->getType());
// Create a new function type...
@@ -161,13 +161,13 @@ Function *llvm::CloneFunction(const Function *F,
Function::arg_iterator DestI = NewF->arg_begin();
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I)
- if (ValueMap.count(I) == 0) { // Is this argument preserved?
+ if (VMap.count(I) == 0) { // Is this argument preserved?
DestI->setName(I->getName()); // Copy the name over...
- ValueMap[I] = DestI++; // Add mapping to ValueMap
+ VMap[I] = DestI++; // Add mapping to VMap
}
SmallVector<ReturnInst*, 8> Returns; // Ignore returns cloned.
- CloneFunctionInto(NewF, F, ValueMap, Returns, "", CodeInfo);
+ CloneFunctionInto(NewF, F, VMap, Returns, "", CodeInfo);
return NewF;
}
@@ -179,19 +179,19 @@ namespace {
struct PruningFunctionCloner {
Function *NewFunc;
const Function *OldFunc;
- DenseMap<const Value*, Value*> &ValueMap;
+ ValueToValueMapTy &VMap;
SmallVectorImpl<ReturnInst*> &Returns;
const char *NameSuffix;
ClonedCodeInfo *CodeInfo;
const TargetData *TD;
public:
PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
- DenseMap<const Value*, Value*> &valueMap,
+ ValueToValueMapTy &valueMap,
SmallVectorImpl<ReturnInst*> &returns,
const char *nameSuffix,
ClonedCodeInfo *codeInfo,
const TargetData *td)
- : NewFunc(newFunc), OldFunc(oldFunc), ValueMap(valueMap), Returns(returns),
+ : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap), Returns(returns),
NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) {
}
@@ -202,7 +202,7 @@ namespace {
public:
/// ConstantFoldMappedInstruction - Constant fold the specified instruction,
- /// mapping its operands through ValueMap if they are available.
+ /// mapping its operands through VMap if they are available.
Constant *ConstantFoldMappedInstruction(const Instruction *I);
};
}
@@ -211,7 +211,7 @@ namespace {
/// anything that it can reach.
void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
std::vector<const BasicBlock*> &ToClone){
- Value *&BBEntry = ValueMap[BB];
+ Value *&BBEntry = VMap[BB];
// Have we already cloned this block?
if (BBEntry) return;
@@ -230,7 +230,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
// If this instruction constant folds, don't bother cloning the instruction,
// instead, just add the constant to the value map.
if (Constant *C = ConstantFoldMappedInstruction(II)) {
- ValueMap[II] = C;
+ VMap[II] = C;
continue;
}
@@ -238,7 +238,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
if (II->hasName())
NewInst->setName(II->getName()+NameSuffix);
NewBB->getInstList().push_back(NewInst);
- ValueMap[II] = NewInst; // Add instruction map to value.
+ VMap[II] = NewInst; // Add instruction map to value.
hasCalls |= (isa<CallInst>(II) && !isa<DbgInfoIntrinsic>(II));
if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
@@ -258,12 +258,12 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
// Or is a known constant in the caller...
if (Cond == 0)
- Cond = dyn_cast_or_null<ConstantInt>(ValueMap[BI->getCondition()]);
+ Cond = dyn_cast_or_null<ConstantInt>(VMap[BI->getCondition()]);
// Constant fold to uncond branch!
if (Cond) {
BasicBlock *Dest = BI->getSuccessor(!Cond->getZExtValue());
- ValueMap[OldTI] = BranchInst::Create(Dest, NewBB);
+ VMap[OldTI] = BranchInst::Create(Dest, NewBB);
ToClone.push_back(Dest);
TerminatorDone = true;
}
@@ -272,10 +272,10 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
// If switching on a value known constant in the caller.
ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
if (Cond == 0) // Or known constant after constant prop in the callee...
- Cond = dyn_cast_or_null<ConstantInt>(ValueMap[SI->getCondition()]);
+ Cond = dyn_cast_or_null<ConstantInt>(VMap[SI->getCondition()]);
if (Cond) { // Constant fold to uncond branch!
BasicBlock *Dest = SI->getSuccessor(SI->findCaseValue(Cond));
- ValueMap[OldTI] = BranchInst::Create(Dest, NewBB);
+ VMap[OldTI] = BranchInst::Create(Dest, NewBB);
ToClone.push_back(Dest);
TerminatorDone = true;
}
@@ -286,7 +286,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
if (OldTI->hasName())
NewInst->setName(OldTI->getName()+NameSuffix);
NewBB->getInstList().push_back(NewInst);
- ValueMap[OldTI] = NewInst; // Add instruction map to value.
+ VMap[OldTI] = NewInst; // Add instruction map to value.
// Recursively clone any reachable successor blocks.
const TerminatorInst *TI = BB->getTerminator();
@@ -307,13 +307,13 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
}
/// ConstantFoldMappedInstruction - Constant fold the specified instruction,
-/// mapping its operands through ValueMap if they are available.
+/// mapping its operands through VMap if they are available.
Constant *PruningFunctionCloner::
ConstantFoldMappedInstruction(const Instruction *I) {
SmallVector<Constant*, 8> Ops;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (Constant *Op = dyn_cast_or_null<Constant>(MapValue(I->getOperand(i),
- ValueMap)))
+ VMap)))
Ops.push_back(Op);
else
return 0; // All operands not constant!
@@ -363,7 +363,7 @@ static MDNode *UpdateInlinedAtInfo(MDNode *InsnMD, MDNode *TheCallMD) {
/// dead. Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueToValueMapTy &VMap,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix,
ClonedCodeInfo *CodeInfo,
@@ -374,10 +374,10 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
#ifndef NDEBUG
for (Function::const_arg_iterator II = OldFunc->arg_begin(),
E = OldFunc->arg_end(); II != E; ++II)
- assert(ValueMap.count(II) && "No mapping from source argument specified!");
+ assert(VMap.count(II) && "No mapping from source argument specified!");
#endif
- PruningFunctionCloner PFC(NewFunc, OldFunc, ValueMap, Returns,
+ PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, Returns,
NameSuffix, CodeInfo, TD);
// Clone the entry block, and anything recursively reachable from it.
@@ -397,14 +397,14 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
SmallVector<const PHINode*, 16> PHIToResolve;
for (Function::const_iterator BI = OldFunc->begin(), BE = OldFunc->end();
BI != BE; ++BI) {
- BasicBlock *NewBB = cast_or_null<BasicBlock>(ValueMap[BI]);
+ BasicBlock *NewBB = cast_or_null<BasicBlock>(VMap[BI]);
if (NewBB == 0) continue; // Dead block.
// Add the new block to the new function.
NewFunc->getBasicBlockList().push_back(NewBB);
// Loop over all of the instructions in the block, fixing up operand
- // references as we go. This uses ValueMap to do all the hard work.
+ // references as we go. This uses VMap to do all the hard work.
//
BasicBlock::iterator I = NewBB->begin();
@@ -455,7 +455,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
I->setMetadata(DbgKind, 0);
}
}
- RemapInstruction(I, ValueMap);
+ RemapInstruction(I, VMap);
}
}
@@ -465,19 +465,19 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
const PHINode *OPN = PHIToResolve[phino];
unsigned NumPreds = OPN->getNumIncomingValues();
const BasicBlock *OldBB = OPN->getParent();
- BasicBlock *NewBB = cast<BasicBlock>(ValueMap[OldBB]);
+ BasicBlock *NewBB = cast<BasicBlock>(VMap[OldBB]);
// Map operands for blocks that are live and remove operands for blocks
// that are dead.
for (; phino != PHIToResolve.size() &&
PHIToResolve[phino]->getParent() == OldBB; ++phino) {
OPN = PHIToResolve[phino];
- PHINode *PN = cast<PHINode>(ValueMap[OPN]);
+ PHINode *PN = cast<PHINode>(VMap[OPN]);
for (unsigned pred = 0, e = NumPreds; pred != e; ++pred) {
if (BasicBlock *MappedBlock =
- cast_or_null<BasicBlock>(ValueMap[PN->getIncomingBlock(pred)])) {
+ cast_or_null<BasicBlock>(VMap[PN->getIncomingBlock(pred)])) {
Value *InVal = MapValue(PN->getIncomingValue(pred),
- ValueMap);
+ VMap);
assert(InVal && "Unknown input value?");
PN->setIncomingValue(pred, InVal);
PN->setIncomingBlock(pred, MappedBlock);
@@ -531,15 +531,15 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
while ((PN = dyn_cast<PHINode>(I++))) {
Value *NV = UndefValue::get(PN->getType());
PN->replaceAllUsesWith(NV);
- assert(ValueMap[OldI] == PN && "ValueMap mismatch");
- ValueMap[OldI] = NV;
+ assert(VMap[OldI] == PN && "VMap mismatch");
+ VMap[OldI] = NV;
PN->eraseFromParent();
++OldI;
}
}
// NOTE: We cannot eliminate single entry phi nodes here, because of
- // ValueMap. Single entry phi nodes can have multiple ValueMap entries
- // pointing at them. Thus, deleting one would require scanning the ValueMap
+ // VMap. Single entry phi nodes can have multiple VMap entries
+ // pointing at them. Thus, deleting one would require scanning the VMap
// to update any entries in it that would require that. This would be
// really slow.
}
@@ -548,14 +548,14 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
// and zap unconditional fall-through branches. This happens all the time when
// specializing code: code specialization turns conditional branches into
// uncond branches, and this code folds them.
- Function::iterator I = cast<BasicBlock>(ValueMap[&OldFunc->getEntryBlock()]);
+ Function::iterator I = cast<BasicBlock>(VMap[&OldFunc->getEntryBlock()]);
while (I != NewFunc->end()) {
BranchInst *BI = dyn_cast<BranchInst>(I->getTerminator());
if (!BI || BI->isConditional()) { ++I; continue; }
// Note that we can't eliminate uncond branches if the destination has
// single-entry PHI nodes. Eliminating the single-entry phi nodes would
- // require scanning the ValueMap to update any entries that point to the phi
+ // require scanning the VMap to update any entries that point to the phi
// node.
BasicBlock *Dest = BI->getSuccessor(0);
if (!Dest->getSinglePredecessor() || isa<PHINode>(Dest->begin())) {
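Since the CloneFunction.cpp hunks are almost entirely the DenseMap-to-ValueToValueMapTy rename, a short usage sketch may help: the map is both an input (pre-seeded entries redirect or delete arguments) and an output (it accumulates old-to-new mappings). Per the doc comment above, a pre-mapped argument is dropped from the clone's signature; the wrapper below is illustrative:

    #include "llvm/Constants.h"
    #include "llvm/Function.h"
    #include "llvm/Transforms/Utils/Cloning.h"
    using namespace llvm;

    // Clone F, replacing every use of its first argument with null; the
    // clone's signature loses that argument entirely.
    static Function *cloneWithoutFirstArg(const Function *F) {
      ValueToValueMapTy VMap;
      Function::const_arg_iterator A = F->arg_begin();
      VMap[A] = Constant::getNullValue(A->getType());
      return CloneFunction(F, VMap, /*CodeInfo=*/0);
    }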
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneLoop.cpp b/contrib/llvm/lib/Transforms/Utils/CloneLoop.cpp
index 38928dc..551b630 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneLoop.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneLoop.cpp
@@ -15,7 +15,6 @@
#include "llvm/BasicBlock.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/Dominators.h"
-#include "llvm/ADT/DenseMap.h"
using namespace llvm;
@@ -23,13 +22,13 @@ using namespace llvm;
/// CloneDominatorInfo - Clone basicblock's dominator tree and, if available,
/// dominance info. It is expected that basic block is already cloned.
static void CloneDominatorInfo(BasicBlock *BB,
- DenseMap<const Value *, Value *> &ValueMap,
+ ValueMap<const Value *, Value *> &VMap,
DominatorTree *DT,
DominanceFrontier *DF) {
assert (DT && "DominatorTree is not available");
- DenseMap<const Value *, Value*>::iterator BI = ValueMap.find(BB);
- assert (BI != ValueMap.end() && "BasicBlock clone is missing");
+ ValueMap<const Value *, Value*>::iterator BI = VMap.find(BB);
+ assert (BI != VMap.end() && "BasicBlock clone is missing");
BasicBlock *NewBB = cast<BasicBlock>(BI->second);
// NewBB already got dominator info.
@@ -43,11 +42,11 @@ static void CloneDominatorInfo(BasicBlock *BB,
// NewBB's dominator is either BB's dominator or BB's dominator's clone.
BasicBlock *NewBBDom = BBDom;
- DenseMap<const Value *, Value*>::iterator BBDomI = ValueMap.find(BBDom);
- if (BBDomI != ValueMap.end()) {
+ ValueMap<const Value *, Value*>::iterator BBDomI = VMap.find(BBDom);
+ if (BBDomI != VMap.end()) {
NewBBDom = cast<BasicBlock>(BBDomI->second);
if (!DT->getNode(NewBBDom))
- CloneDominatorInfo(BBDom, ValueMap, DT, DF);
+ CloneDominatorInfo(BBDom, VMap, DT, DF);
}
DT->addNewBlock(NewBB, NewBBDom);
@@ -60,8 +59,8 @@ static void CloneDominatorInfo(BasicBlock *BB,
for (DominanceFrontier::DomSetType::iterator I = S.begin(), E = S.end();
I != E; ++I) {
BasicBlock *DB = *I;
- DenseMap<const Value*, Value*>::iterator IDM = ValueMap.find(DB);
- if (IDM != ValueMap.end())
+ ValueMap<const Value*, Value*>::iterator IDM = VMap.find(DB);
+ if (IDM != VMap.end())
NewDFSet.insert(cast<BasicBlock>(IDM->second));
else
NewDFSet.insert(DB);
@@ -71,10 +70,10 @@ static void CloneDominatorInfo(BasicBlock *BB,
}
}
-/// CloneLoop - Clone Loop. Clone dominator info. Populate ValueMap
+/// CloneLoop - Clone Loop. Clone dominator info. Populate VMap
/// using old blocks to new blocks mapping.
Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
- DenseMap<const Value *, Value *> &ValueMap, Pass *P) {
+ ValueMap<const Value *, Value *> &VMap, Pass *P) {
DominatorTree *DT = NULL;
DominanceFrontier *DF = NULL;
@@ -104,8 +103,8 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I) {
BasicBlock *BB = *I;
- BasicBlock *NewBB = CloneBasicBlock(BB, ValueMap, ".clone");
- ValueMap[BB] = NewBB;
+ BasicBlock *NewBB = CloneBasicBlock(BB, VMap, ".clone");
+ VMap[BB] = NewBB;
if (P)
LPM->cloneBasicBlockSimpleAnalysis(BB, NewBB, L);
NewLoop->addBasicBlockToLoop(NewBB, LI->getBase());
@@ -117,7 +116,7 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I) {
BasicBlock *BB = *I;
- CloneDominatorInfo(BB, ValueMap, DT, DF);
+ CloneDominatorInfo(BB, VMap, DT, DF);
}
// Process sub loops
@@ -125,7 +124,7 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
LoopNest.push_back(*I);
} while (!LoopNest.empty());
- // Remap instructions to reference operands from ValueMap.
+ // Remap instructions to reference operands from VMap.
for(SmallVector<BasicBlock *, 16>::iterator NBItr = NewBlocks.begin(),
NBE = NewBlocks.end(); NBItr != NBE; ++NBItr) {
BasicBlock *NB = *NBItr;
@@ -135,8 +134,8 @@ Loop *llvm::CloneLoop(Loop *OrigL, LPPassManager *LPM, LoopInfo *LI,
for (unsigned index = 0, num_ops = Insn->getNumOperands();
index != num_ops; ++index) {
Value *Op = Insn->getOperand(index);
- DenseMap<const Value *, Value *>::iterator OpItr = ValueMap.find(Op);
- if (OpItr != ValueMap.end())
+ ValueMap<const Value *, Value *>::iterator OpItr = VMap.find(Op);
+ if (OpItr != VMap.end())
Insn->setOperand(index, OpItr->second);
}
}
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp b/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
index b87c082..fc603d2 100644
--- a/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp
@@ -28,12 +28,12 @@ using namespace llvm;
Module *llvm::CloneModule(const Module *M) {
// Create the value map that maps things from the old module over to the new
// module.
- DenseMap<const Value*, Value*> ValueMap;
- return CloneModule(M, ValueMap);
+ ValueToValueMapTy VMap;
+ return CloneModule(M, VMap);
}
Module *llvm::CloneModule(const Module *M,
- DenseMap<const Value*, Value*> &ValueMap) {
+ ValueToValueMapTy &VMap) {
// First off, we need to create the new module...
Module *New = new Module(M->getModuleIdentifier(), M->getContext());
New->setDataLayout(M->getDataLayout());
@@ -51,7 +51,7 @@ Module *llvm::CloneModule(const Module *M,
New->addLibrary(*I);
// Loop over all of the global variables, making corresponding globals in the
- // new module. Here we add them to the ValueMap and to the new Module. We
+ // new module. Here we add them to the VMap and to the new Module. We
// don't worry about attributes or initializers, they will come later.
//
for (Module::const_global_iterator I = M->global_begin(), E = M->global_end();
@@ -62,7 +62,7 @@ Module *llvm::CloneModule(const Module *M,
GlobalValue::ExternalLinkage, 0,
I->getName());
GV->setAlignment(I->getAlignment());
- ValueMap[I] = GV;
+ VMap[I] = GV;
}
// Loop over the functions in the module, making external functions as before
@@ -71,13 +71,13 @@ Module *llvm::CloneModule(const Module *M,
Function::Create(cast<FunctionType>(I->getType()->getElementType()),
GlobalValue::ExternalLinkage, I->getName(), New);
NF->copyAttributesFrom(I);
- ValueMap[I] = NF;
+ VMap[I] = NF;
}
// Loop over the aliases in the module
for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
I != E; ++I)
- ValueMap[I] = new GlobalAlias(I->getType(), GlobalAlias::ExternalLinkage,
+ VMap[I] = new GlobalAlias(I->getType(), GlobalAlias::ExternalLinkage,
I->getName(), NULL, New);
// Now that all of the things that global variable initializer can refer to
@@ -86,10 +86,10 @@ Module *llvm::CloneModule(const Module *M,
//
for (Module::const_global_iterator I = M->global_begin(), E = M->global_end();
I != E; ++I) {
- GlobalVariable *GV = cast<GlobalVariable>(ValueMap[I]);
+ GlobalVariable *GV = cast<GlobalVariable>(VMap[I]);
if (I->hasInitializer())
GV->setInitializer(cast<Constant>(MapValue(I->getInitializer(),
- ValueMap)));
+ VMap)));
GV->setLinkage(I->getLinkage());
GV->setThreadLocal(I->isThreadLocal());
GV->setConstant(I->isConstant());
@@ -98,17 +98,17 @@ Module *llvm::CloneModule(const Module *M,
// Similarly, copy over function bodies now...
//
for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I) {
- Function *F = cast<Function>(ValueMap[I]);
+ Function *F = cast<Function>(VMap[I]);
if (!I->isDeclaration()) {
Function::arg_iterator DestI = F->arg_begin();
for (Function::const_arg_iterator J = I->arg_begin(); J != I->arg_end();
++J) {
DestI->setName(J->getName());
- ValueMap[J] = DestI++;
+ VMap[J] = DestI++;
}
SmallVector<ReturnInst*, 8> Returns; // Ignore returns cloned.
- CloneFunctionInto(F, I, ValueMap, Returns);
+ CloneFunctionInto(F, I, VMap, Returns);
}
F->setLinkage(I->getLinkage());
@@ -117,11 +117,37 @@ Module *llvm::CloneModule(const Module *M,
// And aliases
for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
I != E; ++I) {
- GlobalAlias *GA = cast<GlobalAlias>(ValueMap[I]);
+ GlobalAlias *GA = cast<GlobalAlias>(VMap[I]);
GA->setLinkage(I->getLinkage());
if (const Constant* C = I->getAliasee())
- GA->setAliasee(cast<Constant>(MapValue(C, ValueMap)));
+ GA->setAliasee(cast<Constant>(MapValue(C, VMap)));
}
-
+
+ // And named metadata....
+ for (Module::const_named_metadata_iterator I = M->named_metadata_begin(),
+ E = M->named_metadata_end(); I != E; ++I) {
+ const NamedMDNode &NMD = *I;
+ SmallVector<MDNode*, 4> MDs;
+ for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i)
+ MDs.push_back(cast<MDNode>(MapValue(NMD.getOperand(i), VMap)));
+ NamedMDNode::Create(New->getContext(), NMD.getName(),
+ MDs.data(), MDs.size(), New);
+ }
+
+ // Update metadata attached to instructions.
+ for (Module::iterator MI = New->begin(), ME = New->end(); MI != ME; ++MI)
+ for (Function::iterator FI = MI->begin(), FE = MI->end();
+ FI != FE; ++FI)
+ for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
+ BI != BE; ++BI) {
+ SmallVector<std::pair<unsigned, MDNode *>, 4 > MDs;
+ BI->getAllMetadata(MDs);
+ for (SmallVector<std::pair<unsigned, MDNode *>, 4>::iterator
+ MDI = MDs.begin(), MDE = MDs.end(); MDI != MDE; ++MDI) {
+ Value *MappedValue = MapValue(MDI->second, VMap);
+ if (MDI->second != MappedValue && MappedValue)
+ BI->setMetadata(MDI->first, cast<MDNode>(MappedValue));
+ }
+ }
return New;
}
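A usage sketch for the CloneModule overloads above; passing your own map lets the caller find the clone of any original global afterwards. The "main" lookup is illustrative:

    #include "llvm/Module.h"
    #include "llvm/Transforms/Utils/Cloning.h"
    using namespace llvm;

    // Clone M and return the copy of its "main", if there is one; every
    // original global has a VMap entry once CloneModule returns.
    static Function *cloneAndFindMain(const Module *M, Module *&NewM) {
      ValueToValueMapTy VMap;
      NewM = CloneModule(M, VMap);
      if (const Function *Main = M->getFunction("main"))
        return cast<Function>(VMap[Main]);
      return 0;
    }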
diff --git a/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp b/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
index c908b4a..8e82a02 100644
--- a/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/DemoteRegToStack.cpp
@@ -35,7 +35,7 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
I.eraseFromParent();
return 0;
}
-
+
// Create a stack slot to hold the value.
AllocaInst *Slot;
if (AllocaPoint) {
@@ -46,7 +46,7 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
Slot = new AllocaInst(I.getType(), 0, I.getName()+".reg2mem",
F->getEntryBlock().begin());
}
-
+
// Change all of the users of the instruction to read from the stack slot
// instead.
while (!I.use_empty()) {
@@ -67,7 +67,7 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
Value *&V = Loads[PN->getIncomingBlock(i)];
if (V == 0) {
// Insert the load into the predecessor block
- V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
+ V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
PN->getIncomingBlock(i)->getTerminator());
}
PN->setIncomingValue(i, V);
@@ -110,8 +110,8 @@ AllocaInst* llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
/// The phi node is deleted and it returns the pointer to the alloca inserted.
AllocaInst* llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
if (P->use_empty()) {
- P->eraseFromParent();
- return 0;
+ P->eraseFromParent();
+ return 0;
}
// Create a stack slot to hold the value.
@@ -124,23 +124,23 @@ AllocaInst* llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
Slot = new AllocaInst(P->getType(), 0, P->getName()+".reg2mem",
F->getEntryBlock().begin());
}
-
+
// Iterate over each operand, insert store in each predecessor.
for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
if (InvokeInst *II = dyn_cast<InvokeInst>(P->getIncomingValue(i))) {
- assert(II->getParent() != P->getIncomingBlock(i) &&
+ assert(II->getParent() != P->getIncomingBlock(i) &&
"Invoke edge not supported yet"); II=II;
}
- new StoreInst(P->getIncomingValue(i), Slot,
+ new StoreInst(P->getIncomingValue(i), Slot,
P->getIncomingBlock(i)->getTerminator());
}
-
+
// Insert load in place of the phi and replace all uses.
Value *V = new LoadInst(Slot, P->getName()+".reload", P);
P->replaceAllUsesWith(V);
-
+
// Delete phi.
P->eraseFromParent();
-
+
return Slot;
}
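These hunks are whitespace cleanups, but both entry points in this file erase the instruction they are handed, which makes iteration order a trap. A hedged sketch of the collect-then-demote shape a reg2mem-style caller needs (the wrapper is illustrative):

    #include "llvm/Function.h"
    #include "llvm/Instructions.h"
    #include "llvm/Transforms/Utils/Local.h"
    #include <vector>
    using namespace llvm;

    // Demote every PHI in F to a stack slot. Collect first: DemotePHIToStack
    // erases the PHI, so demoting mid-walk would invalidate the iterator.
    static bool demoteAllPHIs(Function &F) {
      std::vector<PHINode*> Worklist;
      for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
        for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++I)
          Worklist.push_back(cast<PHINode>(I));
      for (unsigned i = 0, e = Worklist.size(); i != e; ++i)
        DemotePHIToStack(Worklist[i], /*AllocaPoint=*/0);
      return !Worklist.empty();
    }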
diff --git a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 91390bc..598e7d2 100644
--- a/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -63,7 +63,8 @@ static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
// Next, create the new invoke instruction, inserting it at the end
// of the old basic block.
- SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
+ ImmutableCallSite CS(CI);
+ SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
InvokeInst *II =
InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
InvokeArgs.begin(), InvokeArgs.end(),
@@ -169,7 +170,7 @@ static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
Function::iterator FirstNewBlock,
- DenseMap<const Value*, Value*> &ValueMap,
+ ValueMap<const Value*, Value*> &VMap,
InlineFunctionInfo &IFI) {
CallGraph &CG = *IFI.CG;
const Function *Caller = CS.getInstruction()->getParent()->getParent();
@@ -192,9 +193,9 @@ static void UpdateCallGraphAfterInlining(CallSite CS,
for (; I != E; ++I) {
const Value *OrigCall = I->first;
- DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
+ ValueMap<const Value*, Value*>::iterator VMI = VMap.find(OrigCall);
// Only copy the edge if the call was inlined!
- if (VMI == ValueMap.end() || VMI->second == 0)
+ if (VMI == VMap.end() || VMI->second == 0)
continue;
// If the call was inlined, but then constant folded, there is no edge to
@@ -285,8 +286,8 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
ClonedCodeInfo InlinedFunctionInfo;
Function::iterator FirstNewBlock;
- { // Scope to destroy ValueMap after cloning.
- DenseMap<const Value*, Value*> ValueMap;
+ { // Scope to destroy VMap after cloning.
+ ValueMap<const Value*, Value*> VMap;
assert(CalledFunc->arg_size() == CS.arg_size() &&
"No varargs calls can be inlined!");
@@ -351,16 +352,20 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
// Uses of the argument in the function should use our new alloca
// instead.
ActualArg = NewAlloca;
+
+ // Calls that we inline may use the new alloca, so we need to clear
+ // their 'tail' flags.
+ MustClearTailCallFlags = true;
}
- ValueMap[I] = ActualArg;
+ VMap[I] = ActualArg;
}
// We want the inliner to prune the code as it copies. We would LOVE to
// have no dead or constant instructions leftover after inlining occurs
// (which can happen, e.g., because an argument was constant), but we'll be
// happy with whatever the cloner can do.
- CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
+ CloneAndPruneFunctionInto(Caller, CalledFunc, VMap, Returns, ".i",
&InlinedFunctionInfo, IFI.TD, TheCall);
// Remember the first block that is newly cloned over.
@@ -368,7 +373,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
// Update the callgraph if requested.
if (IFI.CG)
- UpdateCallGraphAfterInlining(CS, FirstNewBlock, ValueMap, IFI);
+ UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
}
// If there are any alloca instructions in the block that used to be the entry
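Why the new MustClearTailCallFlags assignment above matters, in source-level terms: a byval argument becomes a caller-frame alloca when its function is inlined, and a call marked 'tail' is permitted to deallocate the caller's frame before running. The types and functions below are illustrative:

    struct Buf { char data[64]; };
    int consume(const Buf *p);   // illustrative external function

    int callee(Buf b) {          // 'b' is passed byval at the IR level
      return consume(&b);        // once callee() is inlined, '&b' points at
    }                            // the inliner's new alloca; if consume()
                                 // kept a 'tail' marker, it could outlive
                                 // the frame that owns that alloca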
diff --git a/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp b/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
index df6e603..e90c30b 100644
--- a/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LCSSA.cpp
@@ -190,14 +190,15 @@ bool LCSSA::ProcessInstruction(Instruction *Inst,
for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
UI != E; ++UI) {
- BasicBlock *UserBB = cast<Instruction>(*UI)->getParent();
- if (PHINode *PN = dyn_cast<PHINode>(*UI))
+ User *U = *UI;
+ BasicBlock *UserBB = cast<Instruction>(U)->getParent();
+ if (PHINode *PN = dyn_cast<PHINode>(U))
UserBB = PN->getIncomingBlock(UI);
if (InstBB != UserBB && !inLoop(UserBB))
UsesToRewrite.push_back(&UI.getUse());
}
-
+
// If there are no uses outside the loop, exit with no change.
if (UsesToRewrite.empty()) return false;
diff --git a/contrib/llvm/lib/Transforms/Utils/Local.cpp b/contrib/llvm/lib/Transforms/Utils/Local.cpp
index d03f7a6..8e91138 100644
--- a/contrib/llvm/lib/Transforms/Utils/Local.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/Local.cpp
@@ -35,111 +35,6 @@
using namespace llvm;
//===----------------------------------------------------------------------===//
-// Local analysis.
-//
-
-/// getUnderlyingObjectWithOffset - Strip off up to MaxLookup GEPs and
-/// bitcasts to get back to the underlying object being addressed, keeping
-/// track of the offset in bytes from the GEPs relative to the result.
-/// This is closely related to Value::getUnderlyingObject but is located
-/// here to avoid making VMCore depend on TargetData.
-static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
- uint64_t &ByteOffset,
- unsigned MaxLookup = 6) {
- if (!V->getType()->isPointerTy())
- return V;
- for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
- if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
- if (!GEP->hasAllConstantIndices())
- return V;
- SmallVector<Value*, 8> Indices(GEP->op_begin() + 1, GEP->op_end());
- ByteOffset += TD->getIndexedOffset(GEP->getPointerOperandType(),
- &Indices[0], Indices.size());
- V = GEP->getPointerOperand();
- } else if (Operator::getOpcode(V) == Instruction::BitCast) {
- V = cast<Operator>(V)->getOperand(0);
- } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
- if (GA->mayBeOverridden())
- return V;
- V = GA->getAliasee();
- } else {
- return V;
- }
- assert(V->getType()->isPointerTy() && "Unexpected operand type!");
- }
- return V;
-}
-
-/// isSafeToLoadUnconditionally - Return true if we know that executing a load
-/// from this value cannot trap. If it is not obviously safe to load from the
-/// specified pointer, we do a quick local scan of the basic block containing
-/// ScanFrom, to determine if the address is already accessed.
-bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align, const TargetData *TD) {
- uint64_t ByteOffset = 0;
- Value *Base = V;
- if (TD)
- Base = getUnderlyingObjectWithOffset(V, TD, ByteOffset);
-
- const Type *BaseType = 0;
- unsigned BaseAlign = 0;
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
- // An alloca is safe to load from as load as it is suitably aligned.
- BaseType = AI->getAllocatedType();
- BaseAlign = AI->getAlignment();
- } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(Base)) {
- // Global variables are safe to load from but their size cannot be
- // guaranteed if they are overridden.
- if (!isa<GlobalAlias>(GV) && !GV->mayBeOverridden()) {
- BaseType = GV->getType()->getElementType();
- BaseAlign = GV->getAlignment();
- }
- }
-
- if (BaseType && BaseType->isSized()) {
- if (TD && BaseAlign == 0)
- BaseAlign = TD->getPrefTypeAlignment(BaseType);
-
- if (Align <= BaseAlign) {
- if (!TD)
- return true; // Loading directly from an alloca or global is OK.
-
- // Check if the load is within the bounds of the underlying object.
- const PointerType *AddrTy = cast<PointerType>(V->getType());
- uint64_t LoadSize = TD->getTypeStoreSize(AddrTy->getElementType());
- if (ByteOffset + LoadSize <= TD->getTypeAllocSize(BaseType) &&
- (Align == 0 || (ByteOffset % Align) == 0))
- return true;
- }
- }
-
- // Otherwise, be a little bit aggressive by scanning the local block where we
- // want to check to see if the pointer is already being loaded or stored
- // from/to. If so, the previous load or store would have already trapped,
- // so there is no harm doing an extra load (also, CSE will later eliminate
- // the load entirely).
- BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();
-
- while (BBI != E) {
- --BBI;
-
- // If we see a free or a call which may write to memory (i.e. which might do
- // a free) the pointer could be marked invalid.
- if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
- !isa<DbgInfoIntrinsic>(BBI))
- return false;
-
- if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
- if (LI->getOperand(0) == V) return true;
- } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
- if (SI->getOperand(1) == V) return true;
- }
- }
- return false;
-}
-
-
-//===----------------------------------------------------------------------===//
// Local constant propagation.
//
@@ -411,7 +306,7 @@ bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) {
WeakVH BIHandle(BI);
ReplaceAndSimplifyAllUses(Inst, V, TD);
MadeChange = true;
- if (BIHandle == 0)
+ if (BIHandle != BI)
BI = BB->begin();
continue;
}
@@ -459,12 +354,13 @@ void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
// value into all of its uses.
assert(PNV != PN && "hasConstantValue broken");
+ Value *OldPhiIt = PhiIt;
ReplaceAndSimplifyAllUses(PN, PNV, TD);
// If recursive simplification ended up deleting the next PHI node we would
// iterate to, then our iterator is invalid, restart scanning from the top
// of the block.
- if (PhiIt == 0) PhiIt = &BB->front();
+ if (PhiIt != OldPhiIt) PhiIt = &BB->front();
}
}
@@ -537,9 +433,11 @@ static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
// Use that list to make another list of common predecessors of BB and Succ
BlockSet CommonPreds;
for (pred_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
- PI != PE; ++PI)
- if (BBPreds.count(*PI))
- CommonPreds.insert(*PI);
+ PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ if (BBPreds.count(P))
+ CommonPreds.insert(P);
+ }
// Shortcut, if there are no common predecessors, merging is always safe
if (CommonPreds.empty())
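The analysis block deleted at the top of this file likewise moves to the new Analysis/Loads home. The BIHandle and PhiIt changes above both tighten the same idiom: a WeakVH is nulled when its instruction is deleted but retargeted by replaceAllUsesWith, so comparing the handle against the original pointer catches both events, where the old null test caught only deletion. A minimal sketch, with mayDeleteOrReplace() as a hypothetical stand-in for utilities such as ReplaceAndSimplifyAllUses:

    #include "llvm/BasicBlock.h"
    #include "llvm/Instructions.h"
    #include "llvm/Support/ValueHandle.h"
    using namespace llvm;

    void mayDeleteOrReplace(Instruction *I);  // hypothetical helper

    // Restart the scan of BB whenever Inst was deleted or RAUW'd away.
    static void scanStep(BasicBlock *BB, BasicBlock::iterator &BI,
                         Instruction *Inst) {
      WeakVH Handle(Inst);
      mayDeleteOrReplace(Inst);
      if (Handle != Inst)     // null (deleted) or retargeted (replaced):
        BI = BB->begin();     // our iterator may be stale, so restart
    }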
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp b/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
index 1ef3c32..4f4edf3 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopSimplify.cpp
@@ -142,9 +142,11 @@ ReprocessLoop:
if (*BB == L->getHeader()) continue;
SmallPtrSet<BasicBlock *, 4> BadPreds;
- for (pred_iterator PI = pred_begin(*BB), PE = pred_end(*BB); PI != PE; ++PI)
- if (!L->contains(*PI))
- BadPreds.insert(*PI);
+ for (pred_iterator PI = pred_begin(*BB), PE = pred_end(*BB); PI != PE; ++PI){
+ BasicBlock *P = *PI;
+ if (!L->contains(P))
+ BadPreds.insert(P);
+ }
// Delete each unique out-of-loop (and thus dead) predecessor.
for (SmallPtrSet<BasicBlock *, 4>::iterator I = BadPreds.begin(),
@@ -192,7 +194,7 @@ ReprocessLoop:
if (!Preheader) {
Preheader = InsertPreheaderForLoop(L);
if (Preheader) {
- NumInserted++;
+ ++NumInserted;
Changed = true;
}
}
@@ -215,7 +217,7 @@ ReprocessLoop:
// allowed.
if (!L->contains(*PI)) {
if (RewriteLoopExitBlock(L, ExitBlock)) {
- NumInserted++;
+ ++NumInserted;
Changed = true;
}
break;
@@ -244,7 +246,7 @@ ReprocessLoop:
// loop header.
LoopLatch = InsertUniqueBackedgeBlock(L, Preheader);
if (LoopLatch) {
- NumInserted++;
+ ++NumInserted;
Changed = true;
}
}
@@ -353,16 +355,18 @@ BasicBlock *LoopSimplify::InsertPreheaderForLoop(Loop *L) {
// Compute the set of predecessors of the loop that are not in the loop.
SmallVector<BasicBlock*, 8> OutsideBlocks;
for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
- PI != PE; ++PI)
- if (!L->contains(*PI)) { // Coming in from outside the loop?
+ PI != PE; ++PI) {
+ BasicBlock *P = *PI;
+ if (!L->contains(P)) { // Coming in from outside the loop?
// If the loop is branched to from an indirect branch, we won't
// be able to fully transform the loop, because it prohibits
// edge splitting.
- if (isa<IndirectBrInst>((*PI)->getTerminator())) return 0;
+ if (isa<IndirectBrInst>(P->getTerminator())) return 0;
// Keep track of it.
- OutsideBlocks.push_back(*PI);
+ OutsideBlocks.push_back(P);
}
+ }
// Split out the loop pre-header.
BasicBlock *NewBB =
@@ -385,13 +389,15 @@ BasicBlock *LoopSimplify::InsertPreheaderForLoop(Loop *L) {
/// outside of the loop.
BasicBlock *LoopSimplify::RewriteLoopExitBlock(Loop *L, BasicBlock *Exit) {
SmallVector<BasicBlock*, 8> LoopBlocks;
- for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit); I != E; ++I)
- if (L->contains(*I)) {
+ for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit); I != E; ++I) {
+ BasicBlock *P = *I;
+ if (L->contains(P)) {
// Don't do this if the loop is exited via an indirect branch.
- if (isa<IndirectBrInst>((*I)->getTerminator())) return 0;
+ if (isa<IndirectBrInst>(P->getTerminator())) return 0;
- LoopBlocks.push_back(*I);
+ LoopBlocks.push_back(P);
}
+ }
assert(!LoopBlocks.empty() && "No edges coming in from outside the loop?");
BasicBlock *NewBB = SplitBlockPredecessors(Exit, &LoopBlocks[0],
@@ -559,10 +565,11 @@ Loop *LoopSimplify::SeparateNestedLoop(Loop *L, LPPassManager &LPM) {
// Determine which blocks should stay in L and which should be moved out to
// the Outer loop now.
std::set<BasicBlock*> BlocksInL;
- for (pred_iterator PI = pred_begin(Header), E = pred_end(Header); PI!=E; ++PI)
- if (DT->dominates(Header, *PI))
- AddBlockAndPredsToSet(*PI, Header, BlocksInL);
-
+ for (pred_iterator PI=pred_begin(Header), E = pred_end(Header); PI!=E; ++PI) {
+ BasicBlock *P = *PI;
+ if (DT->dominates(Header, P))
+ AddBlockAndPredsToSet(P, Header, BlocksInL);
+ }
// Scan all of the loop children of L, moving them to OuterLoop if they are
// not part of the inner loop.
@@ -610,8 +617,10 @@ LoopSimplify::InsertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader) {
// Figure out which basic blocks contain back-edges to the loop header.
std::vector<BasicBlock*> BackedgeBlocks;
- for (pred_iterator I = pred_begin(Header), E = pred_end(Header); I != E; ++I)
- if (*I != Preheader) BackedgeBlocks.push_back(*I);
+ for (pred_iterator I = pred_begin(Header), E = pred_end(Header); I != E; ++I){
+ BasicBlock *P = *I;
+ if (P != Preheader) BackedgeBlocks.push_back(P);
+ }
// Create and insert the new backedge block...
BasicBlock *BEBlock = BasicBlock::Create(Header->getContext(),
diff --git a/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp b/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
index 84fd1eb..e0e07e7 100644
--- a/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LoopUnroll.cpp
@@ -37,13 +37,13 @@ STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");
/// RemapInstruction - Convert the instruction operands from referencing the
-/// current values into those specified by ValueMap.
+/// current values into those specified by VMap.
static inline void RemapInstruction(Instruction *I,
- DenseMap<const Value *, Value*> &ValueMap) {
+ ValueMap<const Value *, Value*> &VMap) {
for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
Value *Op = I->getOperand(op);
- DenseMap<const Value *, Value*>::iterator It = ValueMap.find(Op);
- if (It != ValueMap.end())
+ ValueMap<const Value *, Value*>::iterator It = VMap.find(Op);
+ if (It != VMap.end())
I->setOperand(op, It->second);
}
}
@@ -183,7 +183,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
// For the first iteration of the loop, we should use the precloned values for
// PHI nodes. Insert associations now.
- typedef DenseMap<const Value*, Value*> ValueToValueMapTy;
+ typedef ValueMap<const Value*, Value*> ValueToValueMapTy;
ValueToValueMapTy LastValueMap;
std::vector<PHINode*> OrigPHINode;
for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
@@ -205,26 +205,26 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)
for (std::vector<BasicBlock*>::iterator BB = LoopBlocks.begin(),
E = LoopBlocks.end(); BB != E; ++BB) {
- ValueToValueMapTy ValueMap;
- BasicBlock *New = CloneBasicBlock(*BB, ValueMap, "." + Twine(It));
+ ValueToValueMapTy VMap;
+ BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
Header->getParent()->getBasicBlockList().push_back(New);
// Loop over all of the PHI nodes in the block, changing them to use the
// incoming values from the previous block.
if (*BB == Header)
for (unsigned i = 0, e = OrigPHINode.size(); i != e; ++i) {
- PHINode *NewPHI = cast<PHINode>(ValueMap[OrigPHINode[i]]);
+ PHINode *NewPHI = cast<PHINode>(VMap[OrigPHINode[i]]);
Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock);
if (Instruction *InValI = dyn_cast<Instruction>(InVal))
if (It > 1 && L->contains(InValI))
InVal = LastValueMap[InValI];
- ValueMap[OrigPHINode[i]] = InVal;
+ VMap[OrigPHINode[i]] = InVal;
New->getInstList().erase(NewPHI);
}
// Update our running map of newest clones
LastValueMap[*BB] = New;
- for (ValueToValueMapTy::iterator VI = ValueMap.begin(), VE = ValueMap.end();
+ for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
VI != VE; ++VI)
LastValueMap[VI->first] = VI->second;
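
The hunk above relies on a running "newest clone" map: each unrolled iteration clones the body, merges its per-iteration map into LastValueMap, and then remaps operands so the clones reference the newest definitions. A minimal standalone sketch of that pattern, in plain C++ with toy instruction records rather than the LLVM classes:

    #include <cstdio>
    #include <map>
    #include <vector>

    struct Instr { int id; std::vector<int> ops; };   // toy stand-in for Instruction

    int main() {
      std::vector<Instr> body = { {0, {}}, {1, {0}}, {2, {1, 0}} };
      std::map<int, int> lastValue;          // original id -> newest clone id
      for (const Instr &I : body) lastValue[I.id] = I.id;

      std::vector<Instr> out = body;
      int nextId = 3;
      for (int it = 0; it < 2; ++it) {       // two extra copies of the body
        std::map<int, int> vmap;             // this iteration's clone map
        for (const Instr &I : body) {
          out.push_back({nextId, I.ops});    // clone with unremapped operands
          vmap[I.id] = nextId++;
        }
        for (auto &kv : vmap)                // roll the running map forward
          lastValue[kv.first] = kv.second;
        for (size_t i = out.size() - body.size(); i != out.size(); ++i)
          for (int &op : out[i].ops)         // remap through the rolled map,
            op = lastValue[op];              // like RemapInstruction above
      }
      for (const Instr &I : out) {
        std::printf("%%%d =", I.id);
        for (int op : I.ops) std::printf(" %%%d", op);
        std::printf("\n");
      }
    }
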
diff --git a/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp b/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp
index 0ed8c72..2696e69 100644
--- a/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/LowerInvoke.cpp
@@ -45,6 +45,7 @@
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
@@ -62,10 +63,7 @@ static cl::opt<bool> ExpensiveEHSupport("enable-correct-eh-support",
namespace {
class LowerInvoke : public FunctionPass {
// Used for both models.
- Constant *WriteFn;
Constant *AbortFn;
- Value *AbortMessage;
- unsigned AbortMessageLength;
// Used for expensive EH support.
const Type *JBLinkTy;
@@ -92,10 +90,8 @@ namespace {
}
private:
- void createAbortMessage(Module *M);
- void writeAbortMessage(Instruction *IB);
bool insertCheapEHSupport(Function &F);
- void splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes);
+ void splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*>&Invokes);
void rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
AllocaInst *InvokeNum, AllocaInst *StackPtr,
SwitchInst *CatchSwitch);
@@ -123,7 +119,6 @@ FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI,
bool LowerInvoke::doInitialization(Module &M) {
const Type *VoidPtrTy =
Type::getInt8PtrTy(M.getContext());
- AbortMessage = 0;
if (useExpensiveEHSupport) {
// Insert a type for the linked list of jump buffers.
unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
@@ -175,68 +170,14 @@ bool LowerInvoke::doInitialization(Module &M) {
// We need the 'write' and 'abort' functions for both models.
AbortFn = M.getOrInsertFunction("abort", Type::getVoidTy(M.getContext()),
(Type *)0);
-#if 0 // "write" is Unix-specific.. code is going away soon anyway.
- WriteFn = M.getOrInsertFunction("write", Type::VoidTy, Type::Int32Ty,
- VoidPtrTy, Type::Int32Ty, (Type *)0);
-#else
- WriteFn = 0;
-#endif
return true;
}
-void LowerInvoke::createAbortMessage(Module *M) {
- if (useExpensiveEHSupport) {
- // The abort message for expensive EH support tells the user that the
- // program 'unwound' without an 'invoke' instruction.
- Constant *Msg =
- ConstantArray::get(M->getContext(),
- "ERROR: Exception thrown, but not caught!\n");
- AbortMessageLength = Msg->getNumOperands()-1; // don't include \0
-
- GlobalVariable *MsgGV = new GlobalVariable(*M, Msg->getType(), true,
- GlobalValue::InternalLinkage,
- Msg, "abortmsg");
- std::vector<Constant*> GEPIdx(2,
- Constant::getNullValue(Type::getInt32Ty(M->getContext())));
- AbortMessage = ConstantExpr::getGetElementPtr(MsgGV, &GEPIdx[0], 2);
- } else {
- // The abort message for cheap EH support tells the user that EH is not
- // enabled.
- Constant *Msg =
- ConstantArray::get(M->getContext(),
- "Exception handler needed, but not enabled."
- "Recompile program with -enable-correct-eh-support.\n");
- AbortMessageLength = Msg->getNumOperands()-1; // don't include \0
-
- GlobalVariable *MsgGV = new GlobalVariable(*M, Msg->getType(), true,
- GlobalValue::InternalLinkage,
- Msg, "abortmsg");
- std::vector<Constant*> GEPIdx(2, Constant::getNullValue(
- Type::getInt32Ty(M->getContext())));
- AbortMessage = ConstantExpr::getGetElementPtr(MsgGV, &GEPIdx[0], 2);
- }
-}
-
-
-void LowerInvoke::writeAbortMessage(Instruction *IB) {
-#if 0
- if (AbortMessage == 0)
- createAbortMessage(IB->getParent()->getParent()->getParent());
-
- // These are the arguments we WANT...
- Value* Args[3];
- Args[0] = ConstantInt::get(Type::Int32Ty, 2);
- Args[1] = AbortMessage;
- Args[2] = ConstantInt::get(Type::Int32Ty, AbortMessageLength);
- (new CallInst(WriteFn, Args, 3, "", IB))->setTailCall();
-#endif
-}
-
bool LowerInvoke::insertCheapEHSupport(Function &F) {
bool Changed = false;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
- std::vector<Value*> CallArgs(II->op_begin(), II->op_end() - 3);
+ SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
// Insert a normal call instruction...
CallInst *NewCall = CallInst::Create(II->getCalledValue(),
CallArgs.begin(), CallArgs.end(),
@@ -257,9 +198,6 @@ bool LowerInvoke::insertCheapEHSupport(Function &F) {
++NumInvokes; Changed = true;
} else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
- // Insert a new call to write(2, AbortMessage, AbortMessageLength);
- writeAbortMessage(UI);
-
// Insert a call to abort()
CallInst::Create(AbortFn, "", UI)->setTailCall();
@@ -320,7 +258,7 @@ void LowerInvoke::rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
CatchSwitch->addCase(InvokeNoC, II->getUnwindDest());
// Insert a normal call instruction.
- std::vector<Value*> CallArgs(II->op_begin(), II->op_end() - 3);
+ SmallVector<Value*,16> CallArgs(II->op_begin(), II->op_end() - 3);
CallInst *NewCall = CallInst::Create(II->getCalledValue(),
CallArgs.begin(), CallArgs.end(), "",
II);
@@ -349,7 +287,7 @@ static void MarkBlocksLiveIn(BasicBlock *BB, std::set<BasicBlock*> &LiveBBs) {
// across the unwind edge. This process also splits all critical edges
// coming out of invoke's.
void LowerInvoke::
-splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
+splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
// First step, split all critical edges from invoke instructions.
for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
InvokeInst *II = Invokes[i];
@@ -371,16 +309,33 @@ splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
++AfterAllocaInsertPt;
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI) {
- // This is always a no-op cast because we're casting AI to AI->getType() so
- // src and destination types are identical. BitCast is the only possibility.
- CastInst *NC = new BitCastInst(
- AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
- AI->replaceAllUsesWith(NC);
- // Normally its is forbidden to replace a CastInst's operand because it
- // could cause the opcode to reflect an illegal conversion. However, we're
- // replacing it here with the same value it was constructed with to simply
- // make NC its user.
- NC->setOperand(0, AI);
+ const Type *Ty = AI->getType();
+ // Aggregate types can't be cast, but are legal argument types, so we have
+ // to handle them differently. We use an extract/insert pair as a
+ // lightweight method to achieve the same goal.
+ if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
+ Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
+ Instruction *NI = InsertValueInst::Create(AI, EI, 0);
+ NI->insertAfter(EI);
+ AI->replaceAllUsesWith(NI);
+ // Set the operand of the instructions back to the AllocaInst.
+ EI->setOperand(0, AI);
+ NI->setOperand(0, AI);
+ } else {
+ // This is always a no-op cast because we're casting AI to AI->getType()
+ // so src and destination types are identical. BitCast is the only
+ // possibility.
+ CastInst *NC = new BitCastInst(
+ AI, AI->getType(), AI->getName()+".tmp", AfterAllocaInsertPt);
+ AI->replaceAllUsesWith(NC);
+ // Set the operand of the cast instruction back to the AllocaInst.
+ // Normally it's forbidden to replace a CastInst's operand because it
+ // could cause the opcode to reflect an illegal conversion. However,
+ // we're replacing it here with the same value it was constructed with.
+ // We do this because the above replaceAllUsesWith() clobbered the
+ // operand, but we want this one to remain.
+ NC->setOperand(0, AI);
+ }
}
// Finally, scan the code looking for instructions with bad live ranges.
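
The extract/insert pair and the self-bitcast above both play the same trick: manufacture a no-op user of the argument, replaceAllUsesWith() it, then restore the no-op's own operand, because RAUW rewrote that use too. A toy model of why the restore step is needed (plain C++; this Value type is illustrative, not LLVM's):

    #include <cassert>
    #include <vector>

    struct Value {
      std::vector<Value**> uses;              // slots that currently refer to us
      void addUse(Value **slot) { uses.push_back(slot); }
      void replaceAllUsesWith(Value *nv) {
        for (Value **slot : uses) { *slot = nv; nv->addUse(slot); }
        uses.clear();
      }
    };

    int main() {
      Value arg;                              // stands in for the argument AI
      Value *use1 = &arg, *use2 = &arg;       // two existing uses of arg
      arg.addUse(&use1); arg.addUse(&use2);

      Value noop;                             // the no-op cast / insertvalue
      Value *noopOp = &arg;                   // the no-op's own operand is arg
      arg.addUse(&noopOp);

      arg.replaceAllUsesWith(&noop);          // clobbers use1, use2, and noopOp
      assert(use1 == &noop && use2 == &noop && noopOp == &noop);

      noopOp = &arg;                          // setOperand(0, AI): restore it
      arg.addUse(&noopOp);
      assert(noopOp == &arg && use1 == &noop);
      return 0;
    }
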
@@ -402,7 +357,7 @@ splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
continue;
// Avoid iterator invalidation by copying users to a temporary vector.
- std::vector<Instruction*> Users;
+ SmallVector<Instruction*,16> Users;
for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
UI != E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
@@ -452,9 +407,9 @@ splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
}
bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
- std::vector<ReturnInst*> Returns;
- std::vector<UnwindInst*> Unwinds;
- std::vector<InvokeInst*> Invokes;
+ SmallVector<ReturnInst*,16> Returns;
+ SmallVector<UnwindInst*,16> Unwinds;
+ SmallVector<InvokeInst*,16> Invokes;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
@@ -502,12 +457,11 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
new AllocaInst(JBLinkTy, 0, Align,
"jblink", F.begin()->begin());
- std::vector<Value*> Idx;
- Idx.push_back(Constant::getNullValue(Type::getInt32Ty(F.getContext())));
- Idx.push_back(ConstantInt::get(Type::getInt32Ty(F.getContext()), 1));
- OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx.begin(), Idx.end(),
+ Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
+ ConstantInt::get(Type::getInt32Ty(F.getContext()), 1) };
+ OldJmpBufPtr = GetElementPtrInst::Create(JmpBuf, &Idx[0], &Idx[2],
"OldBuf",
- EntryBB->getTerminator());
+ EntryBB->getTerminator());
// Copy the JBListHead to the alloca.
Value *OldBuf = new LoadInst(JBListHead, "oldjmpbufptr", true,
@@ -552,7 +506,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
"setjmp.cont");
Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 0);
- Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, Idx.begin(), Idx.end(),
+ Value *JmpBufPtr = GetElementPtrInst::Create(JmpBuf, &Idx[0], &Idx[2],
"TheJmpBuf",
EntryBB->getTerminator());
JmpBufPtr = new BitCastInst(JmpBufPtr,
@@ -605,24 +559,20 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
// Create the block to do the longjmp.
// Get a pointer to the jmpbuf and longjmp.
- std::vector<Value*> Idx;
- Idx.push_back(Constant::getNullValue(Type::getInt32Ty(F.getContext())));
- Idx.push_back(ConstantInt::get(Type::getInt32Ty(F.getContext()), 0));
- Idx[0] = GetElementPtrInst::Create(BufPtr, Idx.begin(), Idx.end(), "JmpBuf",
+ Value *Idx[] = { Constant::getNullValue(Type::getInt32Ty(F.getContext())),
+ ConstantInt::get(Type::getInt32Ty(F.getContext()), 0) };
+ Idx[0] = GetElementPtrInst::Create(BufPtr, &Idx[0], &Idx[2], "JmpBuf",
UnwindBlock);
Idx[0] = new BitCastInst(Idx[0],
Type::getInt8PtrTy(F.getContext()),
"tmp", UnwindBlock);
Idx[1] = ConstantInt::get(Type::getInt32Ty(F.getContext()), 1);
- CallInst::Create(LongJmpFn, Idx.begin(), Idx.end(), "", UnwindBlock);
+ CallInst::Create(LongJmpFn, &Idx[0], &Idx[2], "", UnwindBlock);
new UnreachableInst(F.getContext(), UnwindBlock);
// Set up the term block ("throw without a catch").
new UnreachableInst(F.getContext(), TermBlock);
- // Insert a new call to write(2, AbortMessage, AbortMessageLength);
- writeAbortMessage(TermBlock->getTerminator());
-
// Insert a call to abort()
CallInst::Create(AbortFn, "",
TermBlock->getTerminator())->setTailCall();
diff --git a/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 13f0a28..c0de193 100644
--- a/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -69,11 +69,12 @@ bool llvm::isAllocaPromotable(const AllocaInst *AI) {
// Only allow direct and non-volatile loads and stores...
for (Value::const_use_iterator UI = AI->use_begin(), UE = AI->use_end();
- UI != UE; ++UI) // Loop over all of the uses of the alloca
- if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
+ UI != UE; ++UI) { // Loop over all of the uses of the alloca
+ const User *U = *UI;
+ if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
if (LI->isVolatile())
return false;
- } else if (const StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
+ } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (SI->getOperand(0) == AI)
return false; // Don't allow a store OF the AI, only INTO the AI.
if (SI->isVolatile())
@@ -81,6 +82,7 @@ bool llvm::isAllocaPromotable(const AllocaInst *AI) {
} else {
return false;
}
+ }
return true;
}
@@ -603,9 +605,8 @@ ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
// To determine liveness, we must iterate through the predecessors of blocks
// where the def is live. Blocks are added to the worklist if we need to
// check their predecessors. Start with all the using blocks.
- SmallVector<BasicBlock*, 64> LiveInBlockWorklist;
- LiveInBlockWorklist.insert(LiveInBlockWorklist.end(),
- Info.UsingBlocks.begin(), Info.UsingBlocks.end());
+ SmallVector<BasicBlock*, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
+ Info.UsingBlocks.end());
// If any of the using blocks is also a definition block, check to see if the
// definition occurs before or after the use. If it happens before the use,
@@ -897,6 +898,9 @@ void PromoteMem2Reg::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
// Propagate any debug metadata from the store onto the dbg.value.
if (MDNode *SIMD = SI->getMetadata("dbg"))
DbgVal->setMetadata("dbg", SIMD);
+ // Otherwise propagate debug metadata from dbg.declare.
+ else if (MDNode *MD = DDI->getMetadata("dbg"))
+ DbgVal->setMetadata("dbg", MD);
}
// QueuePhiNode - queues a phi-node to be added to a basic-block for a specific
diff --git a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 9f2209d..27b07d9 100644
--- a/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1377,8 +1377,9 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI) {
bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
BasicBlock *BB = BI->getParent();
Instruction *Cond = dyn_cast<Instruction>(BI->getCondition());
- if (Cond == 0) return false;
-
+ if (Cond == 0 || (!isa<CmpInst>(Cond) && !isa<BinaryOperator>(Cond)) ||
+ Cond->getParent() != BB || !Cond->hasOneUse())
+ return false;
// Only allow this if the condition is a simple instruction that can be
// executed unconditionally. It must be in the same block as the branch, and
@@ -1387,11 +1388,23 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Ignore dbg intrinsics.
while(isa<DbgInfoIntrinsic>(FrontIt))
++FrontIt;
- if ((!isa<CmpInst>(Cond) && !isa<BinaryOperator>(Cond)) ||
- Cond->getParent() != BB || &*FrontIt != Cond || !Cond->hasOneUse()) {
- return false;
+
+ // Allow a single instruction to be hoisted in addition to the compare
+ // that feeds the branch. We later ensure that any values that _it_ uses
+ // were also live in the predecessor, so that we don't unnecessarily create
+ // register pressure or inhibit out-of-order execution.
+ Instruction *BonusInst = 0;
+ if (&*FrontIt != Cond &&
+ FrontIt->hasOneUse() && *FrontIt->use_begin() == Cond &&
+ FrontIt->isSafeToSpeculativelyExecute()) {
+ BonusInst = &*FrontIt;
+ ++FrontIt;
}
+ // Only a single bonus inst is allowed.
+ if (&*FrontIt != Cond)
+ return false;
+
// Make sure the instruction after the condition is the cond branch.
BasicBlock::iterator CondIt = Cond; ++CondIt;
   // Ignore dbg intrinsics.
@@ -1429,6 +1442,44 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
!SafeToMergeTerminators(BI, PBI))
continue;
+ // Ensure that any values used in the bonus instruction are also used
+ // by the terminator of the predecessor. This means that those values
+ // must already have been resolved, so we won't be inhibiting the
+ // out-of-order core by speculating them earlier.
+ if (BonusInst) {
+ // Collect the values used by the bonus inst
+ SmallPtrSet<Value*, 4> UsedValues;
+ for (Instruction::op_iterator OI = BonusInst->op_begin(),
+ OE = BonusInst->op_end(); OI != OE; ++OI) {
+ Value* V = *OI;
+ if (!isa<Constant>(V))
+ UsedValues.insert(V);
+ }
+
+ SmallVector<std::pair<Value*, unsigned>, 4> Worklist;
+ Worklist.push_back(std::make_pair(PBI->getOperand(0), 0));
+
+ // Walk up to four levels back up the use-def chain of the predecessor's
+ // terminator to see if all those values were used. The choice of four
+ // levels is arbitrary, to provide a compile-time-cost bound.
+ while (!Worklist.empty()) {
+ std::pair<Value*, unsigned> Pair = Worklist.back();
+ Worklist.pop_back();
+
+ if (Pair.second >= 4) continue;
+ UsedValues.erase(Pair.first);
+ if (UsedValues.empty()) break;
+
+ if (Instruction* I = dyn_cast<Instruction>(Pair.first)) {
+ for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
+ OI != OE; ++OI)
+ Worklist.push_back(std::make_pair(OI->get(), Pair.second+1));
+ }
+ }
+
+ if (!UsedValues.empty()) return false;
+ }
+
Instruction::BinaryOps Opc;
bool InvertPredCond = false;
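
The depth-four walk above is a standard bounded worklist scan: pop (value, depth) pairs, erase each reached value from the set the bonus instruction needs, and refuse to fold if anything is left unexplained. A self-contained sketch of the same shape in plain C++ (toy Node type and graph, illustrative only):

    #include <cstdio>
    #include <set>
    #include <utility>
    #include <vector>

    struct Node { std::vector<Node*> ops; };

    int main() {
      Node a, b, c, d;
      b.ops = {&a};
      c.ops = {&b};
      d.ops = {&c};                          // root d -> c -> b -> a

      std::set<Node*> usedValues = {&a, &b}; // values the bonus inst needs
      std::vector<std::pair<Node*, unsigned>> worklist;
      worklist.push_back({&d, 0});

      while (!worklist.empty()) {
        std::pair<Node*, unsigned> pair = worklist.back();
        worklist.pop_back();
        if (pair.second >= 4) continue;      // the compile-time-cost bound
        usedValues.erase(pair.first);
        if (usedValues.empty()) break;
        for (Node *op : pair.first->ops)
          worklist.push_back({op, pair.second + 1});
      }
      std::printf(usedValues.empty() ? "all uses covered\n"
                                     : "bail out: uncovered values remain\n");
    }
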
@@ -1457,9 +1508,19 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
PBI->setSuccessor(1, OldTrue);
}
+ // If we have a bonus inst, clone it into the predecessor block.
+ Instruction *NewBonus = 0;
+ if (BonusInst) {
+ NewBonus = BonusInst->clone();
+ PredBlock->getInstList().insert(PBI, NewBonus);
+ NewBonus->takeName(BonusInst);
+ BonusInst->setName(BonusInst->getName()+".old");
+ }
+
// Clone Cond into the predecessor basic block, and or/and the
// two conditions together.
Instruction *New = Cond->clone();
+ if (BonusInst) New->replaceUsesOfWith(BonusInst, NewBonus);
PredBlock->getInstList().insert(PBI, New);
New->takeName(Cond);
Cond->setName(New->getName()+".old");
@@ -1513,17 +1574,19 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
// Okay, we're going to insert the PHI node. Since PBI is not the only
// predecessor, compute the PHI'd conditional value for all of the preds.
// Any predecessor where the condition is not computable we keep symbolic.
- for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
- if ((PBI = dyn_cast<BranchInst>((*PI)->getTerminator())) &&
+ for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+ BasicBlock *P = *PI;
+ if ((PBI = dyn_cast<BranchInst>(P->getTerminator())) &&
PBI != BI && PBI->isConditional() &&
PBI->getCondition() == BI->getCondition() &&
PBI->getSuccessor(0) != PBI->getSuccessor(1)) {
bool CondIsTrue = PBI->getSuccessor(0) == BB;
NewPN->addIncoming(ConstantInt::get(Type::getInt1Ty(BB->getContext()),
- CondIsTrue), *PI);
+ CondIsTrue), P);
} else {
- NewPN->addIncoming(BI->getCondition(), *PI);
+ NewPN->addIncoming(BI->getCondition(), P);
}
+ }
BI->setCondition(NewPN);
return true;
@@ -1697,10 +1760,11 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
SmallVector<BasicBlock*, 8> UncondBranchPreds;
SmallVector<BranchInst*, 8> CondBranchPreds;
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
- TerminatorInst *PTI = (*PI)->getTerminator();
+ BasicBlock *P = *PI;
+ TerminatorInst *PTI = P->getTerminator();
if (BranchInst *BI = dyn_cast<BranchInst>(PTI)) {
if (BI->isUnconditional())
- UncondBranchPreds.push_back(*PI);
+ UncondBranchPreds.push_back(P);
else
CondBranchPreds.push_back(BI);
}
diff --git a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
index 87ce631..3f6a90c 100644
--- a/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/contrib/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -28,7 +28,7 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM) {
// DenseMap. This includes any recursive calls to MapValue.
// Global values and non-function-local metadata do not need to be seeded into
- // the ValueMap if they are using the identity mapping.
+ // the VM if they are using the identity mapping.
if (isa<GlobalValue>(V) || isa<InlineAsm>(V) || isa<MDString>(V) ||
(isa<MDNode>(V) && !cast<MDNode>(V)->isFunctionLocal()))
return VMSlot = const_cast<Value*>(V);
@@ -45,7 +45,7 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM) {
if (isa<ConstantInt>(C) || isa<ConstantFP>(C) ||
isa<ConstantPointerNull>(C) || isa<ConstantAggregateZero>(C) ||
- isa<UndefValue>(C) || isa<MDString>(C))
+ isa<UndefValue>(C))
return VMSlot = C; // Primitive constants map directly
if (ConstantArray *CA = dyn_cast<ConstantArray>(C)) {
@@ -125,11 +125,11 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM) {
}
/// RemapInstruction - Convert the instruction operands from referencing the
-/// current values into those specified by ValueMap.
+/// current values into those specified by VMap.
///
-void llvm::RemapInstruction(Instruction *I, ValueToValueMapTy &ValueMap) {
+void llvm::RemapInstruction(Instruction *I, ValueToValueMapTy &VMap) {
for (User::op_iterator op = I->op_begin(), E = I->op_end(); op != E; ++op) {
- Value *V = MapValue(*op, ValueMap);
+ Value *V = MapValue(*op, VMap);
assert(V && "Referenced value not in value map!");
*op = V;
}
diff --git a/contrib/llvm/lib/Transforms/Utils/ValueMapper.h b/contrib/llvm/lib/Transforms/Utils/ValueMapper.h
index d61c24c..f4ff643 100644
--- a/contrib/llvm/lib/Transforms/Utils/ValueMapper.h
+++ b/contrib/llvm/lib/Transforms/Utils/ValueMapper.h
@@ -15,12 +15,12 @@
#ifndef VALUEMAPPER_H
#define VALUEMAPPER_H
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ValueMap.h"
namespace llvm {
class Value;
class Instruction;
- typedef DenseMap<const Value *, Value *> ValueToValueMapTy;
+ typedef ValueMap<const Value *, Value *> ValueToValueMapTy;
Value *MapValue(const Value *V, ValueToValueMapTy &VM);
void RemapInstruction(Instruction *I, ValueToValueMapTy &VM);
diff --git a/contrib/llvm/lib/VMCore/AsmWriter.cpp b/contrib/llvm/lib/VMCore/AsmWriter.cpp
index e48c026..09b8aa5 100644
--- a/contrib/llvm/lib/VMCore/AsmWriter.cpp
+++ b/contrib/llvm/lib/VMCore/AsmWriter.cpp
@@ -70,8 +70,7 @@ static const Module *getModuleFromVal(const Value *V) {
// PrintEscapedString - Print each character of the specified string, escaping
// it if it is not printable or if it is an escape char.
-static void PrintEscapedString(const StringRef &Name,
- raw_ostream &Out) {
+static void PrintEscapedString(StringRef Name, raw_ostream &Out) {
for (unsigned i = 0, e = Name.size(); i != e; ++i) {
unsigned char C = Name[i];
if (isprint(C) && C != '\\' && C != '"')
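
The signature change above (const StringRef & to plain StringRef) reflects that a string view is just a pointer plus a length, so copying it is as cheap as passing a reference and drops an indirection. A stand-in sketch of the by-value style with a toy view type (not llvm::StringRef itself):

    #include <cstdio>
    #include <cstring>

    struct StrView {                         // toy stand-in for StringRef
      const char *data;
      std::size_t size;
      StrView(const char *s) : data(s), size(std::strlen(s)) {}
    };

    static void printEscaped(StrView name) { // by value, not const&
      for (std::size_t i = 0; i != name.size; ++i) {
        unsigned char c = name.data[i];
        if (c >= 0x20 && c < 0x7f && c != '\\' && c != '"')
          std::putchar(c);
        else
          std::printf("\\%02X", c);          // escape, as PrintEscapedString does
      }
      std::putchar('\n');
    }

    int main() { printEscaped("tab\there \"quoted\""); }
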
@@ -91,8 +90,7 @@ enum PrefixType {
/// PrintLLVMName - Turn the specified name into an 'LLVM name', which is either
/// prefixed with % (if the string only contains simple characters) or is
/// surrounded with ""'s (if it has special chars in it). Print it out.
-static void PrintLLVMName(raw_ostream &OS, const StringRef &Name,
- PrefixType Prefix) {
+static void PrintLLVMName(raw_ostream &OS, StringRef Name, PrefixType Prefix) {
assert(Name.data() && "Cannot get empty name!");
switch (Prefix) {
default: llvm_unreachable("Bad prefix!");
@@ -856,8 +854,9 @@ static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
}
}
-static void WriteConstantInt(raw_ostream &Out, const Constant *CV,
- TypePrinting &TypePrinter, SlotTracker *Machine) {
+static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
+ TypePrinting &TypePrinter,
+ SlotTracker *Machine) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
if (CI->getType()->isIntegerTy(1)) {
Out << (CI->getZExtValue() ? "true" : "false");
@@ -1148,7 +1147,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V,
const Constant *CV = dyn_cast<Constant>(V);
if (CV && !isa<GlobalValue>(CV)) {
assert(TypePrinter && "Constants require TypePrinting!");
- WriteConstantInt(Out, CV, *TypePrinter, Machine);
+ WriteConstantInternal(Out, CV, *TypePrinter, Machine);
return;
}
@@ -1419,6 +1418,9 @@ static void PrintLinkage(GlobalValue::LinkageTypes LT,
case GlobalValue::ExternalLinkage: break;
case GlobalValue::PrivateLinkage: Out << "private "; break;
case GlobalValue::LinkerPrivateLinkage: Out << "linker_private "; break;
+ case GlobalValue::LinkerPrivateWeakLinkage:
+ Out << "linker_private_weak ";
+ break;
case GlobalValue::InternalLinkage: Out << "internal "; break;
case GlobalValue::LinkOnceAnyLinkage: Out << "linkonce "; break;
case GlobalValue::LinkOnceODRLinkage: Out << "linkonce_odr "; break;
@@ -1469,8 +1471,11 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) {
writeOperand(GV->getInitializer(), false);
}
- if (GV->hasSection())
- Out << ", section \"" << GV->getSection() << '"';
+ if (GV->hasSection()) {
+ Out << ", section \"";
+ PrintEscapedString(GV->getSection(), Out);
+ Out << '"';
+ }
if (GV->getAlignment())
Out << ", align " << GV->getAlignment();
@@ -1628,8 +1633,11 @@ void AssemblyWriter::printFunction(const Function *F) {
Attributes FnAttrs = Attrs.getFnAttributes();
if (FnAttrs != Attribute::None)
Out << ' ' << Attribute::getAsString(Attrs.getFnAttributes());
- if (F->hasSection())
- Out << " section \"" << F->getSection() << '"';
+ if (F->hasSection()) {
+ Out << " section \"";
+ PrintEscapedString(F->getSection(), Out);
+ Out << '"';
+ }
if (F->getAlignment())
Out << " align " << F->getAlignment();
if (F->hasGC())
@@ -1854,6 +1862,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
default: Out << " cc" << CI->getCallingConv(); break;
}
+ Operand = CI->getCalledValue();
const PointerType *PTy = cast<PointerType>(Operand->getType());
const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
const Type *RetTy = FTy->getReturnType();
@@ -1877,10 +1886,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperand(Operand, true);
}
Out << '(';
- for (unsigned op = 1, Eop = I.getNumOperands(); op < Eop; ++op) {
- if (op > 1)
+ for (unsigned op = 0, Eop = CI->getNumArgOperands(); op < Eop; ++op) {
+ if (op > 0)
Out << ", ";
- writeParamOperand(I.getOperand(op), PAL.getParamAttributes(op));
+ writeParamOperand(CI->getArgOperand(op), PAL.getParamAttributes(op + 1));
}
Out << ')';
if (PAL.getFnAttributes() != Attribute::None)
@@ -1925,10 +1934,10 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
writeOperand(Operand, true);
}
Out << '(';
- for (unsigned op = 0, Eop = I.getNumOperands() - 3; op < Eop; ++op) {
+ for (unsigned op = 0, Eop = II->getNumArgOperands(); op < Eop; ++op) {
if (op)
Out << ", ";
- writeParamOperand(I.getOperand(op), PAL.getParamAttributes(op + 1));
+ writeParamOperand(II->getArgOperand(op), PAL.getParamAttributes(op + 1));
}
Out << ')';
@@ -2027,7 +2036,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
}
static void WriteMDNodeComment(const MDNode *Node,
- formatted_raw_ostream &Out) {
+ formatted_raw_ostream &Out) {
if (Node->getNumOperands() < 1)
return;
ConstantInt *CI = dyn_cast_or_null<ConstantInt>(Node->getOperand(0));
@@ -2119,7 +2128,7 @@ void Value::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const {
} else if (const MDNode *N = dyn_cast<MDNode>(this)) {
const Function *F = N->getFunction();
SlotTracker SlotTable(F);
- AssemblyWriter W(OS, SlotTable, F ? getModuleFromVal(F) : 0, AAW);
+ AssemblyWriter W(OS, SlotTable, F ? F->getParent() : 0, AAW);
W.printMDNodeBody(N);
} else if (const NamedMDNode *N = dyn_cast<NamedMDNode>(this)) {
SlotTracker SlotTable(N->getParent());
@@ -2129,7 +2138,7 @@ void Value::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const {
TypePrinting TypePrinter;
TypePrinter.print(C->getType(), OS);
OS << ' ';
- WriteConstantInt(OS, C, TypePrinter, 0);
+ WriteConstantInternal(OS, C, TypePrinter, 0);
} else if (isa<InlineAsm>(this) || isa<MDString>(this) ||
isa<Argument>(this)) {
WriteAsOperand(OS, this, true, 0);
diff --git a/contrib/llvm/lib/VMCore/AutoUpgrade.cpp b/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
index 0144210..dc39024 100644
--- a/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
+++ b/contrib/llvm/lib/VMCore/AutoUpgrade.cpp
@@ -18,6 +18,7 @@
#include "llvm/Module.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/IRBuilder.h"
#include <cstring>
@@ -314,7 +315,8 @@ bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Function *F = CI->getCalledFunction();
LLVMContext &C = CI->getContext();
-
+ ImmutableCallSite CS(CI);
+
assert(F && "CallInst has no function associated with it.");
if (!NewFn) {
@@ -344,11 +346,11 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
if (isLoadH || isLoadL || isMovL || isMovSD || isShufPD ||
isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
std::vector<Constant*> Idxs;
- Value *Op0 = CI->getOperand(1);
+ Value *Op0 = CI->getArgOperand(0);
ShuffleVectorInst *SI = NULL;
if (isLoadH || isLoadL) {
Value *Op1 = UndefValue::get(Op0->getType());
- Value *Addr = new BitCastInst(CI->getOperand(2),
+ Value *Addr = new BitCastInst(CI->getArgOperand(1),
Type::getDoublePtrTy(C),
"upgraded.", CI);
Value *Load = new LoadInst(Addr, "upgraded.", false, 8, CI);
@@ -381,7 +383,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
SI = new ShuffleVectorInst(ZeroV, Op0, Mask, "upgraded.", CI);
} else if (isMovSD ||
isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
- Value *Op1 = CI->getOperand(2);
+ Value *Op1 = CI->getArgOperand(1);
if (isMovSD) {
Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
@@ -395,8 +397,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Value *Mask = ConstantVector::get(Idxs);
SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
} else if (isShufPD) {
- Value *Op1 = CI->getOperand(2);
- unsigned MaskVal = cast<ConstantInt>(CI->getOperand(3))->getZExtValue();
+ Value *Op1 = CI->getArgOperand(1);
+ unsigned MaskVal = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), MaskVal & 1));
Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C),
((MaskVal >> 1) & 1)+2));
@@ -416,8 +418,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
CI->eraseFromParent();
} else if (F->getName() == "llvm.x86.sse41.pmulld") {
// Upgrade this set of intrinsics into vector multiplies.
- Instruction *Mul = BinaryOperator::CreateMul(CI->getOperand(1),
- CI->getOperand(2),
+ Instruction *Mul = BinaryOperator::CreateMul(CI->getArgOperand(0),
+ CI->getArgOperand(1),
CI->getName(),
CI);
// Fix up all the uses with our new multiply.
@@ -427,9 +429,9 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Remove upgraded multiply.
CI->eraseFromParent();
} else if (F->getName() == "llvm.x86.ssse3.palign.r") {
- Value *Op1 = CI->getOperand(1);
- Value *Op2 = CI->getOperand(2);
- Value *Op3 = CI->getOperand(3);
+ Value *Op1 = CI->getArgOperand(0);
+ Value *Op2 = CI->getArgOperand(1);
+ Value *Op3 = CI->getArgOperand(2);
unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
Value *Rep;
IRBuilder<> Builder(C);
@@ -483,9 +485,9 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
CI->eraseFromParent();
} else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
- Value *Op1 = CI->getOperand(1);
- Value *Op2 = CI->getOperand(2);
- Value *Op3 = CI->getOperand(3);
+ Value *Op1 = CI->getArgOperand(0);
+ Value *Op2 = CI->getArgOperand(1);
+ Value *Op3 = CI->getArgOperand(2);
unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
Value *Rep;
IRBuilder<> Builder(C);
@@ -556,10 +558,10 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
case Intrinsic::x86_mmx_psrl_w: {
Value *Operands[2];
- Operands[0] = CI->getOperand(1);
+ Operands[0] = CI->getArgOperand(0);
// Cast the second parameter to the correct type.
- BitCastInst *BC = new BitCastInst(CI->getOperand(2),
+ BitCastInst *BC = new BitCastInst(CI->getArgOperand(1),
NewFn->getFunctionType()->getParamType(1),
"upgraded.", CI);
Operands[1] = BC;
@@ -583,9 +585,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
case Intrinsic::ctlz:
case Intrinsic::ctpop:
case Intrinsic::cttz: {
- // Build a small vector of the 1..(N-1) operands, which are the
- // parameters.
- SmallVector<Value*, 8> Operands(CI->op_begin()+1, CI->op_end());
+ // Build a small vector of the original arguments.
+ SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
// Construct a new CallInst
CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
@@ -620,7 +621,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
case Intrinsic::eh_selector:
case Intrinsic::eh_typeid_for: {
// Only the return type changed.
- SmallVector<Value*, 8> Operands(CI->op_begin() + 1, CI->op_end());
+ SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
"upgraded." + CI->getName(), CI);
NewCI->setTailCall(CI->isTailCall());
@@ -643,8 +644,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
case Intrinsic::memset: {
// Add isVolatile
const llvm::Type *I1Ty = llvm::Type::getInt1Ty(CI->getContext());
- Value *Operands[5] = { CI->getOperand(1), CI->getOperand(2),
- CI->getOperand(3), CI->getOperand(4),
+ Value *Operands[5] = { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), CI->getArgOperand(3),
llvm::ConstantInt::get(I1Ty, 0) };
CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+5,
CI->getName(), CI);
@@ -726,7 +727,8 @@ void llvm::CheckDebugInfoIntrinsics(Module *M) {
if (Function *Declare = M->getFunction("llvm.dbg.declare")) {
if (!Declare->use_empty()) {
DbgDeclareInst *DDI = cast<DbgDeclareInst>(Declare->use_back());
- if (!isa<MDNode>(DDI->getOperand(1)) ||!isa<MDNode>(DDI->getOperand(2))) {
+ if (!isa<MDNode>(DDI->getArgOperand(0)) ||
+ !isa<MDNode>(DDI->getArgOperand(1))) {
while (!Declare->use_empty()) {
CallInst *CI = cast<CallInst>(Declare->use_back());
CI->eraseFromParent();
diff --git a/contrib/llvm/lib/VMCore/ConstantFold.cpp b/contrib/llvm/lib/VMCore/ConstantFold.cpp
index 549977c..3567266 100644
--- a/contrib/llvm/lib/VMCore/ConstantFold.cpp
+++ b/contrib/llvm/lib/VMCore/ConstantFold.cpp
@@ -658,7 +658,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
}
}
// Handle an offsetof-like expression.
- if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()){
+ if (Ty->isStructTy() || Ty->isArrayTy()) {
if (Constant *C = getFoldedOffsetOf(Ty, CE->getOperand(2),
DestTy, false))
return C;
@@ -1817,8 +1817,15 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
return Constant::getAllOnesValue(ResultTy);
// Handle some degenerate cases first
- if (isa<UndefValue>(C1) || isa<UndefValue>(C2))
+ if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) {
+ // For EQ and NE, we can always pick a value for the undef to make the
+ // predicate pass or fail, so we can return undef.
+ if (ICmpInst::isEquality(ICmpInst::Predicate(pred)))
+ return UndefValue::get(ResultTy);
+ // Otherwise, pick the same value as the non-undef operand, and fold
+ // it to true or false.
return ConstantInt::get(ResultTy, CmpInst::isTrueWhenEqual(pred));
+ }
// No compile-time operations on this type yet.
if (C1->getType()->isPPC_FP128Ty())
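
The new undef-compare rule above is worth spelling out: for eq/ne an undef operand can be chosen to make the predicate either pass or fail, so the whole compare folds to undef; for the other predicates, choosing the undef equal to the other operand pins the answer to whatever the predicate says about equal values. A toy folder in plain C++ (enum names illustrative, not LLVM's):

    #include <cstdio>

    enum Pred { EQ, NE, ULT, UGE };
    enum Folded { False, True, Undef };

    bool isEquality(Pred p) { return p == EQ || p == NE; }
    bool isTrueWhenEqual(Pred p) { return p == EQ || p == UGE; }

    Folded foldCmpWithUndef(Pred p) {
      if (isEquality(p))
        return Undef;                        // undef can make it pass or fail
      return isTrueWhenEqual(p) ? True : False;  // treat undef == other operand
    }

    int main() {
      const char *name[] = {"false", "true", "undef"};
      std::printf("eq  -> %s\n", name[foldCmpWithUndef(EQ)]);   // undef
      std::printf("ult -> %s\n", name[foldCmpWithUndef(ULT)]);  // false: x < x
      std::printf("uge -> %s\n", name[foldCmpWithUndef(UGE)]);  // true:  x >= x
    }
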
@@ -2194,7 +2201,7 @@ Constant *llvm::ConstantFoldGetElementPtr(Constant *C,
}
NewIndices.push_back(Combined);
- NewIndices.insert(NewIndices.end(), Idxs+1, Idxs+NumIdx);
+ NewIndices.append(Idxs+1, Idxs+NumIdx);
return (inBounds && cast<GEPOperator>(CE)->isInBounds()) ?
ConstantExpr::getInBoundsGetElementPtr(CE->getOperand(0),
&NewIndices[0],
diff --git a/contrib/llvm/lib/VMCore/Core.cpp b/contrib/llvm/lib/VMCore/Core.cpp
index bbf1375..ca1a399 100644
--- a/contrib/llvm/lib/VMCore/Core.cpp
+++ b/contrib/llvm/lib/VMCore/Core.cpp
@@ -1058,6 +1058,8 @@ LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) {
return LLVMPrivateLinkage;
case GlobalValue::LinkerPrivateLinkage:
return LLVMLinkerPrivateLinkage;
+ case GlobalValue::LinkerPrivateWeakLinkage:
+ return LLVMLinkerPrivateWeakLinkage;
case GlobalValue::DLLImportLinkage:
return LLVMDLLImportLinkage;
case GlobalValue::DLLExportLinkage:
@@ -1108,6 +1110,9 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) {
case LLVMLinkerPrivateLinkage:
GV->setLinkage(GlobalValue::LinkerPrivateLinkage);
break;
+ case LLVMLinkerPrivateWeakLinkage:
+ GV->setLinkage(GlobalValue::LinkerPrivateWeakLinkage);
+ break;
case LLVMDLLImportLinkage:
GV->setLinkage(GlobalValue::DLLImportLinkage);
break;
@@ -2205,15 +2210,14 @@ LLVMBool LLVMCreateMemoryBufferWithContentsOfFile(
LLVMBool LLVMCreateMemoryBufferWithSTDIN(LLVMMemoryBufferRef *OutMemBuf,
char **OutMessage) {
- MemoryBuffer *MB = MemoryBuffer::getSTDIN();
- if (!MB->getBufferSize()) {
- delete MB;
- *OutMessage = strdup("stdin is empty.");
- return 1;
+ std::string Error;
+ if (MemoryBuffer *MB = MemoryBuffer::getSTDIN(&Error)) {
+ *OutMemBuf = wrap(MB);
+ return 0;
}
- *OutMemBuf = wrap(MB);
- return 0;
+ *OutMessage = strdup(Error.c_str());
+ return 1;
}
void LLVMDisposeMemoryBuffer(LLVMMemoryBufferRef MemBuf) {
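
For reference, a hedged usage sketch of the reworked C API above (assuming that era's llvm-c headers): a nonzero return signals failure, and *OutMessage then holds a strdup'ed string the caller must free.

    #include <llvm-c/Core.h>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      LLVMMemoryBufferRef buf;
      char *msg = nullptr;
      if (LLVMCreateMemoryBufferWithSTDIN(&buf, &msg)) {  // nonzero == failure
        std::fprintf(stderr, "stdin read failed: %s\n", msg);
        std::free(msg);                      // the API strdup'ed it for us
        return 1;
      }
      LLVMDisposeMemoryBuffer(buf);
      return 0;
    }
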
diff --git a/contrib/llvm/lib/VMCore/Instruction.cpp b/contrib/llvm/lib/VMCore/Instruction.cpp
index a37fe07..9792ada 100644
--- a/contrib/llvm/lib/VMCore/Instruction.cpp
+++ b/contrib/llvm/lib/VMCore/Instruction.cpp
@@ -286,9 +286,10 @@ bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
for (const_use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    // PHI nodes use values in the corresponding predecessor block. For other
// instructions, just check to see whether the parent of the use matches up.
- const PHINode *PN = dyn_cast<PHINode>(*UI);
+ const User *U = *UI;
+ const PHINode *PN = dyn_cast<PHINode>(U);
if (PN == 0) {
- if (cast<Instruction>(*UI)->getParent() != BB)
+ if (cast<Instruction>(U)->getParent() != BB)
return true;
continue;
}
@@ -401,12 +402,20 @@ bool Instruction::isSafeToSpeculativelyExecute() const {
return false;
// Note that it is not safe to speculate into a malloc'd region because
// malloc may return null.
- if (isa<AllocaInst>(getOperand(0)))
+ // It's also not safe to follow a bitcast, for example:
+ // bitcast i8* (alloca i8) to i32*
+ // would result in a 4-byte load from a 1-byte alloca.
+ Value *Op0 = getOperand(0);
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0)) {
+ // TODO: it's safe to do this for any GEP with constant indices that
+ // compute inside the allocated type, but not for any inbounds gep.
+ if (GEP->hasAllZeroIndices())
+ Op0 = GEP->getPointerOperand();
+ }
+ if (isa<AllocaInst>(Op0))
return true;
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(getOperand(0)))
return !GV->hasExternalWeakLinkage();
- // FIXME: Handle cases involving GEPs. We have to be careful because
- // a load of a out-of-bounds GEP has undefined behavior.
return false;
}
case Call:
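
The hunk above only peels a GEP whose indices are all zero, because such a GEP cannot move or widen the address, while the bitcast example in the comment shows how a casted pointer can turn a 1-byte alloca into a 4-byte load. A toy version of the check in plain C++ (illustrative types, mirroring the conservative code rather than the TODO):

    #include <cstdio>
    #include <vector>

    struct Ptr {
      bool isAlloca = false;
      Ptr *base = nullptr;                   // set when this is a GEP result
      std::vector<int> indices;
      bool hasAllZeroIndices() const {
        for (int i : indices) if (i != 0) return false;
        return true;
      }
    };

    bool safeToSpeculateLoad(const Ptr *p) {
      if (p->base && p->hasAllZeroIndices())
        p = p->base;                         // a zero-index GEP is a no-op address
      return p->isAlloca;                    // only a known alloca is non-null
    }

    int main() {
      Ptr alloca; alloca.isAlloca = true;
      Ptr gep0;  gep0.base = &alloca; gep0.indices = {0, 0};
      Ptr gep1;  gep1.base = &alloca; gep1.indices = {0, 1};
      std::printf("%d %d %d\n", safeToSpeculateLoad(&alloca),
                  safeToSpeculateLoad(&gep0), safeToSpeculateLoad(&gep1));
      // prints: 1 1 0
    }
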
@@ -421,6 +430,7 @@ bool Instruction::isSafeToSpeculativelyExecute() const {
case Store:
case Ret:
case Br:
+ case IndirectBr:
case Switch:
case Unwind:
case Unreachable:
diff --git a/contrib/llvm/lib/VMCore/Instructions.cpp b/contrib/llvm/lib/VMCore/Instructions.cpp
index f64b220..c13696f 100644
--- a/contrib/llvm/lib/VMCore/Instructions.cpp
+++ b/contrib/llvm/lib/VMCore/Instructions.cpp
@@ -33,7 +33,9 @@ using namespace llvm;
User::op_iterator CallSite::getCallee() const {
Instruction *II(getInstruction());
return isCall()
- ? cast<CallInst>(II)->op_begin()
+ ? (CallInst::ArgOffset
+ ? cast</*FIXME: CallInst*/User>(II)->op_begin()
+ : cast</*FIXME: CallInst*/User>(II)->op_end() - 1)
: cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Function
}
@@ -231,8 +233,7 @@ CallInst::~CallInst() {
void CallInst::init(Value *Func, Value* const *Params, unsigned NumParams) {
assert(NumOperands == NumParams+1 && "NumOperands not set up?");
- Use *OL = OperandList;
- OL[0] = Func;
+ Op<ArgOffset -1>() = Func;
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
@@ -245,16 +246,15 @@ void CallInst::init(Value *Func, Value* const *Params, unsigned NumParams) {
assert((i >= FTy->getNumParams() ||
FTy->getParamType(i) == Params[i]->getType()) &&
"Calling a function with a bad signature!");
- OL[i+1] = Params[i];
+ OperandList[i + ArgOffset] = Params[i];
}
}
void CallInst::init(Value *Func, Value *Actual1, Value *Actual2) {
assert(NumOperands == 3 && "NumOperands not set up?");
- Use *OL = OperandList;
- OL[0] = Func;
- OL[1] = Actual1;
- OL[2] = Actual2;
+ Op<ArgOffset -1>() = Func;
+ Op<ArgOffset + 0>() = Actual1;
+ Op<ArgOffset + 1>() = Actual2;
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
@@ -273,9 +273,8 @@ void CallInst::init(Value *Func, Value *Actual1, Value *Actual2) {
void CallInst::init(Value *Func, Value *Actual) {
assert(NumOperands == 2 && "NumOperands not set up?");
- Use *OL = OperandList;
- OL[0] = Func;
- OL[1] = Actual;
+ Op<ArgOffset -1>() = Func;
+ Op<ArgOffset + 0>() = Actual;
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
@@ -291,8 +290,7 @@ void CallInst::init(Value *Func, Value *Actual) {
void CallInst::init(Value *Func) {
assert(NumOperands == 1 && "NumOperands not set up?");
- Use *OL = OperandList;
- OL[0] = Func;
+ Op<ArgOffset -1>() = Func;
const FunctionType *FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
@@ -473,9 +471,10 @@ static Instruction *createMalloc(Instruction *InsertBefore,
Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
const Type *IntPtrTy, const Type *AllocTy,
Value *AllocSize, Value *ArraySize,
+ Function * MallocF,
const Twine &Name) {
return createMalloc(InsertBefore, NULL, IntPtrTy, AllocTy, AllocSize,
- ArraySize, NULL, Name);
+ ArraySize, MallocF, Name);
}
/// CreateMalloc - Generate the IR for a call to malloc:
@@ -527,8 +526,8 @@ static Instruction* createFree(Value* Source, Instruction *InsertBefore,
}
/// CreateFree - Generate the IR for a call to the builtin free function.
-void CallInst::CreateFree(Value* Source, Instruction *InsertBefore) {
- createFree(Source, InsertBefore, NULL);
+Instruction * CallInst::CreateFree(Value* Source, Instruction *InsertBefore) {
+ return createFree(Source, InsertBefore, NULL);
}
/// CreateFree - Generate the IR for a call to the builtin free function.
@@ -828,8 +827,8 @@ static Value *getAISize(LLVMContext &Context, Value *Amt) {
else {
assert(!isa<BasicBlock>(Amt) &&
"Passed basic block into allocation size parameter! Use other ctor");
- assert(Amt->getType()->isIntegerTy(32) &&
- "Allocation array size is not a 32-bit integer!");
+ assert(Amt->getType()->isIntegerTy() &&
+ "Allocation array size is not an integer!");
}
return Amt;
}
@@ -1456,7 +1455,7 @@ void InsertValueInst::init(Value *Agg, Value *Val, const unsigned *Idx,
Op<0>() = Agg;
Op<1>() = Val;
- Indices.insert(Indices.end(), Idx, Idx + NumIdx);
+ Indices.append(Idx, Idx + NumIdx);
setName(Name);
}
@@ -1509,7 +1508,7 @@ void ExtractValueInst::init(const unsigned *Idx, unsigned NumIdx,
const Twine &Name) {
assert(NumOperands == 1 && "NumOperands not initialized?");
- Indices.insert(Indices.end(), Idx, Idx + NumIdx);
+ Indices.append(Idx, Idx + NumIdx);
setName(Name);
}
@@ -1911,9 +1910,12 @@ bool CastInst::isLosslessCast() const {
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
 /// # ptrtoint i32* %x to i32     ; on 32-bit platforms only
-/// @brief Determine if a cast is a no-op.
-bool CastInst::isNoopCast(const Type *IntPtrTy) const {
- switch (getOpcode()) {
+/// @brief Determine if the described cast is a no-op.
+bool CastInst::isNoopCast(Instruction::CastOps Opcode,
+ const Type *SrcTy,
+ const Type *DestTy,
+ const Type *IntPtrTy) {
+ switch (Opcode) {
default:
assert(!"Invalid CastOp");
case Instruction::Trunc:
@@ -1930,13 +1932,18 @@ bool CastInst::isNoopCast(const Type *IntPtrTy) const {
return true; // BitCast never modifies bits.
case Instruction::PtrToInt:
return IntPtrTy->getScalarSizeInBits() ==
- getType()->getScalarSizeInBits();
+ DestTy->getScalarSizeInBits();
case Instruction::IntToPtr:
return IntPtrTy->getScalarSizeInBits() ==
- getOperand(0)->getType()->getScalarSizeInBits();
+ SrcTy->getScalarSizeInBits();
}
}
+/// @brief Determine if a cast is a no-op.
+bool CastInst::isNoopCast(const Type *IntPtrTy) const {
+ return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
+}
+
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
@@ -1999,6 +2006,14 @@ unsigned CastInst::isEliminableCastPair(
{ 99,99,99,99,99,99,99,99,99,13,99,12 }, // IntToPtr |
{ 5, 5, 5, 6, 6, 5, 5, 6, 6,11, 5, 1 }, // BitCast -+
};
+
+ // If either of the casts are a bitcast from scalar to vector, disallow the
+ // merging.
+ if ((firstOp == Instruction::BitCast &&
+ isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
+ (secondOp == Instruction::BitCast &&
+ isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
+ return 0; // Disallowed
int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
[secondOp-Instruction::CastOpsBegin];
diff --git a/contrib/llvm/lib/VMCore/IntrinsicInst.cpp b/contrib/llvm/lib/VMCore/IntrinsicInst.cpp
index c37d5b0..ac8ec20 100644
--- a/contrib/llvm/lib/VMCore/IntrinsicInst.cpp
+++ b/contrib/llvm/lib/VMCore/IntrinsicInst.cpp
@@ -54,7 +54,7 @@ Value *DbgInfoIntrinsic::StripCast(Value *C) {
///
Value *DbgDeclareInst::getAddress() const {
- if (MDNode* MD = cast_or_null<MDNode>(getOperand(1)))
+ if (MDNode* MD = cast_or_null<MDNode>(getArgOperand(0)))
return MD->getOperand(0);
else
return NULL;
@@ -65,9 +65,9 @@ Value *DbgDeclareInst::getAddress() const {
///
const Value *DbgValueInst::getValue() const {
- return cast<MDNode>(getOperand(1))->getOperand(0);
+ return cast<MDNode>(getArgOperand(0))->getOperand(0);
}
Value *DbgValueInst::getValue() {
- return cast<MDNode>(getOperand(1))->getOperand(0);
+ return cast<MDNode>(getArgOperand(0))->getOperand(0);
}
diff --git a/contrib/llvm/lib/VMCore/Metadata.cpp b/contrib/llvm/lib/VMCore/Metadata.cpp
index b894ea3..3100d4a 100644
--- a/contrib/llvm/lib/VMCore/Metadata.cpp
+++ b/contrib/llvm/lib/VMCore/Metadata.cpp
@@ -78,7 +78,8 @@ void MDNodeOperand::allUsesReplacedWith(Value *NV) {
 /// getOperandPtr - Helper function to get the MDNodeOperands co-allocated on
/// the end of the MDNode.
static MDNodeOperand *getOperandPtr(MDNode *N, unsigned Op) {
- assert(Op < N->getNumOperands() && "Invalid operand number");
+ // Use <= instead of < to permit a one-past-the-end address.
+ assert(Op <= N->getNumOperands() && "Invalid operand number");
return reinterpret_cast<MDNodeOperand*>(N+1)+Op;
}
@@ -133,6 +134,7 @@ static const Function *getFunctionForValue(Value *V) {
static const Function *assertLocalFunction(const MDNode *N) {
if (!N->isFunctionLocal()) return 0;
+ // FIXME: This does not handle cyclic function local metadata.
const Function *F = 0, *NewF = 0;
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
if (Value *V = N->getOperand(i)) {
diff --git a/contrib/llvm/lib/VMCore/Module.cpp b/contrib/llvm/lib/VMCore/Module.cpp
index 94840f0..38a51df 100644
--- a/contrib/llvm/lib/VMCore/Module.cpp
+++ b/contrib/llvm/lib/VMCore/Module.cpp
@@ -17,6 +17,7 @@
#include "llvm/DerivedTypes.h"
#include "llvm/GVMaterializer.h"
#include "llvm/LLVMContext.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/LeakDetector.h"
@@ -311,9 +312,11 @@ GlobalAlias *Module::getNamedAlias(StringRef Name) const {
/// getNamedMetadata - Return the first NamedMDNode in the module with the
/// specified name. This method returns null if a NamedMDNode with the
-//// specified name is not found.
-NamedMDNode *Module::getNamedMetadata(StringRef Name) const {
- return NamedMDSymTab->lookup(Name);
+/// specified name is not found.
+NamedMDNode *Module::getNamedMetadata(const Twine &Name) const {
+ SmallString<256> NameData;
+ StringRef NameRef = Name.toStringRef(NameData);
+ return NamedMDSymTab->lookup(NameRef);
}
/// getOrInsertNamedMetadata - Return the first named MDNode in the module
diff --git a/contrib/llvm/lib/VMCore/Pass.cpp b/contrib/llvm/lib/VMCore/Pass.cpp
index a60877d..efd98af 100644
--- a/contrib/llvm/lib/VMCore/Pass.cpp
+++ b/contrib/llvm/lib/VMCore/Pass.cpp
@@ -35,6 +35,15 @@ using namespace llvm;
// Pass Implementation
//
+Pass::Pass(PassKind K, intptr_t pid) : Resolver(0), PassID(pid), Kind(K) {
+ assert(pid && "pid cannot be 0");
+}
+
+Pass::Pass(PassKind K, const void *pid)
+ : Resolver(0), PassID((intptr_t)pid), Kind(K) {
+ assert(pid && "pid cannot be 0");
+}
+
// Force out-of-line virtual method.
Pass::~Pass() {
delete Resolver;
@@ -92,6 +101,23 @@ void Pass::verifyAnalysis() const {
// By default, don't do anything.
}
+void *Pass::getAdjustedAnalysisPointer(const PassInfo *) {
+ return this;
+}
+
+ImmutablePass *Pass::getAsImmutablePass() {
+ return 0;
+}
+
+PMDataManager *Pass::getAsPMDataManager() {
+ return 0;
+}
+
+void Pass::setResolver(AnalysisResolver *AR) {
+ assert(!Resolver && "Resolver is already set");
+ Resolver = AR;
+}
+
// print - Print out the internal state of the pass. This is called by Analyze
// to print out the contents of an analysis. Otherwise it is not necessary to
// implement this method.
@@ -364,6 +390,14 @@ void PassInfo::unregisterPass() {
getPassRegistrar()->UnregisterPass(*this);
}
+Pass *PassInfo::createPass() const {
+ assert((!isAnalysisGroup() || NormalCtor) &&
+ "No default implementation found for analysis group!");
+ assert(NormalCtor &&
+ "Cannot call createPass on PassInfo without default ctor!");
+ return NormalCtor();
+}
+
//===----------------------------------------------------------------------===//
// Analysis Group Implementation Code
//===----------------------------------------------------------------------===//
@@ -467,4 +501,15 @@ void AnalysisUsage::setPreservesCFG() {
GetCFGOnlyPasses(Preserved).enumeratePasses();
}
+AnalysisUsage &AnalysisUsage::addRequiredID(AnalysisID ID) {
+ assert(ID && "Pass class not registered!");
+ Required.push_back(ID);
+ return *this;
+}
+AnalysisUsage &AnalysisUsage::addRequiredTransitiveID(AnalysisID ID) {
+ assert(ID && "Pass class not registered!");
+ Required.push_back(ID);
+ RequiredTransitive.push_back(ID);
+ return *this;
+}
diff --git a/contrib/llvm/lib/VMCore/PassManager.cpp b/contrib/llvm/lib/VMCore/PassManager.cpp
index a56938c..296b0d1 100644
--- a/contrib/llvm/lib/VMCore/PassManager.cpp
+++ b/contrib/llvm/lib/VMCore/PassManager.cpp
@@ -1147,6 +1147,11 @@ void PMDataManager::addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass) {
llvm_unreachable("Unable to schedule pass");
}
+Pass *PMDataManager::getOnTheFlyPass(Pass *P, const PassInfo *PI, Function &F) {
+ assert(0 && "Unable to find on the fly pass");
+ return NULL;
+}
+
// Destructor
PMDataManager::~PMDataManager() {
for (SmallVector<Pass *, 8>::iterator I = PassVector.begin(),
diff --git a/contrib/llvm/lib/VMCore/Value.cpp b/contrib/llvm/lib/VMCore/Value.cpp
index 645dd5a..585edf0 100644
--- a/contrib/llvm/lib/VMCore/Value.cpp
+++ b/contrib/llvm/lib/VMCore/Value.cpp
@@ -322,7 +322,13 @@ void Value::replaceAllUsesWith(Value *New) {
Value *Value::stripPointerCasts() {
if (!getType()->isPointerTy())
return this;
+
+ // Even though we don't look through PHI nodes, we could be called on an
+ // instruction in an unreachable block, which may be on a cycle.
+ SmallPtrSet<Value *, 4> Visited;
+
Value *V = this;
+ Visited.insert(V);
do {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
if (!GEP->hasAllZeroIndices())
@@ -338,7 +344,9 @@ Value *Value::stripPointerCasts() {
return V;
}
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
- } while (1);
+ } while (Visited.insert(V));
+
+ return V;
}
Value *Value::getUnderlyingObject(unsigned MaxLookup) {
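
The loop above now terminates on cycles by keying off the visited-set insertion; SmallPtrSet::insert() returned a plain bool at the time, hence the bare while (Visited.insert(V)). The same guard with std::set (whose insert returns a pair, hence .second) in a standalone sketch with a toy Link type:

    #include <cstdio>
    #include <set>

    struct Link { Link *next; };

    Link *stripLinks(Link *v) {
      std::set<Link*> visited;
      visited.insert(v);
      do {
        if (!v->next) return v;              // nothing left to strip
        v = v->next;
      } while (visited.insert(v).second);    // false once we revisit a node
      return v;                              // bailed out of a cycle
    }

    int main() {
      Link a{nullptr}, b{&a}, c{&b};
      std::printf("%s\n", stripLinks(&c) == &a ? "reached base" : "cycle");
      Link x{nullptr}, y{&x};
      x.next = &y;                           // x <-> y cycle, as in unreachable IR
      std::printf("%s\n", stripLinks(&x) ? "terminated" : "");
    }
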
diff --git a/contrib/llvm/lib/VMCore/Verifier.cpp b/contrib/llvm/lib/VMCore/Verifier.cpp
index 75988cc..f97699d 100644
--- a/contrib/llvm/lib/VMCore/Verifier.cpp
+++ b/contrib/llvm/lib/VMCore/Verifier.cpp
@@ -85,7 +85,8 @@ namespace { // Anonymous namespace for class
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
if (I->empty() || !I->back().isTerminator()) {
- dbgs() << "Basic Block does not have terminator!\n";
+ dbgs() << "Basic Block in function '" << F.getName()
+ << "' does not have terminator!\n";
WriteAsOperand(dbgs(), I, true);
dbgs() << "\n";
Broken = true;
@@ -1356,7 +1357,7 @@ void Verifier::visitLoadInst(LoadInst &LI) {
void Verifier::visitStoreInst(StoreInst &SI) {
const PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
- Assert1(PTy, "Load operand must be a pointer.", &SI);
+ Assert1(PTy, "Store operand must be a pointer.", &SI);
const Type *ElTy = PTy->getElementType();
Assert2(ElTy == SI.getOperand(0)->getType(),
"Stored value type does not match pointer operand type!",
@@ -1371,8 +1372,8 @@ void Verifier::visitAllocaInst(AllocaInst &AI) {
&AI);
Assert1(PTy->getElementType()->isSized(), "Cannot allocate unsized type",
&AI);
- Assert1(AI.getArraySize()->getType()->isIntegerTy(32),
- "Alloca array size must be i32", &AI);
+ Assert1(AI.getArraySize()->getType()->isIntegerTy(),
+ "Alloca array size must have integer type", &AI);
visitInstruction(AI);
}
@@ -1453,7 +1454,7 @@ void Verifier::visitInstruction(Instruction &I) {
if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
// Check to make sure that the "address of" an intrinsic function is never
// taken.
- Assert1(!F->isIntrinsic() || (i == 0 && isa<CallInst>(I)),
+ Assert1(!F->isIntrinsic() || (i + 1 == e && isa<CallInst>(I)),
"Cannot take the address of an intrinsic!", &I);
Assert1(F->getParent() == Mod, "Referencing function in another module!",
&I);
@@ -1536,7 +1537,8 @@ void Verifier::visitInstruction(Instruction &I) {
"Instruction does not dominate all uses!", Op, &I);
}
} else if (isa<InlineAsm>(I.getOperand(i))) {
- Assert1((i == 0 && isa<CallInst>(I)) || (i + 3 == e && isa<InvokeInst>(I)),
+ Assert1((i + 1 == e && isa<CallInst>(I)) ||
+ (i + 3 == e && isa<InvokeInst>(I)),
"Cannot take the address of an inline asm!", &I);
}
}
@@ -1628,24 +1630,24 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
// If the intrinsic takes MDNode arguments, verify that they are either global
// or are local to *this* function.
- for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
- if (MDNode *MD = dyn_cast<MDNode>(CI.getOperand(i)))
+ for (unsigned i = 0, e = CI.getNumArgOperands(); i != e; ++i)
+ if (MDNode *MD = dyn_cast<MDNode>(CI.getArgOperand(i)))
visitMDNode(*MD, CI.getParent()->getParent());
switch (ID) {
default:
break;
case Intrinsic::dbg_declare: { // llvm.dbg.declare
- Assert1(CI.getOperand(1) && isa<MDNode>(CI.getOperand(1)),
+ Assert1(CI.getArgOperand(0) && isa<MDNode>(CI.getArgOperand(0)),
"invalid llvm.dbg.declare intrinsic call 1", &CI);
- MDNode *MD = cast<MDNode>(CI.getOperand(1));
+ MDNode *MD = cast<MDNode>(CI.getArgOperand(0));
Assert1(MD->getNumOperands() == 1,
"invalid llvm.dbg.declare intrinsic call 2", &CI);
} break;
case Intrinsic::memcpy:
case Intrinsic::memmove:
case Intrinsic::memset:
- Assert1(isa<ConstantInt>(CI.getOperand(4)),
+ Assert1(isa<ConstantInt>(CI.getArgOperand(3)),
"alignment argument of memory intrinsics must be a constant int",
&CI);
break;
@@ -1654,10 +1656,10 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
case Intrinsic::gcread:
if (ID == Intrinsic::gcroot) {
AllocaInst *AI =
- dyn_cast<AllocaInst>(CI.getOperand(1)->stripPointerCasts());
+ dyn_cast<AllocaInst>(CI.getArgOperand(0)->stripPointerCasts());
Assert1(AI && AI->getType()->getElementType()->isPointerTy(),
"llvm.gcroot parameter #1 must be a pointer alloca.", &CI);
- Assert1(isa<Constant>(CI.getOperand(2)),
+ Assert1(isa<Constant>(CI.getArgOperand(1)),
"llvm.gcroot parameter #2 must be a constant.", &CI);
}
@@ -1665,32 +1667,32 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
"Enclosing function does not use GC.", &CI);
break;
case Intrinsic::init_trampoline:
- Assert1(isa<Function>(CI.getOperand(2)->stripPointerCasts()),
+ Assert1(isa<Function>(CI.getArgOperand(1)->stripPointerCasts()),
"llvm.init_trampoline parameter #2 must resolve to a function.",
&CI);
break;
case Intrinsic::prefetch:
- Assert1(isa<ConstantInt>(CI.getOperand(2)) &&
- isa<ConstantInt>(CI.getOperand(3)) &&
- cast<ConstantInt>(CI.getOperand(2))->getZExtValue() < 2 &&
- cast<ConstantInt>(CI.getOperand(3))->getZExtValue() < 4,
+ Assert1(isa<ConstantInt>(CI.getArgOperand(1)) &&
+ isa<ConstantInt>(CI.getArgOperand(2)) &&
+ cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue() < 2 &&
+ cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue() < 4,
"invalid arguments to llvm.prefetch",
&CI);
break;
case Intrinsic::stackprotector:
- Assert1(isa<AllocaInst>(CI.getOperand(2)->stripPointerCasts()),
+ Assert1(isa<AllocaInst>(CI.getArgOperand(1)->stripPointerCasts()),
"llvm.stackprotector parameter #2 must resolve to an alloca.",
&CI);
break;
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
- Assert1(isa<ConstantInt>(CI.getOperand(1)),
+ Assert1(isa<ConstantInt>(CI.getArgOperand(0)),
"size argument of memory use markers must be a constant integer",
&CI);
break;
case Intrinsic::invariant_end:
- Assert1(isa<ConstantInt>(CI.getOperand(2)),
+ Assert1(isa<ConstantInt>(CI.getArgOperand(1)),
"llvm.invariant.end parameter #2 must be a constant integer", &CI);
break;
}
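
The getOperand(N) to getArgOperand(N-1) churn throughout this hunk tracks a change in CallInst's operand layout: the called function used to be operand 0 with the arguments following it, whereas the arguments now come first and the callee sits at the end (which is also why the i == 0 checks above become i + 1 == e). getArgOperand hides the layout from callers entirely. A sketch of how such an accessor decouples clients from operand order — illustrative only, not LLVM's implementation:

    #include <vector>

    struct Value;

    struct Call {
      std::vector<Value *> Operands; // arguments first, callee last

      unsigned getNumArgOperands() const { return Operands.size() - 1; }
      Value *getArgOperand(unsigned i) const { return Operands[i]; }
      Value *getCalledValue() const { return Operands.back(); }
    };
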
diff --git a/contrib/llvm/tools/Makefile b/contrib/llvm/tools/Makefile
index 9d2e576..9bc74fe 100644
--- a/contrib/llvm/tools/Makefile
+++ b/contrib/llvm/tools/Makefile
@@ -49,9 +49,4 @@ ifeq ($(ENABLE_PIC),1)
endif
endif
-# Don't build edis if we explicitly disabled it.
-ifeq ($(DISABLE_EDIS),1)
- PARALLEL_DIRS := $(filter-out edis, $(PARALLEL_DIRS))
-endif
-
include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/tools/bugpoint/BugDriver.h b/contrib/llvm/tools/bugpoint/BugDriver.h
index e5b7373..4f6bae5 100644
--- a/contrib/llvm/tools/bugpoint/BugDriver.h
+++ b/contrib/llvm/tools/bugpoint/BugDriver.h
@@ -16,7 +16,7 @@
#ifndef BUGDRIVER_H
#define BUGDRIVER_H
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ValueMap.h"
#include <vector>
#include <string>
@@ -269,7 +269,7 @@ public:
/// recreate the failure. This returns true if a compiler error is found.
///
bool runManyPasses(const std::vector<const PassInfo*> &AllPasses,
- std::string &ErrMsg);
+ std::string &ErrMsg);
/// writeProgramToFile - This writes the current "Program" to the named
/// bitcode file. If an error occurs, true is returned.
@@ -325,7 +325,7 @@ void DeleteFunctionBody(Function *F);
/// module, split the functions OUT of the specified module, and place them in
/// the new module.
Module *SplitFunctionsOutOfModule(Module *M, const std::vector<Function*> &F,
- DenseMap<const Value*, Value*> &ValueMap);
+ ValueMap<const Value*, Value*> &VMap);
} // End llvm namespace
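
The switch from DenseMap to llvm::ValueMap here is not just a rename: a ValueMap keyed on Value* installs callbacks on its keys, so an entry follows its key through replaceAllUsesWith and is dropped when the key is deleted. That matters to bugpoint, which mutates the cloned modules heavily while these clone maps are still live. A hedged usage sketch, with header paths and the CloneModule signature as they appear in this LLVM snapshot:

    #include "llvm/Module.h"
    #include "llvm/ADT/ValueMap.h"
    #include "llvm/Transforms/Utils/Cloning.h"
    using namespace llvm;

    // Clone M and locate F's counterpart in the clone. VMap entries
    // keep tracking their keys even if the original values are later
    // RAUW'd or deleted while the map is alive.
    Module *cloneAndFind(Module *M, Function *F, Function *&NewF) {
      ValueMap<const Value*, Value*> VMap;
      Module *Clone = CloneModule(M, VMap); // fills VMap: old -> new
      NewF = cast<Function>(VMap[F]);
      return Clone;
    }
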
diff --git a/contrib/llvm/tools/bugpoint/CrashDebugger.cpp b/contrib/llvm/tools/bugpoint/CrashDebugger.cpp
index 46b33d2..2d0631c 100644
--- a/contrib/llvm/tools/bugpoint/CrashDebugger.cpp
+++ b/contrib/llvm/tools/bugpoint/CrashDebugger.cpp
@@ -130,14 +130,14 @@ bool
ReduceCrashingGlobalVariables::TestGlobalVariables(
std::vector<GlobalVariable*> &GVs) {
// Clone the program to try hacking it apart...
- DenseMap<const Value*, Value*> ValueMap;
- Module *M = CloneModule(BD.getProgram(), ValueMap);
+ ValueMap<const Value*, Value*> VMap;
+ Module *M = CloneModule(BD.getProgram(), VMap);
// Convert list to set for fast lookup...
std::set<GlobalVariable*> GVSet;
for (unsigned i = 0, e = GVs.size(); i != e; ++i) {
- GlobalVariable* CMGV = cast<GlobalVariable>(ValueMap[GVs[i]]);
+ GlobalVariable* CMGV = cast<GlobalVariable>(VMap[GVs[i]]);
assert(CMGV && "Global Variable not in module?!");
GVSet.insert(CMGV);
}
@@ -204,13 +204,13 @@ bool ReduceCrashingFunctions::TestFuncs(std::vector<Function*> &Funcs) {
return false;
// Clone the program to try hacking it apart...
- DenseMap<const Value*, Value*> ValueMap;
- Module *M = CloneModule(BD.getProgram(), ValueMap);
+ ValueMap<const Value*, Value*> VMap;
+ Module *M = CloneModule(BD.getProgram(), VMap);
// Convert list to set for fast lookup...
std::set<Function*> Functions;
for (unsigned i = 0, e = Funcs.size(); i != e; ++i) {
- Function *CMF = cast<Function>(ValueMap[Funcs[i]]);
+ Function *CMF = cast<Function>(VMap[Funcs[i]]);
assert(CMF && "Function not in module?!");
assert(CMF->getFunctionType() == Funcs[i]->getFunctionType() && "wrong ty");
assert(CMF->getName() == Funcs[i]->getName() && "wrong name");
@@ -270,13 +270,13 @@ namespace {
bool ReduceCrashingBlocks::TestBlocks(std::vector<const BasicBlock*> &BBs) {
// Clone the program to try hacking it apart...
- DenseMap<const Value*, Value*> ValueMap;
- Module *M = CloneModule(BD.getProgram(), ValueMap);
+ ValueMap<const Value*, Value*> VMap;
+ Module *M = CloneModule(BD.getProgram(), VMap);
// Convert list to set for fast lookup...
SmallPtrSet<BasicBlock*, 8> Blocks;
for (unsigned i = 0, e = BBs.size(); i != e; ++i)
- Blocks.insert(cast<BasicBlock>(ValueMap[BBs[i]]));
+ Blocks.insert(cast<BasicBlock>(VMap[BBs[i]]));
outs() << "Checking for crash with only these blocks:";
unsigned NumPrint = Blocks.size();
@@ -298,10 +298,7 @@ bool ReduceCrashingBlocks::TestBlocks(std::vector<const BasicBlock*> &BBs) {
TerminatorInst *BBTerm = BB->getTerminator();
- if (BBTerm->getType()->isStructTy())
- BBTerm->replaceAllUsesWith(UndefValue::get(BBTerm->getType()));
- else if (BB->getTerminator()->getType() !=
- Type::getVoidTy(BB->getContext()))
+ if (!BB->getTerminator()->getType()->isVoidTy())
BBTerm->replaceAllUsesWith(Constant::getNullValue(BBTerm->getType()));
// Replace the old terminator instruction.
@@ -374,14 +371,14 @@ namespace {
bool ReduceCrashingInstructions::TestInsts(std::vector<const Instruction*>
&Insts) {
// Clone the program to try hacking it apart...
- DenseMap<const Value*, Value*> ValueMap;
- Module *M = CloneModule(BD.getProgram(), ValueMap);
+ ValueMap<const Value*, Value*> VMap;
+ Module *M = CloneModule(BD.getProgram(), VMap);
// Convert list to set for fast lookup...
SmallPtrSet<Instruction*, 64> Instructions;
for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
assert(!isa<TerminatorInst>(Insts[i]));
- Instructions.insert(cast<Instruction>(ValueMap[Insts[i]]));
+ Instructions.insert(cast<Instruction>(VMap[Insts[i]]));
}
outs() << "Checking for crash with only " << Instructions.size();
@@ -395,7 +392,7 @@ bool ReduceCrashingInstructions::TestInsts(std::vector<const Instruction*>
for (BasicBlock::iterator I = FI->begin(), E = FI->end(); I != E;) {
Instruction *Inst = I++;
if (!Instructions.count(Inst) && !isa<TerminatorInst>(Inst)) {
- if (Inst->getType() != Type::getVoidTy(Inst->getContext()))
+ if (!Inst->getType()->isVoidTy())
Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
Inst->eraseFromParent();
}
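
The loop above is the heart of the reduction step: every instruction outside the kept set has its uses replaced with undef and is then erased, and the new isVoidTy() predicate subsumes the separate struct-type and void checks the old code carried. The safe-deletion pattern in isolation — a sketch under this snapshot's header layout, assuming the instruction is not a terminator:

    #include "llvm/Instruction.h"
    #include "llvm/Constants.h" // UndefValue
    using namespace llvm;

    // Detach Inst from its parent without leaving dangling uses.
    void deleteInstruction(Instruction *Inst) {
      if (!Inst->getType()->isVoidTy())
        Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
      Inst->eraseFromParent();
    }
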
diff --git a/contrib/llvm/tools/bugpoint/ExtractFunction.cpp b/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
index eaa2c53..d5611b5 100644
--- a/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
+++ b/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
@@ -73,9 +73,7 @@ Module *BugDriver::deleteInstructionFromProgram(const Instruction *I,
Instruction *TheInst = RI; // Got the corresponding instruction!
// If this instruction produces a value, replace any users with null values
- if (TheInst->getType()->isStructTy())
- TheInst->replaceAllUsesWith(UndefValue::get(TheInst->getType()));
- else if (TheInst->getType() != Type::getVoidTy(I->getContext()))
+ if (!TheInst->getType()->isVoidTy())
TheInst->replaceAllUsesWith(Constant::getNullValue(TheInst->getType()));
// Remove the instruction from the program.
@@ -118,13 +116,14 @@ Module *BugDriver::performFinalCleanups(Module *M, bool MayModifySemantics) {
std::vector<const PassInfo*> CleanupPasses;
CleanupPasses.push_back(getPI(createGlobalDCEPass()));
- CleanupPasses.push_back(getPI(createDeadTypeEliminationPass()));
if (MayModifySemantics)
CleanupPasses.push_back(getPI(createDeadArgHackingPass()));
else
CleanupPasses.push_back(getPI(createDeadArgEliminationPass()));
+ CleanupPasses.push_back(getPI(createDeadTypeEliminationPass()));
+
Module *New = runPassesOn(M, CleanupPasses);
if (New == 0) {
errs() << "Final cleanups failed. Sorry. :( Please report a bug!\n";
@@ -202,7 +201,7 @@ static Constant *GetTorInit(std::vector<std::pair<Function*, int> > &TorList) {
/// static ctors/dtors, we need to add an llvm.global_[cd]tors global to M2, and
/// prune appropriate entries out of M1s list.
static void SplitStaticCtorDtor(const char *GlobalName, Module *M1, Module *M2,
- DenseMap<const Value*, Value*> ValueMap) {
+ ValueMap<const Value*, Value*> VMap) {
GlobalVariable *GV = M1->getNamedGlobal(GlobalName);
if (!GV || GV->isDeclaration() || GV->hasLocalLinkage() ||
!GV->use_empty()) return;
@@ -230,7 +229,7 @@ static void SplitStaticCtorDtor(const char *GlobalName, Module *M1, Module *M2,
M1Tors.push_back(std::make_pair(F, Priority));
else {
// Map to M2's version of the function.
- F = cast<Function>(ValueMap[F]);
+ F = cast<Function>(VMap[F]);
M2Tors.push_back(std::make_pair(F, Priority));
}
}
@@ -265,7 +264,7 @@ static void SplitStaticCtorDtor(const char *GlobalName, Module *M1, Module *M2,
Module *
llvm::SplitFunctionsOutOfModule(Module *M,
const std::vector<Function*> &F,
- DenseMap<const Value*, Value*> &ValueMap) {
+ ValueMap<const Value*, Value*> &VMap) {
// Make sure functions & globals are all external so that linkage
// between the two modules will work.
for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I)
@@ -277,8 +276,8 @@ llvm::SplitFunctionsOutOfModule(Module *M,
I->setLinkage(GlobalValue::ExternalLinkage);
}
- DenseMap<const Value*, Value*> NewValueMap;
- Module *New = CloneModule(M, NewValueMap);
+ ValueMap<const Value*, Value*> NewVMap;
+ Module *New = CloneModule(M, NewVMap);
// Make sure global initializers exist only in the safe module (CBE->.so)
for (Module::global_iterator I = New->global_begin(), E = New->global_end();
@@ -288,11 +287,11 @@ llvm::SplitFunctionsOutOfModule(Module *M,
// Remove the Test functions from the Safe module
std::set<Function *> TestFunctions;
for (unsigned i = 0, e = F.size(); i != e; ++i) {
- Function *TNOF = cast<Function>(ValueMap[F[i]]);
+ Function *TNOF = cast<Function>(VMap[F[i]]);
DEBUG(errs() << "Removing function ");
DEBUG(WriteAsOperand(errs(), TNOF, false));
DEBUG(errs() << "\n");
- TestFunctions.insert(cast<Function>(NewValueMap[TNOF]));
+ TestFunctions.insert(cast<Function>(NewVMap[TNOF]));
DeleteFunctionBody(TNOF); // Function is now external in this module!
}
@@ -305,8 +304,8 @@ llvm::SplitFunctionsOutOfModule(Module *M,
// Make sure that there is a global ctor/dtor array in both halves of the
// module if they both have static ctor/dtor functions.
- SplitStaticCtorDtor("llvm.global_ctors", M, New, NewValueMap);
- SplitStaticCtorDtor("llvm.global_dtors", M, New, NewValueMap);
+ SplitStaticCtorDtor("llvm.global_ctors", M, New, NewVMap);
+ SplitStaticCtorDtor("llvm.global_dtors", M, New, NewVMap);
return New;
}
@@ -365,8 +364,7 @@ Module *BugDriver::ExtractMappedBlocksFromModule(const
PI.push_back(getPI(createBlockExtractorPass(EmptyBBs)));
Module *Ret = runPassesOn(M, PI, false, 1, &ExtraArg);
- if (uniqueFilename.exists())
- uniqueFilename.eraseFromDisk(); // Free disk space
+ uniqueFilename.eraseFromDisk(); // Free disk space
if (Ret == 0) {
outs() << "*** Basic Block extraction failed, please report a bug!\n";
diff --git a/contrib/llvm/tools/bugpoint/ListReducer.h b/contrib/llvm/tools/bugpoint/ListReducer.h
index 5e9cff0..bd1c5da 100644
--- a/contrib/llvm/tools/bugpoint/ListReducer.h
+++ b/contrib/llvm/tools/bugpoint/ListReducer.h
@@ -183,8 +183,8 @@ Backjump:
--i; // Don't skip an element of the list
Changed = true;
}
- if (!Error.empty())
- return true;
+ if (!Error.empty())
+ return true;
}
// This can take a long time if left uncontrolled. For now, don't
// iterate.
diff --git a/contrib/llvm/tools/bugpoint/Miscompilation.cpp b/contrib/llvm/tools/bugpoint/Miscompilation.cpp
index 71484a2..47ac3c5 100644
--- a/contrib/llvm/tools/bugpoint/Miscompilation.cpp
+++ b/contrib/llvm/tools/bugpoint/Miscompilation.cpp
@@ -251,10 +251,10 @@ int ReduceMiscompilingFunctions::TestFuncs(const std::vector<Function*> &Funcs,
outs() << '\n';
// Split the module into the two halves of the program we want.
- DenseMap<const Value*, Value*> ValueMap;
- Module *ToNotOptimize = CloneModule(BD.getProgram(), ValueMap);
+ ValueMap<const Value*, Value*> VMap;
+ Module *ToNotOptimize = CloneModule(BD.getProgram(), VMap);
Module *ToOptimize = SplitFunctionsOutOfModule(ToNotOptimize, Funcs,
- ValueMap);
+ VMap);
// Run the predicate, note that the predicate will delete both input modules.
return TestFn(BD, ToOptimize, ToNotOptimize, Error);
@@ -285,11 +285,11 @@ static bool ExtractLoops(BugDriver &BD,
while (1) {
if (BugpointIsInterrupted) return MadeChange;
- DenseMap<const Value*, Value*> ValueMap;
- Module *ToNotOptimize = CloneModule(BD.getProgram(), ValueMap);
+ ValueMap<const Value*, Value*> VMap;
+ Module *ToNotOptimize = CloneModule(BD.getProgram(), VMap);
Module *ToOptimize = SplitFunctionsOutOfModule(ToNotOptimize,
MiscompiledFunctions,
- ValueMap);
+ VMap);
Module *ToOptimizeLoopExtracted = BD.ExtractLoop(ToOptimize);
if (!ToOptimizeLoopExtracted) {
// If the loop extractor crashed or if there were no extractible loops,
@@ -448,11 +448,11 @@ bool ReduceMiscompiledBlocks::TestFuncs(const std::vector<BasicBlock*> &BBs,
outs() << '\n';
// Split the module into the two halves of the program we want.
- DenseMap<const Value*, Value*> ValueMap;
- Module *ToNotOptimize = CloneModule(BD.getProgram(), ValueMap);
+ ValueMap<const Value*, Value*> VMap;
+ Module *ToNotOptimize = CloneModule(BD.getProgram(), VMap);
Module *ToOptimize = SplitFunctionsOutOfModule(ToNotOptimize,
FunctionsBeingTested,
- ValueMap);
+ VMap);
// Try the extraction. If it doesn't work, then the block extractor crashed
// or something, in which case bugpoint can't chase down this possibility.
@@ -505,11 +505,11 @@ static bool ExtractBlocks(BugDriver &BD,
return false;
}
- DenseMap<const Value*, Value*> ValueMap;
- Module *ProgClone = CloneModule(BD.getProgram(), ValueMap);
+ ValueMap<const Value*, Value*> VMap;
+ Module *ProgClone = CloneModule(BD.getProgram(), VMap);
Module *ToExtract = SplitFunctionsOutOfModule(ProgClone,
MiscompiledFunctions,
- ValueMap);
+ VMap);
Module *Extracted = BD.ExtractMappedBlocksFromModule(Blocks, ToExtract);
if (Extracted == 0) {
// Weird, extraction should have worked.
@@ -687,11 +687,11 @@ void BugDriver::debugMiscompilation(std::string *Error) {
// Output a bunch of bitcode files for the user...
outs() << "Outputting reduced bitcode files which expose the problem:\n";
- DenseMap<const Value*, Value*> ValueMap;
- Module *ToNotOptimize = CloneModule(getProgram(), ValueMap);
+ ValueMap<const Value*, Value*> VMap;
+ Module *ToNotOptimize = CloneModule(getProgram(), VMap);
Module *ToOptimize = SplitFunctionsOutOfModule(ToNotOptimize,
MiscompiledFunctions,
- ValueMap);
+ VMap);
outs() << " Non-optimized portion: ";
ToNotOptimize = swapProgramIn(ToNotOptimize);
@@ -848,7 +848,7 @@ static void CleanupAndPrepareModules(BugDriver &BD, Module *&Test,
Args.push_back(i);
// Pass on the arguments to the real function, return its result
- if (F->getReturnType() == Type::getVoidTy(F->getContext())) {
+ if (F->getReturnType()->isVoidTy()) {
CallInst::Create(FuncPtr, Args.begin(), Args.end(), "", DoCallBB);
ReturnInst::Create(F->getContext(), DoCallBB);
} else {
@@ -894,6 +894,8 @@ static bool TestCodeGenerator(BugDriver &BD, Module *Test, Module *Safe,
}
delete Test;
+ FileRemover TestModuleBCRemover(TestModuleBC, !SaveTemps);
+
// Make the shared library
sys::Path SafeModuleBC("bugpoint.safe.bc");
if (SafeModuleBC.makeUnique(true, &ErrMsg)) {
@@ -907,11 +909,16 @@ static bool TestCodeGenerator(BugDriver &BD, Module *Test, Module *Safe,
<< "'\nExiting.";
exit(1);
}
+
+ FileRemover SafeModuleBCRemover(SafeModuleBC, !SaveTemps);
+
std::string SharedObject = BD.compileSharedObject(SafeModuleBC.str(), Error);
if (!Error.empty())
return false;
delete Safe;
+ FileRemover SharedObjectRemover(sys::Path(SharedObject), !SaveTemps);
+
// Run the code generator on the `Test' code, loading the shared library.
// The function returns whether or not the new output differs from reference.
bool Result = BD.diffProgram(TestModuleBC.str(), SharedObject, false, &Error);
@@ -922,9 +929,6 @@ static bool TestCodeGenerator(BugDriver &BD, Module *Test, Module *Safe,
errs() << ": still failing!\n";
else
errs() << ": didn't fail.\n";
- TestModuleBC.eraseFromDisk();
- SafeModuleBC.eraseFromDisk();
- sys::Path(SharedObject).eraseFromDisk();
return Result;
}
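
The three manual eraseFromDisk() calls removed above are replaced by FileRemover guards declared immediately after each temporary is created. The destructor then deletes the file on every exit path — including the early return-false branches on error — and passing !SaveTemps disarms the guard when the user asked to keep temporaries. The RAII idiom in a generic, simplified form (not LLVM's actual FileRemover):

    #include <cstdio>
    #include <string>

    // Deletes the named file when it goes out of scope, unless disarmed.
    class ScopedFileRemover {
      std::string Path;
      bool Armed;
    public:
      ScopedFileRemover(const std::string &P, bool Arm = true)
          : Path(P), Armed(Arm) {}
      ~ScopedFileRemover() { if (Armed) std::remove(Path.c_str()); }
    private:
      ScopedFileRemover(const ScopedFileRemover&);            // non-copyable
      ScopedFileRemover &operator=(const ScopedFileRemover&);
    };
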
@@ -956,9 +960,9 @@ bool BugDriver::debugCodeGenerator(std::string *Error) {
return true;
// Split the module into the two halves of the program we want.
- DenseMap<const Value*, Value*> ValueMap;
- Module *ToNotCodeGen = CloneModule(getProgram(), ValueMap);
- Module *ToCodeGen = SplitFunctionsOutOfModule(ToNotCodeGen, Funcs, ValueMap);
+ ValueMap<const Value*, Value*> VMap;
+ Module *ToNotCodeGen = CloneModule(getProgram(), VMap);
+ Module *ToCodeGen = SplitFunctionsOutOfModule(ToNotCodeGen, Funcs, VMap);
// Condition the modules
CleanupAndPrepareModules(*this, ToCodeGen, ToNotCodeGen);
diff --git a/contrib/llvm/tools/bugpoint/ToolRunner.h b/contrib/llvm/tools/bugpoint/ToolRunner.h
index d966fc0..cda0ddf 100644
--- a/contrib/llvm/tools/bugpoint/ToolRunner.h
+++ b/contrib/llvm/tools/bugpoint/ToolRunner.h
@@ -64,7 +64,7 @@ public:
FileType fileType,
const std::string &InputFile,
const std::string &OutputFile,
- std::string *Error = 0,
+ std::string *Error = 0,
const std::vector<std::string> &GCCArgs =
std::vector<std::string>(),
unsigned Timeout = 0,
diff --git a/contrib/llvm/tools/clang/CMakeLists.txt b/contrib/llvm/tools/clang/CMakeLists.txt
index 1be646d..1ba2a62 100644
--- a/contrib/llvm/tools/clang/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/CMakeLists.txt
@@ -1,10 +1,5 @@
# Clang version information
-# Make sure that CMake reconfigures when the version changes.
-configure_file(
- ${CMAKE_CURRENT_SOURCE_DIR}/VER
- ${CMAKE_CURRENT_BINARY_DIR}/VER)
-
set(CLANG_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(CLANG_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
@@ -28,12 +23,28 @@ if( NOT CMAKE_SOURCE_DIR STREQUAL CMAKE_BINARY_DIR )
endif()
endif()
-# Compute the Clang version from the contents of VER
-file(READ ${CMAKE_CURRENT_SOURCE_DIR}/VER CLANG_VERSION_DATA)
+# Compute the Clang version from the LLVM version.
string(REGEX MATCH "[0-9]+\\.[0-9]+(\\.[0-9]+)?" CLANG_VERSION
- ${CLANG_VERSION_DATA})
+ ${PACKAGE_VERSION})
message(STATUS "Clang version: ${CLANG_VERSION}")
+string(REGEX REPLACE "([0-9]+)\\.[0-9]+(\\.[0-9]+)?" "\\1" CLANG_VERSION_MAJOR
+ ${CLANG_VERSION})
+string(REGEX REPLACE "[0-9]+\\.([0-9]+)(\\.[0-9]+)?" "\\1" CLANG_VERSION_MINOR
+ ${CLANG_VERSION})
+if (${CLANG_VERSION} MATCHES "[0-9]+\\.[0-9]+\\.[0-9]+")
+ set(CLANG_HAS_VERSION_PATCHLEVEL 1)
+ string(REGEX REPLACE "[0-9]+\\.[0-9]+\\.([0-9]+)" "\\1" CLANG_VERSION_PATCHLEVEL
+ ${CLANG_VERSION})
+else()
+ set(CLANG_HAS_VERSION_PATCHLEVEL 0)
+endif()
+
+# Configure the Version.inc file.
+configure_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/include/clang/Basic/Version.inc.in
+ ${CMAKE_CURRENT_BINARY_DIR}/include/clang/Basic/Version.inc)
+
# Add appropriate flags for GCC
if (CMAKE_COMPILER_IS_GNUCXX)
# FIXME: Turn off exceptions, RTTI:
@@ -41,6 +52,10 @@ if (CMAKE_COMPILER_IS_GNUCXX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-common -Woverloaded-virtual -pedantic -Wno-long-long -Wall -W -Wno-unused-parameter -Wwrite-strings")
endif ()
+if (APPLE)
+ set(CMAKE_MODULE_LINKER_FLAGS "-Wl,-flat_namespace -Wl,-undefined -Wl,suppress")
+endif ()
+
macro(add_clang_library name)
set(srcs ${ARGN})
if(MSVC_IDE OR XCODE)
@@ -54,7 +69,9 @@ macro(add_clang_library name)
../../include/clang${dir}/*.def)
set(srcs ${srcs} ${headers})
endif(MSVC_IDE OR XCODE)
- if (SHARED_LIBRARY)
+ if (MODULE)
+ set(libkind MODULE)
+ elseif (SHARED_LIBRARY)
set(libkind SHARED)
else()
set(libkind)
diff --git a/contrib/llvm/tools/clang/Makefile b/contrib/llvm/tools/clang/Makefile
index 39cf9c6..f871c25 100644
--- a/contrib/llvm/tools/clang/Makefile
+++ b/contrib/llvm/tools/clang/Makefile
@@ -1,14 +1,63 @@
-LEVEL = ../..
-DIRS := include lib tools docs
+##===- Makefile --------------------------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+# If CLANG_LEVEL is not set, then we are the top-level Makefile. Otherwise, we
+# are being included from a subdirectory makefile.
+
+ifndef CLANG_LEVEL
+
+IS_TOP_LEVEL := 1
+CLANG_LEVEL := .
+DIRS := include lib tools runtime docs
PARALLEL_DIRS :=
ifeq ($(BUILD_EXAMPLES),1)
PARALLEL_DIRS += examples
endif
+endif
+
+ifeq ($(MAKECMDGOALS),libs-only)
+ DIRS := $(filter-out tools docs, $(DIRS))
+ OPTIONAL_DIRS :=
+endif
+
+###
+# Common Makefile code, shared by all Clang Makefiles.
+
+# Set LLVM source root level.
+LEVEL := $(CLANG_LEVEL)/../..
+# Include LLVM common makefile.
include $(LEVEL)/Makefile.common
+# Set common Clang build flags.
+CPP.Flags += -I$(PROJ_SRC_DIR)/$(CLANG_LEVEL)/include -I$(PROJ_OBJ_DIR)/$(CLANG_LEVEL)/include
+ifdef CLANG_VENDOR
+CPP.Flags += -DCLANG_VENDOR='"$(CLANG_VENDOR) "'
+endif
+
+# Disable -fstrict-aliasing. Darwin disables it by default (and LLVM doesn't
+# work with it enabled with GCC), Clang/llvm-gcc don't support it yet, and newer
+# GCC's have false positive warnings with it on Linux (which prove a pain to
+# fix). For example:
+# http://gcc.gnu.org/PR41874
+# http://gcc.gnu.org/PR41838
+#
+# We can revisit this when LLVM/Clang support it.
+CXX.Flags += -fno-strict-aliasing
+
+###
+# Clang Top Level specific stuff.
+
+ifeq ($(IS_TOP_LEVEL),1)
+
ifneq ($(PROJ_SRC_ROOT),$(PROJ_OBJ_ROOT))
$(RecursiveTargets)::
$(Verb) if [ ! -f test/Makefile ]; then \
@@ -26,6 +75,8 @@ report::
clean::
@ $(MAKE) -C test clean
+libs-only: all
+
tags::
$(Verb) etags `find . -type f -name '*.h' -or -name '*.cpp' | \
grep -v /lib/Headers | grep -v /test/`
@@ -37,3 +88,5 @@ cscope.files:
-or -name '*.h' > cscope.files
.PHONY: test report clean cscope.files
+
+endif
diff --git a/contrib/llvm/tools/clang/NOTES.txt b/contrib/llvm/tools/clang/NOTES.txt
index beceb7d..f66a961 100644
--- a/contrib/llvm/tools/clang/NOTES.txt
+++ b/contrib/llvm/tools/clang/NOTES.txt
@@ -13,8 +13,7 @@ This is similar to -Eonly.
//===---------------------------------------------------------------------===//
-Creating and using a PTH file for performance measurement (use a release-asserts
-build).
+Creating and using a PTH file for performance measurement (use a release build).
$ clang -ccc-pch-is-pth -x objective-c-header INPUTS/Cocoa_h.m -o /tmp/tokencache
$ clang -cc1 -token-cache /tmp/tokencache INPUTS/Cocoa_h.m
diff --git a/contrib/llvm/tools/clang/README.txt b/contrib/llvm/tools/clang/README.txt
index 924ecc4..44ce723a 100644
--- a/contrib/llvm/tools/clang/README.txt
+++ b/contrib/llvm/tools/clang/README.txt
@@ -4,7 +4,7 @@
Welcome to Clang. This is a compiler front-end for the C family of languages
(C, C++, Objective-C, and Objective-C++) which is built as part of the LLVM
-compiler intrastructure project.
+compiler infrastructure project.
Unlike many other compiler frontends, Clang is useful for a number of things
beyond just compiling code: we intend for Clang to be host to a number of
diff --git a/contrib/llvm/tools/clang/include/Makefile b/contrib/llvm/tools/clang/include/Makefile
index f686d6a..79b9adf 100644
--- a/contrib/llvm/tools/clang/include/Makefile
+++ b/contrib/llvm/tools/clang/include/Makefile
@@ -1,4 +1,4 @@
-LEVEL = ../../..
+CLANG_LEVEL := ..
DIRS := clang clang-c
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/include/clang-c/Index.h b/contrib/llvm/tools/clang/include/clang-c/Index.h
index 86926bd..b377b6d 100644
--- a/contrib/llvm/tools/clang/include/clang-c/Index.h
+++ b/contrib/llvm/tools/clang/include/clang-c/Index.h
@@ -343,12 +343,6 @@ CINDEX_LINKAGE CXSourceLocation clang_getRangeStart(CXSourceRange range);
CINDEX_LINKAGE CXSourceLocation clang_getRangeEnd(CXSourceRange range);
/**
- * \brief Determine if the source location occurs within the main file
- * of the translation unit (as opposed to an included header).
- */
-CINDEX_LINKAGE unsigned clang_isFromMainFile(CXSourceLocation loc);
-
-/**
* @}
*/
@@ -1092,7 +1086,9 @@ enum CXTypeKind {
CXType_Enum = 106,
CXType_Typedef = 107,
CXType_ObjCInterface = 108,
- CXType_ObjCObjectPointer = 109
+ CXType_ObjCObjectPointer = 109,
+ CXType_FunctionNoProto = 110,
+ CXType_FunctionProto = 111
};
/**
@@ -1145,6 +1141,17 @@ CINDEX_LINKAGE CXCursor clang_getTypeDeclaration(CXType T);
CINDEX_LINKAGE CXString clang_getTypeKindSpelling(enum CXTypeKind K);
/**
+ * \brief Retrieve the result type associated with a function type.
+ */
+CINDEX_LINKAGE CXType clang_getResultType(CXType T);
+
+/**
+ * \brief Retrieve the result type associated with a given cursor. This only
+ * returns a valid type if the cursor refers to a function or method.
+ */
+CINDEX_LINKAGE CXType clang_getCursorResultType(CXCursor C);
+
+/**
* @}
*/
diff --git a/contrib/llvm/tools/clang/include/clang-c/Makefile b/contrib/llvm/tools/clang/include/clang-c/Makefile
index 5e3522f..98ea719 100644
--- a/contrib/llvm/tools/clang/include/clang-c/Makefile
+++ b/contrib/llvm/tools/clang/include/clang-c/Makefile
@@ -1,7 +1,7 @@
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
DIRS :=
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
install-local::
$(Echo) Installing Clang C API include files
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h b/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
index 87a12cde..3799451 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
@@ -62,6 +62,7 @@ namespace clang {
class RecordDecl;
class StoredDeclsMap;
class TagDecl;
+ class TemplateTemplateParmDecl;
class TemplateTypeParmDecl;
class TranslationUnitDecl;
class TypeDecl;
@@ -75,6 +76,8 @@ namespace clang {
/// ASTContext - This class holds long-lived AST nodes (such as types and
/// decls) that can be referred to throughout the semantic analysis of a file.
class ASTContext {
+ ASTContext &this_() { return *this; }
+
std::vector<Type*> Types;
llvm::FoldingSet<ExtQuals> ExtQualNodes;
llvm::FoldingSet<ComplexType> ComplexTypes;
@@ -95,9 +98,12 @@ class ASTContext {
llvm::FoldingSet<DependentDecltypeType> DependentDecltypeTypes;
llvm::FoldingSet<TemplateTypeParmType> TemplateTypeParmTypes;
llvm::FoldingSet<SubstTemplateTypeParmType> SubstTemplateTypeParmTypes;
- llvm::FoldingSet<TemplateSpecializationType> TemplateSpecializationTypes;
+ llvm::ContextualFoldingSet<TemplateSpecializationType, ASTContext&>
+ TemplateSpecializationTypes;
llvm::FoldingSet<ElaboratedType> ElaboratedTypes;
llvm::FoldingSet<DependentNameType> DependentNameTypes;
+ llvm::ContextualFoldingSet<DependentTemplateSpecializationType, ASTContext&>
+ DependentTemplateSpecializationTypes;
llvm::FoldingSet<ObjCObjectTypeImpl> ObjCObjectTypes;
llvm::FoldingSet<ObjCObjectPointerType> ObjCObjectPointerTypes;
@@ -122,6 +128,30 @@ class ASTContext {
/// \brief Mapping from ObjCContainers to their ObjCImplementations.
llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*> ObjCImpls;
+ /// \brief Representation of a "canonical" template template parameter that
+ /// is used in canonical template names.
+ class CanonicalTemplateTemplateParm : public llvm::FoldingSetNode {
+ TemplateTemplateParmDecl *Parm;
+
+ public:
+ CanonicalTemplateTemplateParm(TemplateTemplateParmDecl *Parm)
+ : Parm(Parm) { }
+
+ TemplateTemplateParmDecl *getParam() const { return Parm; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Parm); }
+
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ TemplateTemplateParmDecl *Parm);
+ };
+ llvm::FoldingSet<CanonicalTemplateTemplateParm> CanonTemplateTemplateParms;
+
+ TemplateTemplateParmDecl *getCanonicalTemplateTemplateParmDecl(
+ TemplateTemplateParmDecl *TTP);
+
+ /// \brief Whether __[u]int128_t identifier is installed.
+ bool IsInt128Installed;
+
/// BuiltinVaListType - built-in va list type.
/// This is initially null and set by Sema::LazilyCreateBuiltin when
/// a builtin that takes a valist is encountered.
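
CanonicalTemplateTemplateParm above follows the standard llvm::FoldingSet protocol: a node describes its identity through Profile(FoldingSetNodeID&), and the set uses that ID both to look up an existing node and to insert a new one at the computed position. The protocol in miniature, with an invented node type:

    #include "llvm/ADT/FoldingSet.h"
    using namespace llvm;

    class WidthNode : public FoldingSetNode {
      unsigned Width;
    public:
      explicit WidthNode(unsigned W) : Width(W) {}
      // FoldingSet calls this to compute the node's identity.
      void Profile(FoldingSetNodeID &ID) { ID.AddInteger(Width); }
    };

    // Return the unique node for W, creating it on first request.
    WidthNode *getOrCreate(FoldingSet<WidthNode> &Set, unsigned W) {
      FoldingSetNodeID ID;
      ID.AddInteger(W);
      void *InsertPos = 0;
      if (WidthNode *N = Set.FindNodeOrInsertPos(ID, InsertPos))
        return N; // already uniqued
      WidthNode *N = new WidthNode(W);
      Set.InsertNode(N, InsertPos);
      return N;
    }
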
@@ -162,6 +192,8 @@ class ASTContext {
/// \brief Type for the Block descriptor for Blocks CodeGen.
RecordDecl *BlockDescriptorExtendedType;
+ TypeSourceInfo NullTypeSourceInfo;
+
/// \brief Keeps track of all declaration attributes.
///
/// Since so few decls have attrs, we keep them in a hash map instead of
@@ -302,7 +334,8 @@ public:
/// \brief Note that the static data member \p Inst is an instantiation of
/// the static data member template \p Tmpl of a class template.
void setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
- TemplateSpecializationKind TSK);
+ TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation = SourceLocation());
/// \brief If the given using decl is an instantiation of a
/// (possibly unresolved) using decl from a template instantiation,
@@ -329,6 +362,8 @@ public:
overridden_cxx_method_iterator
overridden_methods_end(const CXXMethodDecl *Method) const;
+ unsigned overridden_methods_size(const CXXMethodDecl *Method) const;
+
/// \brief Note that the given C++ \p Method overrides the given \p
/// Overridden method.
void addOverriddenMethod(const CXXMethodDecl *Method,
@@ -534,7 +569,7 @@ public:
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType getVectorType(QualType VectorType, unsigned NumElts,
- bool AltiVec, bool IsPixel);
+ VectorType::AltiVecSpecific AltiVecSpec);
/// getExtVectorType - Return the unique reference to an extended vector type
/// of the specified element type and size. VectorType must be a built-in
@@ -585,7 +620,11 @@ public:
/// getTypedefType - Return the unique reference to the type for the
/// specified typename decl.
- QualType getTypedefType(const TypedefDecl *Decl);
+ QualType getTypedefType(const TypedefDecl *Decl, QualType Canon = QualType());
+
+ QualType getRecordType(const RecordDecl *Decl);
+
+ QualType getEnumType(const EnumDecl *Decl);
QualType getInjectedClassNameType(CXXRecordDecl *Decl, QualType TST);
@@ -599,13 +638,15 @@ public:
QualType getTemplateSpecializationType(TemplateName T,
const TemplateArgument *Args,
unsigned NumArgs,
- QualType Canon = QualType(),
- bool IsCurrentInstantiation = false);
+ QualType Canon = QualType());
+
+ QualType getCanonicalTemplateSpecializationType(TemplateName T,
+ const TemplateArgument *Args,
+ unsigned NumArgs);
QualType getTemplateSpecializationType(TemplateName T,
const TemplateArgumentListInfo &Args,
- QualType Canon = QualType(),
- bool IsCurrentInstantiation = false);
+ QualType Canon = QualType());
TypeSourceInfo *
getTemplateSpecializationTypeInfo(TemplateName T, SourceLocation TLoc,
@@ -619,10 +660,16 @@ public:
NestedNameSpecifier *NNS,
const IdentifierInfo *Name,
QualType Canon = QualType());
- QualType getDependentNameType(ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS,
- const TemplateSpecializationType *TemplateId,
- QualType Canon = QualType());
+
+ QualType getDependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ const TemplateArgumentListInfo &Args);
+ QualType getDependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args);
QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl);
@@ -780,6 +827,10 @@ public:
/// purpose in characters.
CharUnits getObjCEncodingTypeSize(QualType t);
+ /// \brief Whether __[u]int128_t identifier is installed.
+ bool isInt128Installed() const { return IsInt128Installed; }
+ void setInt128Installed() { IsInt128Installed = true; }
+
/// This setter/getter represents the ObjC 'id' type. It is setup lazily, by
/// Sema. id is always a (typedef for a) pointer type, a pointer to a struct.
QualType getObjCIdType() const { return ObjCIdTypedefType; }
@@ -943,8 +994,9 @@ public:
const ASTRecordLayout &
getASTObjCImplementationLayout(const ObjCImplementationDecl *D);
- /// getKeyFunction - Get the key function for the given record decl.
- /// The key function is, according to the Itanium C++ ABI section 5.2.3:
+ /// getKeyFunction - Get the key function for the given record decl, or NULL
+ /// if there isn't one. The key function is, according to the Itanium C++ ABI
+ /// section 5.2.3:
///
/// ...the first non-pure virtual function that is not inline at the point
/// of class definition.
@@ -1013,6 +1065,8 @@ public:
return UnqualT1 == UnqualT2;
}
+ bool UnwrapSimilarPointerTypes(QualType &T1, QualType &T2);
+
/// \brief Retrieves the "canonical" declaration of
/// \brief Retrieves the "canonical" nested name specifier for a
@@ -1272,6 +1326,8 @@ public:
TypeSourceInfo *
getTrivialTypeSourceInfo(QualType T, SourceLocation Loc = SourceLocation());
+ TypeSourceInfo *getNullTypeSourceInfo() { return &NullTypeSourceInfo; }
+
/// \brief Add a deallocation callback that will be invoked when the
/// ASTContext is destroyed.
///
@@ -1280,6 +1336,38 @@ public:
/// \brief Data Pointer data that will be provided to the callback function
/// when it is called.
void AddDeallocation(void (*Callback)(void*), void *Data);
+
+ //===--------------------------------------------------------------------===//
+ // Statistics
+ //===--------------------------------------------------------------------===//
+
+ /// \brief The number of implicitly-declared default constructors.
+ static unsigned NumImplicitDefaultConstructors;
+
+ /// \brief The number of implicitly-declared default constructors for
+ /// which declarations were built.
+ static unsigned NumImplicitDefaultConstructorsDeclared;
+
+ /// \brief The number of implicitly-declared copy constructors.
+ static unsigned NumImplicitCopyConstructors;
+
+ /// \brief The number of implicitly-declared copy constructors for
+ /// which declarations were built.
+ static unsigned NumImplicitCopyConstructorsDeclared;
+
+ /// \brief The number of implicitly-declared copy assignment operators.
+ static unsigned NumImplicitCopyAssignmentOperators;
+
+ /// \brief The number of implicitly-declared copy assignment operators for
+ /// which declarations were built.
+ static unsigned NumImplicitCopyAssignmentOperatorsDeclared;
+
+ /// \brief The number of implicitly-declared destructors.
+ static unsigned NumImplicitDestructors;
+
+ /// \brief The number of implicitly-declared destructors for which
+ /// declarations were built.
+ static unsigned NumImplicitDestructorsDeclared;
private:
ASTContext(const ASTContext&); // DO NOT IMPLEMENT
@@ -1308,6 +1396,11 @@ private:
// by DeclContext objects. This probably should not be in ASTContext,
// but we include it here so that ASTContext can quickly deallocate them.
llvm::PointerIntPair<StoredDeclsMap*,1> LastSDM;
+
+ /// \brief A counter used to uniquely identify "blocks".
+ unsigned int UniqueBlockByRefTypeID;
+ unsigned int UniqueBlockParmTypeID;
+
friend class DeclContext;
friend class DeclarationNameTable;
void ReleaseDeclContextMaps();
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Attr.h b/contrib/llvm/tools/clang/include/clang/AST/Attr.h
index 3240e50..9faa62e 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Attr.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Attr.h
@@ -16,6 +16,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/ADT/StringRef.h"
+#include "clang/Basic/AttrKinds.h"
#include <cassert>
#include <cstring>
#include <algorithm>
@@ -25,6 +26,7 @@ namespace clang {
class ASTContext;
class IdentifierInfo;
class ObjCInterfaceDecl;
+ class Expr;
}
// Defined in ASTContext.h
@@ -41,75 +43,9 @@ namespace clang {
/// Attr - This represents one attribute.
class Attr {
-public:
- enum Kind {
- Alias,
- Aligned,
- AlignMac68k,
- AlwaysInline,
- AnalyzerNoReturn, // Clang-specific.
- Annotate,
- AsmLabel, // Represent GCC asm label extension.
- BaseCheck,
- Blocks,
- CDecl,
- Cleanup,
- Const,
- Constructor,
- Deprecated,
- Destructor,
- FastCall,
- Final,
- Format,
- FormatArg,
- GNUInline,
- Hiding,
- IBOutletKind, // Clang-specific. Use "Kind" suffix to not conflict w/ macro.
- IBOutletCollectionKind, // Clang-specific.
- IBActionKind, // Clang-specific. Use "Kind" suffix to not conflict w/ macro.
- Malloc,
- MaxFieldAlignment,
- NoDebug,
- NoInline,
- NonNull,
- NoReturn,
- NoThrow,
- ObjCException,
- ObjCNSObject,
- Override,
- CFReturnsRetained, // Clang/Checker-specific.
- CFReturnsNotRetained, // Clang/Checker-specific.
- NSReturnsRetained, // Clang/Checker-specific.
- NSReturnsNotRetained, // Clang/Checker-specific.
- Overloadable, // Clang-specific
- Packed,
- Pure,
- Regparm,
- ReqdWorkGroupSize, // OpenCL-specific
- Section,
- Sentinel,
- StdCall,
- ThisCall,
- TransparentUnion,
- Unavailable,
- Unused,
- Used,
- Visibility,
- WarnUnusedResult,
- Weak,
- WeakImport,
- WeakRef,
-
- FIRST_TARGET_ATTRIBUTE,
- DLLExport,
- DLLImport,
- MSP430Interrupt,
- X86ForceAlignArgPointer
- };
-
private:
Attr *Next;
- Kind AttrKind;
+ attr::Kind AttrKind;
bool Inherited : 1;
protected:
@@ -122,7 +58,7 @@ protected:
}
protected:
- Attr(Kind AK) : Next(0), AttrKind(AK), Inherited(false) {}
+ Attr(attr::Kind AK) : Next(0), AttrKind(AK), Inherited(false) {}
virtual ~Attr() {
assert(Next == 0 && "Destroy didn't work");
}
@@ -133,7 +69,7 @@ public:
/// declarations.
virtual bool isMerged() const { return true; }
- Kind getKind() const { return AttrKind; }
+ attr::Kind getKind() const { return AttrKind; }
Attr *getNext() { return Next; }
const Attr *getNext() const { return Next; }
@@ -163,13 +99,15 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Attr *) { return true; }
};
+
+#include "clang/AST/Attrs.inc"
class AttrWithString : public Attr {
private:
const char *Str;
unsigned StrLen;
protected:
- AttrWithString(Attr::Kind AK, ASTContext &C, llvm::StringRef s);
+ AttrWithString(attr::Kind AK, ASTContext &C, llvm::StringRef s);
llvm::StringRef getString() const { return llvm::StringRef(Str, StrLen); }
void ReplaceString(ASTContext &C, llvm::StringRef newS);
public:
@@ -179,9 +117,9 @@ public:
#define DEF_SIMPLE_ATTR(ATTR) \
class ATTR##Attr : public Attr { \
public: \
- ATTR##Attr() : Attr(ATTR) {} \
+ ATTR##Attr() : Attr(attr::ATTR) {} \
virtual Attr *clone(ASTContext &C) const; \
- static bool classof(const Attr *A) { return A->getKind() == ATTR; } \
+ static bool classof(const Attr *A) { return A->getKind() == attr::ATTR; } \
static bool classof(const ATTR##Attr *A) { return true; } \
}
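
DEF_SIMPLE_ATTR stamps out one trivial subclass per attribute, and each carries the classof hooks that drive LLVM-style isa<>/dyn_cast<>: classification is a stored kind tag compared in classof, with no C++ RTTI involved. The idiom reduced to its essentials (invented types, not the Clang hierarchy):

    struct Base {
      enum Kind { K_Foo, K_Bar };
      explicit Base(Kind K) : TheKind(K) {}
      Kind getKind() const { return TheKind; }
    private:
      Kind TheKind;
    };

    struct Foo : Base {
      Foo() : Base(K_Foo) {}
      // isa<Foo>(B) and dyn_cast<Foo>(B) reduce to this check.
      static bool classof(const Base *B) { return B->getKind() == K_Foo; }
    };
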
@@ -194,7 +132,7 @@ class MaxFieldAlignmentAttr : public Attr {
public:
MaxFieldAlignmentAttr(unsigned alignment)
- : Attr(MaxFieldAlignment), Alignment(alignment) {}
+ : Attr(attr::MaxFieldAlignment), Alignment(alignment) {}
/// getAlignment - The specified alignment in bits.
unsigned getAlignment() const { return Alignment; }
@@ -203,36 +141,58 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Attr *A) {
- return A->getKind() == MaxFieldAlignment;
+ return A->getKind() == attr::MaxFieldAlignment;
}
static bool classof(const MaxFieldAlignmentAttr *A) { return true; }
};
DEF_SIMPLE_ATTR(AlignMac68k);
+/// \brief Attribute for specifying the alignment of a variable or type.
+///
+/// This node will either contain the precise Alignment (in bits, not bytes!)
+/// or will contain the expression for the alignment attribute in the case of
+/// a dependent expression within a class or function template. At template
+/// instantiation time these are transformed into concrete attributes.
class AlignedAttr : public Attr {
unsigned Alignment;
+ Expr *AlignmentExpr;
public:
AlignedAttr(unsigned alignment)
- : Attr(Aligned), Alignment(alignment) {}
+ : Attr(attr::Aligned), Alignment(alignment), AlignmentExpr(0) {}
+ AlignedAttr(Expr *E)
+ : Attr(attr::Aligned), Alignment(0), AlignmentExpr(E) {}
+
+ /// getAlignmentExpr - Get a dependent alignment expression if one is present.
+ Expr *getAlignmentExpr() const {
+ return AlignmentExpr;
+ }
+
+ /// isDependent - Is the alignment a dependent expression?
+ bool isDependent() const {
+ return getAlignmentExpr();
+ }
+
+ /// getAlignment - The specified alignment in bits. Requires !isDependent().
+ unsigned getAlignment() const {
+ assert(!isDependent() && "Cannot get a value dependent alignment");
+ return Alignment;
+ }
- /// getAlignment - The specified alignment in bits.
- unsigned getAlignment() const { return Alignment; }
-
/// getMaxAlignment - Get the maximum alignment of attributes on this list.
unsigned getMaxAlignment() const {
const AlignedAttr *Next = getNext<AlignedAttr>();
if (Next)
- return std::max(Next->getMaxAlignment(), Alignment);
+ return std::max(Next->getMaxAlignment(), getAlignment());
else
- return Alignment;
+ return getAlignment();
}
virtual Attr* clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
static bool classof(const Attr *A) {
- return A->getKind() == Aligned;
+ return A->getKind() == attr::Aligned;
}
static bool classof(const AlignedAttr *A) { return true; }
};
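
The reworked AlignedAttr holds two mutually exclusive states: a resolved alignment in bits, or a still-dependent Expr* that only becomes a number at template instantiation, with a null AlignmentExpr discriminating between the two. That discriminated-by-null pattern, reduced to a sketch with stand-in types:

    #include <cassert>

    struct Expr; // stand-in for a dependent expression

    class Alignment {
      unsigned Bits;   // meaningful only when Dependent is null
      Expr *Dependent; // non-null => not yet resolvable
    public:
      explicit Alignment(unsigned B) : Bits(B), Dependent(0) {}
      explicit Alignment(Expr *E) : Bits(0), Dependent(E) {}

      bool isDependent() const { return Dependent != 0; }
      unsigned getBits() const {
        assert(!isDependent() && "alignment not resolved yet");
        return Bits;
      }
    };
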
@@ -240,7 +200,7 @@ public:
class AnnotateAttr : public AttrWithString {
public:
AnnotateAttr(ASTContext &C, llvm::StringRef ann)
- : AttrWithString(Annotate, C, ann) {}
+ : AttrWithString(attr::Annotate, C, ann) {}
llvm::StringRef getAnnotation() const { return getString(); }
@@ -248,7 +208,7 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Attr *A) {
- return A->getKind() == Annotate;
+ return A->getKind() == attr::Annotate;
}
static bool classof(const AnnotateAttr *A) { return true; }
};
@@ -256,7 +216,7 @@ public:
class AsmLabelAttr : public AttrWithString {
public:
AsmLabelAttr(ASTContext &C, llvm::StringRef L)
- : AttrWithString(AsmLabel, C, L) {}
+ : AttrWithString(attr::AsmLabel, C, L) {}
llvm::StringRef getLabel() const { return getString(); }
@@ -264,7 +224,7 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Attr *A) {
- return A->getKind() == AsmLabel;
+ return A->getKind() == attr::AsmLabel;
}
static bool classof(const AsmLabelAttr *A) { return true; }
};
@@ -274,54 +234,56 @@ DEF_SIMPLE_ATTR(AlwaysInline);
class AliasAttr : public AttrWithString {
public:
AliasAttr(ASTContext &C, llvm::StringRef aliasee)
- : AttrWithString(Alias, C, aliasee) {}
+ : AttrWithString(attr::Alias, C, aliasee) {}
llvm::StringRef getAliasee() const { return getString(); }
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
- static bool classof(const Attr *A) { return A->getKind() == Alias; }
+ static bool classof(const Attr *A) { return A->getKind() == attr::Alias; }
static bool classof(const AliasAttr *A) { return true; }
};
class ConstructorAttr : public Attr {
int priority;
public:
- ConstructorAttr(int p) : Attr(Constructor), priority(p) {}
+ ConstructorAttr(int p) : Attr(attr::Constructor), priority(p) {}
int getPriority() const { return priority; }
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
- static bool classof(const Attr *A) { return A->getKind() == Constructor; }
+ static bool classof(const Attr *A)
+ { return A->getKind() == attr::Constructor; }
static bool classof(const ConstructorAttr *A) { return true; }
};
class DestructorAttr : public Attr {
int priority;
public:
- DestructorAttr(int p) : Attr(Destructor), priority(p) {}
+ DestructorAttr(int p) : Attr(attr::Destructor), priority(p) {}
int getPriority() const { return priority; }
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
- static bool classof(const Attr *A) { return A->getKind() == Destructor; }
+ static bool classof(const Attr *A)
+ { return A->getKind() == attr::Destructor; }
static bool classof(const DestructorAttr *A) { return true; }
};
class IBOutletAttr : public Attr {
public:
- IBOutletAttr() : Attr(IBOutletKind) {}
+ IBOutletAttr() : Attr(attr::IBOutlet) {}
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
static bool classof(const Attr *A) {
- return A->getKind() == IBOutletKind;
+ return A->getKind() == attr::IBOutlet;
}
static bool classof(const IBOutletAttr *A) { return true; }
};
@@ -330,7 +292,7 @@ class IBOutletCollectionAttr : public Attr {
const ObjCInterfaceDecl *D;
public:
IBOutletCollectionAttr(const ObjCInterfaceDecl *d = 0)
- : Attr(IBOutletCollectionKind), D(d) {}
+ : Attr(attr::IBOutletCollection), D(d) {}
const ObjCInterfaceDecl *getClass() const { return D; }
@@ -338,35 +300,35 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Attr *A) {
- return A->getKind() == IBOutletCollectionKind;
+ return A->getKind() == attr::IBOutletCollection;
}
static bool classof(const IBOutletCollectionAttr *A) { return true; }
};
class IBActionAttr : public Attr {
public:
- IBActionAttr() : Attr(IBActionKind) {}
+ IBActionAttr() : Attr(attr::IBAction) {}
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
static bool classof(const Attr *A) {
- return A->getKind() == IBActionKind;
+ return A->getKind() == attr::IBAction;
}
static bool classof(const IBActionAttr *A) { return true; }
};
DEF_SIMPLE_ATTR(AnalyzerNoReturn);
DEF_SIMPLE_ATTR(Deprecated);
-DEF_SIMPLE_ATTR(Final);
DEF_SIMPLE_ATTR(GNUInline);
DEF_SIMPLE_ATTR(Malloc);
DEF_SIMPLE_ATTR(NoReturn);
+DEF_SIMPLE_ATTR(NoInstrumentFunction);
class SectionAttr : public AttrWithString {
public:
SectionAttr(ASTContext &C, llvm::StringRef N)
- : AttrWithString(Section, C, N) {}
+ : AttrWithString(attr::Section, C, N) {}
llvm::StringRef getName() const { return getString(); }
@@ -374,7 +336,7 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Attr *A) {
- return A->getKind() == Section;
+ return A->getKind() == attr::Section;
}
static bool classof(const SectionAttr *A) { return true; }
};
@@ -408,7 +370,7 @@ public:
virtual Attr *clone(ASTContext &C) const;
- static bool classof(const Attr *A) { return A->getKind() == NonNull; }
+ static bool classof(const Attr *A) { return A->getKind() == attr::NonNull; }
static bool classof(const NonNullAttr *A) { return true; }
};
@@ -416,7 +378,7 @@ class FormatAttr : public AttrWithString {
int formatIdx, firstArg;
public:
FormatAttr(ASTContext &C, llvm::StringRef type, int idx, int first)
- : AttrWithString(Format, C, type), formatIdx(idx), firstArg(first) {}
+ : AttrWithString(attr::Format, C, type), formatIdx(idx), firstArg(first) {}
llvm::StringRef getType() const { return getString(); }
void setType(ASTContext &C, llvm::StringRef type);
@@ -426,27 +388,27 @@ public:
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
- static bool classof(const Attr *A) { return A->getKind() == Format; }
+ static bool classof(const Attr *A) { return A->getKind() == attr::Format; }
static bool classof(const FormatAttr *A) { return true; }
};
class FormatArgAttr : public Attr {
int formatIdx;
public:
- FormatArgAttr(int idx) : Attr(FormatArg), formatIdx(idx) {}
+ FormatArgAttr(int idx) : Attr(attr::FormatArg), formatIdx(idx) {}
int getFormatIdx() const { return formatIdx; }
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
- static bool classof(const Attr *A) { return A->getKind() == FormatArg; }
+ static bool classof(const Attr *A) { return A->getKind() == attr::FormatArg; }
static bool classof(const FormatArgAttr *A) { return true; }
};
class SentinelAttr : public Attr {
int sentinel, NullPos;
public:
- SentinelAttr(int sentinel_val, int nullPos) : Attr(Sentinel),
+ SentinelAttr(int sentinel_val, int nullPos) : Attr(attr::Sentinel),
sentinel(sentinel_val), NullPos(nullPos) {}
int getSentinel() const { return sentinel; }
int getNullPos() const { return NullPos; }
@@ -454,7 +416,7 @@ public:
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
- static bool classof(const Attr *A) { return A->getKind() == Sentinel; }
+ static bool classof(const Attr *A) { return A->getKind() == attr::Sentinel; }
static bool classof(const SentinelAttr *A) { return true; }
};
@@ -469,7 +431,7 @@ public:
private:
VisibilityTypes VisibilityType;
public:
- VisibilityAttr(VisibilityTypes v) : Attr(Visibility),
+ VisibilityAttr(VisibilityTypes v) : Attr(attr::Visibility),
VisibilityType(v) {}
VisibilityTypes getVisibility() const { return VisibilityType; }
@@ -477,7 +439,8 @@ public:
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
- static bool classof(const Attr *A) { return A->getKind() == Visibility; }
+ static bool classof(const Attr *A)
+ { return A->getKind() == attr::Visibility; }
static bool classof(const VisibilityAttr *A) { return true; }
};
@@ -491,13 +454,14 @@ DEF_SIMPLE_ATTR(ObjCException);
class OverloadableAttr : public Attr {
public:
- OverloadableAttr() : Attr(Overloadable) { }
+ OverloadableAttr() : Attr(attr::Overloadable) { }
virtual bool isMerged() const { return false; }
virtual Attr *clone(ASTContext &C) const;
- static bool classof(const Attr *A) { return A->getKind() == Overloadable; }
+ static bool classof(const Attr *A)
+ { return A->getKind() == attr::Overloadable; }
static bool classof(const OverloadableAttr *) { return true; }
};
@@ -509,14 +473,14 @@ public:
private:
BlocksAttrTypes BlocksAttrType;
public:
- BlocksAttr(BlocksAttrTypes t) : Attr(Blocks), BlocksAttrType(t) {}
+ BlocksAttr(BlocksAttrTypes t) : Attr(attr::Blocks), BlocksAttrType(t) {}
BlocksAttrTypes getType() const { return BlocksAttrType; }
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
- static bool classof(const Attr *A) { return A->getKind() == Blocks; }
+ static bool classof(const Attr *A) { return A->getKind() == attr::Blocks; }
static bool classof(const BlocksAttr *A) { return true; }
};
@@ -526,14 +490,14 @@ class CleanupAttr : public Attr {
FunctionDecl *FD;
public:
- CleanupAttr(FunctionDecl *fd) : Attr(Cleanup), FD(fd) {}
+ CleanupAttr(FunctionDecl *fd) : Attr(attr::Cleanup), FD(fd) {}
const FunctionDecl *getFunctionDecl() const { return FD; }
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
- static bool classof(const Attr *A) { return A->getKind() == Cleanup; }
+ static bool classof(const Attr *A) { return A->getKind() == attr::Cleanup; }
static bool classof(const CleanupAttr *A) { return true; }
};
@@ -545,14 +509,14 @@ class RegparmAttr : public Attr {
unsigned NumParams;
public:
- RegparmAttr(unsigned np) : Attr(Regparm), NumParams(np) {}
+ RegparmAttr(unsigned np) : Attr(attr::Regparm), NumParams(np) {}
unsigned getNumParams() const { return NumParams; }
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
- static bool classof(const Attr *A) { return A->getKind() == Regparm; }
+ static bool classof(const Attr *A) { return A->getKind() == attr::Regparm; }
static bool classof(const RegparmAttr *A) { return true; }
};
@@ -560,7 +524,7 @@ class ReqdWorkGroupSizeAttr : public Attr {
unsigned X, Y, Z;
public:
ReqdWorkGroupSizeAttr(unsigned X, unsigned Y, unsigned Z)
- : Attr(ReqdWorkGroupSize), X(X), Y(Y), Z(Z) {}
+ : Attr(attr::ReqdWorkGroupSize), X(X), Y(Y), Z(Z) {}
unsigned getXDim() const { return X; }
unsigned getYDim() const { return Y; }
@@ -570,22 +534,34 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Attr *A) {
- return A->getKind() == ReqdWorkGroupSize;
+ return A->getKind() == attr::ReqdWorkGroupSize;
}
static bool classof(const ReqdWorkGroupSizeAttr *A) { return true; }
};
+class InitPriorityAttr : public Attr {
+ unsigned Priority;
+public:
+ InitPriorityAttr(unsigned priority)
+ : Attr(attr::InitPriority), Priority(priority) {}
+
+ virtual void Destroy(ASTContext &C) { Attr::Destroy(C); }
+
+ unsigned getPriority() const { return Priority; }
+
+ virtual Attr *clone(ASTContext &C) const;
+
+ static bool classof(const Attr *A)
+ { return A->getKind() == attr::InitPriority; }
+ static bool classof(const InitPriorityAttr *A) { return true; }
+};
+
// Checker-specific attributes.
DEF_SIMPLE_ATTR(CFReturnsNotRetained);
DEF_SIMPLE_ATTR(CFReturnsRetained);
DEF_SIMPLE_ATTR(NSReturnsNotRetained);
DEF_SIMPLE_ATTR(NSReturnsRetained);
-// C++0x member checking attributes.
-DEF_SIMPLE_ATTR(BaseCheck);
-DEF_SIMPLE_ATTR(Hiding);
-DEF_SIMPLE_ATTR(Override);
-
// Target-specific attributes
DEF_SIMPLE_ATTR(DLLImport);
DEF_SIMPLE_ATTR(DLLExport);
@@ -594,14 +570,15 @@ class MSP430InterruptAttr : public Attr {
unsigned Number;
public:
- MSP430InterruptAttr(unsigned n) : Attr(MSP430Interrupt), Number(n) {}
+ MSP430InterruptAttr(unsigned n) : Attr(attr::MSP430Interrupt), Number(n) {}
unsigned getNumber() const { return Number; }
virtual Attr *clone(ASTContext &C) const;
// Implement isa/cast/dyncast/etc.
- static bool classof(const Attr *A) { return A->getKind() == MSP430Interrupt; }
+ static bool classof(const Attr *A)
+ { return A->getKind() == attr::MSP430Interrupt; }
static bool classof(const MSP430InterruptAttr *A) { return true; }
};
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CMakeLists.txt b/contrib/llvm/tools/clang/include/clang/AST/CMakeLists.txt
index c24ea06..3b09071 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/include/clang/AST/CMakeLists.txt
@@ -1,5 +1,18 @@
-set(LLVM_TARGET_DEFINITIONS StmtNodes.td)
+set(LLVM_TARGET_DEFINITIONS ../Basic/Attr.td)
+tablegen(Attrs.inc
+ -gen-clang-attr-classes
+ -I ${CMAKE_CURRENT_SOURCE_DIR}/../../)
+add_custom_target(ClangAttrClasses
+ DEPENDS Attrs.inc)
+
+set(LLVM_TARGET_DEFINITIONS ../Basic/StmtNodes.td)
tablegen(StmtNodes.inc
-gen-clang-stmt-nodes)
add_custom_target(ClangStmtNodes
DEPENDS StmtNodes.inc)
+
+set(LLVM_TARGET_DEFINITIONS ../Basic/DeclNodes.td)
+tablegen(DeclNodes.inc
+ -gen-clang-decl-nodes)
+add_custom_target(ClangDeclNodes
+ DEPENDS DeclNodes.inc)
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h b/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
index 4afb81d..9f97fd8 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
@@ -247,6 +247,7 @@ public:
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isCharType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isWideCharType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isIntegralType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isIntegralOrEnumerationType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isRealFloatingType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isComplexType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isAnyComplexType)
@@ -269,8 +270,10 @@ public:
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isNullPtrType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isDependentType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isOverloadableType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isArrayType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasPointerRepresentation)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasObjCPointerRepresentation)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasFloatingRepresentation)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isPromotableIntegerType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isSignedIntegerType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isUnsignedIntegerType)
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Decl.h b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
index 7d5b66e..39cd51f 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Decl.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
@@ -28,6 +28,8 @@ class FunctionTemplateDecl;
class Stmt;
class CompoundStmt;
class StringLiteral;
+class NestedNameSpecifier;
+class TemplateParameterList;
class TemplateArgumentList;
class MemberSpecializationInfo;
class FunctionTemplateSpecializationInfo;
@@ -216,7 +218,7 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const NamedDecl *D) { return true; }
- static bool classofKind(Kind K) { return K >= NamedFirst && K <= NamedLast; }
+ static bool classofKind(Kind K) { return K >= firstNamed && K <= lastNamed; }
};
inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
@@ -342,6 +344,9 @@ public:
static NamespaceDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<NamespaceDecl *>(const_cast<DeclContext*>(DC));
}
+
+ friend class PCHDeclReader;
+ friend class PCHDeclWriter;
};
/// ValueDecl - Represent the declaration of a variable (in which case it is
@@ -361,7 +366,38 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const ValueDecl *D) { return true; }
- static bool classofKind(Kind K) { return K >= ValueFirst && K <= ValueLast; }
+ static bool classofKind(Kind K) { return K >= firstValue && K <= lastValue; }
+};
+
+/// QualifierInfo - A struct with extended info about a syntactic
+/// name qualifier, to be used for the case of out-of-line declarations.
+struct QualifierInfo {
+ /// NNS - The syntactic name qualifier.
+ NestedNameSpecifier *NNS;
+ /// NNSRange - The source range for the qualifier.
+ SourceRange NNSRange;
+ /// NumTemplParamLists - The number of template parameter lists
+ /// that were matched against the template-ids occurring within the NNS.
+ unsigned NumTemplParamLists;
+ /// TemplParamLists - A new[]'d array of size NumTemplParamLists,
+ /// containing pointers to the matched template parameter lists.
+ TemplateParameterList** TemplParamLists;
+
+ /// Default constructor.
+ QualifierInfo()
+ : NNS(0), NNSRange(), NumTemplParamLists(0), TemplParamLists(0) {}
+ /// setTemplateParameterListsInfo - Sets info about matched template
+ /// parameter lists.
+ void setTemplateParameterListsInfo(ASTContext &Context,
+ unsigned NumTPLists,
+ TemplateParameterList **TPLists);
+
+ void Destroy(ASTContext &Context);
+
+private:
+ // Copy constructor and copy assignment are disabled.
+ QualifierInfo(const QualifierInfo&);
+ QualifierInfo& operator=(const QualifierInfo&);
};
/// \brief Represents a ValueDecl that came out of a declarator.
@@ -369,10 +405,8 @@ public:
class DeclaratorDecl : public ValueDecl {
// A struct representing both a TInfo and a syntactic qualifier,
// to be used for the (uncommon) case of out-of-line declarations.
- struct ExtInfo {
+ struct ExtInfo : public QualifierInfo {
TypeSourceInfo *TInfo;
- NestedNameSpecifier *NNS;
- SourceRange NNSRange;
};
llvm::PointerUnion<TypeSourceInfo*, ExtInfo*> DeclInfo;
@@ -392,32 +426,55 @@ public:
TypeSourceInfo *getTypeSourceInfo() const {
return hasExtInfo()
- ? DeclInfo.get<ExtInfo*>()->TInfo
+ ? getExtInfo()->TInfo
: DeclInfo.get<TypeSourceInfo*>();
}
void setTypeSourceInfo(TypeSourceInfo *TI) {
if (hasExtInfo())
- DeclInfo.get<ExtInfo*>()->TInfo = TI;
+ getExtInfo()->TInfo = TI;
else
DeclInfo = TI;
}
+ /// getInnerLocStart - Return SourceLocation representing start of source
+ /// range ignoring outer template declarations.
+ virtual SourceLocation getInnerLocStart() const { return getLocation(); }
+
+ /// getOuterLocStart - Return SourceLocation representing start of source
+ /// range taking into account any outer template declarations.
+ SourceLocation getOuterLocStart() const;
+ SourceRange getSourceRange() const {
+ return SourceRange(getOuterLocStart(), getLocation());
+ }
+
NestedNameSpecifier *getQualifier() const {
- return hasExtInfo() ? DeclInfo.get<ExtInfo*>()->NNS : 0;
+ return hasExtInfo() ? getExtInfo()->NNS : 0;
}
SourceRange getQualifierRange() const {
- return hasExtInfo() ? DeclInfo.get<ExtInfo*>()->NNSRange : SourceRange();
+ return hasExtInfo() ? getExtInfo()->NNSRange : SourceRange();
}
void setQualifierInfo(NestedNameSpecifier *Qualifier,
SourceRange QualifierRange);
+ unsigned getNumTemplateParameterLists() const {
+ return hasExtInfo() ? getExtInfo()->NumTemplParamLists : 0;
+ }
+ TemplateParameterList *getTemplateParameterList(unsigned index) const {
+ assert(index < getNumTemplateParameterLists());
+ return getExtInfo()->TemplParamLists[index];
+ }
+ void setTemplateParameterListsInfo(ASTContext &Context, unsigned NumTPLists,
+ TemplateParameterList **TPLists) {
+ getExtInfo()->setTemplateParameterListsInfo(Context, NumTPLists, TPLists);
+ }
+
SourceLocation getTypeSpecStartLoc() const;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const DeclaratorDecl *D) { return true; }
static bool classofKind(Kind K) {
- return K >= DeclaratorFirst && K <= DeclaratorLast;
+ return K >= firstDeclarator && K <= lastDeclarator;
}
};
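// Illustrative sketch (hypothetical, not part of this patch): walking the
// template parameter lists that DeclaratorDecl now records for out-of-line
// declarations such as `template<class T> int S<T>::x = 0;`. `DD` stands
// for any DeclaratorDecl*.
static unsigned countOuterTemplateParams(const clang::DeclaratorDecl *DD) {
  unsigned N = 0;
  for (unsigned I = 0, E = DD->getNumTemplateParameterLists(); I != E; ++I)
    N += DD->getTemplateParameterList(I)->size();  // parameters in each list
  return N;
}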
@@ -555,6 +612,7 @@ public:
virtual void Destroy(ASTContext& C);
virtual ~VarDecl();
+ virtual SourceLocation getInnerLocStart() const;
virtual SourceRange getSourceRange() const;
StorageClass getStorageClass() const { return (StorageClass)SClass; }
@@ -908,7 +966,7 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const VarDecl *D) { return true; }
- static bool classofKind(Kind K) { return K >= VarFirst && K <= VarLast; }
+ static bool classofKind(Kind K) { return K >= firstVar && K <= lastVar; }
};
class ImplicitParamDecl : public VarDecl {
@@ -1063,6 +1121,15 @@ public:
None, Extern, Static, PrivateExtern
};
+ /// \brief The kind of templated function a FunctionDecl can be.
+ enum TemplatedKind {
+ TK_NonTemplate,
+ TK_FunctionTemplate,
+ TK_MemberSpecialization,
+ TK_FunctionTemplateSpecialization,
+ TK_DependentFunctionTemplateSpecialization
+ };
+
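// Illustrative sketch (hypothetical, not part of this patch): the new
// TemplatedKind enumeration classifies a FunctionDecl in one query (see
// getTemplatedKind(), declared further down in this class). `FD` stands for
// any FunctionDecl*.
static bool involvesATemplate(const clang::FunctionDecl *FD) {
  return FD->getTemplatedKind() != clang::FunctionDecl::TK_NonTemplate;
}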
private:
/// ParamInfo - new[]'d array of pointers to VarDecls for the formal
/// parameters of this function. This is null if a prototype or if there are
@@ -1154,17 +1221,31 @@ public:
bool Qualified) const;
virtual SourceRange getSourceRange() const {
- return SourceRange(getLocation(), EndRangeLoc);
+ return SourceRange(getOuterLocStart(), EndRangeLoc);
}
void setLocEnd(SourceLocation E) {
EndRangeLoc = E;
}
+ /// \brief Returns true if the function has a body (definition). The
+ /// function body might be in any of the (re-)declarations of this
+ /// function. The variant that accepts a FunctionDecl pointer will
+ /// set that function declaration to the actual declaration
+ /// containing the body (if there is one).
+ bool hasBody(const FunctionDecl *&Definition) const;
+
+ virtual bool hasBody() const {
+ const FunctionDecl* Definition;
+ return hasBody(Definition);
+ }
+
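// Illustrative sketch (hypothetical, not part of this patch): hasBody()
// answers "is this function defined in some redeclaration?" without forcing
// the body to be deserialized from a PCH, which getBody() would do.
static bool isDefinedSomewhere(const clang::FunctionDecl *FD) {
  const clang::FunctionDecl *Def = 0;
  return FD->hasBody(Def);  // on success, Def names the defining declaration
}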
/// getBody - Retrieve the body (definition) of the function. The
/// function body might be in any of the (re-)declarations of this
/// function. The variant that accepts a FunctionDecl pointer will
/// set that function declaration to the actual declaration
/// containing the body (if there is one).
+ /// NOTE: For checking if there is a body, use hasBody() instead, to avoid
+ /// unnecessary PCH de-serialization of the body.
Stmt *getBody(const FunctionDecl *&Definition) const;
virtual Stmt *getBody() const {
@@ -1301,6 +1382,12 @@ public:
QualType getResultType() const {
return getType()->getAs<FunctionType>()->getResultType();
}
+
+ /// \brief Determine the type of an expression that calls this function.
+ QualType getCallResultType() const {
+ return getType()->getAs<FunctionType>()->getCallResultType(getASTContext());
+ }
+
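// Illustrative sketch (hypothetical, not part of this patch): for a function
// declared `int &f()`, getResultType() is the reference type `int &`, while
// getCallResultType() is the adjusted type `int` that a CallExpr to it has.
static clang::QualType typeOfCallExprTo(const clang::FunctionDecl *FD) {
  return FD->getCallResultType();  // never an lvalue reference type
}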
StorageClass getStorageClass() const { return StorageClass(SClass); }
void setStorageClass(StorageClass SC) { SClass = SC; }
@@ -1355,6 +1442,9 @@ public:
/// X<int>::A is required, it will be instantiated from the
/// declaration returned by getInstantiatedFromMemberFunction().
FunctionDecl *getInstantiatedFromMemberFunction() const;
+
+ /// \brief What kind of templated function this is.
+ TemplatedKind getTemplatedKind() const;
/// \brief If this function is an instantiation of a member function of a
/// class template specialization, retrieves the member specialization
@@ -1437,8 +1527,6 @@ public:
/// \brief Specify that this function declaration is actually a function
/// template specialization.
///
- /// \param Context the AST context in which this function resides.
- ///
/// \param Template the function template that this function template
/// specialization specializes.
///
@@ -1450,11 +1538,53 @@ public:
/// be inserted.
///
/// \param TSK the kind of template specialization this is.
+ ///
+ /// \param TemplateArgsAsWritten location info of template arguments.
+ ///
+ /// \param PointOfInstantiation point at which the function template
+ /// specialization was first instantiated.
void setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
const TemplateArgumentList *TemplateArgs,
void *InsertPos,
TemplateSpecializationKind TSK = TSK_ImplicitInstantiation,
- const TemplateArgumentListInfo *TemplateArgsAsWritten = 0);
+ const TemplateArgumentListInfo *TemplateArgsAsWritten = 0,
+ SourceLocation PointOfInstantiation = SourceLocation());
+
+ /// \brief Specify that this function declaration is actually a function
+ /// template specialization.
+ ///
+ /// \param Template the function template that this function template
+ /// specialization specializes.
+ ///
+ /// \param NumTemplateArgs number of template arguments that produced this
+ /// function template specialization from the template.
+ ///
+ /// \param TemplateArgs array of template arguments that produced this
+ /// function template specialization from the template.
+ ///
+ /// \param TSK the kind of template specialization this is.
+ ///
+ /// \param NumTemplateArgsAsWritten number of template arguments as written
+ /// in the source, i.e. the number of entries in \p TemplateArgsAsWritten.
+ ///
+ /// \param TemplateArgsAsWritten array of location info for the template
+ /// arguments.
+ ///
+ /// \param LAngleLoc location of left angle token.
+ ///
+ /// \param RAngleLoc location of right angle token.
+ ///
+ /// \param PointOfInstantiation point at which the function template
+ /// specialization was first instantiated.
+ void setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
+ unsigned NumTemplateArgs,
+ const TemplateArgument *TemplateArgs,
+ TemplateSpecializationKind TSK,
+ unsigned NumTemplateArgsAsWritten,
+ TemplateArgumentLoc *TemplateArgsAsWritten,
+ SourceLocation LAngleLoc,
+ SourceLocation RAngleLoc,
+ SourceLocation PointOfInstantiation);
/// \brief Specifies that this function declaration is actually a
/// dependent function template specialization.
@@ -1493,7 +1623,7 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const FunctionDecl *D) { return true; }
static bool classofKind(Kind K) {
- return K >= FunctionFirst && K <= FunctionLast;
+ return K >= firstFunction && K <= lastFunction;
}
static DeclContext *castToDeclContext(const FunctionDecl *D) {
return static_cast<DeclContext *>(const_cast<FunctionDecl*>(D));
@@ -1501,6 +1631,9 @@ public:
static FunctionDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<FunctionDecl *>(const_cast<DeclContext*>(DC));
}
+
+ friend class PCHDeclReader;
+ friend class PCHDeclWriter;
};
@@ -1556,7 +1689,7 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const FieldDecl *D) { return true; }
- static bool classofKind(Kind K) { return K >= FieldFirst && K <= FieldLast; }
+ static bool classofKind(Kind K) { return K >= firstField && K <= lastField; }
};
/// EnumConstantDecl - An instance of this object exists for each enum constant
@@ -1625,7 +1758,7 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const TypeDecl *D) { return true; }
- static bool classofKind(Kind K) { return K >= TypeFirst && K <= TypeLast; }
+ static bool classofKind(Kind K) { return K >= firstType && K <= lastType; }
};
@@ -1715,10 +1848,7 @@ private:
// A struct representing syntactic qualifier info,
// to be used for the (uncommon) case of out-of-line declarations.
- struct ExtInfo {
- NestedNameSpecifier *NNS;
- SourceRange NNSRange;
- };
+ typedef QualifierInfo ExtInfo;
/// TypedefDeclOrQualifier - If the (out-of-line) tag declaration name
/// is qualified, it points to the qualifier info (nns and range);
@@ -1767,6 +1897,13 @@ public:
SourceLocation getTagKeywordLoc() const { return TagKeywordLoc; }
void setTagKeywordLoc(SourceLocation TKL) { TagKeywordLoc = TKL; }
+ /// getInnerLocStart - Return SourceLocation representing start of source
+ /// range ignoring outer template declarations.
+ virtual SourceLocation getInnerLocStart() const { return TagKeywordLoc; }
+
+ /// getOuterLocStart - Return SourceLocation representing start of source
+ /// range taking into account any outer template declarations.
+ SourceLocation getOuterLocStart() const;
virtual SourceRange getSourceRange() const;
virtual TagDecl* getCanonicalDecl();
@@ -1830,24 +1967,34 @@ public:
TypedefDecl *getTypedefForAnonDecl() const {
return hasExtInfo() ? 0 : TypedefDeclOrQualifier.get<TypedefDecl*>();
}
-
+
void setTypedefForAnonDecl(TypedefDecl *TDD);
-
+
NestedNameSpecifier *getQualifier() const {
- return hasExtInfo() ? TypedefDeclOrQualifier.get<ExtInfo*>()->NNS : 0;
+ return hasExtInfo() ? getExtInfo()->NNS : 0;
}
SourceRange getQualifierRange() const {
- return hasExtInfo()
- ? TypedefDeclOrQualifier.get<ExtInfo*>()->NNSRange
- : SourceRange();
+ return hasExtInfo() ? getExtInfo()->NNSRange : SourceRange();
}
void setQualifierInfo(NestedNameSpecifier *Qualifier,
SourceRange QualifierRange);
+ unsigned getNumTemplateParameterLists() const {
+ return hasExtInfo() ? getExtInfo()->NumTemplParamLists : 0;
+ }
+ TemplateParameterList *getTemplateParameterList(unsigned i) const {
+ assert(i < getNumTemplateParameterLists());
+ return getExtInfo()->TemplParamLists[i];
+ }
+ void setTemplateParameterListsInfo(ASTContext &Context, unsigned NumTPLists,
+ TemplateParameterList **TPLists) {
+ getExtInfo()->setTemplateParameterListsInfo(Context, NumTPLists, TPLists);
+ }
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const TagDecl *D) { return true; }
- static bool classofKind(Kind K) { return K >= TagFirst && K <= TagLast; }
+ static bool classofKind(Kind K) { return K >= firstTag && K <= lastTag; }
static DeclContext *castToDeclContext(const TagDecl *D) {
return static_cast<DeclContext *>(const_cast<TagDecl*>(D));
@@ -1855,6 +2002,9 @@ public:
static TagDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<TagDecl *>(const_cast<DeclContext*>(DC));
}
+
+ friend class PCHDeclReader;
+ friend class PCHDeclWriter;
};
/// EnumDecl - Represents an enum. As an extension, we allow forward-declared
@@ -1896,9 +2046,17 @@ public:
return cast<EnumDecl>(TagDecl::getCanonicalDecl());
}
+ const EnumDecl *getPreviousDeclaration() const {
+ return cast_or_null<EnumDecl>(TagDecl::getPreviousDeclaration());
+ }
+ EnumDecl *getPreviousDeclaration() {
+ return cast_or_null<EnumDecl>(TagDecl::getPreviousDeclaration());
+ }
+
static EnumDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id,
SourceLocation TKL, EnumDecl *PrevDecl);
+ static EnumDecl *Create(ASTContext &C, EmptyShell Empty);
virtual void Destroy(ASTContext& C);
@@ -1917,11 +2075,17 @@ public:
typedef specific_decl_iterator<EnumConstantDecl> enumerator_iterator;
enumerator_iterator enumerator_begin() const {
- return enumerator_iterator(this->decls_begin());
+ const EnumDecl *E = cast_or_null<EnumDecl>(getDefinition());
+ if (!E)
+ E = this;
+ return enumerator_iterator(E->decls_begin());
}
enumerator_iterator enumerator_end() const {
- return enumerator_iterator(this->decls_end());
+ const EnumDecl *E = cast_or_null<EnumDecl>(getDefinition());
+ if (!E)
+ E = this;
+ return enumerator_iterator(E->decls_end());
}
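// Illustrative sketch (hypothetical, not part of this patch): with the
// change above, enumerator iteration on any declaration of an enum walks the
// defining declaration's members, so it also works when `ED` is a forward
// declaration. `ED` stands for any EnumDecl*.
static unsigned countEnumerators(const clang::EnumDecl *ED) {
  unsigned N = 0;
  for (clang::EnumDecl::enumerator_iterator I = ED->enumerator_begin(),
                                            E = ED->enumerator_end();
       I != E; ++I)
    ++N;
  return N;
}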
/// getPromotionType - Return the integer type that enumerators
@@ -2010,6 +2174,14 @@ public:
SourceLocation L, IdentifierInfo *Id,
SourceLocation TKL = SourceLocation(),
RecordDecl* PrevDecl = 0);
+ static RecordDecl *Create(ASTContext &C, EmptyShell Empty);
+
+ const RecordDecl *getPreviousDeclaration() const {
+ return cast_or_null<RecordDecl>(TagDecl::getPreviousDeclaration());
+ }
+ RecordDecl *getPreviousDeclaration() {
+ return cast_or_null<RecordDecl>(TagDecl::getPreviousDeclaration());
+ }
virtual void Destroy(ASTContext& C);
@@ -2092,7 +2264,7 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const RecordDecl *D) { return true; }
static bool classofKind(Kind K) {
- return K >= RecordFirst && K <= RecordLast;
+ return K >= firstRecord && K <= lastRecord;
}
};
@@ -2127,11 +2299,13 @@ class BlockDecl : public Decl, public DeclContext {
unsigned NumParams;
Stmt *Body;
+ TypeSourceInfo *SignatureAsWritten;
protected:
BlockDecl(DeclContext *DC, SourceLocation CaretLoc)
: Decl(Block, DC, CaretLoc), DeclContext(Block),
- IsVariadic(false), ParamInfo(0), NumParams(0), Body(0) {}
+ IsVariadic(false), ParamInfo(0), NumParams(0), Body(0),
+ SignatureAsWritten(0) {}
virtual ~BlockDecl();
virtual void Destroy(ASTContext& C);
@@ -2148,6 +2322,9 @@ public:
Stmt *getBody() const { return (Stmt*) Body; }
void setBody(CompoundStmt *B) { Body = (Stmt*) B; }
+ void setSignatureAsWritten(TypeSourceInfo *Sig) { SignatureAsWritten = Sig; }
+ TypeSourceInfo *getSignatureAsWritten() const { return SignatureAsWritten; }
+
// Iterator access to formal parameters.
unsigned param_size() const { return getNumParams(); }
typedef ParmVarDecl **param_iterator;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
index c15aeef..be30b8e 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
@@ -68,12 +68,13 @@ class Decl {
public:
/// \brief Lists the kind of concrete classes of Decl.
enum Kind {
-#define DECL(Derived, Base) Derived,
-#define DECL_RANGE(CommonBase, Start, End) \
- CommonBase##First = Start, CommonBase##Last = End,
-#define LAST_DECL_RANGE(CommonBase, Start, End) \
- CommonBase##First = Start, CommonBase##Last = End
-#include "clang/AST/DeclNodes.def"
+#define DECL(DERIVED, BASE) DERIVED,
+#define ABSTRACT_DECL(DECL)
+#define DECL_RANGE(BASE, START, END) \
+ first##BASE = START, last##BASE = END,
+#define LAST_DECL_RANGE(BASE, START, END) \
+ first##BASE = START, last##BASE = END
+#include "clang/AST/DeclNodes.inc"
};
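// Illustrative sketch (hypothetical, not part of this patch): the tablegen'd
// DeclNodes.inc drives the same X-macro pattern as the old DeclNodes.def;
// macros a client leaves undefined are given defaults inside the .inc, which
// also #undefs them at the end. A one-case-per-concrete-kind table can be
// expanded like so:
static const char *declKindName(const clang::Decl *D) {
  switch (D->getKind()) {
#define DECL(DERIVED, BASE) \
  case clang::Decl::DERIVED: return #DERIVED;
#define ABSTRACT_DECL(DECL)  // abstract nodes define no Kind enumerator
#include "clang/AST/DeclNodes.inc"
  }
  return "<unknown>";
}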
/// \brief A placeholder type used to construct an empty shell of a
@@ -91,7 +92,7 @@ public:
/// These are meant as bitmasks, so that searches in
/// C++ can look into the "tag" namespace during ordinary lookup.
///
- /// Decl currently provides 16 bits of IDNS bits.
+ /// Decl currently provides 15 IDNS bits.
enum IdentifierNamespace {
/// Labels, declared with 'x:' and referenced with 'goto x'.
IDNS_Label = 0x0001,
@@ -224,10 +225,10 @@ protected:
// PCHLevel - the "level" of precompiled header/AST file from which this
// declaration was built.
- unsigned PCHLevel : 2;
+ unsigned PCHLevel : 3;
/// IdentifierNamespace - This specifies what IDNS_* namespace this lives in.
- unsigned IdentifierNamespace : 16;
+ unsigned IdentifierNamespace : 15;
private:
#ifndef NDEBUG
@@ -244,7 +245,15 @@ protected:
HasAttrs(false), Implicit(false), Used(false),
Access(AS_none), PCHLevel(0),
IdentifierNamespace(getIdentifierNamespaceForKind(DK)) {
- if (Decl::CollectingStats()) addDeclKind(DK);
+ if (Decl::CollectingStats()) add(DK);
+ }
+
+ Decl(Kind DK, EmptyShell Empty)
+ : NextDeclInContext(0), DeclKind(DK), InvalidDecl(0),
+ HasAttrs(false), Implicit(false), Used(false),
+ Access(AS_none), PCHLevel(0),
+ IdentifierNamespace(getIdentifierNamespaceForKind(DK)) {
+ if (Decl::CollectingStats()) add(DK);
}
virtual ~Decl();
@@ -296,6 +305,7 @@ public:
}
bool hasAttrs() const { return HasAttrs; }
+ void initAttrs(Attr *attrs);
void addAttr(Attr *attr);
const Attr *getAttrs() const {
if (!HasAttrs) return 0; // common case, no attributes.
@@ -328,7 +338,11 @@ public:
/// \brief Whether this declaration was used, meaning that a definition
/// is required.
- bool isUsed() const;
+ ///
+ /// \param CheckUsedAttr When true, also consider the "used" attribute
+ /// (in addition to the "used" bit set by \c setUsed()) when determining
+ /// whether the declaration is used.
+ bool isUsed(bool CheckUsedAttr = true) const;
void setUsed(bool U = true) { Used = U; }
@@ -344,14 +358,14 @@ public:
unsigned getPCHLevel() const { return PCHLevel; }
/// \brief The maximum PCH level that any declaration may have.
- static const unsigned MaxPCHLevel = 3;
-
+ static const unsigned MaxPCHLevel = 7;
+
/// \brief Set the PCH level of this declaration.
void setPCHLevel(unsigned Level) {
- assert(Level < MaxPCHLevel && "PCH level exceeds the maximum");
+ assert(Level <= MaxPCHLevel && "PCH level exceeds the maximum");
PCHLevel = Level;
}
-
+
unsigned getIdentifierNamespace() const {
return IdentifierNamespace;
}
@@ -474,15 +488,16 @@ public:
/// top-level Stmt* of that body. Otherwise this method returns null.
virtual Stmt* getBody() const { return 0; }
- /// getCompoundBody - Returns getBody(), dyn_casted to a CompoundStmt.
- CompoundStmt* getCompoundBody() const;
+ /// \brief Returns true if this Decl represents a declaration for a body of
+ /// code, such as a function or method definition.
+ virtual bool hasBody() const { return getBody() != 0; }
/// getBodyRBrace - Gets the right brace of the body, if a body exists.
/// This works whether the body is a CompoundStmt or a CXXTryStmt.
SourceLocation getBodyRBrace() const;
// global temp stats (until we have a per-module visitor)
- static void addDeclKind(Kind k);
+ static void add(Kind k);
static bool CollectingStats(bool Enable = false);
static void PrintStats();
@@ -631,6 +646,8 @@ class DeclContext {
/// another pointer.
mutable Decl *LastDecl;
+ friend class ExternalASTSource;
+
protected:
DeclContext(Decl::Kind K)
: DeclKind(K), ExternalLexicalStorage(false),
@@ -687,7 +704,7 @@ public:
case Decl::ObjCMethod:
return true;
default:
- return DeclKind >= Decl::FunctionFirst && DeclKind <= Decl::FunctionLast;
+ return DeclKind >= Decl::firstFunction && DeclKind <= Decl::lastFunction;
}
}
@@ -700,7 +717,7 @@ public:
}
bool isRecord() const {
- return DeclKind >= Decl::RecordFirst && DeclKind <= Decl::RecordLast;
+ return DeclKind >= Decl::firstRecord && DeclKind <= Decl::lastRecord;
}
bool isNamespace() const {
@@ -1083,9 +1100,10 @@ public:
static bool classof(const Decl *D);
static bool classof(const DeclContext *D) { return true; }
-#define DECL_CONTEXT(Name) \
- static bool classof(const Name##Decl *D) { return true; }
-#include "clang/AST/DeclNodes.def"
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) \
+ static bool classof(const NAME##Decl *D) { return true; }
+#include "clang/AST/DeclNodes.inc"
void dumpDeclContext() const;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h b/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
index c19c200..41474ab 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
@@ -92,6 +92,53 @@ namespace llvm {
namespace clang {
+/// AccessSpecDecl - An access specifier followed by colon ':'.
+///
+/// An object of this class represents sugar for the syntactic occurrence
+/// of an access specifier followed by a colon in the list of member
+/// specifiers of a C++ class definition.
+///
+/// Note that these objects do not represent other uses of access specifiers,
+/// such as those occurring in a list of base specifiers.
+/// Also note that this class has nothing to do with so-called
+/// "access declarations" (C++98 11.3 [class.access.dcl]).
+class AccessSpecDecl : public Decl {
+ /// ColonLoc - The location of the ':'.
+ SourceLocation ColonLoc;
+
+ AccessSpecDecl(AccessSpecifier AS, DeclContext *DC,
+ SourceLocation ASLoc, SourceLocation ColonLoc)
+ : Decl(AccessSpec, DC, ASLoc), ColonLoc(ColonLoc) {
+ setAccess(AS);
+ }
+public:
+ /// getAccessSpecifierLoc - The location of the access specifier.
+ SourceLocation getAccessSpecifierLoc() const { return getLocation(); }
+ /// setAccessSpecifierLoc - Sets the location of the access specifier.
+ void setAccessSpecifierLoc(SourceLocation ASLoc) { setLocation(ASLoc); }
+
+ /// getColonLoc - The location of the colon following the access specifier.
+ SourceLocation getColonLoc() const { return ColonLoc; }
+ /// setColonLoc - Sets the location of the colon.
+ void setColonLoc(SourceLocation CLoc) { ColonLoc = CLoc; }
+
+ SourceRange getSourceRange() const {
+ return SourceRange(getAccessSpecifierLoc(), getColonLoc());
+ }
+
+ static AccessSpecDecl *Create(ASTContext &C, AccessSpecifier AS,
+ DeclContext *DC, SourceLocation ASLoc,
+ SourceLocation ColonLoc) {
+ return new (C) AccessSpecDecl(AS, DC, ASLoc, ColonLoc);
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const AccessSpecDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == AccessSpec; }
+};
+
+
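// Illustrative sketch (hypothetical, not part of this patch): materializing
// the sugar node for a `public:` label inside class `RD` and parenting it
// like any other member. `Ctx` stands for the ASTContext in scope.
static clang::AccessSpecDecl *recordPublicLabel(clang::ASTContext &Ctx,
                                                clang::CXXRecordDecl *RD,
                                                clang::SourceLocation ASLoc,
                                                clang::SourceLocation ColonLoc) {
  clang::AccessSpecDecl *AS =
      clang::AccessSpecDecl::Create(Ctx, clang::AS_public, RD, ASLoc, ColonLoc);
  RD->addDecl(AS);  // lands in the member list between the surrounding decls
  return AS;
}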
/// CXXBaseSpecifier - A base class of a C++ class.
///
/// Each CXXBaseSpecifier represents a single, direct base class (or
@@ -271,7 +318,20 @@ class CXXRecordDecl : public RecordDecl {
/// ComputedVisibleConversions - True when visible conversion functions are
/// already computed and are available.
bool ComputedVisibleConversions : 1;
-
+
+ /// \brief Whether we have already declared the default constructor or
+ /// do not need to have one declared.
+ bool DeclaredDefaultConstructor : 1;
+
+ /// \brief Whether we have already declared the copy constructor.
+ bool DeclaredCopyConstructor : 1;
+
+ /// \brief Whether we have already declared the copy-assignment operator.
+ bool DeclaredCopyAssignment : 1;
+
+ /// \brief Whether we have already declared a destructor within the class.
+ bool DeclaredDestructor : 1;
+
/// Bases - Base classes of this class.
/// FIXME: This is wasted space for a union.
CXXBaseSpecifier *Bases;
@@ -367,6 +427,13 @@ public:
virtual const CXXRecordDecl *getCanonicalDecl() const {
return cast<CXXRecordDecl>(RecordDecl::getCanonicalDecl());
}
+
+ const CXXRecordDecl *getPreviousDeclaration() const {
+ return cast_or_null<CXXRecordDecl>(RecordDecl::getPreviousDeclaration());
+ }
+ CXXRecordDecl *getPreviousDeclaration() {
+ return cast_or_null<CXXRecordDecl>(RecordDecl::getPreviousDeclaration());
+ }
CXXRecordDecl *getDefinition() const {
if (!DefinitionData) return 0;
@@ -380,6 +447,7 @@ public:
SourceLocation TKL = SourceLocation(),
CXXRecordDecl* PrevDecl=0,
bool DelayTypeCreation = false);
+ static CXXRecordDecl *Create(ASTContext &C, EmptyShell Empty);
virtual void Destroy(ASTContext& C);
@@ -476,6 +544,20 @@ public:
return data().FirstFriend != 0;
}
+ /// \brief Determine whether this class has had its default constructor
+ /// declared implicitly or does not need one declared implicitly.
+ ///
+ /// This value is used for lazy creation of default constructors.
+ bool hasDeclaredDefaultConstructor() const {
+ return data().DeclaredDefaultConstructor;
+ }
+
+ /// \brief Note whether this class has already had its default constructor
+ /// implicitly declared or doesn't need one.
+ void setDeclaredDefaultConstructor(bool DDC) {
+ data().DeclaredDefaultConstructor = DDC;
+ }
+
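// Illustrative sketch (hypothetical, not part of this patch): the intended
// lazy-creation protocol for implicit members. `declareImplicitDefaultCtor`
// is a made-up stand-in for whatever routine actually performs the implicit
// declaration (in Clang that work lives in Sema).
static void declareImplicitDefaultCtor(clang::CXXRecordDecl *RD);  // hypothetical
static void ensureDefaultConstructor(clang::CXXRecordDecl *RD) {
  if (RD->hasDeclaredDefaultConstructor())
    return;                        // already declared, or known unnecessary
  declareImplicitDefaultCtor(RD);  // declare it exactly once, on first use
  RD->setDeclaredDefaultConstructor(true);
}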
/// hasConstCopyConstructor - Determines whether this class has a
/// copy constructor that accepts a const-qualified argument.
bool hasConstCopyConstructor(ASTContext &Context) const;
@@ -484,12 +566,18 @@ public:
CXXConstructorDecl *getCopyConstructor(ASTContext &Context,
unsigned TypeQuals) const;
- /// hasConstCopyAssignment - Determines whether this class has a
- /// copy assignment operator that accepts a const-qualified argument.
- /// It returns its decl in MD if found.
- bool hasConstCopyAssignment(ASTContext &Context,
- const CXXMethodDecl *&MD) const;
-
+ /// \brief Retrieve the copy-assignment operator for this class, if available.
+ ///
+ /// This routine attempts to find the copy-assignment operator for this
+ /// class, using a simplistic form of overload resolution.
+ ///
+ /// \param ArgIsConst Whether the argument to the copy-assignment operator
+ /// is const-qualified.
+ ///
+ /// \returns The copy-assignment operator that can be invoked, or NULL if
+ /// a unique copy-assignment operator could not be found.
+ CXXMethodDecl *getCopyAssignmentOperator(bool ArgIsConst) const;
+
/// addedConstructor - Notify the class that another constructor has
/// been added. This routine helps maintain information about the
/// class based on which constructors have been added.
@@ -509,9 +597,23 @@ public:
return data().UserDeclaredCopyConstructor;
}
+ /// \brief Determine whether this class has had its copy constructor
+ /// declared, either via the user or via an implicit declaration.
+ ///
+ /// This value is used for lazy creation of copy constructors.
+ bool hasDeclaredCopyConstructor() const {
+ return data().DeclaredCopyConstructor;
+ }
+
+ /// \brief Note whether this class has already had its copy constructor
+ /// declared.
+ void setDeclaredCopyConstructor(bool DCC) {
+ data().DeclaredCopyConstructor = DCC;
+ }
+
/// addedAssignmentOperator - Notify the class that another assignment
/// operator has been added. This routine helps maintain information about the
- /// class based on which operators have been added.
+ /// class based on which operators have been added.
void addedAssignmentOperator(ASTContext &Context, CXXMethodDecl *OpDecl);
/// hasUserDeclaredCopyAssignment - Whether this class has a
@@ -521,6 +623,20 @@ public:
return data().UserDeclaredCopyAssignment;
}
+ /// \brief Determine whether this class has had its copy assignment operator
+ /// declared, either via the user or via an implicit declaration.
+ ///
+ /// This value is used for lazy creation of copy assignment operators.
+ bool hasDeclaredCopyAssignment() const {
+ return data().DeclaredCopyAssignment;
+ }
+
+ /// \brief Note whether this class has already had its copy assignment
+ /// operator declared.
+ void setDeclaredCopyAssignment(bool DCA) {
+ data().DeclaredCopyAssignment = DCA;
+ }
+
/// hasUserDeclaredDestructor - Whether this class has a
/// user-declared destructor. When false, a destructor will be
/// implicitly declared.
@@ -533,8 +649,21 @@ public:
/// fully defined, a destructor will be implicitly declared.
void setUserDeclaredDestructor(bool UCD) {
data().UserDeclaredDestructor = UCD;
+ if (UCD)
+ data().DeclaredDestructor = true;
}
+ /// \brief Determine whether this class has had its destructor declared,
+ /// either via the user or via an implicit declaration.
+ ///
+ /// This value is used for lazy creation of destructors.
+ bool hasDeclaredDestructor() const { return data().DeclaredDestructor; }
+
+ /// \brief Note whether this class has already had its destructor declared.
+ void setDeclaredDestructor(bool DD) {
+ data().DeclaredDestructor = DD;
+ }
+
/// getConversions - Retrieve the overload set containing all of the
/// conversion functions in this class.
UnresolvedSetImpl *getConversionFunctions() {
@@ -726,10 +855,10 @@ public:
void setTemplateSpecializationKind(TemplateSpecializationKind TSK);
/// getDefaultConstructor - Returns the default constructor for this class
- CXXConstructorDecl *getDefaultConstructor(ASTContext &Context);
+ CXXConstructorDecl *getDefaultConstructor();
/// getDestructor - Returns the destructor decl for this class.
- CXXDestructorDecl *getDestructor(ASTContext &Context) const;
+ CXXDestructorDecl *getDestructor() const;
/// isLocalClass - If the class is a local class [class.local], returns
/// the enclosing function declaration.
@@ -920,14 +1049,15 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) {
- return K == CXXRecord ||
- K == ClassTemplateSpecialization ||
- K == ClassTemplatePartialSpecialization;
+ return K >= firstCXXRecord && K <= lastCXXRecord;
}
static bool classof(const CXXRecordDecl *D) { return true; }
static bool classof(const ClassTemplateSpecializationDecl *D) {
return true;
}
+
+ friend class PCHDeclReader;
+ friend class PCHDeclWriter;
};
/// CXXMethodDecl - Represents a static or instance method of a
@@ -984,6 +1114,7 @@ public:
method_iterator begin_overridden_methods() const;
method_iterator end_overridden_methods() const;
+ unsigned size_overridden_methods() const;
/// getParent - Returns the parent of this method declaration, which
/// is the class in which this method is defined.
@@ -1012,7 +1143,7 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const CXXMethodDecl *D) { return true; }
static bool classofKind(Kind K) {
- return K >= CXXMethod && K <= CXXConversion;
+ return K >= firstCXXMethod && K <= lastCXXMethod;
}
};
@@ -1387,6 +1518,9 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const CXXConstructorDecl *D) { return true; }
static bool classofKind(Kind K) { return K == CXXConstructor; }
+
+ friend class PCHDeclReader;
+ friend class PCHDeclWriter;
};
/// CXXDestructorDecl - Represents a C++ destructor within a
@@ -1450,6 +1584,9 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const CXXDestructorDecl *D) { return true; }
static bool classofKind(Kind K) { return K == CXXDestructor; }
+
+ friend class PCHDeclReader;
+ friend class PCHDeclWriter;
};
/// CXXConversionDecl - Represents a C++ conversion function within a
@@ -1504,6 +1641,9 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const CXXConversionDecl *D) { return true; }
static bool classofKind(Kind K) { return K == CXXConversion; }
+
+ friend class PCHDeclReader;
+ friend class PCHDeclWriter;
};
/// LinkageSpecDecl - This represents a linkage specification. For example:
@@ -1607,7 +1747,7 @@ class UsingDirectiveDecl : public NamedDecl {
SourceLocation IdentLoc,
NamedDecl *Nominated,
DeclContext *CommonAncestor)
- : NamedDecl(Decl::UsingDirective, DC, L, getName()),
+ : NamedDecl(UsingDirective, DC, L, getName()),
NamespaceLoc(NamespcLoc), QualifierRange(QualifierRange),
Qualifier(Qualifier), IdentLoc(IdentLoc),
NominatedNamespace(Nominated),
@@ -1680,7 +1820,7 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const UsingDirectiveDecl *D) { return true; }
- static bool classofKind(Kind K) { return K == Decl::UsingDirective; }
+ static bool classofKind(Kind K) { return K == UsingDirective; }
// Friend for getUsingDirectiveName.
friend class DeclContext;
@@ -1714,7 +1854,7 @@ class NamespaceAliasDecl : public NamedDecl {
SourceRange QualifierRange,
NestedNameSpecifier *Qualifier,
SourceLocation IdentLoc, NamedDecl *Namespace)
- : NamedDecl(Decl::NamespaceAlias, DC, L, Alias), AliasLoc(AliasLoc),
+ : NamedDecl(NamespaceAlias, DC, L, Alias), AliasLoc(AliasLoc),
QualifierRange(QualifierRange), Qualifier(Qualifier),
IdentLoc(IdentLoc), Namespace(Namespace) { }
@@ -1786,7 +1926,7 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const NamespaceAliasDecl *D) { return true; }
- static bool classofKind(Kind K) { return K == Decl::NamespaceAlias; }
+ static bool classofKind(Kind K) { return K == NamespaceAlias; }
};
/// UsingShadowDecl - Represents a shadow declaration introduced into
@@ -1809,9 +1949,12 @@ class UsingShadowDecl : public NamedDecl {
UsingShadowDecl(DeclContext *DC, SourceLocation Loc, UsingDecl *Using,
NamedDecl *Target)
- : NamedDecl(UsingShadow, DC, Loc, Target->getDeclName()),
+ : NamedDecl(UsingShadow, DC, Loc, DeclarationName()),
Underlying(Target), Using(Using) {
- IdentifierNamespace = Target->getIdentifierNamespace();
+ if (Target) {
+ setDeclName(Target->getDeclName());
+ IdentifierNamespace = Target->getIdentifierNamespace();
+ }
setImplicit();
}
@@ -1828,7 +1971,11 @@ public:
/// \brief Sets the underlying declaration which has been brought into the
/// local scope.
- void setTargetDecl(NamedDecl* ND) { Underlying = ND; }
+ void setTargetDecl(NamedDecl* ND) {
+ assert(ND && "Target decl is null!");
+ Underlying = ND;
+ IdentifierNamespace = ND->getIdentifierNamespace();
+ }
/// \brief Gets the using declaration to which this declaration is tied.
UsingDecl *getUsingDecl() const { return Using; }
@@ -1866,7 +2013,7 @@ class UsingDecl : public NamedDecl {
UsingDecl(DeclContext *DC, SourceLocation L, SourceRange NNR,
SourceLocation UL, NestedNameSpecifier* TargetNNS,
DeclarationName Name, bool IsTypeNameArg)
- : NamedDecl(Decl::Using, DC, L, Name),
+ : NamedDecl(Using, DC, L, Name),
NestedNameRange(NNR), UsingLocation(UL), TargetNestedName(TargetNNS),
IsTypeName(IsTypeNameArg) {
}
@@ -1934,7 +2081,10 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const UsingDecl *D) { return true; }
- static bool classofKind(Kind K) { return K == Decl::Using; }
+ static bool classofKind(Kind K) { return K == Using; }
+
+ friend class PCHDeclReader;
+ friend class PCHDeclWriter;
};
/// UnresolvedUsingValueDecl - Represents a dependent using
@@ -1960,7 +2110,7 @@ class UnresolvedUsingValueDecl : public ValueDecl {
NestedNameSpecifier *TargetNNS,
SourceLocation TargetNameLoc,
DeclarationName TargetName)
- : ValueDecl(Decl::UnresolvedUsingValue, DC, TargetNameLoc, TargetName, Ty),
+ : ValueDecl(UnresolvedUsingValue, DC, TargetNameLoc, TargetName, Ty),
TargetNestedNameRange(TargetNNR), UsingLocation(UsingLoc),
TargetNestedNameSpecifier(TargetNNS)
{ }
@@ -1997,7 +2147,7 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const UnresolvedUsingValueDecl *D) { return true; }
- static bool classofKind(Kind K) { return K == Decl::UnresolvedUsingValue; }
+ static bool classofKind(Kind K) { return K == UnresolvedUsingValue; }
};
/// UnresolvedUsingTypenameDecl - Represents a dependent using
@@ -2026,7 +2176,7 @@ class UnresolvedUsingTypenameDecl : public TypeDecl {
SourceLocation TypenameLoc,
SourceRange TargetNNR, NestedNameSpecifier *TargetNNS,
SourceLocation TargetNameLoc, IdentifierInfo *TargetName)
- : TypeDecl(Decl::UnresolvedUsingTypename, DC, TargetNameLoc, TargetName),
+ : TypeDecl(UnresolvedUsingTypename, DC, TargetNameLoc, TargetName),
TargetNestedNameRange(TargetNNR), UsingLocation(UsingLoc),
TypenameLocation(TypenameLoc), TargetNestedNameSpecifier(TargetNNS)
{ }
@@ -2070,7 +2220,7 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const UnresolvedUsingTypenameDecl *D) { return true; }
- static bool classofKind(Kind K) { return K == Decl::UnresolvedUsingTypename; }
+ static bool classofKind(Kind K) { return K == UnresolvedUsingTypename; }
};
/// StaticAssertDecl - Represents a C++0x static_assert declaration.
@@ -2098,7 +2248,7 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(StaticAssertDecl *D) { return true; }
- static bool classofKind(Kind K) { return K == Decl::StaticAssert; }
+ static bool classofKind(Kind K) { return K == StaticAssert; }
};
/// Insertion operator for diagnostics. This allows sending AccessSpecifier's
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h b/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
index a20625d..2807d16 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
@@ -59,10 +59,14 @@ private:
FriendLoc(FriendL) {
}
+ explicit FriendDecl(EmptyShell Empty)
+ : Decl(Decl::Friend, Empty), NextFriend(0) { }
+
public:
static FriendDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, FriendUnion Friend_,
SourceLocation FriendL);
+ static FriendDecl *Create(ASTContext &C, EmptyShell Empty);
/// If this friend declaration names an (untemplated but
/// possibly dependent) type, return the type; otherwise
@@ -87,6 +91,9 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const FriendDecl *D) { return true; }
static bool classofKind(Kind K) { return K == Decl::Friend; }
+
+ friend class PCHDeclReader;
+ friend class PCHDeclWriter;
};
/// An iterator over the friend declarations of a class.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h b/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
index 97d1656..30f63d8 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
@@ -239,6 +239,12 @@ public:
QualType getResultType() const { return MethodDeclType; }
void setResultType(QualType T) { MethodDeclType = T; }
+ /// \brief Determine the type of an expression that sends a message to this
+ /// function.
+ QualType getSendResultType() const {
+ return getResultType().getNonLValueExprType(getASTContext());
+ }
+
TypeSourceInfo *getResultTypeSourceInfo() const { return ResultTInfo; }
void setResultTypeSourceInfo(TypeSourceInfo *TInfo) { ResultTInfo = TInfo; }
@@ -417,8 +423,8 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const ObjCContainerDecl *D) { return true; }
static bool classofKind(Kind K) {
- return K >= ObjCContainerFirst &&
- K <= ObjCContainerLast;
+ return K >= firstObjCContainer &&
+ K <= lastObjCContainer;
}
static DeclContext *castToDeclContext(const ObjCContainerDecl *D) {
@@ -550,8 +556,8 @@ public:
void setCategoryList(ObjCCategoryDecl *category) {
CategoryList = category;
}
-
- ObjCCategoryDecl* getClassExtension() const;
+
+ ObjCCategoryDecl* getFirstClassExtension() const;
ObjCPropertyDecl
*FindPropertyVisibleInPrimaryClass(IdentifierInfo *PropertyId) const;
@@ -983,6 +989,7 @@ public:
}
bool IsClassExtension() const { return getIdentifier() == 0; }
+ const ObjCCategoryDecl *getNextClassExtension() const;
typedef specific_decl_iterator<ObjCIvarDecl> ivar_iterator;
ivar_iterator ivar_begin() const {
@@ -1059,7 +1066,7 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const ObjCImplDecl *D) { return true; }
static bool classofKind(Kind K) {
- return K >= ObjCImplFirst && K <= ObjCImplLast;
+ return K >= firstObjCImpl && K <= lastObjCImpl;
}
};
@@ -1306,9 +1313,9 @@ public:
enum PropertyControl { None, Required, Optional };
private:
SourceLocation AtLoc; // location of @property
- QualType DeclType;
+ TypeSourceInfo *DeclType;
unsigned PropertyAttributes : 8;
-
+ unsigned PropertyAttributesAsWritten : 8;
// @required/@optional
unsigned PropertyImplementation : 2;
@@ -1320,9 +1327,11 @@ private:
ObjCIvarDecl *PropertyIvarDecl; // Synthesize ivar for this property
ObjCPropertyDecl(DeclContext *DC, SourceLocation L, IdentifierInfo *Id,
- SourceLocation AtLocation, QualType T)
+ SourceLocation AtLocation, TypeSourceInfo *T)
: NamedDecl(ObjCProperty, DC, L, Id), AtLoc(AtLocation), DeclType(T),
- PropertyAttributes(OBJC_PR_noattr), PropertyImplementation(None),
+ PropertyAttributes(OBJC_PR_noattr),
+ PropertyAttributesAsWritten(OBJC_PR_noattr),
+ PropertyImplementation(None),
GetterName(Selector()),
SetterName(Selector()),
GetterMethodDecl(0), SetterMethodDecl(0) , PropertyIvarDecl(0) {}
@@ -1330,13 +1339,14 @@ public:
static ObjCPropertyDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
IdentifierInfo *Id, SourceLocation AtLocation,
- QualType T,
+ TypeSourceInfo *T,
PropertyControl propControl = None);
SourceLocation getAtLoc() const { return AtLoc; }
void setAtLoc(SourceLocation L) { AtLoc = L; }
- QualType getType() const { return DeclType; }
- void setType(QualType T) { DeclType = T; }
+ TypeSourceInfo *getTypeSourceInfo() const { return DeclType; }
+ QualType getType() const { return DeclType->getType(); }
+ void setType(TypeSourceInfo *T) { DeclType = T; }
PropertyAttributeKind getPropertyAttributes() const {
return PropertyAttributeKind(PropertyAttributes);
@@ -1345,6 +1355,14 @@ public:
PropertyAttributes |= PRVal;
}
+ PropertyAttributeKind getPropertyAttributesAsWritten() const {
+ return PropertyAttributeKind(PropertyAttributesAsWritten);
+ }
+
+ void setPropertyAttributesAsWritten(PropertyAttributeKind PRVal) {
+ PropertyAttributesAsWritten = PRVal;
+ }
+
void makeitReadWriteAttribute(void) {
PropertyAttributes &= ~OBJC_PR_readonly;
PropertyAttributes |= OBJC_PR_readwrite;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h b/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
index b7b90b1..135dd3a 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
@@ -180,18 +180,29 @@ public:
TemplateArgumentListBuilder &Builder,
bool TakeArgs);
+ /// TemplateArgumentList - Constructs a list that copies the given template
+ /// arguments into a locally new[]'d array.
+ TemplateArgumentList(ASTContext &Context,
+ const TemplateArgument *Args, unsigned NumArgs);
+
/// Produces a shallow copy of the given template argument list. This
/// assumes that the input argument list outlives it. This takes the list as
/// a pointer to avoid looking like a copy constructor, since this really
/// really isn't safe to use that way.
explicit TemplateArgumentList(const TemplateArgumentList *Other);
-
+
+ TemplateArgumentList() : NumFlatArguments(0), NumStructuredArguments(0) { }
+
/// Used to release the memory associated with a TemplateArgumentList
/// object. FIXME: This is currently not called anywhere, but the
/// memory will still be freed when using a BumpPtrAllocator.
void Destroy(ASTContext &C);
~TemplateArgumentList();
+
+ /// \brief Copies the template arguments into a locally new[]'d array.
+ void init(ASTContext &Context,
+ const TemplateArgument *Args, unsigned NumArgs);
/// \brief Retrieve the template argument at a given index.
const TemplateArgument &get(unsigned Idx) const {
@@ -261,12 +272,27 @@ public:
static bool classof(const ClassTemplateDecl *D) { return true; }
static bool classof(const TemplateTemplateParmDecl *D) { return true; }
static bool classofKind(Kind K) {
- return K >= TemplateFirst && K <= TemplateLast;
+ return K >= firstTemplate && K <= lastTemplate;
+ }
+
+ SourceRange getSourceRange() const {
+ return SourceRange(TemplateParams->getTemplateLoc(),
+ TemplatedDecl->getSourceRange().getEnd());
}
protected:
NamedDecl *TemplatedDecl;
TemplateParameterList* TemplateParams;
+
+public:
+ /// \brief Initialize the underlying templated declaration and
+ /// template parameters.
+ void init(NamedDecl *templatedDecl, TemplateParameterList* templateParams) {
+ assert(TemplatedDecl == 0 && "TemplatedDecl already set!");
+ assert(TemplateParams == 0 && "TemplateParams already set!");
+ TemplatedDecl = templatedDecl;
+ TemplateParams = templateParams;
+ }
};
/// \brief Provides information about a function template specialization,
@@ -353,8 +379,9 @@ class MemberSpecializationInfo {
public:
explicit
- MemberSpecializationInfo(NamedDecl *IF, TemplateSpecializationKind TSK)
- : MemberAndTSK(IF, TSK - 1), PointOfInstantiation() {
+ MemberSpecializationInfo(NamedDecl *IF, TemplateSpecializationKind TSK,
+ SourceLocation POI = SourceLocation())
+ : MemberAndTSK(IF, TSK - 1), PointOfInstantiation(POI) {
assert(TSK != TSK_Undeclared &&
"Cannot encode undeclared template specializations for members");
}
@@ -602,6 +629,9 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const FunctionTemplateDecl *D) { return true; }
static bool classofKind(Kind K) { return K == FunctionTemplate; }
+
+ friend class PCHDeclReader;
+ friend class PCHDeclWriter;
};
//===----------------------------------------------------------------------===//
@@ -634,9 +664,11 @@ protected:
public:
/// Get the nesting depth of the template parameter.
unsigned getDepth() const { return Depth; }
+ void setDepth(unsigned D) { Depth = D; }
/// Get the position of the template parameter within its parameter list.
unsigned getPosition() const { return Position; }
+ void setPosition(unsigned P) { Position = P; }
/// Get the index of the template parameter within its parameter list.
unsigned getIndex() const { return Position; }
@@ -675,6 +707,7 @@ public:
SourceLocation L, unsigned D, unsigned P,
IdentifierInfo *Id, bool Typename,
bool ParameterPack);
+ static TemplateTypeParmDecl *Create(ASTContext &C, EmptyShell Empty);
/// \brief Whether this template type parameter was declared with
/// the 'typename' keyword. If not, it was declared with the 'class'
@@ -711,6 +744,13 @@ public:
DefaultArgument = 0;
InheritedDefault = false;
}
+
+ /// \brief Set whether this template type parameter was declared with
+ /// the 'typename' or 'class' keyword.
+ void setDeclaredWithTypename(bool withTypename) { Typename = withTypename; }
+
+ /// \brief Set whether this is a parameter pack.
+ void setParameterPack(bool isParamPack) { ParameterPack = isParamPack; }
/// \brief Retrieve the depth of the template parameter.
unsigned getDepth() const;
@@ -734,15 +774,16 @@ public:
/// @endcode
class NonTypeTemplateParmDecl
: public VarDecl, protected TemplateParmPosition {
- /// \brief The default template argument, if any.
- Expr *DefaultArgument;
+ /// \brief The default template argument, if any, and whether or not
+ /// it was inherited.
+ llvm::PointerIntPair<Expr*, 1, bool> DefaultArgumentAndInherited;
NonTypeTemplateParmDecl(DeclContext *DC, SourceLocation L, unsigned D,
unsigned P, IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo)
: VarDecl(NonTypeTemplateParm, DC, L, Id, T, TInfo, VarDecl::None,
VarDecl::None),
- TemplateParmPosition(D, P), DefaultArgument(0)
+ TemplateParmPosition(D, P), DefaultArgumentAndInherited(0, false)
{ }
public:
@@ -751,22 +792,43 @@ public:
unsigned P, IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo);
using TemplateParmPosition::getDepth;
+ using TemplateParmPosition::setDepth;
using TemplateParmPosition::getPosition;
+ using TemplateParmPosition::setPosition;
using TemplateParmPosition::getIndex;
/// \brief Determine whether this template parameter has a default
/// argument.
- bool hasDefaultArgument() const { return DefaultArgument; }
+ bool hasDefaultArgument() const {
+ return DefaultArgumentAndInherited.getPointer() != 0;
+ }
/// \brief Retrieve the default argument, if any.
- Expr *getDefaultArgument() const { return DefaultArgument; }
+ Expr *getDefaultArgument() const {
+ return DefaultArgumentAndInherited.getPointer();
+ }
/// \brief Retrieve the location of the default argument, if any.
SourceLocation getDefaultArgumentLoc() const;
- /// \brief Set the default argument for this template parameter.
- void setDefaultArgument(Expr *DefArg) {
- DefaultArgument = DefArg;
+ /// \brief Determines whether the default argument was inherited
+ /// from a previous declaration of this template.
+ bool defaultArgumentWasInherited() const {
+ return DefaultArgumentAndInherited.getInt();
+ }
+
+ /// \brief Set the default argument for this template parameter, and
+ /// whether that default argument was inherited from another
+ /// declaration.
+ void setDefaultArgument(Expr *DefArg, bool Inherited) {
+ DefaultArgumentAndInherited.setPointer(DefArg);
+ DefaultArgumentAndInherited.setInt(Inherited);
+ }
+
+ /// \brief Removes the default argument of this template parameter.
+ void removeDefaultArgument() {
+ DefaultArgumentAndInherited.setPointer(0);
+ DefaultArgumentAndInherited.setInt(false);
}
// Implement isa/cast/dyncast/etc.
@@ -785,14 +847,17 @@ public:
class TemplateTemplateParmDecl
: public TemplateDecl, protected TemplateParmPosition {
- /// \brief The default template argument, if any.
+ /// DefaultArgument - The default template argument, if any.
TemplateArgumentLoc DefaultArgument;
+ /// Whether or not the default argument was inherited.
+ bool DefaultArgumentWasInherited;
TemplateTemplateParmDecl(DeclContext *DC, SourceLocation L,
unsigned D, unsigned P,
IdentifierInfo *Id, TemplateParameterList *Params)
: TemplateDecl(TemplateTemplateParm, DC, L, Id, Params),
- TemplateParmPosition(D, P), DefaultArgument()
+ TemplateParmPosition(D, P), DefaultArgument(),
+ DefaultArgumentWasInherited(false)
{ }
public:
@@ -807,24 +872,45 @@ public:
/// \brief Determine whether this template parameter has a default
/// argument.
- bool hasDefaultArgument() const {
- return !DefaultArgument.getArgument().isNull();
+ bool hasDefaultArgument() const {
+ return !DefaultArgument.getArgument().isNull();
}
/// \brief Retrieve the default argument, if any.
- const TemplateArgumentLoc &getDefaultArgument() const {
- return DefaultArgument;
+ const TemplateArgumentLoc &getDefaultArgument() const {
+ return DefaultArgument;
+ }
+
+ /// \brief Retrieve the location of the default argument, if any.
+ SourceLocation getDefaultArgumentLoc() const;
+
+ /// \brief Determines whether the default argument was inherited
+ /// from a previous declaration of this template.
+ bool defaultArgumentWasInherited() const {
+ return DefaultArgumentWasInherited;
}
- /// \brief Set the default argument for this template parameter.
- void setDefaultArgument(const TemplateArgumentLoc &DefArg) {
+ /// \brief Set the default argument for this template parameter, and
+ /// whether that default argument was inherited from another
+ /// declaration.
+ void setDefaultArgument(const TemplateArgumentLoc &DefArg, bool Inherited) {
DefaultArgument = DefArg;
+ DefaultArgumentWasInherited = Inherited;
+ }
+
+ /// \brief Removes the default argument of this template parameter.
+ void removeDefaultArgument() {
+ DefaultArgument = TemplateArgumentLoc();
+ DefaultArgumentWasInherited = false;
}
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classof(const TemplateTemplateParmDecl *D) { return true; }
static bool classofKind(Kind K) { return K == TemplateTemplateParm; }
+
+ friend class PCHDeclReader;
+ friend class PCHDeclWriter;
};
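// Illustrative sketch (hypothetical, not part of this patch): both template
// parameter kinds now remember whether their default argument was inherited
// from a previous declaration of the template (NonTypeTemplateParmDecl packs
// the flag into the low bit of the Expr* via llvm::PointerIntPair, so it
// costs no extra storage). A redeclaration check might consult it like this:
static bool ownsItsDefaultArgument(const clang::NonTypeTemplateParmDecl *P) {
  return P->hasDefaultArgument() && !P->defaultArgumentWasInherited();
}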
/// \brief Represents a class template specialization, which refers to
@@ -860,9 +946,22 @@ class ClassTemplateSpecializationDecl
llvm::PointerUnion<ClassTemplateDecl *, SpecializedPartialSpecialization *>
SpecializedTemplate;
- /// \brief The type-as-written of an explicit template specialization.
+ /// \brief Further info for explicit template specialization/instantiation.
+ struct ExplicitSpecializationInfo {
+ /// \brief The type-as-written.
+ TypeSourceInfo *TypeAsWritten;
+ /// \brief The location of the extern keyword.
+ SourceLocation ExternLoc;
+ /// \brief The location of the template keyword.
+ SourceLocation TemplateKeywordLoc;
+
+ ExplicitSpecializationInfo()
+ : TypeAsWritten(0), ExternLoc(), TemplateKeywordLoc() {}
+ };
+
+ /// \brief Further info for explicit template specialization/instantiation.
/// Does not apply to implicit specializations.
- TypeSourceInfo *TypeAsWritten;
+ ExplicitSpecializationInfo *ExplicitInfo;
/// \brief The template arguments used to describe this specialization.
TemplateArgumentList TemplateArgs;
@@ -881,12 +980,16 @@ protected:
TemplateArgumentListBuilder &Builder,
ClassTemplateSpecializationDecl *PrevDecl);
+ explicit ClassTemplateSpecializationDecl(Kind DK);
+
public:
static ClassTemplateSpecializationDecl *
Create(ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation L,
ClassTemplateDecl *SpecializedTemplate,
TemplateArgumentListBuilder &Builder,
ClassTemplateSpecializationDecl *PrevDecl);
+ static ClassTemplateSpecializationDecl *
+ Create(ASTContext &Context, EmptyShell Empty);
virtual void Destroy(ASTContext& C);
@@ -903,6 +1006,14 @@ public:
return TemplateArgs;
}
+ /// \brief Initialize the template arguments of the class template
+ /// specialization.
+ void initTemplateArgs(TemplateArgument *Args, unsigned NumArgs) {
+ assert(TemplateArgs.flat_size() == 0 &&
+ "Template arguments already initialized!");
+ TemplateArgs.init(getASTContext(), Args, NumArgs);
+ }
+
/// \brief Determine the kind of specialization that this
/// declaration represents.
TemplateSpecializationKind getSpecializationKind() const {
@@ -943,6 +1054,19 @@ public:
SpecializedTemplate.get<ClassTemplateDecl*>());
}
+ /// \brief Retrieve the class template or class template partial
+ /// specialization which was specialized by this.
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ getSpecializedTemplateOrPartial() const {
+ if (SpecializedPartialSpecialization *PartialSpec
+ = SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>())
+ return PartialSpec->PartialSpecialization;
+
+ return const_cast<ClassTemplateDecl*>(
+ SpecializedTemplate.get<ClassTemplateDecl*>());
+ }
+
/// \brief Retrieve the set of template arguments that should be used
/// to instantiate members of the class template or class template partial
/// specialization from which this class template specialization was
@@ -967,6 +1091,8 @@ public:
/// template arguments have been deduced.
void setInstantiationOf(ClassTemplatePartialSpecializationDecl *PartialSpec,
TemplateArgumentList *TemplateArgs) {
+ assert(!SpecializedTemplate.is<SpecializedPartialSpecialization*>() &&
+ "Already set to a class template partial specialization!");
SpecializedPartialSpecialization *PS
= new (getASTContext()) SpecializedPartialSpecialization();
PS->PartialSpecialization = PartialSpec;
@@ -974,17 +1100,59 @@ public:
SpecializedTemplate = PS;
}
+ /// \brief Note that this class template specialization is actually an
+ /// instantiation of the given class template partial specialization whose
+ /// template arguments have been deduced.
+ void setInstantiationOf(ClassTemplatePartialSpecializationDecl *PartialSpec,
+ TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ ASTContext &Ctx = getASTContext();
+ setInstantiationOf(PartialSpec,
+ new (Ctx) TemplateArgumentList(Ctx, TemplateArgs,
+ NumTemplateArgs));
+ }
+
+ /// \brief Note that this class template specialization is an instantiation
+ /// of the given class template.
+ void setInstantiationOf(ClassTemplateDecl *TemplDecl) {
+ assert(!SpecializedTemplate.is<SpecializedPartialSpecialization*>() &&
+ "Previously set to a class template partial specialization!");
+ SpecializedTemplate = TemplDecl;
+ }
+
/// \brief Sets the type of this specialization as it was written by
/// the user. This will be a class template specialization type.
void setTypeAsWritten(TypeSourceInfo *T) {
- TypeAsWritten = T;
+ if (!ExplicitInfo) ExplicitInfo = new ExplicitSpecializationInfo;
+ ExplicitInfo->TypeAsWritten = T;
}
-
/// \brief Gets the type of this specialization as it was written by
/// the user, if it was so written.
TypeSourceInfo *getTypeAsWritten() const {
- return TypeAsWritten;
+ return ExplicitInfo ? ExplicitInfo->TypeAsWritten : 0;
+ }
+
+ /// \brief Gets the location of the extern keyword, if present.
+ SourceLocation getExternLoc() const {
+ return ExplicitInfo ? ExplicitInfo->ExternLoc : SourceLocation();
+ }
+ /// \brief Sets the location of the extern keyword.
+ void setExternLoc(SourceLocation Loc) {
+ if (!ExplicitInfo) ExplicitInfo = new ExplicitSpecializationInfo;
+ ExplicitInfo->ExternLoc = Loc;
+ }
+
+ /// \brief Sets the location of the template keyword.
+ void setTemplateKeywordLoc(SourceLocation Loc) {
+ if (!ExplicitInfo) ExplicitInfo = new ExplicitSpecializationInfo;
+ ExplicitInfo->TemplateKeywordLoc = Loc;
}
+ /// \brief Gets the location of the template keyword, if present.
+ SourceLocation getTemplateKeywordLoc() const {
+ return ExplicitInfo ? ExplicitInfo->TemplateKeywordLoc : SourceLocation();
+ }
+
+ SourceLocation getInnerLocStart() const { return getTemplateKeywordLoc(); }
void Profile(llvm::FoldingSetNodeID &ID) const {
Profile(ID, TemplateArgs.getFlatArgumentList(), TemplateArgs.flat_size(),
@@ -1001,8 +1169,8 @@ public:
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) {
- return K == ClassTemplateSpecialization ||
- K == ClassTemplatePartialSpecialization;
+ return K >= firstClassTemplateSpecialization &&
+ K <= lastClassTemplateSpecialization;
}
static bool classof(const ClassTemplateSpecializationDecl *) {
@@ -1053,6 +1221,12 @@ class ClassTemplatePartialSpecializationDecl
TemplateParams(Params), ArgsAsWritten(ArgInfos),
NumArgsAsWritten(NumArgInfos), SequenceNumber(SequenceNumber),
InstantiatedFromMember(0, false) { }
+
+ ClassTemplatePartialSpecializationDecl()
+ : ClassTemplateSpecializationDecl(ClassTemplatePartialSpecialization),
+ TemplateParams(0), ArgsAsWritten(0),
+ NumArgsAsWritten(0), SequenceNumber(0),
+ InstantiatedFromMember(0, false) { }
public:
static ClassTemplatePartialSpecializationDecl *
@@ -1065,16 +1239,26 @@ public:
ClassTemplatePartialSpecializationDecl *PrevDecl,
unsigned SequenceNumber);
+ static ClassTemplatePartialSpecializationDecl *
+ Create(ASTContext &Context, EmptyShell Empty);
+
/// Get the list of template parameters
TemplateParameterList *getTemplateParameters() const {
return TemplateParams;
}
+ void initTemplateParameters(TemplateParameterList *Params) {
+ assert(TemplateParams == 0 && "TemplateParams already set");
+ TemplateParams = Params;
+ }
+
/// Get the template arguments as written.
TemplateArgumentLoc *getTemplateArgsAsWritten() const {
return ArgsAsWritten;
}
+ void initTemplateArgsAsWritten(const TemplateArgumentListInfo &ArgInfos);
+
/// Get the number of template arguments as written.
unsigned getNumTemplateArgsAsWritten() const {
return NumArgsAsWritten;
@@ -1083,6 +1267,7 @@ public:
/// \brief Get the sequence number for this class template partial
/// specialization.
unsigned getSequenceNumber() const { return SequenceNumber; }
+ void setSequenceNumber(unsigned N) { SequenceNumber = N; }
/// \brief Retrieve the member class template partial specialization from
/// which this particular class template partial specialization was
@@ -1199,26 +1384,19 @@ protected:
llvm::PointerIntPair<ClassTemplateDecl *, 1, bool> InstantiatedFromMember;
};
- // FIXME: Combine PreviousDeclaration with CommonPtr, as in
- // FunctionTemplateDecl.
-
- /// \brief Previous declaration of this class template.
- ClassTemplateDecl *PreviousDeclaration;
+ /// \brief A pointer to the previous declaration (if this is a redeclaration)
+ /// or to the data that is common to all declarations of this class template.
+ llvm::PointerUnion<Common*, ClassTemplateDecl*> CommonOrPrev;
- /// \brief Pointer to the data that is common to all of the
- /// declarations of this class template.
- ///
- /// The first declaration of a class template (e.g., the declaration
- /// with no "previous declaration") owns this pointer.
- Common *CommonPtr;
+ /// \brief Retrieves the "common" pointer shared by all
+ /// (re-)declarations of the same class template. Calling this routine
+ /// may implicitly allocate memory for the common pointer.
+ Common *getCommonPtr();
ClassTemplateDecl(DeclContext *DC, SourceLocation L, DeclarationName Name,
- TemplateParameterList *Params, NamedDecl *Decl,
- ClassTemplateDecl *PrevDecl, Common *CommonPtr)
+ TemplateParameterList *Params, NamedDecl *Decl)
: TemplateDecl(ClassTemplate, DC, L, Name, Params, Decl),
- PreviousDeclaration(PrevDecl), CommonPtr(CommonPtr) { }
-
- ~ClassTemplateDecl();
+ CommonOrPrev((Common*)0) { }
public:
/// Get the underlying class declarations of the template.
@@ -1226,13 +1404,30 @@ public:
return static_cast<CXXRecordDecl *>(TemplatedDecl);
}
- /// \brief Retrieve the previous declaration of this template.
- ClassTemplateDecl *getPreviousDeclaration() const {
- return PreviousDeclaration;
+ /// \brief Retrieve the previous declaration of this class template, or
+ /// NULL if no such declaration exists.
+ const ClassTemplateDecl *getPreviousDeclaration() const {
+ return CommonOrPrev.dyn_cast<ClassTemplateDecl*>();
+ }
+
+ /// \brief Retrieve the previous declaration of this class template, or
+ /// NULL if no such declaration exists.
+ ClassTemplateDecl *getPreviousDeclaration() {
+ return CommonOrPrev.dyn_cast<ClassTemplateDecl*>();
+ }
+
+ /// \brief Set the previous declaration of this class template.
+ void setPreviousDeclaration(ClassTemplateDecl *Prev) {
+ if (Prev)
+ CommonOrPrev = Prev;
}
virtual ClassTemplateDecl *getCanonicalDecl();
+ const ClassTemplateDecl *getCanonicalDecl() const {
+ return const_cast<ClassTemplateDecl*>(this)->getCanonicalDecl();
+ }
+
/// Create a class template node.
static ClassTemplateDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
@@ -1243,14 +1438,14 @@ public:
/// \brief Retrieve the set of specializations of this class template.
llvm::FoldingSet<ClassTemplateSpecializationDecl> &getSpecializations() {
- return CommonPtr->Specializations;
+ return getCommonPtr()->Specializations;
}
/// \brief Retrieve the set of partial specializations of this class
/// template.
llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &
getPartialSpecializations() {
- return CommonPtr->PartialSpecializations;
+ return getCommonPtr()->PartialSpecializations;
}
/// \brief Retrieve the partial specializations as an ordered list.
@@ -1281,7 +1476,7 @@ public:
/// typedef array this_type; // "array" is equivalent to "array<T, N>"
/// };
/// \endcode
- QualType getInjectedClassNameSpecialization(ASTContext &Context);
+ QualType getInjectedClassNameSpecialization();
/// \brief Retrieve the member class template that this class template was
/// derived from.
@@ -1303,13 +1498,13 @@ public:
/// X<T>::A<U>, a ClassTemplateDecl (whose parent is X<T>, also a ClassTemplateDecl).
///
/// \returns null if this is not an instantiation of a member class template.
- ClassTemplateDecl *getInstantiatedFromMemberTemplate() const {
- return CommonPtr->InstantiatedFromMember.getPointer();
+ ClassTemplateDecl *getInstantiatedFromMemberTemplate() {
+ return getCommonPtr()->InstantiatedFromMember.getPointer();
}
void setInstantiatedFromMemberTemplate(ClassTemplateDecl *CTD) {
- assert(!CommonPtr->InstantiatedFromMember.getPointer());
- CommonPtr->InstantiatedFromMember.setPointer(CTD);
+ assert(!getCommonPtr()->InstantiatedFromMember.getPointer());
+ getCommonPtr()->InstantiatedFromMember.setPointer(CTD);
}
/// \brief Determines whether this template was a specialization of a
@@ -1328,14 +1523,14 @@ public:
/// struct X<int>::Inner { /* ... */ };
/// \endcode
bool isMemberSpecialization() {
- return CommonPtr->InstantiatedFromMember.getInt();
+ return getCommonPtr()->InstantiatedFromMember.getInt();
}
/// \brief Note that this member template is a specialization.
void setMemberSpecialization() {
- assert(CommonPtr->InstantiatedFromMember.getPointer() &&
+ assert(getCommonPtr()->InstantiatedFromMember.getPointer() &&
"Only member templates can be member template specializations");
- CommonPtr->InstantiatedFromMember.setInt(true);
+ getCommonPtr()->InstantiatedFromMember.setInt(true);
}
// Implement isa/cast/dyncast support
@@ -1344,6 +1539,9 @@ public:
static bool classofKind(Kind K) { return K == ClassTemplate; }
virtual void Destroy(ASTContext& C);
+
+ friend class PCHDeclReader;
+ friend class PCHDeclWriter;
};
/// Declaration of a friend template. For example:
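
The PointerUnion folds the old PreviousDeclaration and CommonPtr fields into
one: a redeclaration points back at its predecessor, while only the first
declaration owns the lazily allocated Common data. getCommonPtr() is only
declared in this header; a plausible sketch of how it resolves the union
(the real out-of-line definition is not part of this hunk and may differ):

    #include "clang/AST/DeclTemplate.h"
    using namespace clang;

    ClassTemplateDecl::Common *ClassTemplateDecl::getCommonPtr() {
      // The first declaration in the chain owns the Common structure.
      ClassTemplateDecl *First = this;
      while (ClassTemplateDecl *Prev = First->getPreviousDeclaration())
        First = Prev;

      if (Common *C = First->CommonOrPrev.dyn_cast<Common*>())
        return C;

      // Lazily allocate the shared data in the ASTContext, as the
      // comment on the declaration promises.
      Common *C = new (getASTContext()) Common;
      First->CommonOrPrev = C;
      return C;
    }
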
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h
index 140e5c0..aee1998 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h
@@ -30,20 +30,19 @@ class DeclVisitor {
public:
RetTy Visit(Decl *D) {
switch (D->getKind()) {
- default: assert(false && "Decl that isn't part of DeclNodes.def!");
-#define DECL(Derived, Base) \
- case Decl::Derived: DISPATCH(Derived##Decl, Derived##Decl);
-#define ABSTRACT_DECL(Derived, Base)
-#include "clang/AST/DeclNodes.def"
+ default: assert(false && "Decl that isn't part of DeclNodes.inc!");
+#define DECL(DERIVED, BASE) \
+ case Decl::DERIVED: DISPATCH(DERIVED##Decl, DERIVED##Decl);
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
}
}
// If the implementation chooses not to implement a certain visit
// method, fall back to the parent.
-#define DECL(Derived, Base) \
- RetTy Visit##Derived##Decl(Derived##Decl *D) { DISPATCH(Base, Base); }
-#define ABSTRACT_DECL(Derived, Base) DECL(Derived, Base)
-#include "clang/AST/DeclNodes.def"
+#define DECL(DERIVED, BASE) \
+ RetTy Visit##DERIVED##Decl(DERIVED##Decl *D) { DISPATCH(BASE, BASE); }
+#include "clang/AST/DeclNodes.inc"
RetTy VisitDecl(Decl *D) { return RetTy(); }
};
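
With the dispatch table now generated from DeclNodes.inc, clients use the
visitor exactly as before: subclass via CRTP and override the Visit methods
of interest; unhandled kinds fall through to the base class and finally to
VisitDecl(). A small sketch (the class name is hypothetical):

    #include "clang/AST/DeclVisitor.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace clang;

    // Hypothetical visitor: prints functions, ignores everything else.
    class KindDumper : public DeclVisitor<KindDumper> {
    public:
      void VisitFunctionDecl(FunctionDecl *D) {
        llvm::errs() << "function: " << D->getNameAsString() << "\n";
      }
      void VisitDecl(Decl *D) {} // fallback for all other kinds
    };

Usage: KindDumper().Visit(D) for any Decl *D.
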
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Expr.h b/contrib/llvm/tools/clang/include/clang/AST/Expr.h
index 66639e2..ade2b09 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Expr.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Expr.h
@@ -162,9 +162,6 @@ public:
};
isLvalueResult isLvalue(ASTContext &Ctx) const;
- // Same as above, but excluding checks for non-object and void types in C
- isLvalueResult isLvalueInternal(ASTContext &Ctx) const;
-
/// isModifiableLvalue - C99 6.3.2.1: an lvalue that does not have array type,
/// does not have an incomplete type, does not have a const-qualified type,
/// and if it is a structure or union, does not have any member (including,
@@ -194,6 +191,95 @@ public:
isModifiableLvalueResult isModifiableLvalue(ASTContext &Ctx,
SourceLocation *Loc = 0) const;
+ /// \brief The return type of Classify(). Represents the C++0x expression
+ /// taxonomy.
+ class Classification {
+ public:
+ /// \brief The various classification results. Most of these mean prvalue.
+ enum Kinds {
+ CL_LValue,
+ CL_XValue,
+ CL_Function, // Functions cannot be lvalues in C.
+ CL_Void, // Void cannot be an lvalue in C.
+ CL_DuplicateVectorComponents, // A vector shuffle with dupes.
+ CL_MemberFunction, // An expression referring to a member function
+ CL_SubObjCPropertySetting,
+ CL_ClassTemporary, // A prvalue of class type
+ CL_PRValue // A prvalue for any other reason, of any other type
+ };
+ /// \brief The results of modification testing.
+ enum ModifiableType {
+ CM_Untested, // testModifiable was false.
+ CM_Modifiable,
+ CM_RValue, // Not modifiable because it's an rvalue
+ CM_Function, // Not modifiable because it's a function; C++ only
+ CM_LValueCast, // Same as CM_RValue, but indicates GCC cast-as-lvalue ext
+ CM_NotBlockQualified, // Not captured in the closure
+ CM_NoSetterProperty, // Implicit assignment to ObjC property without setter
+ CM_ConstQualified,
+ CM_ArrayType,
+ CM_IncompleteType
+ };
+
+ private:
+ friend class Expr;
+
+ unsigned short Kind;
+ unsigned short Modifiable;
+
+ explicit Classification(Kinds k, ModifiableType m)
+ : Kind(k), Modifiable(m)
+ {}
+
+ public:
+ Classification() {}
+
+ Kinds getKind() const { return static_cast<Kinds>(Kind); }
+ ModifiableType getModifiable() const {
+ assert(Modifiable != CM_Untested && "Did not test for modifiability.");
+ return static_cast<ModifiableType>(Modifiable);
+ }
+ bool isLValue() const { return Kind == CL_LValue; }
+ bool isXValue() const { return Kind == CL_XValue; }
+ bool isGLValue() const { return Kind <= CL_XValue; }
+ bool isPRValue() const { return Kind >= CL_Function; }
+ bool isRValue() const { return Kind >= CL_XValue; }
+ bool isModifiable() const { return getModifiable() == CM_Modifiable; }
+ };
+ /// \brief Classify - Classify this expression according to the C++0x
+ /// expression taxonomy.
+ ///
+ /// C++0x defines ([basic.lval]) a new taxonomy of expressions to replace the
+ /// old lvalue vs rvalue. This function determines the type of expression this
+ /// is. There are three expression types:
+ /// - lvalues are classical lvalues as in C++03.
+ /// - prvalues are equivalent to rvalues in C++03.
+ /// - xvalues are expressions yielding unnamed rvalue references, e.g. a
+ /// function returning an rvalue reference.
+ /// lvalues and xvalues are collectively referred to as glvalues, while
+ /// prvalues and xvalues together form rvalues.
+ Classification Classify(ASTContext &Ctx) const {
+ return ClassifyImpl(Ctx, 0);
+ }
+
+ /// \brief ClassifyModifiable - Classify this expression according to the
+ /// C++0x expression taxonomy, and see if it is valid on the left side
+ /// of an assignment.
+ ///
+ /// This function extends Classify in that it also tests whether the
+ /// expression is modifiable (C99 6.3.2.1p1).
+ /// \param Loc A source location that might be filled with a relevant location
+ /// if the expression is not modifiable.
+ Classification ClassifyModifiable(ASTContext &Ctx, SourceLocation &Loc) const {
+ return ClassifyImpl(Ctx, &Loc);
+ }
+
+private:
+ Classification ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const;
+
+public:
+
/// \brief If this expression refers to a bit-field, retrieve the
/// declaration of that bit-field.
FieldDecl *getBitField();
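
A sketch of how a caller might use the new taxonomy (the helper is
hypothetical; E and Ctx stand for any expression and its ASTContext):

    #include "clang/AST/Expr.h"
    using namespace clang;

    // Hypothetical helper: true if E may appear on the left of '='.
    static bool isAssignable(const Expr *E, ASTContext &Ctx) {
      SourceLocation Loc; // set to a relevant location on failure
      Expr::Classification Cl = E->ClassifyModifiable(Ctx, Loc);
      return Cl.isLValue() && Cl.isModifiable();
    }
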
@@ -414,6 +500,7 @@ struct ExplicitTemplateArgumentList {
void initializeFrom(const TemplateArgumentListInfo &List);
void copyInto(TemplateArgumentListInfo &List) const;
+ static std::size_t sizeFor(unsigned NumTemplateArgs);
static std::size_t sizeFor(const TemplateArgumentListInfo &List);
};
@@ -474,27 +561,21 @@ class DeclRefExpr : public Expr {
ValueDecl *D, SourceLocation NameLoc,
const TemplateArgumentListInfo *TemplateArgs,
QualType T);
+
+ /// \brief Construct an empty declaration reference expression.
+ explicit DeclRefExpr(EmptyShell Empty)
+ : Expr(DeclRefExprClass, Empty) { }
-protected:
/// \brief Computes the type- and value-dependence flags for this
/// declaration reference expression.
void computeDependence();
- DeclRefExpr(StmtClass SC, ValueDecl *d, QualType t, SourceLocation l) :
- Expr(SC, t, false, false), DecoratedD(d, 0), Loc(l) {
- computeDependence();
- }
-
public:
DeclRefExpr(ValueDecl *d, QualType t, SourceLocation l) :
Expr(DeclRefExprClass, t, false, false), DecoratedD(d, 0), Loc(l) {
computeDependence();
}
- /// \brief Construct an empty declaration reference expression.
- explicit DeclRefExpr(EmptyShell Empty)
- : Expr(DeclRefExprClass, Empty) { }
-
static DeclRefExpr *Create(ASTContext &Context,
NestedNameSpecifier *Qualifier,
SourceRange QualifierRange,
@@ -502,6 +583,10 @@ public:
SourceLocation NameLoc,
QualType T,
const TemplateArgumentListInfo *TemplateArgs = 0);
+
+ /// \brief Construct an empty declaration reference expression.
+ static DeclRefExpr *CreateEmpty(ASTContext &Context,
+ bool HasQualifier, unsigned NumTemplateArgs);
ValueDecl *getDecl() { return DecoratedD.getPointer(); }
const ValueDecl *getDecl() const { return DecoratedD.getPointer(); }
@@ -591,6 +676,9 @@ public:
// Iterators
virtual child_iterator child_begin();
virtual child_iterator child_end();
+
+ friend class PCHStmtReader;
+ friend class PCHStmtWriter;
};
/// PredefinedExpr - [C99 6.4.2.2] - A predefined identifier such as __func__.
@@ -1560,11 +1648,6 @@ public:
Base(base), MemberDecl(memberdecl), MemberLoc(l), IsArrow(isarrow),
HasQualifierOrFoundDecl(false), HasExplicitTemplateArgumentList(false) {}
- /// \brief Build an empty member reference expression.
- explicit MemberExpr(EmptyShell Empty)
- : Expr(MemberExprClass, Empty), HasQualifierOrFoundDecl(false),
- HasExplicitTemplateArgumentList(false) { }
-
static MemberExpr *Create(ASTContext &C, Expr *base, bool isarrow,
NestedNameSpecifier *qual, SourceRange qualrange,
ValueDecl *memberdecl, DeclAccessPair founddecl,
@@ -1771,6 +1854,10 @@ public:
/// CK_BitCast - Used for reinterpret_cast.
CK_BitCast,
+ /// CK_LValueBitCast - Used for reinterpret_cast of expressions to
+ /// a reference type.
+ CK_LValueBitCast,
+
/// CK_NoOp - Used for const_cast.
CK_NoOp,
@@ -1874,6 +1961,7 @@ private:
// These should not have an inheritance path.
case CK_Unknown:
case CK_BitCast:
+ case CK_LValueBitCast:
case CK_NoOp:
case CK_Dynamic:
case CK_ToUnion:
@@ -1937,6 +2025,7 @@ public:
}
const CXXBaseSpecifierArray& getBasePath() const { return BasePath; }
+ CXXBaseSpecifierArray& getBasePath() { return BasePath; }
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstCastExprConstant &&
@@ -2169,7 +2258,8 @@ public:
/// predicates to categorize the respective opcodes.
bool isMultiplicativeOp() const { return Opc >= Mul && Opc <= Rem; }
- bool isAdditiveOp() const { return Opc == Add || Opc == Sub; }
+ static bool isAdditiveOp(Opcode Opc) { return Opc == Add || Opc == Sub; }
+ bool isAdditiveOp() const { return isAdditiveOp(Opc); }
static bool isShiftOp(Opcode Opc) { return Opc == Shl || Opc == Shr; }
bool isShiftOp() const { return isShiftOp(Opc); }
@@ -3153,7 +3243,7 @@ public:
~ParenListExpr() {}
/// \brief Build an empty paren list.
- //explicit ParenListExpr(EmptyShell Empty) : Expr(ParenListExprClass, Empty) { }
+ explicit ParenListExpr(EmptyShell Empty) : Expr(ParenListExprClass, Empty) { }
unsigned getNumExprs() const { return NumExprs; }
@@ -3183,6 +3273,9 @@ public:
// Iterators
virtual child_iterator child_begin();
virtual child_iterator child_end();
+
+ friend class PCHStmtReader;
+ friend class PCHStmtWriter;
};
@@ -3305,12 +3398,15 @@ class BlockDeclRefExpr : public Expr {
SourceLocation Loc;
bool IsByRef : 1;
bool ConstQualAdded : 1;
+ Stmt *CopyConstructorVal;
public:
// FIXME: Fix type/value dependence!
BlockDeclRefExpr(ValueDecl *d, QualType t, SourceLocation l, bool ByRef,
- bool constAdded = false)
- : Expr(BlockDeclRefExprClass, t, false, false), D(d), Loc(l), IsByRef(ByRef),
- ConstQualAdded(constAdded) {}
+ bool constAdded = false,
+ Stmt *copyConstructorVal = 0)
+ : Expr(BlockDeclRefExprClass, t, (!t.isNull() && t->isDependentType()), false),
+ D(d), Loc(l), IsByRef(ByRef),
+ ConstQualAdded(constAdded), CopyConstructorVal(copyConstructorVal) {}
// \brief Build an empty reference to a declared variable in a
// block.
@@ -3331,6 +3427,12 @@ public:
bool isConstQualAdded() const { return ConstQualAdded; }
void setConstQualAdded(bool C) { ConstQualAdded = C; }
+
+ const Expr *getCopyConstructorExpr() const
+ { return cast_or_null<Expr>(CopyConstructorVal); }
+ Expr *getCopyConstructorExpr()
+ { return cast_or_null<Expr>(CopyConstructorVal); }
+ void setCopyConstructorExpr(Expr *E) { CopyConstructorVal = E; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BlockDeclRefExprClass;
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h b/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
index 0c493f3..b955381 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
@@ -465,7 +465,6 @@ class CXXDefaultArgExpr : public Expr {
/// \brief The location where the default argument expression was used.
SourceLocation Loc;
-protected:
CXXDefaultArgExpr(StmtClass SC, SourceLocation Loc, ParmVarDecl *param)
: Expr(SC,
param->hasUnparsedDefaultArg()
@@ -504,9 +503,6 @@ public:
// Retrieve the parameter that the argument was created from.
const ParmVarDecl *getParam() const { return Param.getPointer(); }
ParmVarDecl *getParam() { return Param.getPointer(); }
-
- /// isExprStored - Return true if this expression owns the expression.
- bool isExprStored() const { return Param.getInt(); }
// Retrieve the actual argument to the function call.
const Expr *getExpr() const {
@@ -519,16 +515,10 @@ public:
return *reinterpret_cast<Expr **> (this + 1);
return getParam()->getDefaultArg();
}
-
- void setExpr(Expr *E) {
- Param.setInt(true);
- Param.setPointer((ParmVarDecl*)E);
- }
/// \brief Retrieve the location where this default argument was actually
/// used.
SourceLocation getUsedLocation() const { return Loc; }
- void setUsedLocation(SourceLocation L) { Loc = L; }
virtual SourceRange getSourceRange() const {
// Default argument expressions have no representation in the
@@ -544,6 +534,9 @@ public:
// Iterators
virtual child_iterator child_begin();
virtual child_iterator child_end();
+
+ friend class PCHStmtReader;
+ friend class PCHStmtWriter;
};
/// CXXTemporary - Represents a C++ temporary.
@@ -655,6 +648,9 @@ public:
static CXXBindReferenceExpr *Create(ASTContext &C, Expr *SubExpr,
bool ExtendsLifetime,
bool RequiresTemporaryCopy);
+
+ explicit CXXBindReferenceExpr(EmptyShell Empty)
+ : Expr(CXXBindReferenceExprClass, Empty) { }
const Expr *getSubExpr() const { return cast<Expr>(SubExpr); }
Expr *getSubExpr() { return cast<Expr>(SubExpr); }
@@ -670,7 +666,7 @@ public:
// extendsLifetime - Whether binding this reference extends the lifetime of
// the expression being bound. FIXME: Add C++ reference.
- bool extendsLifetime() { return ExtendsLifetime; }
+ bool extendsLifetime() const { return ExtendsLifetime; }
// Implement isa/cast/dyncast/etc.
static bool classof(const Stmt *T) {
@@ -681,6 +677,8 @@ public:
// Iterators
virtual child_iterator child_begin();
virtual child_iterator child_end();
+
+ friend class PCHStmtReader;
};
/// CXXConstructExpr - Represents a call to a C++ constructor.
@@ -711,13 +709,20 @@ protected:
ConstructionKind ConstructKind = CK_Complete);
~CXXConstructExpr() { }
+ /// \brief Construct an empty C++ construction expression.
+ CXXConstructExpr(StmtClass SC, EmptyShell Empty)
+ : Expr(SC, Empty), Constructor(0), Elidable(0), ZeroInitialization(0),
+ ConstructKind(0), Args(0), NumArgs(0) { }
+
virtual void DoDestroy(ASTContext &C);
public:
- /// \brief Construct an empty C++ construction expression that will store
- /// \p numargs arguments.
- CXXConstructExpr(EmptyShell Empty, ASTContext &C, unsigned numargs);
-
+ /// \brief Construct an empty C++ construction expression.
+ explicit CXXConstructExpr(EmptyShell Empty)
+ : Expr(CXXConstructExprClass, Empty), Constructor(0),
+ Elidable(0), ZeroInitialization(0),
+ ConstructKind(0), Args(0), NumArgs(0) { }
+
static CXXConstructExpr *Create(ASTContext &C, QualType T,
SourceLocation Loc,
CXXConstructorDecl *D, bool Elidable,
@@ -790,6 +795,8 @@ public:
// Iterators
virtual child_iterator child_begin();
virtual child_iterator child_end();
+
+ friend class PCHStmtReader;
};
/// CXXFunctionalCastExpr - Represents an explicit C++ type conversion
@@ -829,12 +836,8 @@ public:
///
/// This expression type represents a C++ "functional" cast
/// (C++[expr.type.conv]) with N != 1 arguments that invokes a
-/// constructor to build a temporary object. If N == 0 but no
-/// constructor will be called (because the functional cast is
-/// performing value-initialization of an object whose class type has no
-/// user-declared constructors), CXXZeroInitValueExpr will represent
-/// the functional cast. Finally, with N == 1 arguments the functional
-/// cast expression will be represented by CXXFunctionalCastExpr.
+/// constructor to build a temporary object. With N == 1 arguments the
+/// functional cast expression will be represented by CXXFunctionalCastExpr.
/// Example:
/// @code
/// struct X { X(int, float); };
@@ -853,6 +856,8 @@ public:
Expr **Args,unsigned NumArgs,
SourceLocation rParenLoc,
bool ZeroInitialization = false);
+ explicit CXXTemporaryObjectExpr(EmptyShell Empty)
+ : CXXConstructExpr(CXXTemporaryObjectExprClass, Empty) { }
~CXXTemporaryObjectExpr() { }
@@ -866,24 +871,25 @@ public:
return T->getStmtClass() == CXXTemporaryObjectExprClass;
}
static bool classof(const CXXTemporaryObjectExpr *) { return true; }
+
+ friend class PCHStmtReader;
};
-/// CXXZeroInitValueExpr - [C++ 5.2.3p2]
+/// CXXScalarValueInitExpr - [C++ 5.2.3p2]
/// Expression "T()" which creates a value-initialized rvalue of type
-/// T, which is either a non-class type or a class type without any
-/// user-defined constructors.
+/// T, which is a non-class type.
///
-class CXXZeroInitValueExpr : public Expr {
+class CXXScalarValueInitExpr : public Expr {
SourceLocation TyBeginLoc;
SourceLocation RParenLoc;
public:
- CXXZeroInitValueExpr(QualType ty, SourceLocation tyBeginLoc,
+ CXXScalarValueInitExpr(QualType ty, SourceLocation tyBeginLoc,
SourceLocation rParenLoc) :
- Expr(CXXZeroInitValueExprClass, ty, false, false),
+ Expr(CXXScalarValueInitExprClass, ty, false, false),
TyBeginLoc(tyBeginLoc), RParenLoc(rParenLoc) {}
- explicit CXXZeroInitValueExpr(EmptyShell Shell)
- : Expr(CXXZeroInitValueExprClass, Shell) { }
+ explicit CXXScalarValueInitExpr(EmptyShell Shell)
+ : Expr(CXXScalarValueInitExprClass, Shell) { }
SourceLocation getTypeBeginLoc() const { return TyBeginLoc; }
SourceLocation getRParenLoc() const { return RParenLoc; }
@@ -902,9 +908,9 @@ public:
}
static bool classof(const Stmt *T) {
- return T->getStmtClass() == CXXZeroInitValueExprClass;
+ return T->getStmtClass() == CXXScalarValueInitExprClass;
}
- static bool classof(const CXXZeroInitValueExpr *) { return true; }
+ static bool classof(const CXXScalarValueInitExpr *) { return true; }
// Iterators
virtual child_iterator child_begin();
@@ -916,15 +922,13 @@ public:
class CXXNewExpr : public Expr {
// Was the usage ::new, i.e. is the global new to be used?
bool GlobalNew : 1;
- // Was the form (type-id) used? Otherwise, it was new-type-id.
- bool ParenTypeId : 1;
// Is there an initializer? If not, built-ins are uninitialized, else they're
// value-initialized.
bool Initializer : 1;
// Do we allocate an array? If so, the first SubExpr is the size expression.
bool Array : 1;
// The number of placement new arguments.
- unsigned NumPlacementArgs : 14;
+ unsigned NumPlacementArgs : 15;
// The number of constructor arguments. This may be 1 even for non-class
// types; use the pseudo copy constructor.
unsigned NumConstructorArgs : 14;
@@ -941,12 +945,18 @@ class CXXNewExpr : public Expr {
// Must be null for all other types.
CXXConstructorDecl *Constructor;
+ /// \brief If the allocated type was expressed as a parenthesized type-id,
+ /// the source range covering the parenthesized type-id.
+ SourceRange TypeIdParens;
+
SourceLocation StartLoc;
SourceLocation EndLoc;
+ friend class PCHStmtReader;
public:
CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
- Expr **placementArgs, unsigned numPlaceArgs, bool ParenTypeId,
+ Expr **placementArgs, unsigned numPlaceArgs,
+ SourceRange TypeIdParens,
Expr *arraySize, CXXConstructorDecl *constructor, bool initializer,
Expr **constructorArgs, unsigned numConsArgs,
FunctionDecl *operatorDelete, QualType ty,
@@ -989,10 +999,11 @@ public:
return cast<Expr>(SubExprs[Array + i]);
}
+ bool isParenTypeId() const { return TypeIdParens.isValid(); }
+ SourceRange getTypeIdParens() const { return TypeIdParens; }
+
bool isGlobalNew() const { return GlobalNew; }
void setGlobalNew(bool V) { GlobalNew = V; }
- bool isParenTypeId() const { return ParenTypeId; }
- void setParenTypeId(bool V) { ParenTypeId = V; }
bool hasInitializer() const { return Initializer; }
void setHasInitializer(bool V) { Initializer = V; }
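
Folding the ParenTypeId bit into a SourceRange means the predicate and the
source locations can no longer disagree. A hypothetical helper illustrating
the pairing:

    #include "clang/AST/ExprCXX.h"
    using namespace clang;

    // Hypothetical helper: range of the parenthesized type-id, if the
    // allocated type was written that way; otherwise the whole new-expr.
    static SourceRange typeIdRange(const CXXNewExpr *New) {
      if (New->isParenTypeId()) // i.e. TypeIdParens.isValid()
        return New->getTypeIdParens();
      return New->getSourceRange();
    }
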
@@ -1082,18 +1093,26 @@ public:
: Expr(CXXDeleteExprClass, ty, false, false), GlobalDelete(globalDelete),
ArrayForm(arrayForm), OperatorDelete(operatorDelete), Argument(arg),
Loc(loc) { }
+ explicit CXXDeleteExpr(EmptyShell Shell)
+ : Expr(CXXDeleteExprClass, Shell), OperatorDelete(0), Argument(0) { }
bool isGlobalDelete() const { return GlobalDelete; }
bool isArrayForm() const { return ArrayForm; }
+
+ void setGlobalDelete(bool V) { GlobalDelete = V; }
+ void setArrayForm(bool V) { ArrayForm = V; }
FunctionDecl *getOperatorDelete() const { return OperatorDelete; }
+ void setOperatorDelete(FunctionDecl *D) { OperatorDelete = D; }
Expr *getArgument() { return cast<Expr>(Argument); }
const Expr *getArgument() const { return cast<Expr>(Argument); }
+ void setArgument(Expr *E) { Argument = E; }
virtual SourceRange getSourceRange() const {
return SourceRange(Loc, Argument->getLocEnd());
}
+ void setStartLoc(SourceLocation L) { Loc = L; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CXXDeleteExprClass;
@@ -1215,6 +1234,10 @@ public:
ScopeType(ScopeType), ColonColonLoc(ColonColonLoc), TildeLoc(TildeLoc),
DestroyedType(DestroyedType) { }
+ explicit CXXPseudoDestructorExpr(EmptyShell Shell)
+ : Expr(CXXPseudoDestructorExprClass, Shell),
+ Base(0), IsArrow(false), Qualifier(0), ScopeType(0) { }
+
void setBase(Expr *E) { Base = E; }
Expr *getBase() const { return cast<Expr>(Base); }
@@ -1227,11 +1250,13 @@ public:
/// the nested-name-specifier that precedes the member name. Otherwise,
/// returns an empty source range.
SourceRange getQualifierRange() const { return QualifierRange; }
+ void setQualifierRange(SourceRange R) { QualifierRange = R; }
/// \brief If the member name was qualified, retrieves the
/// nested-name-specifier that precedes the member name. Otherwise, returns
/// NULL.
NestedNameSpecifier *getQualifier() const { return Qualifier; }
+ void setQualifier(NestedNameSpecifier *NNS) { Qualifier = NNS; }
/// \brief Determine whether this pseudo-destructor expression was written
/// using an '->' (otherwise, it used a '.').
@@ -1240,6 +1265,7 @@ public:
/// \brief Retrieve the location of the '.' or '->' operator.
SourceLocation getOperatorLoc() const { return OperatorLoc; }
+ void setOperatorLoc(SourceLocation L) { OperatorLoc = L; }
/// \brief Retrieve the scope type in a qualified pseudo-destructor
/// expression.
@@ -1251,13 +1277,16 @@ public:
/// nested-name-specifier. It is stored as the "scope type" of the pseudo-
/// destructor expression.
TypeSourceInfo *getScopeTypeInfo() const { return ScopeType; }
+ void setScopeTypeInfo(TypeSourceInfo *Info) { ScopeType = Info; }
/// \brief Retrieve the location of the '::' in a qualified pseudo-destructor
/// expression.
SourceLocation getColonColonLoc() const { return ColonColonLoc; }
+ void setColonColonLoc(SourceLocation L) { ColonColonLoc = L; }
/// \brief Retrieve the location of the '~'.
SourceLocation getTildeLoc() const { return TildeLoc; }
+ void setTildeLoc(SourceLocation L) { TildeLoc = L; }
/// \brief Retrieve the source location information for the type
/// being destroyed.
@@ -1285,6 +1314,17 @@ public:
return DestroyedType.getLocation();
}
+ /// \brief Set the name of destroyed type for a dependent pseudo-destructor
+ /// expression.
+ void setDestroyedType(IdentifierInfo *II, SourceLocation Loc) {
+ DestroyedType = PseudoDestructorTypeStorage(II, Loc);
+ }
+
+ /// \brief Set the destroyed type.
+ void setDestroyedType(TypeSourceInfo *Info) {
+ DestroyedType = PseudoDestructorTypeStorage(Info);
+ }
+
virtual SourceRange getSourceRange() const;
static bool classof(const Stmt *T) {
@@ -1321,6 +1361,9 @@ public:
: Expr(UnaryTypeTraitExprClass, ty, false, queried->isDependentType()),
UTT(utt), Loc(loc), RParen(rparen), QueriedType(queried) { }
+ explicit UnaryTypeTraitExpr(EmptyShell Empty)
+ : Expr(UnaryTypeTraitExprClass, Empty), UTT((UnaryTypeTrait)0) { }
+
virtual SourceRange getSourceRange() const { return SourceRange(Loc, RParen);}
UnaryTypeTrait getTrait() const { return UTT; }
@@ -1337,6 +1380,8 @@ public:
// Iterators
virtual child_iterator child_begin();
virtual child_iterator child_end();
+
+ friend class PCHStmtReader;
};
/// \brief A reference to an overloaded function set, either an
@@ -1361,16 +1406,20 @@ class OverloadExpr : public Expr {
/// The location of the name.
SourceLocation NameLoc;
+protected:
/// True if the name was a template-id.
bool HasExplicitTemplateArgs;
-protected:
OverloadExpr(StmtClass K, ASTContext &C, QualType T, bool Dependent,
NestedNameSpecifier *Qualifier, SourceRange QRange,
DeclarationName Name, SourceLocation NameLoc,
bool HasTemplateArgs,
UnresolvedSetIterator Begin, UnresolvedSetIterator End);
+ OverloadExpr(StmtClass K, EmptyShell Empty)
+ : Expr(K, Empty), Results(0), NumResults(0),
+ Qualifier(0), HasExplicitTemplateArgs(false) { }
+
public:
/// Computes whether an unresolved lookup on the given declarations
/// and optional template arguments is type- and value-dependent.
@@ -1401,6 +1450,9 @@ public:
decls_iterator decls_end() const {
return UnresolvedSetIterator(Results + NumResults);
}
+
+ void initializeResults(ASTContext &C,
+ UnresolvedSetIterator Begin, UnresolvedSetIterator End);
/// Gets the number of declarations in the unresolved set.
unsigned getNumDecls() const { return NumResults; }
@@ -1415,9 +1467,11 @@ public:
/// Fetches the nested-name qualifier, if one was given.
NestedNameSpecifier *getQualifier() const { return Qualifier; }
+ void setQualifier(NestedNameSpecifier *NNS) { Qualifier = NNS; }
/// Fetches the range of the nested-name qualifier.
SourceRange getQualifierRange() const { return QualifierRange; }
+ void setQualifierRange(SourceRange R) { QualifierRange = R; }
/// \brief Determines whether this expression had an explicit
/// template argument list, e.g. f<int>.
@@ -1480,6 +1534,11 @@ class UnresolvedLookupExpr : public OverloadExpr {
RequiresADL(RequiresADL), Overloaded(Overloaded), NamingClass(NamingClass)
{}
+ UnresolvedLookupExpr(EmptyShell Empty)
+ : OverloadExpr(UnresolvedLookupExprClass, Empty),
+ RequiresADL(false), Overloaded(false), NamingClass(0)
+ {}
+
public:
static UnresolvedLookupExpr *Create(ASTContext &C,
bool Dependent,
@@ -1511,17 +1570,23 @@ public:
UnresolvedSetIterator Begin,
UnresolvedSetIterator End);
+ static UnresolvedLookupExpr *CreateEmpty(ASTContext &C,
+ unsigned NumTemplateArgs);
+
/// True if this declaration should be extended by
/// argument-dependent lookup.
bool requiresADL() const { return RequiresADL; }
+ void setRequiresADL(bool V) { RequiresADL = V; }
/// True if this lookup is overloaded.
bool isOverloaded() const { return Overloaded; }
+ void setOverloaded(bool V) { Overloaded = V; }
/// Gets the 'naming class' (in the sense of C++0x
/// [class.access.base]p5) of the lookup. This is the scope
/// that was looked in to find these results.
CXXRecordDecl *getNamingClass() const { return NamingClass; }
+ void setNamingClass(CXXRecordDecl *D) { NamingClass = D; }
// Note that, inconsistently with the explicit-template-argument AST
// nodes, users are *forbidden* from calling these methods on objects
@@ -1628,18 +1693,25 @@ public:
SourceLocation NameLoc,
const TemplateArgumentListInfo *TemplateArgs = 0);
+ static DependentScopeDeclRefExpr *CreateEmpty(ASTContext &C,
+ unsigned NumTemplateArgs);
+
/// \brief Retrieve the name that this expression refers to.
DeclarationName getDeclName() const { return Name; }
+ void setDeclName(DeclarationName N) { Name = N; }
/// \brief Retrieve the location of the name within the expression.
SourceLocation getLocation() const { return Loc; }
+ void setLocation(SourceLocation L) { Loc = L; }
/// \brief Retrieve the source range of the nested-name-specifier.
SourceRange getQualifierRange() const { return QualifierRange; }
+ void setQualifierRange(SourceRange R) { QualifierRange = R; }
/// \brief Retrieve the nested-name-specifier that qualifies this
/// declaration.
NestedNameSpecifier *getQualifier() const { return Qualifier; }
+ void setQualifier(NestedNameSpecifier *NNS) { Qualifier = NNS; }
/// Determines whether this lookup had explicit template arguments.
bool hasExplicitTemplateArgs() const { return HasExplicitTemplateArgs; }
@@ -1648,6 +1720,11 @@ public:
// nodes, users are *forbidden* from calling these methods on objects
// without explicit template arguments.
+ ExplicitTemplateArgumentList &getExplicitTemplateArgs() {
+ assert(hasExplicitTemplateArgs());
+ return *reinterpret_cast<ExplicitTemplateArgumentList*>(this + 1);
+ }
+
/// Gets a reference to the explicit template argument list.
const ExplicitTemplateArgumentList &getExplicitTemplateArgs() const {
assert(hasExplicitTemplateArgs());
@@ -1792,6 +1869,9 @@ class CXXUnresolvedConstructExpr : public Expr {
unsigned NumArgs,
SourceLocation RParenLoc);
+ CXXUnresolvedConstructExpr(EmptyShell Empty, unsigned NumArgs)
+ : Expr(CXXUnresolvedConstructExprClass, Empty), NumArgs(NumArgs) { }
+
public:
static CXXUnresolvedConstructExpr *Create(ASTContext &C,
SourceLocation TyBegin,
@@ -1801,6 +1881,9 @@ public:
unsigned NumArgs,
SourceLocation RParenLoc);
+ static CXXUnresolvedConstructExpr *CreateEmpty(ASTContext &C,
+ unsigned NumArgs);
+
/// \brief Retrieve the source location where the type begins.
SourceLocation getTypeBeginLoc() const { return TyBeginLoc; }
void setTypeBeginLoc(SourceLocation L) { TyBeginLoc = L; }
@@ -1845,6 +1928,11 @@ public:
return *(arg_begin() + I);
}
+ void setArg(unsigned I, Expr *E) {
+ assert(I < NumArgs && "Argument index out-of-range");
+ *(arg_begin() + I) = E;
+ }
+
virtual SourceRange getSourceRange() const {
return SourceRange(TyBeginLoc, RParenLoc);
}
@@ -1908,20 +1996,6 @@ class CXXDependentScopeMemberExpr : public Expr {
/// \brief The location of the member name.
SourceLocation MemberLoc;
- /// \brief Retrieve the explicit template argument list that followed the
- /// member template name, if any.
- ExplicitTemplateArgumentList *getExplicitTemplateArgumentList() {
- assert(HasExplicitTemplateArgs);
- return reinterpret_cast<ExplicitTemplateArgumentList *>(this + 1);
- }
-
- /// \brief Retrieve the explicit template argument list that followed the
- /// member template name, if any.
- const ExplicitTemplateArgumentList *getExplicitTemplateArgumentList() const {
- return const_cast<CXXDependentScopeMemberExpr *>(this)
- ->getExplicitTemplateArgumentList();
- }
-
CXXDependentScopeMemberExpr(ASTContext &C,
Expr *Base, QualType BaseType, bool IsArrow,
SourceLocation OperatorLoc,
@@ -1960,6 +2034,9 @@ public:
SourceLocation MemberLoc,
const TemplateArgumentListInfo *TemplateArgs);
+ static CXXDependentScopeMemberExpr *
+ CreateEmpty(ASTContext &C, unsigned NumTemplateArgs);
+
/// \brief True if this is an implicit access, i.e. one in which the
/// member being accessed was not written in the source. The source
/// location of the operator is invalid in this case.
@@ -1974,6 +2051,7 @@ public:
void setBase(Expr *E) { Base = E; }
QualType getBaseType() const { return BaseType; }
+ void setBaseType(QualType T) { BaseType = T; }
/// \brief Determine whether this member expression used the '->'
/// operator; otherwise, it used the '.' operator.
@@ -1987,10 +2065,12 @@ public:
/// \brief Retrieve the nested-name-specifier that qualifies the member
/// name.
NestedNameSpecifier *getQualifier() const { return Qualifier; }
+ void setQualifier(NestedNameSpecifier *NNS) { Qualifier = NNS; }
/// \brief Retrieve the source range covering the nested-name-specifier
/// that qualifies the member name.
SourceRange getQualifierRange() const { return QualifierRange; }
+ void setQualifierRange(SourceRange R) { QualifierRange = R; }
/// \brief Retrieve the first part of the nested-name-specifier that was
/// found in the scope of the member access expression when the member access
@@ -2006,6 +2086,9 @@ public:
NamedDecl *getFirstQualifierFoundInScope() const {
return FirstQualifierFoundInScope;
}
+ void setFirstQualifierFoundInScope(NamedDecl *D) {
+ FirstQualifierFoundInScope = D;
+ }
/// \brief Retrieve the name of the member that this expression
/// refers to.
@@ -2023,6 +2106,20 @@ public:
return HasExplicitTemplateArgs;
}
+ /// \brief Retrieve the explicit template argument list that followed the
+ /// member template name, if any.
+ ExplicitTemplateArgumentList *getExplicitTemplateArgumentList() {
+ assert(HasExplicitTemplateArgs);
+ return reinterpret_cast<ExplicitTemplateArgumentList *>(this + 1);
+ }
+
+ /// \brief Retrieve the explicit template argument list that followed the
+ /// member template name, if any.
+ const ExplicitTemplateArgumentList *getExplicitTemplateArgumentList() const {
+ return const_cast<CXXDependentScopeMemberExpr *>(this)
+ ->getExplicitTemplateArgumentList();
+ }
+
/// \brief Copies the template arguments (if present) into the given
/// structure.
void copyTemplateArgumentsInto(TemplateArgumentListInfo &List) const {
@@ -2030,6 +2127,12 @@ public:
getExplicitTemplateArgumentList()->copyInto(List);
}
+ /// \brief Initializes the template arguments using the given structure.
+ void initializeTemplateArgumentsFrom(const TemplateArgumentListInfo &List) {
+ assert(HasExplicitTemplateArgs);
+ getExplicitTemplateArgumentList()->initializeFrom(List);
+ }
+
/// \brief Retrieve the location of the left angle bracket following the
/// member name ('<'), if any.
SourceLocation getLAngleLoc() const {
@@ -2127,6 +2230,10 @@ class UnresolvedMemberExpr : public OverloadExpr {
SourceLocation MemberLoc,
const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin, UnresolvedSetIterator End);
+
+ UnresolvedMemberExpr(EmptyShell Empty)
+ : OverloadExpr(UnresolvedMemberExprClass, Empty), IsArrow(false),
+ HasUnresolvedUsing(false), Base(0) { }
public:
static UnresolvedMemberExpr *
@@ -2140,6 +2247,9 @@ public:
const TemplateArgumentListInfo *TemplateArgs,
UnresolvedSetIterator Begin, UnresolvedSetIterator End);
+ static UnresolvedMemberExpr *
+ CreateEmpty(ASTContext &C, unsigned NumTemplateArgs);
+
/// \brief True if this is an implicit access, i.e. one in which the
/// member being accessed was not written in the source. The source
/// location of the operator is invalid in this case.
@@ -2158,6 +2268,12 @@ public:
void setBase(Expr *E) { Base = E; }
QualType getBaseType() const { return BaseType; }
+ void setBaseType(QualType T) { BaseType = T; }
+
+ /// \brief Determine whether the lookup results contain an unresolved using
+ /// declaration.
+ bool hasUnresolvedUsing() const { return HasUnresolvedUsing; }
+ void setHasUnresolvedUsing(bool V) { HasUnresolvedUsing = V; }
/// \brief Determine whether this member expression used the '->'
/// operator; otherwise, it used the '.' operator.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h b/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
index 79e4451..def9ced 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
@@ -58,67 +58,85 @@ public:
virtual ~ExternalASTSource();
- /// \brief Resolve a type ID into a type, potentially building a new
- /// type.
- virtual QualType GetType(uint32_t ID) = 0;
-
/// \brief Resolve a declaration ID into a declaration, potentially
/// building a new declaration.
- virtual Decl *GetDecl(uint32_t ID) = 0;
+ ///
+ /// This method only needs to be implemented if the AST source ever
+ /// passes back decl sets as VisibleDeclaration objects.
+ virtual Decl *GetExternalDecl(uint32_t ID) = 0;
/// \brief Resolve a selector ID into a selector.
- virtual Selector GetSelector(uint32_t ID) = 0;
+ ///
+ /// This operation only needs to be implemented if the AST source
+ /// returns non-zero for GetNumExternalSelectors().
+ virtual Selector GetExternalSelector(uint32_t ID) = 0;
/// \brief Returns the number of selectors known to the external AST
/// source.
- virtual uint32_t GetNumKnownSelectors() = 0;
+ virtual uint32_t GetNumExternalSelectors() = 0;
- /// \brief Resolve the offset of a statement in the decl stream into a
- /// statement.
+ /// \brief Resolve the offset of a statement in the decl stream into
+ /// a statement.
///
- /// This operation will read a new statement from the external
- /// source each time it is called, and is meant to be used via a
- /// LazyOffsetPtr.
- virtual Stmt *GetDeclStmt(uint64_t Offset) = 0;
+ /// This operation is meant to be used via a LazyOffsetPtr. It only
+ /// needs to be implemented if the AST source uses methods like
+ /// FunctionDecl::setLazyBody when building decls.
+ virtual Stmt *GetExternalDeclStmt(uint64_t Offset) = 0;
- /// \brief Read all of the declarations lexically stored in a
- /// declaration context.
+ /// \brief Finds all declarations with the given name in the
+ /// given context.
///
- /// \param DC The declaration context whose declarations will be
- /// read.
+ /// Generally the final step of this method is either to call
+ /// SetExternalVisibleDeclsForName or to recursively call lookup on
+ /// the DeclContext after calling SetExternalVisibleDecls.
+ virtual DeclContext::lookup_result
+ FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) = 0;
+
+ /// \brief Finds all declarations lexically contained within the given
+ /// DeclContext.
///
- /// \param Decls Vector that will contain the declarations loaded
- /// from the external source. The caller is responsible for merging
- /// these declarations with any declarations already stored in the
- /// declaration context.
- ///
- /// \returns true if there was an error while reading the
- /// declarations for this declaration context.
- virtual bool ReadDeclsLexicallyInContext(DeclContext *DC,
- llvm::SmallVectorImpl<uint32_t> &Decls) = 0;
-
- /// \brief Read all of the declarations visible from a declaration
- /// context.
- ///
- /// \param DC The declaration context whose visible declarations
- /// will be read.
- ///
- /// \param Decls A vector of visible declaration structures,
- /// providing the mapping from each name visible in the declaration
- /// context to the declaration IDs of declarations with that name.
- ///
- /// \returns true if there was an error while reading the
- /// declarations for this declaration context.
- virtual bool ReadDeclsVisibleInContext(DeclContext *DC,
- llvm::SmallVectorImpl<VisibleDeclaration> & Decls) = 0;
+ /// \returns true if an error occurred.
+ virtual bool FindExternalLexicalDecls(const DeclContext *DC,
+ llvm::SmallVectorImpl<Decl*> &Result) = 0;
/// \brief Function that will be invoked when we begin parsing a new
/// translation unit involving this external AST source.
+ ///
+ /// The default implementation of this method is a no-op.
virtual void StartTranslationUnit(ASTConsumer *Consumer) { }
/// \brief Print any statistics that have been gathered regarding
/// the external AST source.
+ ///
+ /// The default implementation of this method is a no-op.
virtual void PrintStats();
+
+protected:
+ /// \brief Initialize the context's lookup map with the given decls.
+ /// It is assumed that none of the declarations are redeclarations of
+ /// each other.
+ static void SetExternalVisibleDecls(const DeclContext *DC,
+ const llvm::SmallVectorImpl<VisibleDeclaration> &Decls);
+
+ /// \brief Initialize the context's lookup map with the given decls.
+ /// It is assumed that none of the declarations are redeclarations of
+ /// each other.
+ static void SetExternalVisibleDecls(const DeclContext *DC,
+ const llvm::SmallVectorImpl<NamedDecl*> &Decls);
+
+ static DeclContext::lookup_result
+ SetExternalVisibleDeclsForName(const DeclContext *DC,
+ const VisibleDeclaration &VD);
+
+ static DeclContext::lookup_result
+ SetExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name,
+ llvm::SmallVectorImpl<NamedDecl*> &Decls);
+
+ static DeclContext::lookup_result
+ SetNoExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name);
};
/// \brief A lazy pointer to an AST node (of base type T) that resides
@@ -185,7 +203,8 @@ public:
};
/// \brief A lazy pointer to a statement.
-typedef LazyOffsetPtr<Stmt, &ExternalASTSource::GetDeclStmt> LazyDeclStmtPtr;
+typedef LazyOffsetPtr<Stmt, &ExternalASTSource::GetExternalDeclStmt>
+ LazyDeclStmtPtr;
} // end namespace clang
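
Under the renamed interface a source that contributes nothing should still
record negative lookups so they are not repeated. A minimal subclass sketch
(the class is hypothetical, and it assumes the pure virtuals shown above
are the complete set):

    #include "clang/AST/ExternalASTSource.h"
    using namespace clang;

    class NullASTSource : public ExternalASTSource {
    public:
      Decl *GetExternalDecl(uint32_t ID) { return 0; }
      Selector GetExternalSelector(uint32_t ID) { return Selector(); }
      uint32_t GetNumExternalSelectors() { return 0; }
      Stmt *GetExternalDeclStmt(uint64_t Offset) { return 0; }

      DeclContext::lookup_result
      FindExternalVisibleDeclsByName(const DeclContext *DC,
                                     DeclarationName Name) {
        // Record the negative result so this context/name pair is
        // not queried again.
        return SetNoExternalVisibleDeclsForName(DC, Name);
      }

      bool FindExternalLexicalDecls(const DeclContext *DC,
                                    llvm::SmallVectorImpl<Decl*> &Result) {
        return false; // no error, nothing added
      }
    };
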
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Makefile b/contrib/llvm/tools/clang/include/clang/AST/Makefile
index a25977c..00a1e1b 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Makefile
+++ b/contrib/llvm/tools/clang/include/clang/AST/Makefile
@@ -1,13 +1,23 @@
-LEVEL = ../../../../..
-BUILT_SOURCES = StmtNodes.inc
+CLANG_LEVEL := ../../..
+TD_SRC_DIR = $(PROJ_SRC_DIR)/../Basic
+BUILT_SOURCES = Attrs.inc StmtNodes.inc DeclNodes.inc
TABLEGEN_INC_FILES_COMMON = 1
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
-INPUT_TDS = $(PROJ_SRC_DIR)/StmtNodes.td
+$(ObjDir)/Attrs.inc.tmp : $(TD_SRC_DIR)/Attr.td $(TBLGEN) \
+ $(ObjDir)/.dir
+ $(Echo) "Building Clang attribute classes with tblgen"
+ $(Verb) $(TableGen) -gen-clang-attr-classes -o $(call SYSPATH, $@) \
+ -I $(PROJ_SRC_DIR)/../../ $<
-$(ObjDir)/StmtNodes.inc.tmp : StmtNodes.td $(TBLGEN) $(ObjDir)/.dir
+$(ObjDir)/StmtNodes.inc.tmp : $(TD_SRC_DIR)/StmtNodes.td $(TBLGEN) \
+ $(ObjDir)/.dir
$(Echo) "Building Clang statement node tables with tblgen"
$(Verb) $(TableGen) -gen-clang-stmt-nodes -o $(call SYSPATH, $@) $<
+$(ObjDir)/DeclNodes.inc.tmp : $(TD_SRC_DIR)/DeclNodes.td $(TBLGEN) \
+ $(ObjDir)/.dir
+ $(Echo) "Building Clang declaration node tables with tblgen"
+ $(Verb) $(TableGen) -gen-clang-decl-nodes -o $(call SYSPATH, $@) $<
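
The generated DeclNodes.inc is consumed with the same define-then-include
pattern the DeclVisitor change above uses; abstract declarations have no
Decl::Kind enumerator, so ABSTRACT_DECL is defined away. A sketch (the
function is hypothetical, and it assumes the .inc undefines its macros at
the end, as the other tblgen outputs do):

    #include "clang/AST/DeclBase.h"
    using namespace clang;

    static const char *kindName(Decl::Kind K) {
      switch (K) {
    #define DECL(DERIVED, BASE) \
        case Decl::DERIVED: return #DERIVED;
    #define ABSTRACT_DECL(DECL)
    #include "clang/AST/DeclNodes.inc"
      }
      return "unknown";
    }
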
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
index 07865e0..0853ddd 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -29,739 +29,1634 @@
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+
+// The following three macros are used for metaprogramming. The code
+// using them is responsible for defining macro OPERATOR().
+
+// All unary operators.
+#define UNARYOP_LIST() \
+ OPERATOR(PostInc) OPERATOR(PostDec) \
+ OPERATOR(PreInc) OPERATOR(PreDec) \
+ OPERATOR(AddrOf) OPERATOR(Deref) \
+ OPERATOR(Plus) OPERATOR(Minus) \
+ OPERATOR(Not) OPERATOR(LNot) \
+ OPERATOR(Real) OPERATOR(Imag) \
+ OPERATOR(Extension) OPERATOR(OffsetOf)
+
+// All binary operators (excluding compound assign operators).
+#define BINOP_LIST() \
+ OPERATOR(PtrMemD) OPERATOR(PtrMemI) \
+ OPERATOR(Mul) OPERATOR(Div) OPERATOR(Rem) \
+ OPERATOR(Add) OPERATOR(Sub) OPERATOR(Shl) \
+ OPERATOR(Shr) \
+ \
+ OPERATOR(LT) OPERATOR(GT) OPERATOR(LE) \
+ OPERATOR(GE) OPERATOR(EQ) OPERATOR(NE) \
+ OPERATOR(And) OPERATOR(Xor) OPERATOR(Or) \
+ OPERATOR(LAnd) OPERATOR(LOr) \
+ \
+ OPERATOR(Assign) \
+ OPERATOR(Comma)
+
+// All compound assign operators.
+#define CAO_LIST() \
+ OPERATOR(Mul) OPERATOR(Div) OPERATOR(Rem) OPERATOR(Add) OPERATOR(Sub) \
+ OPERATOR(Shl) OPERATOR(Shr) OPERATOR(And) OPERATOR(Or) OPERATOR(Xor)
namespace clang {
-#define DISPATCH(NAME, CLASS, Var) \
-return getDerived().Visit ## NAME(static_cast<CLASS*>(Var))
-
-// We use preprocessor meta-programming to generate the Visit*()
-// methods for all subclasses of Stmt, Decl, and Type. Some of the
-// generated definitions, however, need to be customized. The
-// meta-programming technique we use doesn't let us select which
-// methods to generate. Therefore we have to generate ALL of them in
-// a helper class RecursiveASTVisitorImpl, and override the ones we
-// don't like in a child class RecursiveASTVisitor (C++ doesn't allow
-// overriding a method in the same class).
-//
-// Do not use this class directly - use RecursiveASTVisitor instead.
+// A helper macro to implement short-circuiting when recursing. It
+// invokes CALL_EXPR, which must be a method call, on the derived
+// object (so that a user of RecursiveASTVisitor can override the method
+// in CALL_EXPR).
+#define TRY_TO(CALL_EXPR) \
+ do { if (!getDerived().CALL_EXPR) return false; } while (0)
+
+/// \brief A class that does preorder depth-first traversal on the
+/// entire Clang AST and visits each node.
+///
+/// This class performs three distinct tasks:
+/// 1. traverse the AST (i.e. go to each node);
+/// 2. at a given node, walk up the class hierarchy, starting from
+/// the node's dynamic type, until the top-most class (e.g. Stmt,
+/// Decl, or Type) is reached.
+/// 3. given a (node, class) combination, where 'class' is some base
+/// class of the dynamic type of 'node', call a user-overridable
+/// function to actually visit the node.
+///
+/// These tasks are done by three groups of methods, respectively:
+/// 1. TraverseDecl(Decl *x) does task #1. It is the entry point
+/// for traversing an AST rooted at x. This method simply
+/// dispatches (i.e. forwards) to TraverseFoo(Foo *x) where Foo
+/// is the dynamic type of *x, which calls WalkUpFromFoo(x) and
+/// then recursively visits the child nodes of x.
+/// TraverseStmt(Stmt *x) and TraverseType(QualType x) work
+/// similarly.
+/// 2. WalkUpFromFoo(Foo *x) does task #2. It does not try to visit
+/// any child node of x. Instead, it first calls WalkUpFromBar(x)
+/// where Bar is the direct parent class of Foo (unless Foo has
+/// no parent), and then calls VisitFoo(x) (see the next list item).
+/// 3. VisitFoo(Foo *x) does task #3.
+///
+/// These three method groups are tiered (Traverse* > WalkUpFrom* >
+/// Visit*). A method (e.g. Traverse*) may call methods from the same
+/// tier (e.g. other Traverse*) or one tier lower (e.g. WalkUpFrom*).
+/// It may not call methods from a higher tier.
+///
+/// Note that since WalkUpFromFoo() calls WalkUpFromBar() (where Bar
+/// is Foo's super class) before calling VisitFoo(), the result is
+/// that the Visit*() methods for a given node are called in the
+/// top-down order (e.g. for a node of type NamespaceDecl, the order will
+/// be VisitDecl(), VisitNamedDecl(), and then VisitNamespaceDecl()).
+///
+/// This scheme guarantees that all Visit*() calls for the same AST
+/// node are grouped together. In other words, Visit*() methods for
+/// different nodes are never interleaved.
+///
+/// Clients of this visitor should subclass the visitor (providing
+/// themselves as the template argument, using the curiously recurring
+/// template pattern) and override any of the Traverse*, WalkUpFrom*,
+/// and Visit* methods for declarations, types, statements,
+/// expressions, or other AST nodes where the visitor should customize
+/// behavior. Most users only need to override Visit*. Advanced
+/// users may override Traverse* and WalkUpFrom* to implement custom
+/// traversal strategies. Returning false from one of these overridden
+/// functions will abort the entire traversal.
template<typename Derived>
-class RecursiveASTVisitorImpl {
+class RecursiveASTVisitor {
public:
/// \brief Return a reference to the derived class.
Derived &getDerived() { return *static_cast<Derived*>(this); }
/// \brief Recursively visit a statement or expression, by
- /// dispatching to Visit*() based on the argument's dynamic type.
- /// This is NOT meant to be overridden by a subclass.
+ /// dispatching to Traverse*() based on the argument's dynamic type.
///
- /// \returns true if the visitation was terminated early, false
+ /// \returns false if the visitation was terminated early, true
/// otherwise (including when the argument is NULL).
- bool Visit(Stmt *S);
+ bool TraverseStmt(Stmt *S);
/// \brief Recursively visit a type, by dispatching to
- /// Visit*Type() based on the argument's getTypeClass() property.
- /// This is NOT meant to be overridden by a subclass.
+ /// Traverse*Type() based on the argument's getTypeClass() property.
///
- /// \returns true if the visitation was terminated early, false
+ /// \returns false if the visitation was terminated early, true
/// otherwise (including when the argument is a Null type).
- bool Visit(QualType T);
+ bool TraverseType(QualType T);
+
+ /// \brief Recursively visit a type with location, by dispatching to
+ /// Traverse*TypeLoc() based on the argument type's getTypeClass() property.
+ ///
+ /// \returns false if the visitation was terminated early, true
+ /// otherwise (including when the argument is a Null type location).
+ bool TraverseTypeLoc(TypeLoc TL);
/// \brief Recursively visit a declaration, by dispatching to
- /// Visit*Decl() based on the argument's dynamic type. This is
- /// NOT meant to be overridden by a subclass.
+ /// Traverse*Decl() based on the argument's dynamic type.
///
- /// \returns true if the visitation was terminated early, false
+ /// \returns false if the visitation was terminated early, true
/// otherwise (including when the argument is NULL).
- bool Visit(Decl *D);
+ bool TraverseDecl(Decl *D);
/// \brief Recursively visit a C++ nested-name-specifier.
///
- /// \returns true if the visitation was terminated early, false otherwise.
- bool VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS);
- /// \brief Recursively visit a template name.
+ /// \brief Recursively visit a template name and dispatch to the
+ /// appropriate method.
///
- /// \returns true if the visitation was terminated early, false otherwise.
- bool VisitTemplateName(TemplateName Template);
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseTemplateName(TemplateName Template);
- /// \brief Recursively visit a template argument.
+ /// \brief Recursively visit a template argument and dispatch to the
+ /// appropriate method for the argument type.
///
- /// \returns true if the visitation was terminated early, false otherwise.
- bool VisitTemplateArgument(const TemplateArgument &Arg);
+ /// \returns false if the visitation was terminated early, true otherwise.
+ // FIXME: migrate callers to TemplateArgumentLoc instead.
+ bool TraverseTemplateArgument(const TemplateArgument &Arg);
- /// \brief Recursively visit a set of template arguments.
+ /// \brief Recursively visit a template argument location and dispatch to the
+ /// appropriate method for the argument type.
///
- /// \returns true if the visitation was terminated early, false otherwise.
- bool VisitTemplateArguments(const TemplateArgument *Args, unsigned NumArgs);
-
- // If the implementation chooses not to implement a certain visit method, fall
- // back on VisitExpr or whatever else is the superclass.
-#define STMT(CLASS, PARENT) \
-bool Visit ## CLASS(CLASS *S) { DISPATCH(PARENT, PARENT, S); }
-#include "clang/AST/StmtNodes.inc"
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseTemplateArgumentLoc(const TemplateArgumentLoc &ArgLoc);
- // If the implementation doesn't implement binary operator methods, fall back
- // on VisitBinaryOperator.
-#define BINOP_FALLBACK(NAME) \
-bool VisitBin ## NAME(BinaryOperator *S) { \
-DISPATCH(BinaryOperator, BinaryOperator, S); \
-}
- BINOP_FALLBACK(PtrMemD) BINOP_FALLBACK(PtrMemI)
- BINOP_FALLBACK(Mul) BINOP_FALLBACK(Div) BINOP_FALLBACK(Rem)
- BINOP_FALLBACK(Add) BINOP_FALLBACK(Sub) BINOP_FALLBACK(Shl)
- BINOP_FALLBACK(Shr)
-
- BINOP_FALLBACK(LT) BINOP_FALLBACK(GT) BINOP_FALLBACK(LE)
- BINOP_FALLBACK(GE) BINOP_FALLBACK(EQ) BINOP_FALLBACK(NE)
- BINOP_FALLBACK(And) BINOP_FALLBACK(Xor) BINOP_FALLBACK(Or)
- BINOP_FALLBACK(LAnd) BINOP_FALLBACK(LOr)
-
- BINOP_FALLBACK(Assign)
- BINOP_FALLBACK(Comma)
-#undef BINOP_FALLBACK
-
- // If the implementation doesn't implement compound assignment operator
- // methods, fall back on VisitCompoundAssignOperator.
-#define CAO_FALLBACK(NAME) \
-bool VisitBin ## NAME(CompoundAssignOperator *S) { \
-DISPATCH(CompoundAssignOperator, CompoundAssignOperator, S); \
-}
- CAO_FALLBACK(MulAssign) CAO_FALLBACK(DivAssign) CAO_FALLBACK(RemAssign)
- CAO_FALLBACK(AddAssign) CAO_FALLBACK(SubAssign) CAO_FALLBACK(ShlAssign)
- CAO_FALLBACK(ShrAssign) CAO_FALLBACK(AndAssign) CAO_FALLBACK(OrAssign)
- CAO_FALLBACK(XorAssign)
-#undef CAO_FALLBACK
-
- // If the implementation doesn't implement unary operator methods, fall back
- // on VisitUnaryOperator.
-#define UNARYOP_FALLBACK(NAME) \
-bool VisitUnary ## NAME(UnaryOperator *S) { \
-DISPATCH(UnaryOperator, UnaryOperator, S); \
-}
- UNARYOP_FALLBACK(PostInc) UNARYOP_FALLBACK(PostDec)
- UNARYOP_FALLBACK(PreInc) UNARYOP_FALLBACK(PreDec)
- UNARYOP_FALLBACK(AddrOf) UNARYOP_FALLBACK(Deref)
-
- UNARYOP_FALLBACK(Plus) UNARYOP_FALLBACK(Minus)
- UNARYOP_FALLBACK(Not) UNARYOP_FALLBACK(LNot)
- UNARYOP_FALLBACK(Real) UNARYOP_FALLBACK(Imag)
- UNARYOP_FALLBACK(Extension) UNARYOP_FALLBACK(OffsetOf)
-#undef UNARYOP_FALLBACK
-
- /// \brief Basis for statement and expression visitation, which
- /// visits all of the substatements and subexpressions.
+ /// \brief Recursively visit a set of template arguments.
+ /// This can be overridden by a subclass, but it's not expected
+ /// that this will be needed -- this visitor always dispatches to
+ /// another.
///
- /// The relation between Visit(Stmt *S) and this method is that
- /// the former dispatches to Visit*() based on S's dynamic type,
- /// which forwards the call up the inheritance chain until
- /// reaching VisitStmt(), which then calls Visit() on each
- /// substatement/subexpression.
- bool VisitStmt(Stmt *S);
-
- /// \brief Basis for type visitation, which by default does nothing.
+ /// \returns false if the visitation was terminated early, true otherwise.
+ // FIXME: take a TemplateArgumentLoc* (or TemplateArgumentListInfo) instead.
+ bool TraverseTemplateArguments(const TemplateArgument *Args,
+ unsigned NumArgs);
+
+ /// \brief Recursively visit a constructor initializer. This
+ /// automatically dispatches to another visitor for the initializer
+ /// expression, but not for the name of the initializer, so it may
+ /// be overridden by clients that need access to the name.
///
- /// The relation between Visit(QualType T) and this method is
- /// that the former dispatches to Visit*Type(), which forwards the
- /// call up the inheritance chain until reaching VisitType().
- bool VisitType(Type *T);
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseConstructorInitializer(CXXBaseOrMemberInitializer *Init);
+
+ // ---- Methods on Stmts ----
-#define TYPE(Class, Base) \
- bool Visit##Class##Type(Class##Type *T);
+ // Declare Traverse*() for all concrete Stmt classes.
+#define ABSTRACT_STMT(STMT)
+#define STMT(CLASS, PARENT) \
+ bool Traverse##CLASS(CLASS *S);
+#include "clang/AST/StmtNodes.inc"
+ // The above header #undefs ABSTRACT_STMT and STMT upon exit.
+
+ // Define WalkUpFrom*() and empty Visit*() for all Stmt classes.
+ bool WalkUpFromStmt(Stmt *S) { return getDerived().VisitStmt(S); }
+ bool VisitStmt(Stmt *S) { return true; }
+#define STMT(CLASS, PARENT) \
+ bool WalkUpFrom##CLASS(CLASS *S) { \
+ TRY_TO(WalkUpFrom##PARENT(S)); \
+ TRY_TO(Visit##CLASS(S)); \
+ return true; \
+ } \
+ bool Visit##CLASS(CLASS *S) { return true; }
+#include "clang/AST/StmtNodes.inc"
+
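As a concrete illustration: for IfStmt (declared in StmtNodes.inc with parent Stmt), the block above generates roughly

    bool WalkUpFromIfStmt(IfStmt *S) {
      TRY_TO(WalkUpFromStmt(S));   // walk up to the base class first
      TRY_TO(VisitIfStmt(S));      // then visit the concrete class
      return true;
    }
    bool VisitIfStmt(IfStmt *S) { return true; }

which is the walk-up chain that produces the top-down Visit* ordering described in the class comment.
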
+ // Define Traverse*(), WalkUpFrom*(), and Visit*() for unary
+ // operator methods. Unary operators are not classes in themselves
+ // (they're all opcodes in UnaryOperator) but do have visitors.
+#define OPERATOR(NAME) \
+ bool TraverseUnary##NAME(UnaryOperator *S) { \
+ TRY_TO(WalkUpFromUnary##NAME(S)); \
+ TRY_TO(TraverseStmt(S->getSubExpr())); \
+ return true; \
+ } \
+ bool WalkUpFromUnary##NAME(UnaryOperator *S) { \
+ TRY_TO(WalkUpFromUnaryOperator(S)); \
+ TRY_TO(VisitUnary##NAME(S)); \
+ return true; \
+ } \
+ bool VisitUnary##NAME(UnaryOperator *S) { return true; }
+
+ UNARYOP_LIST()
+#undef OPERATOR
+
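For a single opcode such as Deref, the expansion above yields roughly

    bool TraverseUnaryDeref(UnaryOperator *S) {
      TRY_TO(WalkUpFromUnaryDeref(S));
      TRY_TO(TraverseStmt(S->getSubExpr()));  // recurse into the operand
      return true;
    }

together with the matching WalkUpFromUnaryDeref() and VisitUnaryDeref() methods.
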
+ // Define Traverse*(), WalkUpFrom*(), and Visit*() for binary
+ // operator methods. Binary operators are not classes in themselves
+ // (they're all opcodes in BinaryOperator) but do have visitors.
+#define GENERAL_BINOP_FALLBACK(NAME, BINOP_TYPE) \
+ bool TraverseBin##NAME(BINOP_TYPE *S) { \
+ TRY_TO(WalkUpFromBin##NAME(S)); \
+ TRY_TO(TraverseStmt(S->getLHS())); \
+ TRY_TO(TraverseStmt(S->getRHS())); \
+ return true; \
+ } \
+ bool WalkUpFromBin##NAME(BINOP_TYPE *S) { \
+ TRY_TO(WalkUpFrom##BINOP_TYPE(S)); \
+ TRY_TO(VisitBin##NAME(S)); \
+ return true; \
+ } \
+ bool VisitBin##NAME(BINOP_TYPE *S) { return true; }
+
+#define OPERATOR(NAME) GENERAL_BINOP_FALLBACK(NAME, BinaryOperator)
+ BINOP_LIST()
+#undef OPERATOR
+
+ // Define Traverse*(), WalkUpFrom*(), and Visit*() for compound
+ // assignment methods. Compound assignment operators are not
+ // classes in themselves (they're all opcodes in
+ // CompoundAssignOperator) but do have visitors.
+#define OPERATOR(NAME) \
+ GENERAL_BINOP_FALLBACK(NAME##Assign, CompoundAssignOperator)
+
+ CAO_LIST()
+#undef OPERATOR
+#undef GENERAL_BINOP_FALLBACK
+
+ // ---- Methods on Types ----
+ // FIXME: revamp to take TypeLoc's rather than Types.
+
+ // Declare Traverse*() for all concrete Type classes.
+#define ABSTRACT_TYPE(CLASS, BASE)
+#define TYPE(CLASS, BASE) \
+ bool Traverse##CLASS##Type(CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+ // The above header #undefs ABSTRACT_TYPE and TYPE upon exit.
+
+ // Define WalkUpFrom*() and empty Visit*() for all Type classes.
+ bool WalkUpFromType(Type *T) { return getDerived().VisitType(T); }
+ bool VisitType(Type *T) { return true; }
+#define TYPE(CLASS, BASE) \
+ bool WalkUpFrom##CLASS##Type(CLASS##Type *T) { \
+ TRY_TO(WalkUpFrom##BASE(T)); \
+ TRY_TO(Visit##CLASS##Type(T)); \
+ return true; \
+ } \
+ bool Visit##CLASS##Type(CLASS##Type *T) { return true; }
#include "clang/AST/TypeNodes.def"
- /// \brief Basis for declaration and definition visitation, which
- /// visits all of the subnodes.
- ///
- /// The relation between Visit(Decl *) and this method is that the
- /// former dispatches to Visit*Decl(), which forwards the call up
- /// the inheritance chain until reaching VisitDecl().
- bool VisitDecl(Decl *D);
-
-#define DECL(Class, Base) \
- bool Visit##Class##Decl(Class##Decl *D) { \
- return getDerived().Visit##Base(D); \
+ // ---- Methods on TypeLocs ----
+ // FIXME: this currently just calls the matching Type methods
+
+ // Declare Traverse*() for all concrete TypeLoc classes.
+#define ABSTRACT_TYPELOC(CLASS, BASE)
+#define TYPELOC(CLASS, BASE) \
+ bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL);
+#include "clang/AST/TypeLocNodes.def"
+ // The above header #undefs ABSTRACT_TYPELOC and TYPELOC upon exit.
+
+ // Define WalkUpFrom*() and empty Visit*() for all TypeLoc classes.
+ bool WalkUpFromTypeLoc(TypeLoc TL) { return getDerived().VisitTypeLoc(TL); }
+ bool VisitTypeLoc(TypeLoc TL) { return true; }
+
+ // QualifiedTypeLoc and UnqualTypeLoc are not declared in
+ // TypeNodes.def and thus need to be handled specially.
+ bool WalkUpFromQualifiedTypeLoc(QualifiedTypeLoc TL) {
+ return getDerived().VisitUnqualTypeLoc(TL.getUnqualifiedLoc());
}
-#define ABSTRACT_DECL(Class, Base) DECL(Class, Base)
-#include "clang/AST/DeclNodes.def"
+ bool VisitQualifiedTypeLoc(QualifiedTypeLoc TL) { return true; }
+ bool WalkUpFromUnqualTypeLoc(UnqualTypeLoc TL) {
+ return getDerived().VisitUnqualTypeLoc(TL.getUnqualifiedLoc());
+ }
+ bool VisitUnqualTypeLoc(UnqualTypeLoc TL) { return true; }
+
+ // Note that BASE includes trailing 'Type' which CLASS doesn't.
+#define TYPE(CLASS, BASE) \
+ bool WalkUpFrom##CLASS##TypeLoc(CLASS##TypeLoc TL) { \
+ TRY_TO(WalkUpFrom##BASE##Loc(TL)); \
+ TRY_TO(Visit##CLASS##TypeLoc(TL)); \
+ return true; \
+ } \
+ bool Visit##CLASS##TypeLoc(CLASS##TypeLoc TL) { return true; }
+#include "clang/AST/TypeNodes.def"
+
+ // ---- Methods on Decls ----
+
+ // Declare Traverse*() for all concrete Decl classes.
+#define ABSTRACT_DECL(DECL)
+#define DECL(CLASS, BASE) \
+ bool Traverse##CLASS##Decl(CLASS##Decl *D);
+#include "clang/AST/DeclNodes.inc"
+ // The above header #undefs ABSTRACT_DECL and DECL upon exit.
+
+ // Define WalkUpFrom*() and empty Visit*() for all Decl classes.
+ bool WalkUpFromDecl(Decl *D) { return getDerived().VisitDecl(D); }
+ bool VisitDecl(Decl *D) { return true; }
+#define DECL(CLASS, BASE) \
+ bool WalkUpFrom##CLASS##Decl(CLASS##Decl *D) { \
+ TRY_TO(WalkUpFrom##BASE(D)); \
+ TRY_TO(Visit##CLASS##Decl(D)); \
+ return true; \
+ } \
+ bool Visit##CLASS##Decl(CLASS##Decl *D) { return true; }
+#include "clang/AST/DeclNodes.inc"
+
+private:
+ // These are helper methods used by more than one Traverse* method.
+ bool TraverseTemplateParameterListHelper(TemplateParameterList *TPL);
+ bool TraverseTemplateArgumentLocsHelper(const TemplateArgumentLoc *TAL,
+ unsigned Count);
+ bool TraverseRecordHelper(RecordDecl *D);
+ bool TraverseCXXRecordHelper(CXXRecordDecl *D);
+ bool TraverseDeclaratorHelper(DeclaratorDecl *D);
+ bool TraverseDeclContextHelper(DeclContext *DC);
+ bool TraverseFunctionHelper(FunctionDecl *D);
+ bool TraverseVarHelper(VarDecl *D);
};
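
To make the interface concrete, here is a minimal sketch of a client (the class name is illustrative, not part of this patch): it prints the name of every function declaration and never aborts the traversal.

    // Hypothetical example client; assumes the usual clang/LLVM headers.
    class FunctionNamePrinter
        : public clang::RecursiveASTVisitor<FunctionNamePrinter> {
    public:
      bool VisitFunctionDecl(clang::FunctionDecl *D) {
        llvm::errs() << D->getNameAsString() << "\n";
        return true;  // returning false would stop the whole traversal
      }
    };

    // Typical usage, given an ASTContext &Context:
    //   FunctionNamePrinter Printer;
    //   Printer.TraverseDecl(Context.getTranslationUnitDecl());
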
+#define DISPATCH(NAME, CLASS, VAR) \
+ return getDerived().Traverse##NAME(static_cast<CLASS*>(VAR))
+
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::Visit(Stmt *S) {
+bool RecursiveASTVisitor<Derived>::TraverseStmt(Stmt *S) {
if (!S)
- return false;
+ return true;
// If we have a binary expr, dispatch to the subcode of the binop. A smart
// optimizer (e.g. LLVM) will fold this comparison into the switch stmt
// below.
if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(S)) {
switch (BinOp->getOpcode()) {
- case BinaryOperator::PtrMemD: DISPATCH(BinPtrMemD, BinaryOperator, S);
- case BinaryOperator::PtrMemI: DISPATCH(BinPtrMemI, BinaryOperator, S);
- case BinaryOperator::Mul: DISPATCH(BinMul, BinaryOperator, S);
- case BinaryOperator::Div: DISPATCH(BinDiv, BinaryOperator, S);
- case BinaryOperator::Rem: DISPATCH(BinRem, BinaryOperator, S);
- case BinaryOperator::Add: DISPATCH(BinAdd, BinaryOperator, S);
- case BinaryOperator::Sub: DISPATCH(BinSub, BinaryOperator, S);
- case BinaryOperator::Shl: DISPATCH(BinShl, BinaryOperator, S);
- case BinaryOperator::Shr: DISPATCH(BinShr, BinaryOperator, S);
-
- case BinaryOperator::LT: DISPATCH(BinLT, BinaryOperator, S);
- case BinaryOperator::GT: DISPATCH(BinGT, BinaryOperator, S);
- case BinaryOperator::LE: DISPATCH(BinLE, BinaryOperator, S);
- case BinaryOperator::GE: DISPATCH(BinGE, BinaryOperator, S);
- case BinaryOperator::EQ: DISPATCH(BinEQ, BinaryOperator, S);
- case BinaryOperator::NE: DISPATCH(BinNE, BinaryOperator, S);
-
- case BinaryOperator::And: DISPATCH(BinAnd, BinaryOperator, S);
- case BinaryOperator::Xor: DISPATCH(BinXor, BinaryOperator, S);
- case BinaryOperator::Or : DISPATCH(BinOr, BinaryOperator, S);
- case BinaryOperator::LAnd: DISPATCH(BinLAnd, BinaryOperator, S);
- case BinaryOperator::LOr : DISPATCH(BinLOr, BinaryOperator, S);
- case BinaryOperator::Assign: DISPATCH(BinAssign, BinaryOperator, S);
- case BinaryOperator::MulAssign:
- DISPATCH(BinMulAssign, CompoundAssignOperator, S);
- case BinaryOperator::DivAssign:
- DISPATCH(BinDivAssign, CompoundAssignOperator, S);
- case BinaryOperator::RemAssign:
- DISPATCH(BinRemAssign, CompoundAssignOperator, S);
- case BinaryOperator::AddAssign:
- DISPATCH(BinAddAssign, CompoundAssignOperator, S);
- case BinaryOperator::SubAssign:
- DISPATCH(BinSubAssign, CompoundAssignOperator, S);
- case BinaryOperator::ShlAssign:
- DISPATCH(BinShlAssign, CompoundAssignOperator, S);
- case BinaryOperator::ShrAssign:
- DISPATCH(BinShrAssign, CompoundAssignOperator, S);
- case BinaryOperator::AndAssign:
- DISPATCH(BinAndAssign, CompoundAssignOperator, S);
- case BinaryOperator::OrAssign:
- DISPATCH(BinOrAssign, CompoundAssignOperator, S);
- case BinaryOperator::XorAssign:
- DISPATCH(BinXorAssign, CompoundAssignOperator, S);
- case BinaryOperator::Comma: DISPATCH(BinComma, BinaryOperator, S);
+#define OPERATOR(NAME) \
+ case BinaryOperator::NAME: DISPATCH(Bin##NAME, BinaryOperator, S);
+
+ BINOP_LIST()
+#undef OPERATOR
+#undef BINOP_LIST
+
+#define OPERATOR(NAME) \
+ case BinaryOperator::NAME##Assign: \
+ DISPATCH(Bin##NAME##Assign, CompoundAssignOperator, S);
+
+ CAO_LIST()
+#undef OPERATOR
+#undef CAO_LIST
}
} else if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(S)) {
switch (UnOp->getOpcode()) {
- case UnaryOperator::PostInc: DISPATCH(UnaryPostInc, UnaryOperator, S);
- case UnaryOperator::PostDec: DISPATCH(UnaryPostDec, UnaryOperator, S);
- case UnaryOperator::PreInc: DISPATCH(UnaryPreInc, UnaryOperator, S);
- case UnaryOperator::PreDec: DISPATCH(UnaryPreDec, UnaryOperator, S);
- case UnaryOperator::AddrOf: DISPATCH(UnaryAddrOf, UnaryOperator, S);
- case UnaryOperator::Deref: DISPATCH(UnaryDeref, UnaryOperator, S);
- case UnaryOperator::Plus: DISPATCH(UnaryPlus, UnaryOperator, S);
- case UnaryOperator::Minus: DISPATCH(UnaryMinus, UnaryOperator, S);
- case UnaryOperator::Not: DISPATCH(UnaryNot, UnaryOperator, S);
- case UnaryOperator::LNot: DISPATCH(UnaryLNot, UnaryOperator, S);
- case UnaryOperator::Real: DISPATCH(UnaryReal, UnaryOperator, S);
- case UnaryOperator::Imag: DISPATCH(UnaryImag, UnaryOperator, S);
- case UnaryOperator::Extension: DISPATCH(UnaryExtension, UnaryOperator, S);
- case UnaryOperator::OffsetOf: DISPATCH(UnaryOffsetOf, UnaryOperator, S);
+#define OPERATOR(NAME) \
+ case UnaryOperator::NAME: DISPATCH(Unary##NAME, UnaryOperator, S);
+
+ UNARYOP_LIST()
+#undef OPERATOR
+#undef UNARYOP_LIST
}
}
- // Top switch stmt: dispatch to VisitFooStmt for each FooStmt.
+ // Top switch stmt: dispatch to TraverseFooStmt for each concrete FooStmt.
switch (S->getStmtClass()) {
case Stmt::NoStmtClass: break;
#define ABSTRACT_STMT(STMT)
-#define STMT(CLASS, PARENT) \
-case Stmt::CLASS ## Class: DISPATCH(CLASS, CLASS, S);
+#define STMT(CLASS, PARENT) \
+ case Stmt::CLASS##Class: DISPATCH(CLASS, CLASS, S);
#include "clang/AST/StmtNodes.inc"
}
- return false;
+ return true;
}
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::Visit(QualType T) {
+bool RecursiveASTVisitor<Derived>::TraverseType(QualType T) {
if (T.isNull())
- return false;
+ return true;
switch (T->getTypeClass()) {
-#define ABSTRACT_TYPE(Class, Base)
-#define TYPE(Class, Base) \
- case Type::Class: DISPATCH(Class##Type, Class##Type, T.getTypePtr());
+#define ABSTRACT_TYPE(CLASS, BASE)
+#define TYPE(CLASS, BASE) \
+ case Type::CLASS: DISPATCH(CLASS##Type, CLASS##Type, T.getTypePtr());
#include "clang/AST/TypeNodes.def"
}
- return false;
+ return true;
}
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::Visit(Decl *D) {
+bool RecursiveASTVisitor<Derived>::TraverseTypeLoc(TypeLoc TL) {
+ if (TL.isNull())
+ return true;
+
+ switch (TL.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, BASE)
+#define TYPELOC(CLASS, BASE) \
+ case TypeLoc::CLASS: \
+ return getDerived().Traverse##CLASS##TypeLoc(*cast<CLASS##TypeLoc>(&TL));
+#include "clang/AST/TypeLocNodes.def"
+ }
+
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseDecl(Decl *D) {
if (!D)
- return false;
+ return true;
+
+ // As a syntax visitor, we want to ignore implicitly-defined
+ // declarations (ones not typed explicitly by the user).
+ if (D->isImplicit())
+ return true;
switch (D->getKind()) {
-#define ABSTRACT_DECL(Class, Base)
-#define DECL(Class, Base) \
- case Decl::Class: DISPATCH(Class##Decl, Class##Decl, D);
-#include "clang/AST/DeclNodes.def"
- }
+#define ABSTRACT_DECL(DECL)
+#define DECL(CLASS, BASE) \
+ case Decl::CLASS: DISPATCH(CLASS##Decl, CLASS##Decl, D);
+#include "clang/AST/DeclNodes.inc"
+ }
- return false;
+ return true;
}
+#undef DISPATCH
+
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitNestedNameSpecifier(
+bool RecursiveASTVisitor<Derived>::TraverseNestedNameSpecifier(
NestedNameSpecifier *NNS) {
- if (NNS->getPrefix() &&
- getDerived().VisitNestedNameSpecifier(NNS->getPrefix()))
+ if (!NNS)
return true;
+ if (NNS->getPrefix())
+ TRY_TO(TraverseNestedNameSpecifier(NNS->getPrefix()));
+
switch (NNS->getKind()) {
case NestedNameSpecifier::Identifier:
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::Global:
- return false;
+ return true;
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate:
- return Visit(QualType(NNS->getAsType(), 0));
+ TRY_TO(TraverseType(QualType(NNS->getAsType(), 0)));
}
- return false;
+ return true;
}
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitTemplateName(TemplateName Template) {
+bool RecursiveASTVisitor<Derived>::TraverseTemplateName(TemplateName Template) {
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
- return DTN->getQualifier() &&
- getDerived().VisitNestedNameSpecifier(DTN->getQualifier());
-
- if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
- return getDerived().VisitNestedNameSpecifier(QTN->getQualifier());
+ TRY_TO(TraverseNestedNameSpecifier(DTN->getQualifier()));
+ else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ TRY_TO(TraverseNestedNameSpecifier(QTN->getQualifier()));
- return false;
+ return true;
}
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitTemplateArgument(
+bool RecursiveASTVisitor<Derived>::TraverseTemplateArgument(
const TemplateArgument &Arg) {
switch (Arg.getKind()) {
case TemplateArgument::Null:
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
- return false;
+ return true;
case TemplateArgument::Type:
- return Visit(Arg.getAsType());
+ return getDerived().TraverseType(Arg.getAsType());
case TemplateArgument::Template:
- return getDerived().VisitTemplateName(Arg.getAsTemplate());
+ return getDerived().TraverseTemplateName(Arg.getAsTemplate());
case TemplateArgument::Expression:
- return getDerived().Visit(Arg.getAsExpr());
+ return getDerived().TraverseStmt(Arg.getAsExpr());
case TemplateArgument::Pack:
- return getDerived().VisitTemplateArguments(Arg.pack_begin(),
- Arg.pack_size());
+ return getDerived().TraverseTemplateArguments(Arg.pack_begin(),
+ Arg.pack_size());
}
- return false;
+ return true;
}
+// FIXME: no template name location?
+// FIXME: no source locations for a template argument pack?
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitTemplateArguments(
- const TemplateArgument *Args,
- unsigned NumArgs) {
- for (unsigned I = 0; I != NumArgs; ++I)
- if (getDerived().VisitTemplateArgument(Args[I]))
- return true;
+bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLoc(
+ const TemplateArgumentLoc &ArgLoc) {
+ const TemplateArgument &Arg = ArgLoc.getArgument();
- return false;
-}
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ return true;
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitStmt(Stmt *Node) {
- for (Stmt::child_iterator C = Node->child_begin(), CEnd = Node->child_end();
- C != CEnd; ++C) {
- if (Visit(*C))
- return true;
+ case TemplateArgument::Type: {
+ TypeSourceInfo *TSI = ArgLoc.getTypeSourceInfo();
+ return getDerived().TraverseTypeLoc(TSI->getTypeLoc());
}
- return false;
+ case TemplateArgument::Template:
+ return getDerived().TraverseTemplateName(Arg.getAsTemplate());
+
+ case TemplateArgument::Expression:
+ return getDerived().TraverseStmt(ArgLoc.getSourceExpression());
+
+ case TemplateArgument::Pack:
+ return getDerived().TraverseTemplateArguments(Arg.pack_begin(),
+ Arg.pack_size());
+ }
+
+ return true;
}
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitType(Type *T) {
- return false;
+bool RecursiveASTVisitor<Derived>::TraverseTemplateArguments(
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ TRY_TO(TraverseTemplateArgument(Args[I]));
+ }
+
+ return true;
}
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitBuiltinType(BuiltinType *T) {
- return getDerived().VisitType(T);
+bool RecursiveASTVisitor<Derived>::TraverseConstructorInitializer(
+ CXXBaseOrMemberInitializer *Init) {
+ // FIXME: recurse on TypeLoc of the base initializer if isBaseInitializer()?
+ if (Init->isWritten())
+ TRY_TO(TraverseStmt(Init->getInit()));
+ return true;
}
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitComplexType(ComplexType *T) {
- if (Visit(T->getElementType()))
- return true;
- return getDerived().VisitType(T);
-}
+// ----------------- Type traversal -----------------
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitPointerType(PointerType *T) {
- if (Visit(T->getPointeeType()))
- return true;
+// This macro makes available a variable T, the passed-in type.
+#define DEF_TRAVERSE_TYPE(TYPE, CODE) \
+ template<typename Derived> \
+ bool RecursiveASTVisitor<Derived>::Traverse##TYPE (TYPE *T) { \
+ TRY_TO(WalkUpFrom##TYPE (T)); \
+ { CODE; } \
+ return true; \
+ }
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_TYPE(BuiltinType, { })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitBlockPointerType(
- BlockPointerType *T) {
- if (Visit(T->getPointeeType()))
- return true;
+DEF_TRAVERSE_TYPE(ComplexType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_TYPE(PointerType, {
+ TRY_TO(TraverseType(T->getPointeeType()));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitReferenceType(ReferenceType *T) {
- if (Visit(T->getPointeeType()))
- return true;
+DEF_TRAVERSE_TYPE(BlockPointerType, {
+ TRY_TO(TraverseType(T->getPointeeType()));
+ })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_TYPE(LValueReferenceType, {
+ TRY_TO(TraverseType(T->getPointeeType()));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitLValueReferenceType(
- LValueReferenceType *T) {
- return getDerived().VisitReferenceType(T);
-}
+DEF_TRAVERSE_TYPE(RValueReferenceType, {
+ TRY_TO(TraverseType(T->getPointeeType()));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitRValueReferenceType(
- RValueReferenceType *T) {
- return getDerived().VisitReferenceType(T);
-}
+DEF_TRAVERSE_TYPE(MemberPointerType, {
+ TRY_TO(TraverseType(QualType(T->getClass(), 0)));
+ TRY_TO(TraverseType(T->getPointeeType()));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitMemberPointerType(
- MemberPointerType *T) {
- if (Visit(QualType(T->getClass(), 0)) || Visit(T->getPointeeType()))
- return true;
+DEF_TRAVERSE_TYPE(ConstantArrayType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_TYPE(IncompleteArrayType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitArrayType(ArrayType *T) {
- if (Visit(T->getElementType()))
- return true;
+DEF_TRAVERSE_TYPE(VariableArrayType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ TRY_TO(TraverseStmt(T->getSizeExpr()));
+ })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_TYPE(DependentSizedArrayType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ if (T->getSizeExpr())
+ TRY_TO(TraverseStmt(T->getSizeExpr()));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitConstantArrayType(
- ConstantArrayType *T) {
- return getDerived().VisitArrayType(T);
-}
+DEF_TRAVERSE_TYPE(DependentSizedExtVectorType, {
+ if (T->getSizeExpr())
+ TRY_TO(TraverseStmt(T->getSizeExpr()));
+ TRY_TO(TraverseType(T->getElementType()));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitIncompleteArrayType(
- IncompleteArrayType *T) {
- return getDerived().VisitArrayType(T);
-}
+DEF_TRAVERSE_TYPE(VectorType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitVariableArrayType(
- VariableArrayType *T) {
- if (Visit(T->getSizeExpr()))
- return true;
+DEF_TRAVERSE_TYPE(ExtVectorType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ })
- return getDerived().VisitArrayType(T);
-}
+DEF_TRAVERSE_TYPE(FunctionNoProtoType, {
+ TRY_TO(TraverseType(T->getResultType()));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitDependentSizedArrayType(
- DependentSizedArrayType *T) {
- if (T->getSizeExpr() && Visit(T->getSizeExpr()))
- return true;
+DEF_TRAVERSE_TYPE(FunctionProtoType, {
+ TRY_TO(TraverseType(T->getResultType()));
- return getDerived().VisitArrayType(T);
-}
+ for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(),
+ AEnd = T->arg_type_end();
+ A != AEnd; ++A) {
+ TRY_TO(TraverseType(*A));
+ }
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitDependentSizedExtVectorType(
- DependentSizedExtVectorType *T) {
- if ((T->getSizeExpr() && Visit(T->getSizeExpr())) ||
- Visit(T->getElementType()))
- return true;
+ for (FunctionProtoType::exception_iterator E = T->exception_begin(),
+ EEnd = T->exception_end();
+ E != EEnd; ++E) {
+ TRY_TO(TraverseType(*E));
+ }
+ })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_TYPE(UnresolvedUsingType, { })
+DEF_TRAVERSE_TYPE(TypedefType, { })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitVectorType(VectorType *T) {
- if (Visit(T->getElementType()))
- return true;
+DEF_TRAVERSE_TYPE(TypeOfExprType, {
+ TRY_TO(TraverseStmt(T->getUnderlyingExpr()));
+ })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_TYPE(TypeOfType, {
+ TRY_TO(TraverseType(T->getUnderlyingType()));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitExtVectorType(ExtVectorType *T) {
- return getDerived().VisitVectorType(T);
-}
+DEF_TRAVERSE_TYPE(DecltypeType, {
+ TRY_TO(TraverseStmt(T->getUnderlyingExpr()));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitFunctionType(FunctionType *T) {
- if (Visit(T->getResultType()))
- return true;
+DEF_TRAVERSE_TYPE(RecordType, { })
+DEF_TRAVERSE_TYPE(EnumType, { })
+DEF_TRAVERSE_TYPE(TemplateTypeParmType, { })
+DEF_TRAVERSE_TYPE(SubstTemplateTypeParmType, { })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_TYPE(TemplateSpecializationType, {
+ TRY_TO(TraverseTemplateName(T->getTemplateName()));
+ TRY_TO(TraverseTemplateArguments(T->getArgs(), T->getNumArgs()));
+ })
+
+DEF_TRAVERSE_TYPE(InjectedClassNameType, { })
+
+DEF_TRAVERSE_TYPE(ElaboratedType, {
+ if (T->getQualifier()) {
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+ }
+ TRY_TO(TraverseType(T->getNamedType()));
+ })
+
+DEF_TRAVERSE_TYPE(DependentNameType, {
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+ })
+
+DEF_TRAVERSE_TYPE(DependentTemplateSpecializationType, {
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+ TRY_TO(TraverseTemplateArguments(T->getArgs(), T->getNumArgs()));
+ })
+
+DEF_TRAVERSE_TYPE(ObjCInterfaceType, { })
+
+DEF_TRAVERSE_TYPE(ObjCObjectType, {
+ // We have to watch out here because an ObjCInterfaceType's base
+ // type is itself.
+ if (T->getBaseType().getTypePtr() != T)
+ TRY_TO(TraverseType(T->getBaseType()));
+ })
+
+DEF_TRAVERSE_TYPE(ObjCObjectPointerType, {
+ TRY_TO(TraverseType(T->getPointeeType()));
+ })
+
+#undef DEF_TRAVERSE_TYPE
+
+// ----------------- TypeLoc traversal -----------------
+
+// This macro makes available a variable TL, the passed-in TypeLoc.
+// It calls WalkUpFrom* for the Type in the given TypeLoc, in addition
+// to WalkUpFrom* for the TypeLoc itself, such that existing clients
+// that override the WalkUpFrom*Type() and/or Visit*Type() methods
+// continue to work.
+#define DEF_TRAVERSE_TYPELOC(TYPE, CODE) \
+ template<typename Derived> \
+ bool RecursiveASTVisitor<Derived>::Traverse##TYPE##Loc(TYPE##Loc TL) { \
+ TRY_TO(WalkUpFrom##TYPE(TL.getTypePtr())); \
+ TRY_TO(WalkUpFrom##TYPE##Loc(TL)); \
+ { CODE; } \
+ return true; \
+ }
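
For instance, the PointerType entry below expands to roughly

    template<typename Derived>
    bool RecursiveASTVisitor<Derived>::TraversePointerTypeLoc(
        PointerTypeLoc TL) {
      TRY_TO(WalkUpFromPointerType(TL.getTypePtr()));  // Type-based callbacks
      TRY_TO(WalkUpFromPointerTypeLoc(TL));            // TypeLoc-based callbacks
      TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
      return true;
    }

so Type-based clients keep receiving their WalkUpFrom*Type()/Visit*Type() callbacks even though traversal now runs over TypeLocs.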
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitFunctionNoProtoType(
- FunctionNoProtoType *T) {
- return getDerived().VisitFunctionType(T);
+bool RecursiveASTVisitor<Derived>::TraverseQualifiedTypeLoc(
+ QualifiedTypeLoc TL) {
+ // Move this over to the 'main' typeloc tree. Note that this is a
+ // move -- we pretend that we were really looking at the unqualified
+ // typeloc all along -- rather than a recursion, so we don't follow
+ // the normal CRTP plan of going through
+ // getDerived().TraverseTypeLoc. If we did, we'd be traversing
+ // twice for the same type (once as a QualifiedTypeLoc version of
+ // the type, once as an UnqualifiedTypeLoc version of the type),
+ // which in effect means we'd call VisitTypeLoc twice with the
+ // 'same' type. This solves that problem, at the cost of never
+ // seeing the qualified version of the type (unless the client
+ // subclasses TraverseQualifiedTypeLoc themselves). It's not a
+ // perfect solution. A perfect solution probably requires making
+ // QualifiedTypeLoc a wrapper around TypeLoc -- like QualType is a
+ // wrapper around Type* -- rather than being its own class in the
+ // type hierarchy.
+ return TraverseTypeLoc(TL.getUnqualifiedLoc());
}
+DEF_TRAVERSE_TYPELOC(BuiltinType, { })
+
+// FIXME: ComplexTypeLoc is unfinished
+DEF_TRAVERSE_TYPELOC(ComplexType, {
+ TRY_TO(TraverseType(TL.getTypePtr()->getElementType()));
+ })
+
+DEF_TRAVERSE_TYPELOC(PointerType, {
+ TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(BlockPointerType, {
+ TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(LValueReferenceType, {
+ TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(RValueReferenceType, {
+ TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
+ })
+
+// FIXME: location of base class?
+// We traverse this in the type case as well, but how is it not reached through
+// the pointee type?
+DEF_TRAVERSE_TYPELOC(MemberPointerType, {
+ TRY_TO(TraverseType(QualType(TL.getTypePtr()->getClass(), 0)));
+ TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(ConstantArrayType, {
+ TRY_TO(TraverseTypeLoc(TL.getElementLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(IncompleteArrayType, {
+ TRY_TO(TraverseTypeLoc(TL.getElementLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(VariableArrayType, {
+ TRY_TO(TraverseTypeLoc(TL.getElementLoc()));
+ TRY_TO(TraverseStmt(TL.getTypePtr()->getSizeExpr()));
+ })
+
+DEF_TRAVERSE_TYPELOC(DependentSizedArrayType, {
+ TRY_TO(TraverseTypeLoc(TL.getElementLoc()));
+ if (TL.getTypePtr()->getSizeExpr())
+ TRY_TO(TraverseStmt(TL.getTypePtr()->getSizeExpr()));
+ })
+
+// FIXME: order? why not size expr first?
+// FIXME: base VectorTypeLoc is unfinished
+DEF_TRAVERSE_TYPELOC(DependentSizedExtVectorType, {
+ if (TL.getTypePtr()->getSizeExpr())
+ TRY_TO(TraverseStmt(TL.getTypePtr()->getSizeExpr()));
+ TRY_TO(TraverseType(TL.getTypePtr()->getElementType()));
+ })
+
+// FIXME: VectorTypeLoc is unfinished
+DEF_TRAVERSE_TYPELOC(VectorType, {
+ TRY_TO(TraverseType(TL.getTypePtr()->getElementType()));
+ })
+
+// FIXME: size and attributes
+// FIXME: base VectorTypeLoc is unfinished
+DEF_TRAVERSE_TYPELOC(ExtVectorType, {
+ TRY_TO(TraverseType(TL.getTypePtr()->getElementType()));
+ })
+
+DEF_TRAVERSE_TYPELOC(FunctionNoProtoType, {
+ TRY_TO(TraverseTypeLoc(TL.getResultLoc()));
+ })
+
+// FIXME: location of arguments, exception specifications (attributes?)
+// Note that we have the ParmVarDecls here. Do we want to use them?
+DEF_TRAVERSE_TYPELOC(FunctionProtoType, {
+ TRY_TO(TraverseTypeLoc(TL.getResultLoc()));
+
+ FunctionProtoType *T = TL.getTypePtr();
+/*
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ TRY_TO(TraverseDecl(TL.getArg(I)));
+ }
+*/
+ for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(),
+ AEnd = T->arg_type_end();
+ A != AEnd; ++A) {
+ TRY_TO(TraverseType(*A));
+ }
+ for (FunctionProtoType::exception_iterator E = T->exception_begin(),
+ EEnd = T->exception_end();
+ E != EEnd; ++E) {
+ TRY_TO(TraverseType(*E));
+ }
+ })
+
+DEF_TRAVERSE_TYPELOC(UnresolvedUsingType, { })
+DEF_TRAVERSE_TYPELOC(TypedefType, { })
+
+DEF_TRAVERSE_TYPELOC(TypeOfExprType, {
+ TRY_TO(TraverseStmt(TL.getUnderlyingExpr()));
+ })
+
+DEF_TRAVERSE_TYPELOC(TypeOfType, {
+ TRY_TO(TraverseTypeLoc(TL.getUnderlyingTInfo()->getTypeLoc()));
+ })
+
+// FIXME: location of underlying expr
+DEF_TRAVERSE_TYPELOC(DecltypeType, {
+ TRY_TO(TraverseStmt(TL.getTypePtr()->getUnderlyingExpr()));
+ })
+
+DEF_TRAVERSE_TYPELOC(RecordType, { })
+DEF_TRAVERSE_TYPELOC(EnumType, { })
+DEF_TRAVERSE_TYPELOC(TemplateTypeParmType, { })
+DEF_TRAVERSE_TYPELOC(SubstTemplateTypeParmType, { })
+
+// FIXME: use the loc for the template name?
+DEF_TRAVERSE_TYPELOC(TemplateSpecializationType, {
+ TRY_TO(TraverseTemplateName(TL.getTypePtr()->getTemplateName()));
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ TRY_TO(TraverseTemplateArgumentLoc(TL.getArgLoc(I)));
+ }
+ })
+
+DEF_TRAVERSE_TYPELOC(InjectedClassNameType, { })
+
+// FIXME: use the sourceloc on qualifier?
+DEF_TRAVERSE_TYPELOC(ElaboratedType, {
+ if (TL.getTypePtr()->getQualifier()) {
+ TRY_TO(TraverseNestedNameSpecifier(TL.getTypePtr()->getQualifier()));
+ }
+ TRY_TO(TraverseTypeLoc(TL.getNamedTypeLoc()));
+ })
+
+// FIXME: use the sourceloc on qualifier?
+DEF_TRAVERSE_TYPELOC(DependentNameType, {
+ TRY_TO(TraverseNestedNameSpecifier(TL.getTypePtr()->getQualifier()));
+ })
+
+DEF_TRAVERSE_TYPELOC(DependentTemplateSpecializationType, {
+ TRY_TO(TraverseNestedNameSpecifier(TL.getTypePtr()->getQualifier()));
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ TRY_TO(TraverseTemplateArgumentLoc(TL.getArgLoc(I)));
+ }
+ })
+
+DEF_TRAVERSE_TYPELOC(ObjCInterfaceType, { })
+
+DEF_TRAVERSE_TYPELOC(ObjCObjectType, {
+ // We have to watch out here because an ObjCInterfaceType's base
+ // type is itself.
+ if (TL.getTypePtr()->getBaseType().getTypePtr() != TL.getTypePtr())
+ TRY_TO(TraverseTypeLoc(TL.getBaseLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(ObjCObjectPointerType, {
+ TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
+ })
+
+#undef DEF_TRAVERSE_TYPELOC
+
+// ----------------- Decl traversal -----------------
+//
+// For a Decl, we automate (in the DEF_TRAVERSE_DECL macro) traversing
+// the children that come from the DeclContext associated with it.
+// Therefore each Traverse* only needs to worry about children other
+// than those.
+
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitFunctionProtoType(
- FunctionProtoType *T) {
- for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(),
- AEnd = T->arg_type_end();
- A != AEnd; ++A) {
- if (Visit(*A))
- return true;
- }
+bool RecursiveASTVisitor<Derived>::TraverseDeclContextHelper(DeclContext *DC) {
+ if (!DC)
+ return true;
- for (FunctionProtoType::exception_iterator E = T->exception_begin(),
- EEnd = T->exception_end();
- E != EEnd; ++E) {
- if (Visit(*E))
- return true;
+ for (DeclContext::decl_iterator Child = DC->decls_begin(),
+ ChildEnd = DC->decls_end();
+ Child != ChildEnd; ++Child) {
+ TRY_TO(TraverseDecl(*Child));
}
- return getDerived().VisitFunctionType(T);
+ return true;
}
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitUnresolvedUsingType(
- UnresolvedUsingType *T) {
- return getDerived().VisitType(T);
+// This macro makes available a variable D, the passed-in decl.
+#define DEF_TRAVERSE_DECL(DECL, CODE) \
+template<typename Derived> \
+bool RecursiveASTVisitor<Derived>::Traverse##DECL (DECL *D) { \
+ TRY_TO(WalkUpFrom##DECL (D)); \
+ { CODE; } \
+ TRY_TO(TraverseDeclContextHelper(dyn_cast<DeclContext>(D))); \
+ return true; \
}
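
Expanded for NamespaceDecl, for example, the macro produces roughly

    template<typename Derived>
    bool RecursiveASTVisitor<Derived>::TraverseNamespaceDecl(NamespaceDecl *D) {
      TRY_TO(WalkUpFromNamespaceDecl(D));
      { /* the per-decl CODE block goes here */ }
      TRY_TO(TraverseDeclContextHelper(dyn_cast<DeclContext>(D)));
      return true;
    }

The DeclContext helper is what reaches the children, which is why most entries below only need to handle nodes that do not appear in decls_begin()/decls_end().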
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitTypedefType(TypedefType *T) {
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_DECL(AccessSpecDecl, { })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitTypeOfExprType(TypeOfExprType *T) {
- if (Visit(T->getUnderlyingExpr()))
- return true;
+DEF_TRAVERSE_DECL(BlockDecl, {
+ // We don't traverse nodes in param_begin()/param_end(), as they
+ // appear in decls_begin()/decls_end() and thus are handled by the
+ // DEF_TRAVERSE_DECL macro already.
+ TRY_TO(TraverseStmt(D->getBody()));
+ })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_DECL(FileScopeAsmDecl, {
+ TRY_TO(TraverseStmt(D->getAsmString()));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitTypeOfType(TypeOfType *T) {
- if (Visit(T->getUnderlyingType()))
- return true;
+DEF_TRAVERSE_DECL(FriendDecl, {
+ TRY_TO(TraverseDecl(D->getFriendDecl()));
+ })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_DECL(FriendTemplateDecl, {
+ TRY_TO(TraverseDecl(D->getFriendDecl()));
+ for (unsigned I = 0, E = D->getNumTemplateParameters(); I < E; ++I) {
+ TemplateParameterList *TPL = D->getTemplateParameterList(I);
+ for (TemplateParameterList::iterator ITPL = TPL->begin(),
+ ETPL = TPL->end();
+ ITPL != ETPL; ++ITPL) {
+ TRY_TO(TraverseDecl(*ITPL));
+ }
+ }
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitDecltypeType(DecltypeType *T) {
- if (Visit(T->getUnderlyingExpr()))
+DEF_TRAVERSE_DECL(LinkageSpecDecl, { })
+
+DEF_TRAVERSE_DECL(ObjCClassDecl, {
+ // FIXME: implement this
+ })
+
+DEF_TRAVERSE_DECL(ObjCForwardProtocolDecl, {
+ // FIXME: implement this
+ })
+
+DEF_TRAVERSE_DECL(ObjCPropertyImplDecl, {
+ // FIXME: implement this
+ })
+
+DEF_TRAVERSE_DECL(StaticAssertDecl, {
+ TRY_TO(TraverseStmt(D->getAssertExpr()));
+ TRY_TO(TraverseStmt(D->getMessage()));
+ })
+
+DEF_TRAVERSE_DECL(TranslationUnitDecl, {
+ // Code in an unnamed namespace shows up automatically in
+ // decls_begin()/decls_end(). Thus we don't need to recurse on
+ // D->getAnonymousNamespace().
+ })
+
+DEF_TRAVERSE_DECL(NamespaceAliasDecl, {
+ // We shouldn't traverse an aliased namespace, since it will be
+ // defined (and, therefore, traversed) somewhere else.
+ //
+ // This return statement makes sure the traversal of nodes in
+ // decls_begin()/decls_end() (done in the DEF_TRAVERSE_DECL macro)
+ // is skipped -- don't remove it.
return true;
+ })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_DECL(NamespaceDecl, {
+ // Code in an unnamed namespace shows up automatically in
+ // decls_begin()/decls_end(). Thus we don't need to recurse on
+ // D->getAnonymousNamespace().
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitTagType(TagType *T) {
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_DECL(ObjCCompatibleAliasDecl, {
+ // FIXME: implement
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitRecordType(RecordType *T) {
- return getDerived().VisitTagType(T);
-}
+DEF_TRAVERSE_DECL(ObjCCategoryDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(ObjCCategoryImplDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(ObjCImplementationDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(ObjCInterfaceDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(ObjCProtocolDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(ObjCMethodDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(ObjCPropertyDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(UsingDecl, {
+ TRY_TO(TraverseNestedNameSpecifier(D->getTargetNestedNameDecl()));
+ })
+
+DEF_TRAVERSE_DECL(UsingDirectiveDecl, {
+ TRY_TO(TraverseNestedNameSpecifier(D->getQualifier()));
+ })
+
+DEF_TRAVERSE_DECL(UsingShadowDecl, { })
+
+// A helper method for TemplateDecl's children.
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitEnumType(EnumType *T) {
- return getDerived().VisitType(T);
+bool RecursiveASTVisitor<Derived>::TraverseTemplateParameterListHelper(
+ TemplateParameterList *TPL) {
+ if (TPL) {
+ for (TemplateParameterList::iterator I = TPL->begin(), E = TPL->end();
+ I != E; ++I) {
+ TRY_TO(TraverseDecl(*I));
+ }
+ }
+ return true;
}
+DEF_TRAVERSE_DECL(ClassTemplateDecl, {
+ TRY_TO(TraverseDecl(D->getTemplatedDecl()));
+ TRY_TO(TraverseTemplateParameterListHelper(D->getTemplateParameters()));
+ // We should not traverse the specializations/partial
+ // specializations. Those will show up in other contexts.
+ // getInstantiatedFromMemberTemplate() is just a link from a
+ // template instantiation back to the template from which it was
+ // instantiated, and thus should not be traversed either.
+ })
+
+DEF_TRAVERSE_DECL(FunctionTemplateDecl, {
+ TRY_TO(TraverseDecl(D->getTemplatedDecl()));
+ TRY_TO(TraverseTemplateParameterListHelper(D->getTemplateParameters()));
+ })
+
+DEF_TRAVERSE_DECL(TemplateTemplateParmDecl, {
+ // D is the "T" in something like
+ // template <template <typename> class T> class container { };
+ TRY_TO(TraverseDecl(D->getTemplatedDecl()));
+ if (D->hasDefaultArgument()) {
+ TRY_TO(TraverseTemplateArgumentLoc(D->getDefaultArgument()));
+ }
+ TRY_TO(TraverseTemplateParameterListHelper(D->getTemplateParameters()));
+ })
+
+DEF_TRAVERSE_DECL(TemplateTypeParmDecl, {
+ // D is the "T" in something like "template<typename T> class vector;"
+ if (D->hasDefaultArgument())
+ TRY_TO(TraverseTypeLoc(D->getDefaultArgumentInfo()->getTypeLoc()));
+ if (D->getTypeForDecl())
+ TRY_TO(TraverseType(QualType(D->getTypeForDecl(), 0)));
+ })
+
+DEF_TRAVERSE_DECL(TypedefDecl, {
+ TRY_TO(TraverseType(D->getUnderlyingType()));
+ // We shouldn't traverse D->getTypeForDecl(); it's a result of
+ // declaring the typedef, not something that was written in the
+ // source.
+ })
+
+DEF_TRAVERSE_DECL(UnresolvedUsingTypenameDecl, {
+ // A dependent using declaration which was marked with 'typename'.
+ // template<class T> class A : public B<T> { using typename B<T>::foo; };
+ TRY_TO(TraverseNestedNameSpecifier(D->getTargetNestedNameSpecifier()));
+ // We shouldn't traverse D->getTypeForDecl(); it's a result of
+ // declaring the type, not something that was written in the
+ // source.
+ })
+
+DEF_TRAVERSE_DECL(EnumDecl, {
+ if (D->getTypeForDecl())
+ TRY_TO(TraverseType(QualType(D->getTypeForDecl(), 0)));
+
+ TRY_TO(TraverseNestedNameSpecifier(D->getQualifier()));
+ // The enumerators are already traversed by
+ // decls_begin()/decls_end().
+ })
+
+
+// Helper methods for RecordDecl and its children.
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitTemplateTypeParmType(
- TemplateTypeParmType *T) {
- return getDerived().VisitType(T);
+bool RecursiveASTVisitor<Derived>::TraverseRecordHelper(
+ RecordDecl *D) {
+ // We shouldn't traverse D->getTypeForDecl(); it's a result of
+ // declaring the type, not something that was written in the source.
+ //
+ // The anonymous struct or union object is the variable or field
+ // whose type is the anonymous struct or union. We shouldn't
+ // traverse D->getAnonymousStructOrUnionObject(), as it's not
+ // something that is explicitly written in the source.
+ TRY_TO(TraverseNestedNameSpecifier(D->getQualifier()));
+ return true;
}
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitSubstTemplateTypeParmType(
- SubstTemplateTypeParmType *T) {
- return getDerived().VisitType(T);
+bool RecursiveASTVisitor<Derived>::TraverseCXXRecordHelper(
+ CXXRecordDecl *D) {
+ if (!TraverseRecordHelper(D))
+ return false;
+ if (D->hasDefinition()) {
+ for (CXXRecordDecl::base_class_iterator I = D->bases_begin(),
+ E = D->bases_end();
+ I != E; ++I) {
+ TRY_TO(TraverseType(I->getType()));
+ }
+ // We don't traverse the friends or the conversions, as they are
+ // already in decls_begin()/decls_end().
+ }
+ return true;
}
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitTemplateSpecializationType(
- TemplateSpecializationType *T) {
- if (getDerived().VisitTemplateName(T->getTemplateName()) ||
- getDerived().VisitTemplateArguments(T->getArgs(), T->getNumArgs()))
+DEF_TRAVERSE_DECL(RecordDecl, {
+ TRY_TO(TraverseRecordHelper(D));
+ })
+
+DEF_TRAVERSE_DECL(CXXRecordDecl, {
+ TRY_TO(TraverseCXXRecordHelper(D));
+ })
+
+DEF_TRAVERSE_DECL(ClassTemplateSpecializationDecl, {
+ // For implicit instantiations ("set<int> x;"), we don't want to
+ // recurse at all, since the instantiated class isn't written in
+ // the source code anywhere. (Note the instantiated *type* --
+ // set<int> -- is written, and will still get a callback of
+ // TemplateSpecializationType.) For explicit instantiations
+ // ("template class set<int>;"), we do need a callback, since this
+ // is the only callback that's made for this instantiation.
+ // We use getTypeAsWritten() to distinguish.
+ // FIXME: see how we want to handle template specializations.
+ if (TypeSourceInfo *TSI = D->getTypeAsWritten())
+ TRY_TO(TraverseTypeLoc(TSI->getTypeLoc()));
return true;
+ })
- return getDerived().VisitType(T);
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLocsHelper(
+ const TemplateArgumentLoc *TAL, unsigned Count) {
+ for (unsigned I = 0; I < Count; ++I) {
+ TRY_TO(TraverseTemplateArgumentLoc(TAL[I]));
+ }
+ return true;
}
+DEF_TRAVERSE_DECL(ClassTemplatePartialSpecializationDecl, {
+ // The partial specialization.
+ if (TemplateParameterList *TPL = D->getTemplateParameters()) {
+ for (TemplateParameterList::iterator I = TPL->begin(), E = TPL->end();
+ I != E; ++I) {
+ TRY_TO(TraverseDecl(*I));
+ }
+ }
+ // The args that remain unspecialized.
+ TRY_TO(TraverseTemplateArgumentLocsHelper(
+ D->getTemplateArgsAsWritten(), D->getNumTemplateArgsAsWritten()));
+
+ // Don't need the ClassTemplatePartialSpecializationHelper, even
+ // though that's our parent class -- we already visit all the
+ // template args here.
+ TRY_TO(TraverseCXXRecordHelper(D));
+ })
+
+DEF_TRAVERSE_DECL(EnumConstantDecl, {
+ TRY_TO(TraverseStmt(D->getInitExpr()));
+ })
+
+DEF_TRAVERSE_DECL(UnresolvedUsingValueDecl, {
+ // Like UnresolvedUsingTypenameDecl, but without the 'typename':
+ // template <class T> class A : public Base<T> { using Base<T>::foo; };
+ TRY_TO(TraverseNestedNameSpecifier(D->getTargetNestedNameSpecifier()));
+ })
+
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitInjectedClassNameType(
- InjectedClassNameType *T) {
- return getDerived().VisitType(T);
+bool RecursiveASTVisitor<Derived>::TraverseDeclaratorHelper(DeclaratorDecl *D) {
+ TRY_TO(TraverseNestedNameSpecifier(D->getQualifier()));
+ if (D->getTypeSourceInfo())
+ TRY_TO(TraverseTypeLoc(D->getTypeSourceInfo()->getTypeLoc()));
+ return true;
}
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitElaboratedType(ElaboratedType *T) {
- if (T->getQualifier() &&
- getDerived().VisitNestedNameSpecifier(T->getQualifier()))
- return true;
- if (Visit(T->getNamedType()))
- return true;
+DEF_TRAVERSE_DECL(FieldDecl, {
+ TRY_TO(TraverseDeclaratorHelper(D));
+ if (D->isBitField())
+ TRY_TO(TraverseStmt(D->getBitWidth()));
+ })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_DECL(ObjCAtDefsFieldDecl, {
+ TRY_TO(TraverseDeclaratorHelper(D));
+ if (D->isBitField())
+ TRY_TO(TraverseStmt(D->getBitWidth()));
+ // FIXME: implement the rest.
+ })
+
+DEF_TRAVERSE_DECL(ObjCIvarDecl, {
+ TRY_TO(TraverseDeclaratorHelper(D));
+ if (D->isBitField())
+ TRY_TO(TraverseStmt(D->getBitWidth()));
+ // FIXME: implement the rest.
+ })
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitDependentNameType(
- DependentNameType *T) {
- if (T->getQualifier() &&
- getDerived().VisitNestedNameSpecifier(T->getQualifier()))
+bool RecursiveASTVisitor<Derived>::TraverseFunctionHelper(FunctionDecl *D) {
+ TRY_TO(TraverseNestedNameSpecifier(D->getQualifier()));
+
+ // Visit the function type itself, which can be either
+ // FunctionNoProtoType or FunctionProtoType, or a typedef. If it's
+ // not a Function*ProtoType, then it can't have a body or arguments,
+ // so we have to do less work.
+ Type *FuncType = D->getType().getTypePtr();
+ if (FunctionProtoType *FuncProto = dyn_cast<FunctionProtoType>(FuncType)) {
+ if (D->isThisDeclarationADefinition()) {
+ // Don't call Traverse*, or the result type and parameter types
+ // will be double counted.
+ TRY_TO(WalkUpFromFunctionProtoType(FuncProto));
+ } else {
+ // This works around a bug in Clang that does not add the parameters
+ // to decls_begin/end for function declarations (as opposed to
+ // definitions):
+ // http://llvm.org/PR7442
+ // We work around this here by traversing the function type.
+ // This isn't perfect because we don't traverse the default
+ // values, if any. It also may not interact well with
+ // templates. But it's the best we can do until the bug is
+ // fixed.
+ // FIXME: replace the entire 'if' statement with
+ // TRY_TO(WalkUpFromFunctionProtoType(FuncProto));
+ // when the bug is fixed.
+ TRY_TO(TraverseFunctionProtoType(FuncProto));
+ return true;
+ }
+ } else if (FunctionNoProtoType *FuncNoProto =
+ dyn_cast<FunctionNoProtoType>(FuncType)) {
+ // Don't call Traverse*, or the result type will be double
+ // counted.
+ TRY_TO(WalkUpFromFunctionNoProtoType(FuncNoProto));
+ } else { // a typedef type, or who knows what
+ assert(!D->isThisDeclarationADefinition() && "Unexpected function type");
+ TRY_TO(TraverseType(D->getType()));
return true;
+ }
- if (T->getTemplateId() &&
- getDerived().VisitTemplateSpecializationType(
- const_cast<TemplateSpecializationType *>(T->getTemplateId())))
- return true;
+ TRY_TO(TraverseType(D->getResultType()));
+ TRY_TO(TraverseDeclContextHelper(D)); // Parameters.
- return getDerived().VisitType(T);
+ if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(D)) {
+ // Constructor initializers.
+ for (CXXConstructorDecl::init_iterator I = Ctor->init_begin(),
+ E = Ctor->init_end();
+ I != E; ++I) {
+ TRY_TO(TraverseConstructorInitializer(*I));
+ }
+ }
+
+ if (D->isThisDeclarationADefinition()) {
+ TRY_TO(TraverseStmt(D->getBody())); // Function body.
+ }
+ return true;
}
+DEF_TRAVERSE_DECL(FunctionDecl, {
+ // We skip decls_begin/decls_end, which are already covered by
+ // TraverseFunctionHelper().
+ return TraverseFunctionHelper(D);
+ })
+
+DEF_TRAVERSE_DECL(CXXMethodDecl, {
+ // We skip decls_begin/decls_end, which are already covered by
+ // TraverseFunctionHelper().
+ return TraverseFunctionHelper(D);
+ })
+
+DEF_TRAVERSE_DECL(CXXConstructorDecl, {
+ // We skip decls_begin/decls_end, which are already covered by
+ // TraverseFunctionHelper().
+ return TraverseFunctionHelper(D);
+ })
+
+// CXXConversionDecl is the declaration of a type conversion operator.
+// It's not a cast expression.
+DEF_TRAVERSE_DECL(CXXConversionDecl, {
+ // We skip decls_begin/decls_end, which are already covered by
+ // TraverseFunctionHelper().
+ return TraverseFunctionHelper(D);
+ })
+
+DEF_TRAVERSE_DECL(CXXDestructorDecl, {
+ // We skip decls_begin/decls_end, which are already covered by
+ // TraverseFunctionHelper().
+ return TraverseFunctionHelper(D);
+ })
+
template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitObjCInterfaceType(
- ObjCInterfaceType *T) {
- return getDerived().VisitObjCObjectType(T);
+bool RecursiveASTVisitor<Derived>::TraverseVarHelper(VarDecl *D) {
+ TRY_TO(TraverseDeclaratorHelper(D));
+ // FIXME: This often double-counts -- for instance, for all local
+ // vars, though not for global vars -- because the initializer is
+ // also captured when the var-decl is in a DeclStmt.
+ TRY_TO(TraverseStmt(D->getInit()));
+ return true;
}
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitObjCObjectType(ObjCObjectType *T) {
- // We have to watch out here because an ObjCInterfaceType's base
- // type is itself.
- if (T->getBaseType().getTypePtr() != T)
- if (Visit(T->getBaseType()))
- return true;
+DEF_TRAVERSE_DECL(VarDecl, {
+ TRY_TO(TraverseVarHelper(D));
+ })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_DECL(ImplicitParamDecl, {
+ TRY_TO(TraverseVarHelper(D));
+ })
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitObjCObjectPointerType(
- ObjCObjectPointerType *T) {
- if (Visit(T->getPointeeType()))
- return true;
+DEF_TRAVERSE_DECL(NonTypeTemplateParmDecl, {
+ // A non-type template parameter, e.g. "S" in template<int S> class Foo ...
+ TRY_TO(TraverseStmt(D->getDefaultArgument()));
+ TRY_TO(TraverseVarHelper(D));
+ })
- return getDerived().VisitType(T);
-}
+DEF_TRAVERSE_DECL(ParmVarDecl, {
+ if (D->hasDefaultArg() &&
+ D->hasUninstantiatedDefaultArg() &&
+ !D->hasUnparsedDefaultArg())
+ TRY_TO(TraverseStmt(D->getUninstantiatedDefaultArg()));
-template<typename Derived>
-bool RecursiveASTVisitorImpl<Derived>::VisitDecl(Decl *D) {
- if (DeclContext *DC = dyn_cast<DeclContext>(D)) {
- for (DeclContext::decl_iterator Child = DC->decls_begin(),
- ChildEnd = DC->decls_end();
- Child != ChildEnd; ++Child)
- if (Visit(*Child))
- return true;
+ if (D->hasDefaultArg() &&
+ !D->hasUninstantiatedDefaultArg() &&
+ !D->hasUnparsedDefaultArg())
+ TRY_TO(TraverseStmt(D->getDefaultArg()));
- return false;
- }
+ TRY_TO(TraverseVarHelper(D));
+ })
- return false;
-}
+#undef DEF_TRAVERSE_DECL
-/// \brief A visitor that recursively walks the entire Clang AST.
-///
-/// Clients of this visitor should subclass the visitor (providing
-/// themselves as the template argument, using the curiously
-/// recurring template pattern) and override any of the Visit*
-/// methods (except Visit()) for declaration, type, statement,
-/// expression, or other AST nodes where the visitor should customize
-/// behavior. Returning "true" from one of these overridden functions
-/// will abort the entire traversal. An overridden Visit* method
-/// will not descend further into the AST for that node unless
-/// Base::Visit* is called.
-template<typename Derived>
-class RecursiveASTVisitor : public RecursiveASTVisitorImpl<Derived> {
- typedef RecursiveASTVisitorImpl<Derived> Impl;
-public:
- typedef RecursiveASTVisitor<Derived> Base;
-
- bool VisitDeclaratorDecl(DeclaratorDecl *D);
- bool VisitFunctionDecl(FunctionDecl *D);
- bool VisitVarDecl(VarDecl *D);
- bool VisitBlockDecl(BlockDecl *D);
- bool VisitDeclStmt(DeclStmt *S);
- bool VisitFunctionType(FunctionType *F);
- bool VisitFunctionProtoType(FunctionProtoType *F);
-};
+// ----------------- Stmt traversal -----------------
+//
+// For stmts, we automate (in the DEF_TRAVERSE_STMT macro) iterating
+// over the children defined in child_begin/child_end (every stmt
+// defines these, though sometimes the range is empty). Each
+// individual Traverse* method only needs to worry about children
+// other than those. To see what child_begin()/end() does for a given
+// class, see, e.g.,
+// http://clang.llvm.org/doxygen/Stmt_8cpp_source.html
+
+// This macro makes available a variable S, the passed-in stmt.
+#define DEF_TRAVERSE_STMT(STMT, CODE) \
+template<typename Derived> \
+bool RecursiveASTVisitor<Derived>::Traverse##STMT (STMT *S) { \
+ TRY_TO(WalkUpFrom##STMT(S)); \
+ { CODE; } \
+ for (Stmt::child_iterator C = S->child_begin(), CEnd = S->child_end(); \
+ C != CEnd; ++C) { \
+ TRY_TO(TraverseStmt(*C)); \
+ } \
+ return true; \
+}
-#define DEFINE_VISIT(Type, Name, Statement) \
- template<typename Derived> \
- bool RecursiveASTVisitor<Derived>::Visit ## Type (Type *Name) { \
- if (Impl::Visit ## Type (Name)) return true; \
- { Statement; } \
- return false; \
- }
+DEF_TRAVERSE_STMT(AsmStmt, {
+ TRY_TO(TraverseStmt(S->getAsmString()));
+ for (unsigned I = 0, E = S->getNumInputs(); I < E; ++I) {
+ TRY_TO(TraverseStmt(S->getInputConstraintLiteral(I)));
+ }
+ for (unsigned I = 0, E = S->getNumOutputs(); I < E; ++I) {
+ TRY_TO(TraverseStmt(S->getOutputConstraintLiteral(I)));
+ }
+ for (unsigned I = 0, E = S->getNumClobbers(); I < E; ++I) {
+ TRY_TO(TraverseStmt(S->getClobber(I)));
+ }
+ // child_begin()/end() iterates over inputExpr and outputExpr.
+ })
-DEFINE_VISIT(DeclaratorDecl, D, {
- if (TypeSourceInfo *TInfo = D->getTypeSourceInfo())
- return this->Visit(TInfo->getType());
+DEF_TRAVERSE_STMT(CXXCatchStmt, {
+ // We don't traverse S->getCaughtType(), as we are already
+ // traversing the exception object, which has this type.
+ // child_begin()/end() iterates over the handler block.
})
-DEFINE_VISIT(FunctionDecl, D, {
- if (D->isThisDeclarationADefinition())
- return this->Visit(D->getBody());
+DEF_TRAVERSE_STMT(ForStmt, {
+ TRY_TO(TraverseDecl(S->getConditionVariable()));
+ // child_begin()/end() iterates over init, cond, inc, and body stmts.
})
-DEFINE_VISIT(VarDecl, D, return this->Visit(D->getInit()))
+DEF_TRAVERSE_STMT(IfStmt, {
+ TRY_TO(TraverseDecl(S->getConditionVariable()));
+ // child_begin()/end() iterates over cond, then, and else stmts.
+ })
-DEFINE_VISIT(BlockDecl, D, return this->Visit(D->getBody()))
+DEF_TRAVERSE_STMT(WhileStmt, {
+ TRY_TO(TraverseDecl(S->getConditionVariable()));
+    // child_begin()/end() iterates over the cond and body stmts.
+ })
-DEFINE_VISIT(DeclStmt, S, {
- for (DeclStmt::decl_iterator I = S->decl_begin(), E = S->decl_end();
- I != E; ++I) {
- if (this->Visit(*I))
- return true;
+// These non-expr stmts (most of them) do not need any action except
+// iterating over the children.
+DEF_TRAVERSE_STMT(BreakStmt, { })
+DEF_TRAVERSE_STMT(CompoundStmt, { })
+DEF_TRAVERSE_STMT(ContinueStmt, { })
+DEF_TRAVERSE_STMT(CXXTryStmt, { })
+DEF_TRAVERSE_STMT(DeclStmt, { })
+DEF_TRAVERSE_STMT(DoStmt, { })
+DEF_TRAVERSE_STMT(GotoStmt, { })
+DEF_TRAVERSE_STMT(IndirectGotoStmt, { })
+DEF_TRAVERSE_STMT(LabelStmt, { })
+DEF_TRAVERSE_STMT(NullStmt, { })
+DEF_TRAVERSE_STMT(ObjCAtCatchStmt, { })
+DEF_TRAVERSE_STMT(ObjCAtFinallyStmt, { })
+DEF_TRAVERSE_STMT(ObjCAtSynchronizedStmt, { })
+DEF_TRAVERSE_STMT(ObjCAtThrowStmt, { })
+DEF_TRAVERSE_STMT(ObjCAtTryStmt, { })
+DEF_TRAVERSE_STMT(ObjCForCollectionStmt, { })
+DEF_TRAVERSE_STMT(ReturnStmt, { })
+DEF_TRAVERSE_STMT(SwitchStmt, { })
+DEF_TRAVERSE_STMT(SwitchCase, { })
+DEF_TRAVERSE_STMT(CaseStmt, { })
+DEF_TRAVERSE_STMT(DefaultStmt, { })
+
+DEF_TRAVERSE_STMT(CXXDependentScopeMemberExpr, {
+ if (S->hasExplicitTemplateArgs()) {
+ TRY_TO(TraverseTemplateArgumentLocsHelper(
+ S->getTemplateArgs(), S->getNumTemplateArgs()));
}
+ TRY_TO(TraverseNestedNameSpecifier(S->getQualifier()));
})
-// FunctionType is the common base class of FunctionNoProtoType (a
-// K&R-style function declaration that has no information about
-// its arguments) and FunctionProtoType.
-DEFINE_VISIT(FunctionType, F, return this->Visit(F->getResultType()))
+DEF_TRAVERSE_STMT(DeclRefExpr, {
+ TRY_TO(TraverseTemplateArgumentLocsHelper(
+ S->getTemplateArgs(), S->getNumTemplateArgs()));
+ // FIXME: Should we be recursing on the qualifier?
+ TRY_TO(TraverseNestedNameSpecifier(S->getQualifier()));
+ })
-DEFINE_VISIT(FunctionProtoType, F, {
- for (unsigned i = 0; i != F->getNumArgs(); ++i) {
- if (this->Visit(F->getArgType(i)))
- return true;
- }
- for (unsigned i = 0; i != F->getNumExceptions(); ++i) {
- if (this->Visit(F->getExceptionType(i)))
- return true;
+DEF_TRAVERSE_STMT(DependentScopeDeclRefExpr, {
+ // FIXME: Should we be recursing on these two things?
+ if (S->hasExplicitTemplateArgs()) {
+ TRY_TO(TraverseTemplateArgumentLocsHelper(
+ S->getExplicitTemplateArgs().getTemplateArgs(),
+ S->getNumTemplateArgs()));
}
+ TRY_TO(TraverseNestedNameSpecifier(S->getQualifier()));
})
-#undef DEFINE_VISIT
+DEF_TRAVERSE_STMT(MemberExpr, {
+ TRY_TO(TraverseTemplateArgumentLocsHelper(
+ S->getTemplateArgs(), S->getNumTemplateArgs()));
+ // FIXME: Should we be recursing on the qualifier?
+ TRY_TO(TraverseNestedNameSpecifier(S->getQualifier()));
+ })
-#undef DISPATCH
+DEF_TRAVERSE_STMT(ImplicitCastExpr, {
+ // We don't traverse the cast type, as it's not written in the
+ // source code.
+ })
+
+DEF_TRAVERSE_STMT(CStyleCastExpr, {
+ TRY_TO(TraverseType(S->getTypeAsWritten()));
+ })
+
+DEF_TRAVERSE_STMT(CXXFunctionalCastExpr, {
+ TRY_TO(TraverseType(S->getTypeAsWritten()));
+ })
+
+DEF_TRAVERSE_STMT(CXXConstCastExpr, {
+ TRY_TO(TraverseType(S->getTypeAsWritten()));
+ })
+
+DEF_TRAVERSE_STMT(CXXDynamicCastExpr, {
+ TRY_TO(TraverseType(S->getTypeAsWritten()));
+ })
+
+DEF_TRAVERSE_STMT(CXXReinterpretCastExpr, {
+ TRY_TO(TraverseType(S->getTypeAsWritten()));
+ })
+
+DEF_TRAVERSE_STMT(CXXStaticCastExpr, {
+ TRY_TO(TraverseType(S->getTypeAsWritten()));
+ })
+
+// InitListExpr is a tricky one, because we want to do all our work on
+// the syntactic form of the listexpr, but this method takes the
+// semantic form by default. We can't use the macro helper because it
+// calls WalkUp*() on the semantic form, before our code can convert
+// to the syntactic form.
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseInitListExpr(InitListExpr *S) {
+ if (InitListExpr *Syn = S->getSyntacticForm())
+ S = Syn;
+ TRY_TO(WalkUpFromInitListExpr(S));
+ // All we need are the default actions. FIXME: use a helper function.
+ for (Stmt::child_iterator C = S->child_begin(), CEnd = S->child_end();
+ C != CEnd; ++C) {
+ TRY_TO(TraverseStmt(*C));
+ }
+ return true;
+}
+
+DEF_TRAVERSE_STMT(CXXScalarValueInitExpr, {
+ // This is called for code like 'return T()' where T is a built-in
+ // (i.e. non-class) type.
+ if (!S->isImplicit())
+ TRY_TO(TraverseType(S->getType()));
+ })
+
+DEF_TRAVERSE_STMT(CXXNewExpr, {
+ TRY_TO(TraverseType(S->getAllocatedType()));
+ })
+
+// These exprs (most of them) do not need any action except iterating
+// over the children.
+DEF_TRAVERSE_STMT(AddrLabelExpr, { })
+DEF_TRAVERSE_STMT(ArraySubscriptExpr, { })
+DEF_TRAVERSE_STMT(BlockDeclRefExpr, { })
+DEF_TRAVERSE_STMT(BlockExpr, { })
+DEF_TRAVERSE_STMT(ChooseExpr, { })
+DEF_TRAVERSE_STMT(CompoundLiteralExpr, { })
+DEF_TRAVERSE_STMT(CXXBindReferenceExpr, { })
+DEF_TRAVERSE_STMT(CXXBindTemporaryExpr, { })
+DEF_TRAVERSE_STMT(CXXBoolLiteralExpr, { })
+DEF_TRAVERSE_STMT(CXXDefaultArgExpr, { })
+DEF_TRAVERSE_STMT(CXXDeleteExpr, { })
+DEF_TRAVERSE_STMT(CXXExprWithTemporaries, { })
+DEF_TRAVERSE_STMT(CXXNullPtrLiteralExpr, { })
+DEF_TRAVERSE_STMT(CXXPseudoDestructorExpr, { })
+DEF_TRAVERSE_STMT(CXXThisExpr, { })
+DEF_TRAVERSE_STMT(CXXThrowExpr, { })
+DEF_TRAVERSE_STMT(CXXTypeidExpr, { })
+DEF_TRAVERSE_STMT(CXXUnresolvedConstructExpr, { })
+DEF_TRAVERSE_STMT(DesignatedInitExpr, { })
+DEF_TRAVERSE_STMT(ExtVectorElementExpr, { })
+DEF_TRAVERSE_STMT(GNUNullExpr, { })
+DEF_TRAVERSE_STMT(ImplicitValueInitExpr, { })
+DEF_TRAVERSE_STMT(ObjCEncodeExpr, { })
+DEF_TRAVERSE_STMT(ObjCImplicitSetterGetterRefExpr, { })
+DEF_TRAVERSE_STMT(ObjCIsaExpr, { })
+DEF_TRAVERSE_STMT(ObjCIvarRefExpr, { })
+DEF_TRAVERSE_STMT(ObjCMessageExpr, { })
+DEF_TRAVERSE_STMT(ObjCPropertyRefExpr, { })
+DEF_TRAVERSE_STMT(ObjCProtocolExpr, { })
+DEF_TRAVERSE_STMT(ObjCSelectorExpr, { })
+DEF_TRAVERSE_STMT(ObjCSuperExpr, { })
+DEF_TRAVERSE_STMT(OffsetOfExpr, { })
+DEF_TRAVERSE_STMT(ParenExpr, { })
+DEF_TRAVERSE_STMT(ParenListExpr, { })
+DEF_TRAVERSE_STMT(PredefinedExpr, { })
+DEF_TRAVERSE_STMT(ShuffleVectorExpr, { })
+DEF_TRAVERSE_STMT(SizeOfAlignOfExpr, { })
+DEF_TRAVERSE_STMT(StmtExpr, { })
+DEF_TRAVERSE_STMT(TypesCompatibleExpr, { })
+DEF_TRAVERSE_STMT(UnaryTypeTraitExpr, { })
+DEF_TRAVERSE_STMT(UnresolvedLookupExpr, { })
+DEF_TRAVERSE_STMT(UnresolvedMemberExpr, { })
+DEF_TRAVERSE_STMT(VAArgExpr, { })
+DEF_TRAVERSE_STMT(CXXConstructExpr, { })
+
+DEF_TRAVERSE_STMT(CXXTemporaryObjectExpr, {
+ // This is called for code like 'return T()' where T is a class type.
+ TRY_TO(TraverseType(S->getType()));
+ })
+
+DEF_TRAVERSE_STMT(CallExpr, { })
+DEF_TRAVERSE_STMT(CXXMemberCallExpr, { })
+DEF_TRAVERSE_STMT(CXXOperatorCallExpr, { })
+
+// These operators (all of them) do not need any action except
+// iterating over the children.
+DEF_TRAVERSE_STMT(ConditionalOperator, { })
+DEF_TRAVERSE_STMT(UnaryOperator, { })
+DEF_TRAVERSE_STMT(BinaryOperator, { })
+DEF_TRAVERSE_STMT(CompoundAssignOperator, { })
+
+// These literals (all of them) do not need any action.
+DEF_TRAVERSE_STMT(IntegerLiteral, { })
+DEF_TRAVERSE_STMT(CharacterLiteral, { })
+DEF_TRAVERSE_STMT(FloatingLiteral, { })
+DEF_TRAVERSE_STMT(ImaginaryLiteral, { })
+DEF_TRAVERSE_STMT(StringLiteral, { })
+DEF_TRAVERSE_STMT(ObjCStringLiteral, { })
+
+// FIXME: look at the following tricky-seeming exprs to see if we
+// need to recurse on anything. These are ones that have methods
+// returning decls or qualtypes or nestednamespecifier -- though I'm
+// not sure if they own them -- or just seemed very complicated, or
+// had lots of sub-types to explore.
+//
+// VisitOverloadExpr and its children: recurse on template args? etc?
+
+// FIXME: go through all the stmts and exprs again, and see which of them
+// create new types, and recurse on the types (TypeLocs?) of those.
+// Candidates:
+//
+// http://clang.llvm.org/doxygen/classclang_1_1CXXTypeidExpr.html
+// http://clang.llvm.org/doxygen/classclang_1_1SizeOfAlignOfExpr.html
+// http://clang.llvm.org/doxygen/classclang_1_1TypesCompatibleExpr.html
+// http://clang.llvm.org/doxygen/classclang_1_1CXXUnresolvedConstructExpr.html
+// Every class that has getQualifier.
+
+#undef DEF_TRAVERSE_STMT
+
+#undef TRY_TO
} // end namespace clang
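The reworked visitor splits traversal into three layers: Traverse*() drives the walk, WalkUpFrom*() climbs the class hierarchy for a node, and Visit*() is the per-node hook clients override. A minimal sketch of a client (not part of this patch; IfCounter and the counting logic are illustrative, and the header path assumes the visitor lives at clang/AST/RecursiveASTVisitor.h):

    #include "clang/AST/RecursiveASTVisitor.h"

    class IfCounter : public clang::RecursiveASTVisitor<IfCounter> {
    public:
      unsigned NumIfs;
      IfCounter() : NumIfs(0) {}

      // Called once per IfStmt via WalkUpFromIfStmt(). Note the inverted
      // convention relative to the old visitor: returning true means
      // "continue", and returning false aborts the entire traversal.
      bool VisitIfStmt(clang::IfStmt *S) {
        ++NumIfs;
        return true;
      }
    };

    // Usage: IfCounter C; C.TraverseDecl(Ctx.getTranslationUnitDecl());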
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h b/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
index 01f4b29..55e1f84 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
@@ -92,6 +92,11 @@ public:
return D;
}
+ /// \brief Returns true if this is the first declaration.
+ bool isFirstDeclaration() const {
+ return RedeclLink.NextIsLatest();
+ }
+
/// \brief Returns the most recent (re)declaration of this declaration.
decl_type *getMostRecentDeclaration() {
return getFirstDeclaration()->RedeclLink.getNext();
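The new predicate gives a direct way to ask whether a declaration heads its redeclaration chain, instead of comparing against getFirstDeclaration(). A short usage sketch (FD is assumed to be a FunctionDecl*, which is Redeclarable):

    // Equivalent checks; the new form avoids chasing the chain head.
    bool IsFirstOld = (FD->getFirstDeclaration() == FD);
    bool IsFirstNew = FD->isFirstDeclaration();
    assert(IsFirstOld == IsFirstNew);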
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Stmt.h b/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
index 9deae15..a0c95b1 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
@@ -99,11 +99,9 @@ public:
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
- first##BASE##Constant = FIRST##Class, \
- last##BASE##Constant = LAST##Class,
+ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
- first##BASE##Constant = FIRST##Class, \
- last##BASE##Constant = LAST##Class
+ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
@@ -617,24 +615,16 @@ public:
/// IfStmt - This represents an if/then/else.
///
class IfStmt : public Stmt {
- enum { COND, THEN, ELSE, END_EXPR };
+ enum { VAR, COND, THEN, ELSE, END_EXPR };
Stmt* SubExprs[END_EXPR];
- /// \brief If non-NULL, the declaration in the "if" statement.
- VarDecl *Var;
-
SourceLocation IfLoc;
SourceLocation ElseLoc;
public:
- IfStmt(SourceLocation IL, VarDecl *var, Expr *cond, Stmt *then,
- SourceLocation EL = SourceLocation(), Stmt *elsev = 0)
- : Stmt(IfStmtClass), Var(var), IfLoc(IL), ElseLoc(EL) {
- SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
- SubExprs[THEN] = then;
- SubExprs[ELSE] = elsev;
- }
-
+ IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
+ Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = 0);
+
/// \brief Build an empty if/then/else statement
explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { }
@@ -646,8 +636,8 @@ public:
/// printf("x is %d", x);
/// }
/// \endcode
- VarDecl *getConditionVariable() const { return Var; }
- void setConditionVariable(VarDecl *V) { Var = V; }
+ VarDecl *getConditionVariable() const;
+ void setConditionVariable(ASTContext &C, VarDecl *V);
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
@@ -689,9 +679,8 @@ protected:
/// SwitchStmt - This represents a 'switch' stmt.
///
class SwitchStmt : public Stmt {
- enum { COND, BODY, END_EXPR };
+ enum { VAR, COND, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR];
- VarDecl *Var;
// This points to a linked list of case and default statements.
SwitchCase *FirstCase;
SourceLocation SwitchLoc;
@@ -700,12 +689,7 @@ protected:
virtual void DoDestroy(ASTContext &Ctx);
public:
- SwitchStmt(VarDecl *Var, Expr *cond)
- : Stmt(SwitchStmtClass), Var(Var), FirstCase(0)
- {
- SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
- SubExprs[BODY] = NULL;
- }
+ SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond);
/// \brief Build a empty switch statement.
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { }
@@ -719,8 +703,8 @@ public:
/// // ...
/// }
/// \endcode
- VarDecl *getConditionVariable() const { return Var; }
- void setConditionVariable(VarDecl *V) { Var = V; }
+ VarDecl *getConditionVariable() const;
+ void setConditionVariable(ASTContext &C, VarDecl *V);
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Stmt *getBody() const { return SubExprs[BODY]; }
@@ -768,18 +752,12 @@ public:
/// WhileStmt - This represents a 'while' stmt.
///
class WhileStmt : public Stmt {
- enum { COND, BODY, END_EXPR };
- VarDecl *Var;
+ enum { VAR, COND, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR];
SourceLocation WhileLoc;
public:
- WhileStmt(VarDecl *Var, Expr *cond, Stmt *body, SourceLocation WL)
- : Stmt(WhileStmtClass), Var(Var)
- {
- SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
- SubExprs[BODY] = body;
- WhileLoc = WL;
- }
+ WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
+ SourceLocation WL);
/// \brief Build an empty while statement.
explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { }
@@ -792,8 +770,8 @@ public:
/// // ...
/// }
/// \endcode
- VarDecl *getConditionVariable() const { return Var; }
- void setConditionVariable(VarDecl *V) { Var = V; }
+ VarDecl *getConditionVariable() const;
+ void setConditionVariable(ASTContext &C, VarDecl *V);
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
@@ -875,23 +853,14 @@ public:
/// specified in the source.
///
class ForStmt : public Stmt {
- enum { INIT, COND, INC, BODY, END_EXPR };
+ enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
- VarDecl *CondVar;
SourceLocation ForLoc;
SourceLocation LParenLoc, RParenLoc;
public:
- ForStmt(Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body,
- SourceLocation FL, SourceLocation LP, SourceLocation RP)
- : Stmt(ForStmtClass), CondVar(condVar), ForLoc(FL), LParenLoc(LP),
- RParenLoc(RP)
- {
- SubExprs[INIT] = Init;
- SubExprs[COND] = reinterpret_cast<Stmt*>(Cond);
- SubExprs[INC] = reinterpret_cast<Stmt*>(Inc);
- SubExprs[BODY] = Body;
- }
+ ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc,
+ Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP);
/// \brief Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { }
@@ -906,8 +875,8 @@ public:
/// // ...
/// }
/// \endcode
- VarDecl *getConditionVariable() const { return CondVar; }
- void setConditionVariable(VarDecl *V) { CondVar = V; }
+ VarDecl *getConditionVariable() const;
+ void setConditionVariable(ASTContext &C, VarDecl *V);
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
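In all four statement classes the condition variable moves out of a dedicated VarDecl* member and into the SubExprs array (the new VAR/CONDVAR slot), which is why the accessors become out-of-line and the setter now needs an ASTContext. A plausible sketch of the matching implementation, assuming the variable is wrapped in an ASTContext-allocated DeclStmt (the actual definitions live in Stmt.cpp, which is not part of this header diff):

    VarDecl *IfStmt::getConditionVariable() const {
      if (!SubExprs[VAR])
        return 0;
      // The VAR slot holds a single-decl DeclStmt wrapping the VarDecl.
      DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
      return cast<VarDecl>(DS->getSingleDecl());
    }

    void IfStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
      if (!V) {
        SubExprs[VAR] = 0;
        return;
      }
      // Allocate the wrapper DeclStmt in the ASTContext, hence the
      // new parameter on the setter.
      SourceRange VarRange = V->getSourceRange();
      SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
                                       VarRange.getEnd());
    }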
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h b/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h
index a48f4e6..4da2e34 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h
@@ -16,6 +16,7 @@
#include "llvm/System/DataTypes.h"
#include <cassert>
+#include <cstddef>
#include <iterator>
namespace clang {
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h b/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
index 8b38001..7d5123f 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
@@ -256,6 +256,10 @@ public:
return Args.NumArgs;
}
+ /// Determines whether two template arguments are superficially the
+ /// same.
+ bool structurallyEquals(const TemplateArgument &Other) const;
+
/// \brief Construct a template argument pack.
void setArgumentPack(TemplateArgument *Args, unsigned NumArgs, bool CopyArgs);
@@ -476,6 +480,28 @@ public:
const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
const TemplateArgument &Arg);
+
+inline TemplateSpecializationType::iterator
+ TemplateSpecializationType::end() const {
+ return getArgs() + getNumArgs();
+}
+
+inline DependentTemplateSpecializationType::iterator
+ DependentTemplateSpecializationType::end() const {
+ return getArgs() + getNumArgs();
+}
+
+inline const TemplateArgument &
+ TemplateSpecializationType::getArg(unsigned Idx) const {
+ assert(Idx < getNumArgs() && "Template argument out of range");
+ return getArgs()[Idx];
+}
+
+inline const TemplateArgument &
+ DependentTemplateSpecializationType::getArg(unsigned Idx) const {
+ assert(Idx < getNumArgs() && "Template argument out of range");
+ return getArgs()[Idx];
+}
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TemplateName.h b/contrib/llvm/tools/clang/include/clang/AST/TemplateName.h
index 2e3b6df..ddfac71 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TemplateName.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TemplateName.h
@@ -101,6 +101,14 @@ class TemplateName {
}
public:
+  /// \brief The kind of name that is actually stored.
+ enum NameKind {
+ Template,
+ OverloadedTemplate,
+ QualifiedTemplate,
+ DependentTemplate
+ };
+
TemplateName() : Storage() { }
explicit TemplateName(TemplateDecl *Template) : Storage(Template) { }
explicit TemplateName(OverloadedTemplateStorage *Storage)
@@ -110,6 +118,9 @@ public:
/// \brief Determine whether this template name is NULL.
bool isNull() const { return Storage.isNull(); }
+
+  /// \brief Get the kind of name that is actually stored.
+ NameKind getKind() const;
  /// \brief Retrieve the underlying template declaration that
/// this template name refers to, if known.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Type.h b/contrib/llvm/tools/clang/include/clang/AST/Type.h
index c24bddb..4c148e8 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/Type.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/Type.h
@@ -271,6 +271,8 @@ public:
}
}
+ bool isSupersetOf(Qualifiers Other) const;
+
bool operator==(Qualifiers Other) const { return Mask == Other.Mask; }
bool operator!=(Qualifiers Other) const { return Mask != Other.Mask; }
@@ -627,6 +629,16 @@ public:
bool isAtLeastAsQualifiedAs(QualType Other) const;
QualType getNonReferenceType() const;
+ /// \brief Determine the type of a (typically non-lvalue) expression with the
+ /// specified result type.
+ ///
+ /// This routine should be used for expressions for which the return type is
+ /// explicitly specified (e.g., in a cast or call) and isn't necessarily
+ /// an lvalue. It removes a top-level reference (since there are no
+ /// expressions of reference type) and deletes top-level cvr-qualifiers
+ /// from non-class types (in C++) or all types (in C).
+ QualType getNonLValueExprType(ASTContext &Context) const;
+
/// getDesugaredType - Return the specified type with any "sugar" removed from
/// the type. This takes off typedefs, typeof's etc. If the outer level of
/// the type is already concrete, it returns it unmodified. This is similar
@@ -774,19 +786,27 @@ private:
/// \brief Linkage of this type.
mutable unsigned CachedLinkage : 2;
-
+
+ /// \brief FromPCH - Whether this type comes from a PCH file.
+ mutable bool FromPCH : 1;
+
+ /// \brief Set whether this type comes from a PCH file.
+ void setFromPCH(bool V = true) const {
+ FromPCH = V;
+ }
+
protected:
/// \brief Compute the linkage of this type.
virtual Linkage getLinkageImpl() const;
- enum { BitsRemainingInType = 20 };
+ enum { BitsRemainingInType = 19 };
// silence VC++ warning C4355: 'this' : used in base member initializer list
Type *this_() { return this; }
Type(TypeClass tc, QualType Canonical, bool dependent)
: CanonicalType(Canonical.isNull() ? QualType(this_(), 0) : Canonical),
TC(tc), Dependent(dependent), LinkageKnown(false),
- CachedLinkage(NoLinkage) {}
+ CachedLinkage(NoLinkage), FromPCH(false) {}
virtual ~Type() {}
virtual void Destroy(ASTContext& C);
friend class ASTContext;
@@ -794,6 +814,9 @@ protected:
public:
TypeClass getTypeClass() const { return static_cast<TypeClass>(TC); }
+ /// \brief Whether this type comes from a PCH file.
+ bool isFromPCH() const { return FromPCH; }
+
bool isCanonicalUnqualified() const {
return CanonicalType.getTypePtr() == this;
}
@@ -835,6 +858,9 @@ public:
/// Helper methods to distinguish type categories. All type predicates
/// operate on the canonical type, ignoring typedefs and qualifiers.
+ /// isBuiltinType - returns true if the type is a builtin type.
+ bool isBuiltinType() const;
+
/// isSpecificBuiltinType - Test for a particular builtin type.
bool isSpecificBuiltinType(unsigned K) const;
@@ -846,8 +872,11 @@ public:
bool isCharType() const;
bool isWideCharType() const;
bool isAnyCharacterType() const;
- bool isIntegralType() const;
+ bool isIntegralType(ASTContext &Ctx) const;
+ /// \brief Determine whether this type is an integral or enumeration type.
+ bool isIntegralOrEnumerationType() const;
+
/// Floating point categories.
bool isRealFloatingType() const; // C99 6.2.5p10 (float, double, long double)
/// isComplexType() does *not* include complex integers (a GCC extension).
@@ -923,6 +952,10 @@ public:
/// an objective pointer type for the purpose of GC'ability
bool hasObjCPointerRepresentation() const;
+ /// \brief Determine whether this type has a floating-point representation
+ /// of some sort, e.g., it is a floating-point type or a vector thereof.
+ bool hasFloatingRepresentation() const;
+
// Type Checking Functions: Check to see if this type is structurally the
// specified type, ignoring typedefs and qualifiers, and return a pointer to
// the best type we can.
@@ -1001,6 +1034,9 @@ public:
CanQualType getCanonicalTypeUnqualified() const; // in CanonicalType.h
void dump() const;
static bool classof(const Type *) { return true; }
+
+ friend class PCHReader;
+ friend class PCHWriter;
};
template <> inline const TypedefType *Type::getAs() const {
@@ -1640,6 +1676,13 @@ public:
/// Since the constructor takes the number of vector elements, the
/// client is responsible for converting the size into the number of elements.
class VectorType : public Type, public llvm::FoldingSetNode {
+public:
+ enum AltiVecSpecific {
+ NotAltiVec, // is not AltiVec vector
+ AltiVec, // is AltiVec vector
+ Pixel, // is AltiVec 'vector Pixel'
+ Bool // is AltiVec 'vector bool ...'
+ };
protected:
/// ElementType - The element type of the vector.
QualType ElementType;
@@ -1647,21 +1690,16 @@ protected:
/// NumElements - The number of elements in the vector.
unsigned NumElements;
- /// AltiVec - True if this is for an Altivec vector.
- bool AltiVec;
-
- /// Pixel - True if this is for an Altivec vector pixel.
- bool Pixel;
+ AltiVecSpecific AltiVecSpec;
VectorType(QualType vecType, unsigned nElements, QualType canonType,
- bool isAltiVec, bool isPixel) :
+ AltiVecSpecific altiVecSpec) :
Type(Vector, canonType, vecType->isDependentType()),
- ElementType(vecType), NumElements(nElements),
- AltiVec(isAltiVec), Pixel(isPixel) {}
+ ElementType(vecType), NumElements(nElements), AltiVecSpec(altiVecSpec) {}
VectorType(TypeClass tc, QualType vecType, unsigned nElements,
- QualType canonType, bool isAltiVec, bool isPixel)
+ QualType canonType, AltiVecSpecific altiVecSpec)
: Type(tc, canonType, vecType->isDependentType()), ElementType(vecType),
- NumElements(nElements), AltiVec(isAltiVec), Pixel(isPixel) {}
+ NumElements(nElements), AltiVecSpec(altiVecSpec) {}
friend class ASTContext; // ASTContext creates these.
virtual Linkage getLinkageImpl() const;
@@ -1674,22 +1712,18 @@ public:
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
- bool isAltiVec() const { return AltiVec; }
-
- bool isPixel() const { return Pixel; }
-
+ AltiVecSpecific getAltiVecSpecific() const { return AltiVecSpec; }
+
void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getElementType(), getNumElements(), getTypeClass(),
- AltiVec, Pixel);
+ Profile(ID, getElementType(), getNumElements(), getTypeClass(), AltiVecSpec);
}
static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
unsigned NumElements, TypeClass TypeClass,
- bool isAltiVec, bool isPixel) {
+ unsigned AltiVecSpec) {
ID.AddPointer(ElementType.getAsOpaquePtr());
ID.AddInteger(NumElements);
ID.AddInteger(TypeClass);
- ID.AddBoolean(isAltiVec);
- ID.AddBoolean(isPixel);
+ ID.AddInteger(AltiVecSpec);
}
static bool classof(const Type *T) {
@@ -1705,7 +1739,7 @@ public:
/// points, colors, and textures (modeled after OpenGL Shading Language).
class ExtVectorType : public VectorType {
ExtVectorType(QualType vecType, unsigned nElements, QualType canonType) :
- VectorType(ExtVector, vecType, nElements, canonType, false, false) {}
+ VectorType(ExtVector, vecType, nElements, canonType, NotAltiVec) {}
friend class ASTContext; // ASTContext creates these.
public:
static int getPointAccessorIdx(char c) {
@@ -1875,6 +1909,7 @@ protected:
public:
QualType getResultType() const { return ResultType; }
+
unsigned getRegParmType() const { return RegParm; }
bool getNoReturnAttr() const { return NoReturn; }
CallingConv getCallConv() const { return (CallingConv)CallConv; }
@@ -1882,6 +1917,12 @@ public:
return ExtInfo(NoReturn, RegParm, (CallingConv)CallConv);
}
+ /// \brief Determine the type of an expression that calls a function of
+ /// this type.
+ QualType getCallResultType(ASTContext &Context) const {
+ return getResultType().getNonLValueExprType(Context);
+ }
+
static llvm::StringRef getNameForCallConv(CallingConv CC);
static bool classof(const Type *T) {
@@ -2416,23 +2457,14 @@ public:
/// dependent.
class TemplateSpecializationType
: public Type, public llvm::FoldingSetNode {
-
- // The ASTContext is currently needed in order to profile expressions.
- // FIXME: avoid this.
- //
- // The bool is whether this is a current instantiation.
- llvm::PointerIntPair<ASTContext*, 1, bool> ContextAndCurrentInstantiation;
-
- /// \brief The name of the template being specialized.
+ /// \brief The name of the template being specialized.
TemplateName Template;
/// \brief - The number of template arguments named in this class
/// template specialization.
unsigned NumArgs;
- TemplateSpecializationType(ASTContext &Context,
- TemplateName T,
- bool IsCurrentInstantiation,
+ TemplateSpecializationType(TemplateName T,
const TemplateArgument *Args,
unsigned NumArgs, QualType Canon);
@@ -2467,13 +2499,13 @@ public:
/// True if this template specialization type matches a current
/// instantiation in the context in which it is found.
bool isCurrentInstantiation() const {
- return ContextAndCurrentInstantiation.getInt();
+ return isa<InjectedClassNameType>(getCanonicalTypeInternal());
}
typedef const TemplateArgument * iterator;
iterator begin() const { return getArgs(); }
- iterator end() const;
+ iterator end() const; // defined inline in TemplateBase.h
/// \brief Retrieve the name of the template that we are specializing.
TemplateName getTemplateName() const { return Template; }
@@ -2488,20 +2520,18 @@ public:
/// \brief Retrieve a specific template argument as a type.
/// \precondition @c isArgType(Arg)
- const TemplateArgument &getArg(unsigned Idx) const;
+ const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
bool isSugared() const {
return !isDependentType() || isCurrentInstantiation();
}
QualType desugar() const { return getCanonicalTypeInternal(); }
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, Template, isCurrentInstantiation(), getArgs(), NumArgs,
- *ContextAndCurrentInstantiation.getPointer());
+ void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Ctx) {
+ Profile(ID, Template, getArgs(), NumArgs, Ctx);
}
static void Profile(llvm::FoldingSetNodeID &ID, TemplateName T,
- bool IsCurrentInstantiation,
const TemplateArgument *Args,
unsigned NumArgs,
ASTContext &Context);
@@ -2545,6 +2575,9 @@ class InjectedClassNameType : public Type {
friend class ASTContext; // ASTContext creates these.
friend class TagDecl; // TagDecl mutilates the Decl
+ friend class PCHReader; // FIXME: ASTContext::getInjectedClassNameType is not
+                          //        currently suitable for PCH reading; too many
+                          //        interdependencies.
InjectedClassNameType(CXXRecordDecl *D, QualType TST)
: Type(InjectedClassName, QualType(), true),
Decl(D), InjectedType(TST) {
@@ -2679,6 +2712,7 @@ class ElaboratedType : public TypeWithKeyword, public llvm::FoldingSetNode {
friend class ASTContext; // ASTContext creates these
public:
+ ~ElaboratedType();
/// \brief Retrieve the qualification on this type.
NestedNameSpecifier *getQualifier() const { return NNS; }
@@ -2723,11 +2757,8 @@ class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
/// \brief The nested name specifier containing the qualifier.
NestedNameSpecifier *NNS;
- typedef llvm::PointerUnion<const IdentifierInfo *,
- const TemplateSpecializationType *> NameType;
-
/// \brief The type that this typename specifier refers to.
- NameType Name;
+ const IdentifierInfo *Name;
DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
const IdentifierInfo *Name, QualType CanonType)
@@ -2737,17 +2768,10 @@ class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
"DependentNameType requires a dependent nested-name-specifier");
}
- DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
- const TemplateSpecializationType *Ty, QualType CanonType)
- : TypeWithKeyword(Keyword, DependentName, CanonType, true),
- NNS(NNS), Name(Ty) {
- assert(NNS->isDependent() &&
- "DependentNameType requires a dependent nested-name-specifier");
- }
-
friend class ASTContext; // ASTContext creates these
public:
+ virtual ~DependentNameType();
/// \brief Retrieve the qualification on this type.
NestedNameSpecifier *getQualifier() const { return NNS; }
@@ -2759,13 +2783,7 @@ public:
/// form of the original typename was terminated by an identifier,
/// e.g., "typename T::type".
const IdentifierInfo *getIdentifier() const {
- return Name.dyn_cast<const IdentifierInfo *>();
- }
-
- /// \brief Retrieve the type named by the typename specifier as a
- /// type specialization.
- const TemplateSpecializationType *getTemplateId() const {
- return Name.dyn_cast<const TemplateSpecializationType *>();
+ return Name;
}
bool isSugared() const { return false; }
@@ -2776,10 +2794,10 @@ public:
}
static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS, NameType Name) {
+ NestedNameSpecifier *NNS, const IdentifierInfo *Name) {
ID.AddInteger(Keyword);
ID.AddPointer(NNS);
- ID.AddPointer(Name.getOpaqueValue());
+ ID.AddPointer(Name);
}
static bool classof(const Type *T) {
@@ -2788,6 +2806,83 @@ public:
static bool classof(const DependentNameType *T) { return true; }
};
+/// DependentTemplateSpecializationType - Represents a template
+/// specialization type whose template cannot be resolved, e.g.
+/// A<T>::template B<T>
+class DependentTemplateSpecializationType :
+ public TypeWithKeyword, public llvm::FoldingSetNode {
+
+ /// \brief The nested name specifier containing the qualifier.
+ NestedNameSpecifier *NNS;
+
+ /// \brief The identifier of the template.
+ const IdentifierInfo *Name;
+
+  /// \brief The number of template arguments named in this class
+ /// template specialization.
+ unsigned NumArgs;
+
+ const TemplateArgument *getArgBuffer() const {
+ return reinterpret_cast<const TemplateArgument*>(this+1);
+ }
+ TemplateArgument *getArgBuffer() {
+ return reinterpret_cast<TemplateArgument*>(this+1);
+ }
+
+ DependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args,
+ QualType Canon);
+
+ virtual void Destroy(ASTContext& C);
+
+ friend class ASTContext; // ASTContext creates these
+
+public:
+ virtual ~DependentTemplateSpecializationType();
+
+ NestedNameSpecifier *getQualifier() const { return NNS; }
+ const IdentifierInfo *getIdentifier() const { return Name; }
+
+ /// \brief Retrieve the template arguments.
+ const TemplateArgument *getArgs() const {
+ return getArgBuffer();
+ }
+
+ /// \brief Retrieve the number of template arguments.
+ unsigned getNumArgs() const { return NumArgs; }
+
+ const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
+
+ typedef const TemplateArgument * iterator;
+ iterator begin() const { return getArgs(); }
+ iterator end() const; // inline in TemplateBase.h
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context) {
+ Profile(ID, Context, getKeyword(), NNS, Name, NumArgs, getArgs());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ ASTContext &Context,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *Qualifier,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentTemplateSpecialization;
+ }
+ static bool classof(const DependentTemplateSpecializationType *T) {
+ return true;
+ }
+};
+
/// ObjCObjectType - Represents a class type in Objective C.
/// Every Objective C type is a combination of a base type and a
/// list of protocols.
@@ -3310,6 +3405,12 @@ inline FunctionType::ExtInfo getFunctionExtInfo(QualType t) {
return getFunctionExtInfo(*t);
}
+/// \brief Determine whether this set of qualifiers is a strict superset of
+/// the given set of qualifiers (i.e., it contains them but is not equal).
+inline bool Qualifiers::isSupersetOf(Qualifiers Other) const {
+ return Mask != Other.Mask && (Mask | Other.Mask) == Mask;
+}
+
/// isMoreQualifiedThan - Determine whether this type is more
/// qualified than the Other type. For example, "const volatile int"
/// is more qualified than "const int", "volatile int", and
@@ -3454,6 +3555,10 @@ inline bool Type::isTemplateTypeParmType() const {
return isa<TemplateTypeParmType>(CanonicalType);
}
+inline bool Type::isBuiltinType() const {
+ return getAs<BuiltinType>();
+}
+
inline bool Type::isSpecificBuiltinType(unsigned K) const {
if (const BuiltinType *BT = getAs<BuiltinType>())
if (BT->getKind() == (BuiltinType::Kind) K)
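The separate AltiVec/Pixel booleans collapse into the single AltiVecSpecific enum, which also gains a Bool variant that the two flags could not express. A sketch of adapting a creation call site (a hypothetical caller; it assumes ASTContext::getVectorType was updated in step with this patch to take the enum):

    // Map the old (IsAltiVec, IsPixel) flag pair onto the new enum.
    VectorType::AltiVecSpecific AVS =
        !IsAltiVec ? VectorType::NotAltiVec
                   : (IsPixel ? VectorType::Pixel : VectorType::AltiVec);
    QualType VecTy = Context.getVectorType(ElemTy, NumElements, AVS);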
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
index f988f0e..842c068 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
@@ -142,7 +142,7 @@ private:
/// \brief Return the TypeLoc for a type source info.
inline TypeLoc TypeSourceInfo::getTypeLoc() const {
- return TypeLoc(Ty, (void*)(this + 1));
+ return TypeLoc(Ty, const_cast<void*>(static_cast<const void*>(this + 1)));
}
/// \brief Wrapper of type source information for a type with
@@ -657,7 +657,7 @@ struct ObjCInterfaceLocInfo {
};
/// \brief Wrapper for source info for ObjC interfaces.
-class ObjCInterfaceTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+class ObjCInterfaceTypeLoc : public ConcreteTypeLoc<ObjCObjectTypeLoc,
ObjCInterfaceTypeLoc,
ObjCInterfaceType,
ObjCInterfaceLocInfo> {
@@ -1033,13 +1033,20 @@ public:
setLAngleLoc(Loc);
setRAngleLoc(Loc);
setTemplateNameLoc(Loc);
+ initializeArgLocs(getNumArgs(), getTypePtr()->getArgs(),
+ getArgInfos(), Loc);
+ }
- for (unsigned i = 0, e = getNumArgs(); i != e; ++i) {
+ static void initializeArgLocs(unsigned NumArgs,
+ const TemplateArgument *Args,
+ TemplateArgumentLocInfo *ArgInfos,
+ SourceLocation Loc) {
+ for (unsigned i = 0, e = NumArgs; i != e; ++i) {
TemplateArgumentLocInfo Info;
#ifndef NDEBUG
// If asserts are enabled, be sure to initialize the argument
// loc with the right kind of pointer.
- switch (getTypePtr()->getArg(i).getKind()) {
+ switch (Args[i].getKind()) {
case TemplateArgument::Expression:
case TemplateArgument::Declaration:
Info = TemplateArgumentLocInfo((Expr*) 0);
@@ -1050,7 +1057,7 @@ public:
break;
case TemplateArgument::Template:
- Info = TemplateArgumentLocInfo(SourceRange(), SourceLocation());
+ Info = TemplateArgumentLocInfo(SourceRange(Loc), Loc);
break;
case TemplateArgument::Integral:
@@ -1060,7 +1067,7 @@ public:
break;
}
#endif
- getArgInfos()[i] = Info;
+ ArgInfos[i] = Info;
}
}
@@ -1251,9 +1258,9 @@ public:
}
};
-struct DependentNameLocInfo {
- SourceLocation KeywordLoc;
- SourceRange QualifierRange;
+// This is exactly the structure of an ElaboratedTypeLoc whose inner
+// type is some sort of TypeDeclTypeLoc.
+struct DependentNameLocInfo : ElaboratedLocInfo {
SourceLocation NameLoc;
};
@@ -1303,6 +1310,107 @@ public:
}
};
+// This is exactly the structure of an ElaboratedTypeLoc whose inner
+// type is some sort of TemplateSpecializationTypeLoc.
+struct DependentTemplateSpecializationLocInfo : DependentNameLocInfo {
+ SourceLocation LAngleLoc;
+ SourceLocation RAngleLoc;
+ // followed by a TemplateArgumentLocInfo[]
+};
+
+class DependentTemplateSpecializationTypeLoc :
+ public ConcreteTypeLoc<UnqualTypeLoc,
+ DependentTemplateSpecializationTypeLoc,
+ DependentTemplateSpecializationType,
+ DependentTemplateSpecializationLocInfo> {
+public:
+ SourceLocation getKeywordLoc() const {
+ return this->getLocalData()->KeywordLoc;
+ }
+ void setKeywordLoc(SourceLocation Loc) {
+ this->getLocalData()->KeywordLoc = Loc;
+ }
+
+ SourceRange getQualifierRange() const {
+ return this->getLocalData()->QualifierRange;
+ }
+ void setQualifierRange(SourceRange Range) {
+ this->getLocalData()->QualifierRange = Range;
+ }
+
+ SourceLocation getNameLoc() const {
+ return this->getLocalData()->NameLoc;
+ }
+ void setNameLoc(SourceLocation Loc) {
+ this->getLocalData()->NameLoc = Loc;
+ }
+
+ SourceLocation getLAngleLoc() const {
+ return this->getLocalData()->LAngleLoc;
+ }
+ void setLAngleLoc(SourceLocation Loc) {
+ this->getLocalData()->LAngleLoc = Loc;
+ }
+
+ SourceLocation getRAngleLoc() const {
+ return this->getLocalData()->RAngleLoc;
+ }
+ void setRAngleLoc(SourceLocation Loc) {
+ this->getLocalData()->RAngleLoc = Loc;
+ }
+
+ unsigned getNumArgs() const {
+ return getTypePtr()->getNumArgs();
+ }
+
+ void setArgLocInfo(unsigned i, TemplateArgumentLocInfo AI) {
+#ifndef NDEBUG
+ AI.validateForArgument(getTypePtr()->getArg(i));
+#endif
+ getArgInfos()[i] = AI;
+ }
+ TemplateArgumentLocInfo getArgLocInfo(unsigned i) const {
+ return getArgInfos()[i];
+ }
+
+ TemplateArgumentLoc getArgLoc(unsigned i) const {
+ return TemplateArgumentLoc(getTypePtr()->getArg(i), getArgLocInfo(i));
+ }
+
+ SourceRange getLocalSourceRange() const {
+ if (getKeywordLoc().isValid())
+ return SourceRange(getKeywordLoc(), getRAngleLoc());
+ else
+ return SourceRange(getQualifierRange().getBegin(), getRAngleLoc());
+ }
+
+ void copy(DependentTemplateSpecializationTypeLoc Loc) {
+ unsigned size = getFullDataSize();
+ assert(size == Loc.getFullDataSize());
+ memcpy(Data, Loc.Data, size);
+ }
+
+ void initializeLocal(SourceLocation Loc) {
+ setKeywordLoc(Loc);
+ setQualifierRange(SourceRange(Loc));
+ setNameLoc(Loc);
+ setLAngleLoc(Loc);
+ setRAngleLoc(Loc);
+ TemplateSpecializationTypeLoc::initializeArgLocs(getNumArgs(),
+ getTypePtr()->getArgs(),
+ getArgInfos(), Loc);
+ }
+
+ unsigned getExtraLocalDataSize() const {
+ return getNumArgs() * sizeof(TemplateArgumentLocInfo);
+ }
+
+private:
+ TemplateArgumentLocInfo *getArgInfos() const {
+ return static_cast<TemplateArgumentLocInfo*>(getExtraLocalData());
+ }
+};
+
}
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeLocBuilder.h b/contrib/llvm/tools/clang/include/clang/AST/TypeLocBuilder.h
index e729488..880af26 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TypeLocBuilder.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeLocBuilder.h
@@ -79,7 +79,14 @@ class TypeLocBuilder {
size_t LocalSize = TypeSpecTypeLoc::LocalDataSize;
return cast<TypeSpecTypeLoc>(pushImpl(T, LocalSize));
}
-
+
+ /// Resets this builder to the newly-initialized state.
+ void clear() {
+#ifndef NDEBUG
+ LastTy = QualType();
+#endif
+ Index = Capacity;
+ }
/// Pushes space for a new TypeLoc of the given type. Invalidates
/// any TypeLocs previously retrieved from this builder.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def b/contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def
index 02508af..9cb5686 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def
@@ -92,6 +92,7 @@ NON_CANONICAL_TYPE(SubstTemplateTypeParm, Type)
NON_CANONICAL_UNLESS_DEPENDENT_TYPE(TemplateSpecialization, Type)
DEPENDENT_TYPE(InjectedClassName, Type)
DEPENDENT_TYPE(DependentName, Type)
+DEPENDENT_TYPE(DependentTemplateSpecialization, Type)
TYPE(ObjCObject, Type)
TYPE(ObjCInterface, ObjCObjectType)
TYPE(ObjCObjectPointer, Type)
diff --git a/contrib/llvm/tools/clang/include/clang/AST/UsuallyTinyPtrVector.h b/contrib/llvm/tools/clang/include/clang/AST/UsuallyTinyPtrVector.h
index 5ee40e0..534d4d4 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/UsuallyTinyPtrVector.h
+++ b/contrib/llvm/tools/clang/include/clang/AST/UsuallyTinyPtrVector.h
@@ -41,6 +41,7 @@ public:
typedef const T **iterator;
iterator begin() const;
iterator end() const;
+ size_t size() const;
void push_back(T *Method);
void Destroy();
@@ -56,7 +57,6 @@ UsuallyTinyPtrVector<T>::begin() const {
return &Vec->front();
}
-
template<typename T>
typename UsuallyTinyPtrVector<T>::iterator
UsuallyTinyPtrVector<T>::end() const {
@@ -72,6 +72,15 @@ UsuallyTinyPtrVector<T>::end() const {
}
template<typename T>
+size_t UsuallyTinyPtrVector<T>::size() const {
+ if ((Storage & 0x01) == 0)
+ return (Storage == 0) ? 0 : 1;
+
+ vector_type *Vec = reinterpret_cast<vector_type *>(Storage & ~0x01);
+ return Vec->size();
+}
+
+template<typename T>
void UsuallyTinyPtrVector<T>::push_back(T *Element) {
if (Storage == 0) {
// 0 -> 1 element.
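The new size() follows the same tagged-pointer convention as begin()/end(): bit 0 of Storage distinguishes a single inline element (bit clear) from a heap-allocated vector (bit set), so all three cases are O(1). A usage sketch (M1 and M2 are assumed to be ObjCMethodDecl pointers, and the default constructor is assumed to zero Storage):

    clang::UsuallyTinyPtrVector<clang::ObjCMethodDecl> Vec;
    assert(Vec.size() == 0);  // Storage == 0
    Vec.push_back(M1);        // stored as a plain, untagged pointer
    assert(Vec.size() == 1);
    Vec.push_back(M2);        // spills to a heap vector; bit 0 gets set
    assert(Vec.size() == 2);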
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PrintfFormatString.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PrintfFormatString.h
index 7edb9bd..0877efc 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PrintfFormatString.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PrintfFormatString.h
@@ -25,8 +25,8 @@ namespace analyze_printf {
class ArgTypeResult {
public:
- enum Kind { UnknownTy, InvalidTy, SpecificTy, ObjCPointerTy, CStrTy,
- WCStrTy };
+ enum Kind { UnknownTy, InvalidTy, SpecificTy, ObjCPointerTy, CPointerTy,
+ CStrTy, WCStrTy };
private:
const Kind K;
QualType T;
@@ -58,6 +58,7 @@ public:
// C99 conversion specifiers.
dArg, // 'd'
DArg, // 'D' FreeBSD specific specifiers
+ IntAsCharArg, // 'c'
iArg, // 'i',
oArg, // 'o',
uArg, // 'u',
@@ -71,7 +72,6 @@ public:
GArg, // 'G',
aArg, // 'a',
AArg, // 'A',
- IntAsCharArg, // 'c'
CStrArg, // 's'
VoidPtrArg, // 'p'
OutIntPtrArg, // 'n'
@@ -126,45 +126,87 @@ public:
bool isUIntArg() const { return kind >= oArg && kind <= XArg; }
bool isDoubleArg() const { return kind >= fArg && kind <= AArg; }
Kind getKind() const { return kind; }
+ void setKind(Kind k) { kind = k; }
unsigned getLength() const {
     // Conversion specifiers are currently represented only by
     // single characters, but we keep this flexible.
return 1;
}
+ const char *toString() const;
private:
const char *Position;
Kind kind;
};
-enum LengthModifier {
- None,
- AsChar, // 'hh'
- AsShort, // 'h'
- AsLong, // 'l'
- AsLongLong, // 'll', 'q' (BSD, deprecated)
- AsIntMax, // 'j'
- AsSizeT, // 'z'
- AsPtrDiff, // 't'
- AsLongDouble, // 'L'
- AsWideChar = AsLong // for '%ls'
+class LengthModifier {
+public:
+ enum Kind {
+ None,
+ AsChar, // 'hh'
+ AsShort, // 'h'
+ AsLong, // 'l'
+ AsLongLong, // 'll', 'q' (BSD, deprecated)
+ AsIntMax, // 'j'
+ AsSizeT, // 'z'
+ AsPtrDiff, // 't'
+ AsLongDouble, // 'L'
+ AsWideChar = AsLong // for '%ls'
+ };
+
+ LengthModifier()
+ : Position(0), kind(None) {}
+ LengthModifier(const char *pos, Kind k)
+ : Position(pos), kind(k) {}
+
+ const char *getStart() const {
+ return Position;
+ }
+
+ unsigned getLength() const {
+ switch (kind) {
+ default:
+ return 1;
+ case AsLongLong:
+ case AsChar:
+ return 2;
+ case None:
+ return 0;
+ }
+ }
+
+ Kind getKind() const { return kind; }
+ void setKind(Kind k) { kind = k; }
+
+ const char *toString() const;
+
+private:
+ const char *Position;
+ Kind kind;
};
class OptionalAmount {
public:
enum HowSpecified { NotSpecified, Constant, Arg, Invalid };
- OptionalAmount(HowSpecified h, unsigned i, const char *st)
- : start(st), hs(h), amt(i) {}
+ OptionalAmount(HowSpecified howSpecified,
+ unsigned amount,
+ const char *amountStart,
+ unsigned amountLength,
+ bool usesPositionalArg)
+ : start(amountStart), length(amountLength), hs(howSpecified), amt(amount),
+ UsesPositionalArg(usesPositionalArg), UsesDotPrefix(0) {}
- OptionalAmount(bool b = true)
- : start(0), hs(b ? NotSpecified : Invalid), amt(0) {}
+ OptionalAmount(bool valid = true)
+ : start(0),length(0), hs(valid ? NotSpecified : Invalid), amt(0),
+ UsesPositionalArg(0), UsesDotPrefix(0) {}
bool isInvalid() const {
return hs == Invalid;
}
HowSpecified getHowSpecified() const { return hs; }
+ void setHowSpecified(HowSpecified h) { hs = h; }
bool hasDataArgument() const { return hs == Arg; }
@@ -179,36 +221,87 @@ public:
}
const char *getStart() const {
- return start;
+ // We include the . character if it is given.
+ return start - UsesDotPrefix;
+ }
+
+ unsigned getConstantLength() const {
+ assert(hs == Constant);
+ return length + UsesDotPrefix;
}
ArgTypeResult getArgType(ASTContext &Ctx) const;
+ void toString(llvm::raw_ostream &os) const;
+
+ bool usesPositionalArg() const { return (bool) UsesPositionalArg; }
+ unsigned getPositionalArgIndex() const {
+ assert(hasDataArgument());
+ return amt + 1;
+ }
+
+ bool usesDotPrefix() const { return UsesDotPrefix; }
+ void setUsesDotPrefix() { UsesDotPrefix = true; }
+
private:
const char *start;
+ unsigned length;
HowSpecified hs;
unsigned amt;
+ bool UsesPositionalArg : 1;
+ bool UsesDotPrefix;
+};
+
+// Class representing optional flags with location and representation
+// information.
+class OptionalFlag {
+public:
+ OptionalFlag(const char *Representation)
+ : representation(Representation), flag(false) {}
+ bool isSet() { return flag; }
+ void set() { flag = true; }
+ void clear() { flag = false; }
+ void setPosition(const char *position) {
+ assert(position);
+ this->position = position;
+ }
+ const char *getPosition() const {
+ assert(position);
+ return position;
+ }
+ const char *toString() const { return representation; }
+
+  // Overloaded operators for bool-like behavior.
+ operator bool() const { return flag; }
+ OptionalFlag& operator=(const bool &rhs) {
+ flag = rhs;
+ return *this; // Return a reference to myself.
+ }
+private:
+ const char *representation;
+ const char *position;
+ bool flag;
};
class FormatSpecifier {
LengthModifier LM;
- unsigned IsLeftJustified : 1;
- unsigned HasPlusPrefix : 1;
- unsigned HasSpacePrefix : 1;
- unsigned HasAlternativeForm : 1;
- unsigned HasLeadingZeroes : 1;
+ OptionalFlag IsLeftJustified; // '-'
+ OptionalFlag HasPlusPrefix; // '+'
+ OptionalFlag HasSpacePrefix; // ' '
+ OptionalFlag HasAlternativeForm; // '#'
+ OptionalFlag HasLeadingZeroes; // '0'
/// Positional arguments, an IEEE extension:
/// IEEE Std 1003.1, 2004 Edition
/// http://www.opengroup.org/onlinepubs/009695399/functions/printf.html
- unsigned UsesPositionalArg : 1;
+ bool UsesPositionalArg;
unsigned argIndex;
ConversionSpecifier CS;
OptionalAmount FieldWidth;
OptionalAmount Precision;
public:
- FormatSpecifier() : LM(None),
- IsLeftJustified(0), HasPlusPrefix(0), HasSpacePrefix(0),
- HasAlternativeForm(0), HasLeadingZeroes(0), UsesPositionalArg(0),
+ FormatSpecifier() :
+ IsLeftJustified("-"), HasPlusPrefix("+"), HasSpacePrefix(" "),
+ HasAlternativeForm("#"), HasLeadingZeroes("0"), UsesPositionalArg(false),
argIndex(0) {}
static FormatSpecifier Parse(const char *beg, const char *end);
@@ -220,12 +313,27 @@ public:
void setLengthModifier(LengthModifier lm) {
LM = lm;
}
- void setIsLeftJustified() { IsLeftJustified = 1; }
- void setHasPlusPrefix() { HasPlusPrefix = 1; }
- void setHasSpacePrefix() { HasSpacePrefix = 1; }
- void setHasAlternativeForm() { HasAlternativeForm = 1; }
- void setHasLeadingZeros() { HasLeadingZeroes = 1; }
- void setUsesPositionalArg() { UsesPositionalArg = 1; }
+ void setIsLeftJustified(const char *position) {
+ IsLeftJustified = true;
+ IsLeftJustified.setPosition(position);
+ }
+ void setHasPlusPrefix(const char *position) {
+ HasPlusPrefix = true;
+ HasPlusPrefix.setPosition(position);
+ }
+ void setHasSpacePrefix(const char *position) {
+ HasSpacePrefix = true;
+ HasSpacePrefix.setPosition(position);
+ }
+ void setHasAlternativeForm(const char *position) {
+ HasAlternativeForm = true;
+ HasAlternativeForm.setPosition(position);
+ }
+ void setHasLeadingZeros(const char *position) {
+ HasLeadingZeroes = true;
+ HasLeadingZeroes.setPosition(position);
+ }
+ void setUsesPositionalArg() { UsesPositionalArg = true; }
void setArgIndex(unsigned i) {
assert(CS.consumesDataArgument());
@@ -237,13 +345,18 @@ public:
return argIndex;
}
+ unsigned getPositionalArgIndex() const {
+ assert(CS.consumesDataArgument());
+ return argIndex + 1;
+ }
+
// Methods for querying the format specifier.
const ConversionSpecifier &getConversionSpecifier() const {
return CS;
}
- LengthModifier getLengthModifier() const {
+ const LengthModifier &getLengthModifier() const {
return LM;
}
@@ -257,6 +370,7 @@ public:
void setPrecision(const OptionalAmount &Amt) {
Precision = Amt;
+ Precision.setUsesDotPrefix();
}
const OptionalAmount &getPrecision() const {
@@ -270,12 +384,30 @@ public:
/// more than one type.
ArgTypeResult getArgType(ASTContext &Ctx) const;
- bool isLeftJustified() const { return (bool) IsLeftJustified; }
- bool hasPlusPrefix() const { return (bool) HasPlusPrefix; }
- bool hasAlternativeForm() const { return (bool) HasAlternativeForm; }
- bool hasLeadingZeros() const { return (bool) HasLeadingZeroes; }
- bool hasSpacePrefix() const { return (bool) HasSpacePrefix; }
- bool usesPositionalArg() const { return (bool) UsesPositionalArg; }
+ const OptionalFlag &isLeftJustified() const { return IsLeftJustified; }
+ const OptionalFlag &hasPlusPrefix() const { return HasPlusPrefix; }
+ const OptionalFlag &hasAlternativeForm() const { return HasAlternativeForm; }
+ const OptionalFlag &hasLeadingZeros() const { return HasLeadingZeroes; }
+ const OptionalFlag &hasSpacePrefix() const { return HasSpacePrefix; }
+ bool usesPositionalArg() const { return UsesPositionalArg; }
+
+ /// Changes the specifier and length according to a QualType, retaining any
+ /// flags or options. Returns true on success, or false when a conversion
+ /// was not successful.
+ bool fixType(QualType QT);
+
+ void toString(llvm::raw_ostream &os) const;
+
+ // Validation methods - to check if any element results in undefined behavior
+ bool hasValidPlusPrefix() const;
+ bool hasValidAlternativeForm() const;
+ bool hasValidLeadingZeros() const;
+ bool hasValidSpacePrefix() const;
+ bool hasValidLeftJustified() const;
+
+ bool hasValidLengthModifier() const;
+ bool hasValidPrecision() const;
+ bool hasValidFieldWidth() const;
};
enum PositionContext { FieldWidthPos = 0, PrecisionPos = 1 };
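Taken together, the new pieces (source positions on flags and length modifiers, the toString() renderers, and fixType()) exist so the printf checker can emit fix-it hints rather than merely flag a mismatch. A hedged sketch of that flow (Beg, End, and ArgTy are assumed inputs; FormatSpecifier::Parse and fixType are declared in this header):

    using namespace analyze_printf;

    FormatSpecifier FS = FormatSpecifier::Parse(Beg, End);
    // Suppose the matched argument has type 'long' but the specifier
    // is "%d": fixType() upgrades the length modifier in place...
    if (FS.fixType(ArgTy)) {
      std::string Fixed;
      llvm::raw_string_ostream OS(Fixed);
      FS.toString(OS);
      // ...and toString() renders the corrected specifier ("%ld"),
      // ready to be attached to a fix-it hint.
    }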
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h b/contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h
index c6c9eed..7cd4812 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h
@@ -24,6 +24,7 @@
#include "llvm/ADT/PointerIntPair.h"
#include <algorithm>
#include <cstring>
+#include <memory>
namespace clang {
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
index d627b88..f20a49a 100644
--- a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
@@ -22,13 +22,14 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
-#define DISPATCH_CASE(CASE,CLASS) \
-case Decl::CASE: \
-static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<CLASS*>(D));\
+#define DISPATCH_CASE(CLASS) \
+case Decl::CLASS: \
+static_cast<ImplClass*>(this)->Visit##CLASS##Decl( \
+ static_cast<CLASS##Decl*>(D)); \
break;
-#define DEFAULT_DISPATCH(CLASS) void Visit##CLASS(CLASS* D) {}
-#define DEFAULT_DISPATCH_VARDECL(CLASS) void Visit##CLASS(CLASS* D)\
+#define DEFAULT_DISPATCH(CLASS) void Visit##CLASS##Decl(CLASS##Decl* D) {}
+#define DEFAULT_DISPATCH_VARDECL(CLASS) void Visit##CLASS##Decl(CLASS##Decl* D)\
{ static_cast<ImplClass*>(this)->VisitVarDecl(D); }
@@ -55,34 +56,39 @@ public:
void VisitDecl(Decl* D) {
switch (D->getKind()) {
- DISPATCH_CASE(Function,FunctionDecl)
- DISPATCH_CASE(CXXMethod,CXXMethodDecl)
- DISPATCH_CASE(Var,VarDecl)
- DISPATCH_CASE(ParmVar,ParmVarDecl) // FIXME: (same)
- DISPATCH_CASE(ImplicitParam,ImplicitParamDecl)
- DISPATCH_CASE(EnumConstant,EnumConstantDecl)
- DISPATCH_CASE(Typedef,TypedefDecl)
- DISPATCH_CASE(Record,RecordDecl) // FIXME: Refine. VisitStructDecl?
- DISPATCH_CASE(Enum,EnumDecl)
+ DISPATCH_CASE(Function)
+ DISPATCH_CASE(CXXMethod)
+ DISPATCH_CASE(Var)
+ DISPATCH_CASE(ParmVar) // FIXME: (same)
+ DISPATCH_CASE(ImplicitParam)
+ DISPATCH_CASE(EnumConstant)
+ DISPATCH_CASE(Typedef)
+ DISPATCH_CASE(Record) // FIXME: Refine. VisitStructDecl?
+ DISPATCH_CASE(CXXRecord)
+ DISPATCH_CASE(Enum)
default:
assert(false && "Subtype of ScopedDecl not handled.");
}
}
- DEFAULT_DISPATCH(VarDecl)
- DEFAULT_DISPATCH(FunctionDecl)
- DEFAULT_DISPATCH(CXXMethodDecl)
- DEFAULT_DISPATCH_VARDECL(ParmVarDecl)
- DEFAULT_DISPATCH(ImplicitParamDecl)
- DEFAULT_DISPATCH(EnumConstantDecl)
- DEFAULT_DISPATCH(TypedefDecl)
- DEFAULT_DISPATCH(RecordDecl)
- DEFAULT_DISPATCH(EnumDecl)
- DEFAULT_DISPATCH(ObjCInterfaceDecl)
- DEFAULT_DISPATCH(ObjCClassDecl)
- DEFAULT_DISPATCH(ObjCMethodDecl)
- DEFAULT_DISPATCH(ObjCProtocolDecl)
- DEFAULT_DISPATCH(ObjCCategoryDecl)
+ DEFAULT_DISPATCH(Var)
+ DEFAULT_DISPATCH(Function)
+ DEFAULT_DISPATCH(CXXMethod)
+ DEFAULT_DISPATCH_VARDECL(ParmVar)
+ DEFAULT_DISPATCH(ImplicitParam)
+ DEFAULT_DISPATCH(EnumConstant)
+ DEFAULT_DISPATCH(Typedef)
+ DEFAULT_DISPATCH(Record)
+ DEFAULT_DISPATCH(Enum)
+ DEFAULT_DISPATCH(ObjCInterface)
+ DEFAULT_DISPATCH(ObjCClass)
+ DEFAULT_DISPATCH(ObjCMethod)
+ DEFAULT_DISPATCH(ObjCProtocol)
+ DEFAULT_DISPATCH(ObjCCategory)
+
+ void VisitCXXRecordDecl(CXXRecordDecl *D) {
+ static_cast<ImplClass*>(this)->VisitRecordDecl(D);
+ }
};
} // end namespace clang
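
Since the handler names are now synthesized by token pasting, DISPATCH_CASE(Var) expands to roughly the following, and a subclass overrides only the Visit*Decl handlers it cares about (sketch of the preprocessed form):

    case Decl::Var:
      static_cast<ImplClass*>(this)->VisitVarDecl(static_cast<VarDecl*>(D));
      break;
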
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
new file mode 100644
index 0000000..98871d2
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
@@ -0,0 +1,382 @@
+////////////////////////////////////////////////////////////////////////////////
+// Note: This file is a work in progress. Please do not apply non-trivial
+// updates unless you have talked to Sean Hunt <rideau3@gmail.com> first.
+// Merely adding a new attribute is a trivial update.
+////////////////////////////////////////////////////////////////////////////////
+
+// An attribute's subject is whatever it appertains to. In this file, it is
+// more accurately a list of things that an attribute can appertain to. All
+// Decls and Stmts are possibly AttrSubjects (even though the syntax may not
+// allow attributes on a given Decl or Stmt).
+class AttrSubject;
+
+include "clang/Basic/DeclNodes.td"
+include "clang/Basic/StmtNodes.td"
+
+// A subset-subject is an AttrSubject constrained to operate only on some subset
+// of that subject.
+//
+// The description is used in output messages to specify what the subject
+// represents. FIXME: Deal with translation issues.
+//
+// The code fragment is a boolean expression that will confirm that the subject
+// meets the requirements; the subject will have the name S, and will have the
+// type specified by the base. It should be a simple boolean expression.
+class SubsetSubject<AttrSubject base, string description, code check>
+ : AttrSubject {
+ AttrSubject Base = base;
+ string Description = description;
+ code CheckCode = check;
+}
+
+// This is the type of variable that C++0x defines as a possible subject of
+// the [[aligned()]] attribute.
+def NormalVar : SubsetSubject<Var, "non-register, non-parameter variable",
+ [{S->getStorageClass() != VarDecl::Register &&
+ S->getKind() != Decl::ImplicitParam &&
+ S->getKind() != Decl::ParmVar &&
+ S->getKind() != Decl::NonTypeTemplateParm}]>;
+def CXXVirtualMethod : SubsetSubject<CXXRecord, "virtual member function",
+ [{S->isVirtual()}]>;
+def NonBitField : SubsetSubject<Field, "non-bit field",
+ [{!S->isBitField()}]>;
+
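
The [{...}] fragments are C++ predicates over a subject S that TableGen is expected to splice into generated checking code; for NormalVar the emitted test would read roughly as follows (a sketch of the intended output, not the actual emitter):

    static bool isNormalVarSubject(const clang::VarDecl *S) {
      return S->getStorageClass() != clang::VarDecl::Register &&
             S->getKind() != clang::Decl::ImplicitParam &&
             S->getKind() != clang::Decl::ParmVar &&
             S->getKind() != clang::Decl::NonTypeTemplateParm;
    }
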
+// A single argument to an attribute
+class Argument<string name> {
+ string Name = name;
+}
+
+class IdentifierArgument<string name> : Argument<name>;
+class IntArgument<string name> : Argument<name>;
+class StringArgument<string name> : Argument<name>;
+class ExprArgument<string name> : Argument<name>;
+class FunctionArgument<string name> : Argument<name>;
+class ObjCInterfaceArgument<string name> : Argument<name>;
+class UnsignedIntArgument<string name> : Argument<name>;
+class UnsignedIntOrTypeArgument<string name> : Argument<name>;
+
+// An integer argument with a default value
+class DefaultIntArgument<string name, int default> : IntArgument<name> {
+ int Default = default;
+}
+
+// Zero or more arguments of a type
+class VariadicArgument<Argument arg> : Argument<arg.Name> {
+ Argument VariadicArg = arg;
+}
+
+class Attr {
+ // The various ways in which an attribute can be spelled in source
+ list<string> Spellings;
+ // The things to which an attribute can appertain
+ list<AttrSubject> Subjects;
+ // The arguments allowed on an attribute
+ list<Argument> Args = [];
+ // The namespaces in which the attribute appears in C++0x attributes.
+ // The attribute will not be permitted in C++0x attribute-specifiers if
+ // this is empty; the empty string can be used as a namespace.
+ list<string> Namespaces = [];
+ // A temporary development bit to tell TableGen not to emit certain
+ // information about the attribute.
+ bit DoNotEmit = 1;
+}
+
+//
+// Attributes begin here
+//
+
+def Alias : Attr {
+ let Spellings = ["alias"];
+ let Args = [StringArgument<"AliasName">];
+}
+
+def Aligned : Attr {
+ let Spellings = ["align", "aligned"];
+ let Subjects = [NonBitField, NormalVar, Tag];
+ let Args = [UnsignedIntOrTypeArgument<"Alignment">];
+ let Namespaces = ["", "std"];
+}
+
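
For a record like Aligned, the Spellings and Namespaces lists correspond to source forms along these lines (illustrative only; C++0x attribute syntax was still in flux at this point):

    struct S { char c; } __attribute__((aligned(16)));   // GNU spelling "aligned"
    // C++0x attribute-specifiers, per Namespaces ["", "std"]:
    //   [[align(16)]] int x;
    //   [[std::align(16)]] int y;
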
+def AlignMac68k : Attr {
+ let Spellings = [];
+}
+
+def AlwaysInline : Attr {
+ let Spellings = ["always_inline"];
+}
+
+def AnalyzerNoReturn : Attr {
+ let Spellings = ["analyzer_noreturn"];
+}
+
+def Annotate : Attr {
+ let Spellings = ["annotate"];
+ let Args = [StringArgument<"Annotation">];
+}
+
+def AsmLabel : Attr {
+ let Spellings = [];
+ let Args = [StringArgument<"Label">];
+}
+
+def BaseCheck : Attr {
+ let Spellings = ["base_check"];
+ let Subjects = [CXXRecord];
+ let Namespaces = ["", "std"];
+ let DoNotEmit = 0;
+}
+
+def Blocks : Attr {
+ let Spellings = ["blocks"];
+ let Args = [IdentifierArgument<"Type">];
+}
+
+def CarriesDependency : Attr {
+ let Spellings = ["carries_dependency"];
+ let Subjects = [ParmVar, Function];
+ let Namespaces = ["", "std"];
+ let DoNotEmit = 0;
+}
+
+def CDecl : Attr {
+ let Spellings = ["cdecl", "__cdecl"];
+}
+
+def CFReturnsRetained : Attr {
+ let Spellings = ["cf_returns_retained"];
+}
+
+def CFReturnsNotRetained : Attr {
+ let Spellings = ["cf_returns_not_retained"];
+}
+
+def Cleanup : Attr {
+ let Spellings = ["cleanup"];
+ let Args = [FunctionArgument<"FunctionDecl">];
+}
+
+def Const : Attr {
+ let Spellings = ["const"];
+}
+
+def Constructor : Attr {
+ let Spellings = ["constructor"];
+ let Args = [IntArgument<"Priority">];
+}
+
+def Deprecated : Attr {
+ let Spellings = ["deprecated"];
+}
+
+def Destructor : Attr {
+ let Spellings = ["destructor"];
+ let Args = [IntArgument<"Priority">];
+}
+
+def DLLExport : Attr {
+ let Spellings = ["dllexport"];
+}
+
+def DLLImport : Attr {
+ let Spellings = ["dllimport"];
+}
+
+def FastCall : Attr {
+ let Spellings = ["fastcall", "__fastcall"];
+}
+
+def Final : Attr {
+ let Spellings = ["final"];
+ let Subjects = [CXXRecord, CXXVirtualMethod];
+ let Namespaces = ["", "std"];
+ let DoNotEmit = 0;
+}
+
+def Format : Attr {
+ let Spellings = ["format"];
+ let Args = [StringArgument<"Type">, IntArgument<"FormatIdx">,
+ IntArgument<"FirstArg">];
+}
+
+def FormatArg : Attr {
+ let Spellings = ["format_arg"];
+ let Args = [IntArgument<"FormatIdx">];
+}
+
+def GNUInline : Attr {
+ let Spellings = ["gnu_inline"];
+}
+
+def Hiding : Attr {
+ let Spellings = ["hiding"];
+ let Subjects = [Field, CXXMethod];
+ let Namespaces = ["", "std"];
+ let DoNotEmit = 0;
+}
+
+def IBAction : Attr {
+ let Spellings = ["ibaction"];
+}
+
+def IBOutlet : Attr {
+ let Spellings = ["iboutlet"];
+}
+
+def IBOutletCollection : Attr {
+ let Spellings = ["iboutletcollection"];
+ let Args = [ObjCInterfaceArgument<"Class">];
+}
+
+def Malloc : Attr {
+ let Spellings = ["malloc"];
+}
+
+def MaxFieldAlignment : Attr {
+ let Spellings = [];
+ let Args = [UnsignedIntArgument<"Alignment">];
+}
+
+def MSP430Interrupt : Attr {
+ let Spellings = [];
+ let Args = [UnsignedIntArgument<"Number">];
+}
+
+def NoDebug : Attr {
+ let Spellings = ["nodebug"];
+}
+
+def NoInline : Attr {
+ let Spellings = ["noinline"];
+}
+
+def NonNull : Attr {
+ let Spellings = ["nonnull"];
+ let Args = [VariadicArgument<UnsignedIntArgument<"Args">>];
+}
+
+def NoReturn : Attr {
+ let Spellings = ["noreturn"];
+ // FIXME: Does GCC allow this on the function instead?
+ let Subjects = [Function];
+ let Namespaces = ["", "std"];
+}
+
+def NoInstrumentFunction : Attr {
+ let Spellings = ["no_instrument_function"];
+ let Subjects = [Function];
+}
+
+def NoThrow : Attr {
+ let Spellings = ["nothrow"];
+}
+
+def NSReturnsRetained : Attr {
+ let Spellings = ["ns_returns_retained"];
+}
+
+def NSReturnsNotRetained : Attr {
+ let Spellings = ["ns_returns_not_retained"];
+}
+
+def ObjCException : Attr {
+ let Spellings = ["objc_exception"];
+}
+
+def ObjCNSObject : Attr {
+ let Spellings = ["NSOjbect"];
+}
+
+def Override : Attr {
+ let Spellings = ["override"];
+ let Subjects = [CXXVirtualMethod];
+ let Namespaces = ["", "std"];
+ let DoNotEmit = 0;
+}
+
+def Overloadable : Attr {
+ let Spellings = ["overloadable"];
+}
+
+def Packed : Attr {
+ let Spellings = ["packed"];
+}
+
+def Pure : Attr {
+ let Spellings = ["pure"];
+}
+
+def Regparm : Attr {
+ let Spellings = ["regparm"];
+ let Args = [UnsignedIntArgument<"NumParams">];
+}
+
+def ReqdWorkGroupSize : Attr {
+ let Spellings = ["reqd_work_group_size"];
+ let Args = [UnsignedIntArgument<"XDim">, UnsignedIntArgument<"YDim">,
+ UnsignedIntArgument<"ZDim">];
+}
+
+def InitPriority : Attr {
+ let Spellings = ["init_priority"];
+ let Args = [UnsignedIntArgument<"Priority">];
+}
+
+def Section : Attr {
+ let Spellings = ["section"];
+ let Args = [StringArgument<"Name">];
+}
+
+def Sentinel : Attr {
+ let Spellings = ["sentinel"];
+ let Args = [DefaultIntArgument<"NulPos", 0>,
+ DefaultIntArgument<"Sentinel", 0>];
+}
+
+def StdCall : Attr {
+ let Spellings = ["stdcall", "__stdcall"];
+}
+
+def ThisCall : Attr {
+ let Spellings = ["thiscall", "__thiscall"];
+}
+
+def TransparentUnion : Attr {
+ let Spellings = ["transparent_union"];
+}
+
+def Unavailable : Attr {
+ let Spellings = ["unavailable"];
+}
+
+def Unused : Attr {
+ let Spellings = ["unused"];
+}
+
+def Used : Attr {
+ let Spellings = ["used"];
+}
+
+def Visibility : Attr {
+ let Spellings = ["visibility"];
+ let Args = [StringArgument<"Visibility">];
+}
+
+def WarnUnusedResult : Attr {
+ let Spellings = ["warn_unused_result"];
+}
+
+def Weak : Attr {
+ let Spellings = ["weak"];
+}
+
+def WeakImport : Attr {
+ let Spellings = ["weak_import"];
+}
+
+def WeakRef : Attr {
+ let Spellings = ["weakref"];
+}
+
+def X86ForceAlignArgPointer : Attr {
+ let Spellings = [];
+}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h b/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h
new file mode 100644
index 0000000..822573b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h
@@ -0,0 +1,31 @@
+//===----- Attr.h - Enum values for C Attribute Kinds ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the attr::Kind enum
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ATTRKINDS_H
+#define LLVM_CLANG_ATTRKINDS_H
+
+namespace clang {
+
+namespace attr {
+
+// Kind - This is a list of all the recognized kinds of attributes.
+enum Kind {
+#define ATTR(X) X,
+#include "clang/Basic/AttrList.inc"
+ NUM_ATTRS
+};
+
+} // end namespace attr
+} // end namespace clang
+
+#endif
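
AttrList.inc, generated from Attr.td by the new -gen-clang-attr-list backend (see the CMake rules below), is a conventional X-macro list, so the include expands along these lines (a sketch, not the verbatim generated file):

    // AttrList.inc contains one line per attribute:
    //   ATTR(Alias)
    //   ATTR(Aligned)
    //   ...
    // With #define ATTR(X) X, the enum therefore becomes:
    //   enum Kind { Alias, Aligned, /* ... */ NUM_ATTRS };
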
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
index b306954..eff4f5e 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
@@ -314,6 +314,7 @@ BUILTIN(__builtin_setjmp, "iv**", "")
BUILTIN(__builtin_longjmp, "vv**i", "r")
BUILTIN(__builtin_unwind_init, "v", "")
BUILTIN(__builtin_eh_return_data_regno, "ii", "nc")
+BUILTIN(__builtin_snprintf, "ic*zcC*.", "nFp:2:")
BUILTIN(__builtin_vsprintf, "ic*cC*a", "nFP:1:")
BUILTIN(__builtin_vsnprintf, "ic*zcC*a", "nFP:2:")
@@ -540,6 +541,7 @@ LIBBUILTIN(siglongjmp, "vSJi", "fr", "setjmp.h")
// id objc_msgSend(id, SEL)
// but we need new type letters for that.
LIBBUILTIN(objc_msgSend, "v*.", "f", "objc/message.h")
+BUILTIN(__builtin_objc_memmove_collectable, "v*v*vC*z", "nF")
// Builtin math library functions
LIBBUILTIN(pow, "ddd", "fe", "math.h")
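
Decoded per the Builtins.def conventions ('i' int, 'c*' char*, 'z' size_t, 'cC*' const char*, '.' ellipsis; 'n' nothrow, 'F' __builtin_-prefixed libc function, 'p:2:' printf-like with the format string at zero-based argument index 2, matching the neighboring vsnprintf entry), the new __builtin_snprintf entry declares:

    int __builtin_snprintf(char *s, size_t n, const char *fmt, ...);
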
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def
index 4973076a..54e4c2b 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def
@@ -14,7 +14,13 @@
// The format of this database matches clang/Basic/Builtins.def.
-// FIXME: This is just a placeholder. NEON intrinsics should be listed here.
+// In libgcc
+BUILTIN(__clear_cache, "vc*c*", "")
BUILTIN(__builtin_thread_pointer, "v*", "")
+// NEON
+#define GET_NEON_BUILTINS
+#include "clang/Basic/arm_neon.inc"
+#undef GET_NEON_BUILTINS
+
#undef BUILTIN
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def
index 287bba9..e0518dc 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def
@@ -18,14 +18,6 @@
// The format of this database matches clang/Basic/Builtins.def.
// This is just a placeholder; the types and attributes are wrong.
-BUILTIN(__builtin_altivec_abs_v16qi, "V16UcV16Sc", "")
-BUILTIN(__builtin_altivec_abs_v8hi, "V8UsV8Ss", "")
-BUILTIN(__builtin_altivec_abs_v4si, "V4UiV4Si", "")
-
-BUILTIN(__builtin_altivec_abss_v16qi, "V16UcV16Sc", "")
-BUILTIN(__builtin_altivec_abss_v8hi, "V8UsV8Ss", "")
-BUILTIN(__builtin_altivec_abss_v4si, "V4UiV4Si", "")
-
BUILTIN(__builtin_altivec_vaddcuw, "V4UiV4UiV4Ui", "")
BUILTIN(__builtin_altivec_vaddsbs, "V16ScV16ScV16Sc", "")
@@ -49,6 +41,67 @@ BUILTIN(__builtin_altivec_vavguh, "V8UsV8UsV8Us", "")
BUILTIN(__builtin_altivec_vavgsw, "V4SiV4SiV4Si", "")
BUILTIN(__builtin_altivec_vavguw, "V4UiV4UiV4Ui", "")
+BUILTIN(__builtin_altivec_vrfip, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_vcfsx, "V4fV4ii", "")
+BUILTIN(__builtin_altivec_vcfux, "V4fV4ii", "")
+BUILTIN(__builtin_altivec_vctsxs, "V4SiV4fi", "")
+BUILTIN(__builtin_altivec_vctuxs, "V4UiV4fi", "")
+
+BUILTIN(__builtin_altivec_dss, "vUi", "")
+BUILTIN(__builtin_altivec_dssall, "v", "")
+BUILTIN(__builtin_altivec_dst, "vv*iUi", "")
+BUILTIN(__builtin_altivec_dstt, "vv*iUi", "")
+BUILTIN(__builtin_altivec_dstst, "vv*iUi", "")
+BUILTIN(__builtin_altivec_dststt, "vv*iUi", "")
+
+BUILTIN(__builtin_altivec_vexptefp, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_vrfim, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_lvx, "V4iiv*", "")
+BUILTIN(__builtin_altivec_lvxl, "V4iiv*", "")
+BUILTIN(__builtin_altivec_lvebx, "V16civ*", "")
+BUILTIN(__builtin_altivec_lvehx, "V8siv*", "")
+BUILTIN(__builtin_altivec_lvewx, "V4iiv*", "")
+
+BUILTIN(__builtin_altivec_vlogefp, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_lvsl, "V16cUcv*", "")
+BUILTIN(__builtin_altivec_lvsr, "V16cUcv*", "")
+
+BUILTIN(__builtin_altivec_vmaddfp, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_altivec_vmhaddshs, "V8sV8sV8sV8s", "")
+BUILTIN(__builtin_altivec_vmhraddshs, "V8sV8sV8sV8s", "")
+
+BUILTIN(__builtin_altivec_vmsumubm, "V4UiV16UcV16UcV4Ui", "")
+BUILTIN(__builtin_altivec_vmsummbm, "V4SiV16ScV16UcV4Si", "")
+BUILTIN(__builtin_altivec_vmsumuhm, "V4UiV8UsV8UsV4Ui", "")
+BUILTIN(__builtin_altivec_vmsumshm, "V4SiV8SsV8SsV4Si", "")
+BUILTIN(__builtin_altivec_vmsumuhs, "V4UiV8UsV8UsV4Ui", "")
+BUILTIN(__builtin_altivec_vmsumshs, "V4SiV8SsV8SsV4Si", "")
+
+BUILTIN(__builtin_altivec_vmuleub, "V8UsV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_vmulesb, "V8SsV16ScV16Sc", "")
+BUILTIN(__builtin_altivec_vmuleuh, "V4UiV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vmulesh, "V4SiV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vmuloub, "V8UsV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_vmulosb, "V8SsV16ScV16Sc", "")
+BUILTIN(__builtin_altivec_vmulouh, "V4UiV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vmulosh, "V4SiV8SsV8Ss", "")
+
+BUILTIN(__builtin_altivec_vnmsubfp, "V4fV4fV4fV4f", "")
+
+BUILTIN(__builtin_altivec_vpkpx, "V8sV4UiV4Ui", "")
+BUILTIN(__builtin_altivec_vpkuhus, "V16UcV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vpkshss, "V16ScV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vpkuwus, "V8UsV4UiV4Ui", "")
+BUILTIN(__builtin_altivec_vpkswss, "V8SsV4SiV4Si", "")
+BUILTIN(__builtin_altivec_vpkshus, "V16UcV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vpkswus, "V8UsV4SiV4Si", "")
+
+BUILTIN(__builtin_altivec_vperm_4si, "V4iV4iV4iV16Uc", "")
+
BUILTIN(__builtin_altivec_stvx, "vV4iiv*", "")
BUILTIN(__builtin_altivec_stvxl, "vV4iiv*", "")
BUILTIN(__builtin_altivec_stvebx, "vV16civ*", "")
@@ -92,6 +145,48 @@ BUILTIN(__builtin_altivec_vminfp, "V4fV4fV4f", "")
BUILTIN(__builtin_altivec_mtvscr, "vV4i", "")
+BUILTIN(__builtin_altivec_vrefp, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_vrlb, "V16cV16cV16Uc", "")
+BUILTIN(__builtin_altivec_vrlh, "V8sV8sV8Us", "")
+BUILTIN(__builtin_altivec_vrlw, "V4iV4iV4Ui", "")
+
+BUILTIN(__builtin_altivec_vsel_4si, "V4iV4iV4iV4Ui", "")
+
+BUILTIN(__builtin_altivec_vsl, "V4iV4iV4i", "")
+BUILTIN(__builtin_altivec_vslo, "V4iV4iV4i", "")
+
+BUILTIN(__builtin_altivec_vsrab, "V16cV16cV16Uc", "")
+BUILTIN(__builtin_altivec_vsrah, "V8sV8sV8Us", "")
+BUILTIN(__builtin_altivec_vsraw, "V4iV4iV4Ui", "")
+
+BUILTIN(__builtin_altivec_vsr, "V4iV4iV4i", "")
+BUILTIN(__builtin_altivec_vsro, "V4iV4iV4i", "")
+
+BUILTIN(__builtin_altivec_vrfin, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_vrsqrtefp, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_vsubcuw, "V4UiV4UiV4Ui", "")
+
+BUILTIN(__builtin_altivec_vsum4sbs, "V4SiV16ScV4Si", "")
+BUILTIN(__builtin_altivec_vsum4ubs, "V4UiV16UcV4Ui", "")
+BUILTIN(__builtin_altivec_vsum4shs, "V4SiV8SsV4Si", "")
+
+BUILTIN(__builtin_altivec_vsum2sws, "V4SiV4SiV4Si", "")
+
+BUILTIN(__builtin_altivec_vsumsws, "V4SiV4SiV4Si", "")
+
+BUILTIN(__builtin_altivec_vrfiz, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_vupkhsb, "V8sV16c", "")
+BUILTIN(__builtin_altivec_vupkhpx, "V4UiV8s", "")
+BUILTIN(__builtin_altivec_vupkhsh, "V4iV8s", "")
+
+BUILTIN(__builtin_altivec_vupklsb, "V8sV16c", "")
+BUILTIN(__builtin_altivec_vupklpx, "V4UiV8s", "")
+BUILTIN(__builtin_altivec_vupklsh, "V4iV8s", "")
+
BUILTIN(__builtin_altivec_vcmpbfp_p, "iiV4fV4f", "")
BUILTIN(__builtin_altivec_vcmpgefp_p, "iiV4fV4f", "")
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/CMakeLists.txt b/contrib/llvm/tools/clang/include/clang/Basic/CMakeLists.txt
index c2a4e13..c595236 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/include/clang/Basic/CMakeLists.txt
@@ -18,3 +18,15 @@ tablegen(DiagnosticGroups.inc
-gen-clang-diag-groups)
add_custom_target(ClangDiagnosticGroups
DEPENDS DiagnosticGroups.inc)
+
+set(LLVM_TARGET_DEFINITIONS Attr.td)
+tablegen(AttrList.inc
+ -gen-clang-attr-list
+ -I ${CMAKE_CURRENT_SOURCE_DIR}/../../)
+add_custom_target(ClangAttrList
+ DEPENDS AttrList.inc)
+
+# ARM NEON
+set(LLVM_TARGET_DEFINITIONS arm_neon.td)
+tablegen(arm_neon.inc -gen-arm-neon-sema)
+add_custom_target(ClangARMNeon DEPENDS arm_neon.inc)
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td b/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td
new file mode 100644
index 0000000..203fb45
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td
@@ -0,0 +1,70 @@
+class AttrSubject;
+
+class Decl<bit abstract = 0> : AttrSubject {
+ bit Abstract = abstract;
+}
+
+class DDecl<Decl base, bit abstract = 0> : Decl<abstract> {
+ Decl Base = base;
+}
+
+class DeclContext { }
+
+def TranslationUnit : Decl, DeclContext;
+def Named : Decl<1>;
+ def Namespace : DDecl<Named>, DeclContext;
+ def UsingDirective : DDecl<Named>;
+ def NamespaceAlias : DDecl<Named>;
+ def Type : DDecl<Named, 1>;
+ def Typedef : DDecl<Type>;
+ def UnresolvedUsingTypename : DDecl<Type>;
+ def Tag : DDecl<Type, 1>, DeclContext;
+ def Enum : DDecl<Tag>;
+ def Record : DDecl<Tag>;
+ def CXXRecord : DDecl<Record>;
+ def ClassTemplateSpecialization : DDecl<CXXRecord>;
+ def ClassTemplatePartialSpecialization
+ : DDecl<ClassTemplateSpecialization>;
+ def TemplateTypeParm : DDecl<Type>;
+ def Value : DDecl<Named, 1>;
+ def EnumConstant : DDecl<Value>;
+ def UnresolvedUsingValue : DDecl<Value>;
+ def Declarator : DDecl<Value, 1>;
+ def Function : DDecl<Declarator>, DeclContext;
+ def CXXMethod : DDecl<Function>;
+ def CXXConstructor : DDecl<CXXMethod>;
+ def CXXDestructor : DDecl<CXXMethod>;
+ def CXXConversion : DDecl<CXXMethod>;
+ def Field : DDecl<Declarator>;
+ def ObjCIvar : DDecl<Field>;
+ def ObjCAtDefsField : DDecl<Field>;
+ def Var : DDecl<Declarator>;
+ def ImplicitParam : DDecl<Var>;
+ def ParmVar : DDecl<Var>;
+ def NonTypeTemplateParm : DDecl<Var>;
+ def Template : DDecl<Named, 1>;
+ def FunctionTemplate : DDecl<Template>;
+ def ClassTemplate : DDecl<Template>;
+ def TemplateTemplateParm : DDecl<Template>;
+ def Using : DDecl<Named>;
+ def UsingShadow : DDecl<Named>;
+ def ObjCMethod : DDecl<Named>, DeclContext;
+ def ObjCContainer : DDecl<Named, 1>, DeclContext;
+ def ObjCCategory : DDecl<ObjCContainer>;
+ def ObjCProtocol : DDecl<ObjCContainer>;
+ def ObjCInterface : DDecl<ObjCContainer>;
+ def ObjCImpl : DDecl<ObjCContainer, 1>;
+ def ObjCCategoryImpl : DDecl<ObjCImpl>;
+ def ObjCImplementation : DDecl<ObjCImpl>;
+ def ObjCProperty : DDecl<Named>;
+ def ObjCCompatibleAlias : DDecl<Named>;
+def LinkageSpec : Decl, DeclContext;
+def ObjCPropertyImpl : Decl;
+def ObjCForwardProtocol : Decl;
+def ObjCClass : Decl;
+def FileScopeAsm : Decl;
+def AccessSpec : Decl;
+def Friend : Decl;
+def FriendTemplate : Decl;
+def StaticAssert : Decl;
+def Block : Decl, DeclContext;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
index 62f06ed..1fe0d81 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
@@ -24,7 +24,6 @@
namespace llvm {
template <typename T> class SmallVectorImpl;
- class raw_ostream;
}
namespace clang {
@@ -36,8 +35,6 @@ namespace clang {
class LangOptions;
class PartialDiagnostic;
class Preprocessor;
- class SourceManager;
- class SourceRange;
// Import the diagnostic enums themselves.
namespace diag {
@@ -98,8 +95,8 @@ namespace clang {
/// compilation.
class FixItHint {
public:
- /// \brief Tokens that should be removed to correct the error.
- SourceRange RemoveRange;
+ /// \brief Code that should be removed to correct the error.
+ CharSourceRange RemoveRange;
/// \brief The location at which we should insert code to correct
/// the error.
@@ -129,15 +126,18 @@ public:
/// \brief Create a code modification hint that removes the given
/// source range.
- static FixItHint CreateRemoval(SourceRange RemoveRange) {
+ static FixItHint CreateRemoval(CharSourceRange RemoveRange) {
FixItHint Hint;
Hint.RemoveRange = RemoveRange;
return Hint;
}
-
+ static FixItHint CreateRemoval(SourceRange RemoveRange) {
+ return CreateRemoval(CharSourceRange::getTokenRange(RemoveRange));
+ }
+
/// \brief Create a code modification hint that replaces the given
/// source range with the given code string.
- static FixItHint CreateReplacement(SourceRange RemoveRange,
+ static FixItHint CreateReplacement(CharSourceRange RemoveRange,
llvm::StringRef Code) {
FixItHint Hint;
Hint.RemoveRange = RemoveRange;
@@ -145,6 +145,11 @@ public:
Hint.CodeToInsert = Code;
return Hint;
}
+
+ static FixItHint CreateReplacement(SourceRange RemoveRange,
+ llvm::StringRef Code) {
+ return CreateReplacement(CharSourceRange::getTokenRange(RemoveRange), Code);
+ }
};
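
Existing callers keep the token-oriented SourceRange overloads, while character-precise fixits become directly expressible. A sketch, where Diag is the usual DiagnosticBuilder helper and diag::err_something, B, and E are hypothetical:

    // Token-oriented removal; the overload wraps it via getTokenRange():
    Diag(Loc, diag::err_something)
        << FixItHint::CreateRemoval(SourceRange(B, E));
    // Character-precise removal, e.g. for part of a token:
    Diag(Loc, diag::err_something)
        << FixItHint::CreateRemoval(CharSourceRange::getCharRange(B, E));
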
/// Diagnostic - This concrete class is used by the front-end to report
@@ -176,7 +181,14 @@ public:
ak_nestednamespec, // NestedNameSpecifier *
ak_declcontext // DeclContext *
};
-
+
+ /// Specifies which overload candidates to display when overload resolution
+ /// fails.
+ enum OverloadsShown {
+ Ovl_All, ///< Show all overloads.
+ Ovl_Best ///< Show just the "best" overload candidates.
+ };
+
/// ArgumentValue - This typedef represents an argument value, which is a
/// union discriminated by ArgumentKind, with a value.
typedef std::pair<ArgumentKind, intptr_t> ArgumentValue;
@@ -188,6 +200,7 @@ private:
bool ErrorsAsFatal; // Treat errors like fatal errors.
bool SuppressSystemWarnings; // Suppress warnings in system headers.
bool SuppressAllDiagnostics; // Suppress all diagnostics.
+ OverloadsShown ShowOverloads; // Which overload candidates to show.
unsigned ErrorLimit; // Cap of # errors emitted, 0 -> no limit.
unsigned TemplateBacktraceLimit; // Cap on depth of template backtrace stack,
// 0 -> no limit.
@@ -318,6 +331,13 @@ public:
}
bool getSuppressAllDiagnostics() const { return SuppressAllDiagnostics; }
+ /// \brief Specify which overload candidates to show when overload resolution
+ /// fails. By default, we show all candidates.
+ void setShowOverloads(OverloadsShown Val) {
+ ShowOverloads = Val;
+ }
+ OverloadsShown getShowOverloads() const { return ShowOverloads; }
+
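
A client wiring up the -fshow-overloads= option mentioned in note_ovl_too_many_candidates below would toggle this roughly as follows (sketch; Diags is a Diagnostic instance):

    Diags.setShowOverloads(Diagnostic::Ovl_Best);   // trim the candidate notes
    if (Diags.getShowOverloads() == Diagnostic::Ovl_All) {
      // emit a note for every candidate
    }
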
/// \brief Pretend that the last diagnostic issued was ignored. This can
/// be used by clients who suppress diagnostics themselves.
void setLastDiagnosticIgnored() {
@@ -582,7 +602,7 @@ private:
/// DiagRanges - The list of ranges added to this diagnostic. It currently
/// only support 10 ranges, could easily be extended if needed.
- SourceRange DiagRanges[10];
+ CharSourceRange DiagRanges[10];
enum { MaxFixItHints = 3 };
@@ -681,7 +701,7 @@ public:
}
}
- void AddSourceRange(const SourceRange &R) const {
+ void AddSourceRange(const CharSourceRange &R) const {
assert(NumRanges <
sizeof(DiagObj->DiagRanges)/sizeof(DiagObj->DiagRanges[0]) &&
"Too many arguments to diagnostic!");
@@ -752,11 +772,17 @@ operator<<(const DiagnosticBuilder &DB, T *DC) {
inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
const SourceRange &R) {
- DB.AddSourceRange(R);
+ DB.AddSourceRange(CharSourceRange::getTokenRange(R));
return DB;
}
inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const CharSourceRange &R) {
+ DB.AddSourceRange(R);
+ return DB;
+}
+
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
const FixItHint &Hint) {
DB.AddFixItHint(Hint);
return DB;
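
Both range flavors now stream into a diagnostic, with a bare SourceRange implicitly widened to a token range (sketch; diag::err_x, B, and E are hypothetical):

    Diag(Loc, diag::err_x) << SourceRange(B, E);                    // token range
    Diag(Loc, diag::err_x) << CharSourceRange::getCharRange(B, E);  // char range
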
@@ -849,7 +875,7 @@ public:
return DiagObj->NumDiagRanges;
}
- SourceRange getRange(unsigned Idx) const {
+ const CharSourceRange &getRange(unsigned Idx) const {
assert(Idx < DiagObj->NumDiagRanges && "Invalid diagnostic range index!");
return DiagObj->DiagRanges[Idx];
}
@@ -886,7 +912,7 @@ class StoredDiagnostic {
Diagnostic::Level Level;
FullSourceLoc Loc;
std::string Message;
- std::vector<SourceRange> Ranges;
+ std::vector<CharSourceRange> Ranges;
std::vector<FixItHint> FixIts;
public:
@@ -902,7 +928,7 @@ public:
const FullSourceLoc &getLocation() const { return Loc; }
llvm::StringRef getMessage() const { return Message; }
- typedef std::vector<SourceRange>::const_iterator range_iterator;
+ typedef std::vector<CharSourceRange>::const_iterator range_iterator;
range_iterator range_begin() const { return Ranges.begin(); }
range_iterator range_end() const { return Ranges.end(); }
unsigned range_size() const { return Ranges.size(); }
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
index 88e7dc1..4b0bf57 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
@@ -66,6 +66,7 @@ def err_target_unknown_triple : Error<
"unknown target triple '%0', please use -triple or -arch">;
def err_target_unknown_cpu : Error<"unknown target CPU '%0'">;
def err_target_unknown_abi : Error<"unknown target ABI '%0'">;
+def err_target_unknown_cxxabi : Error<"unknown C++ ABI '%0'">;
def err_target_invalid_feature : Error<"invalid target feature '%0'">;
// Source manager
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
index c7cad73..989ec38 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -18,7 +18,10 @@ def err_fe_invalid_ast_action : Error<"invalid action for AST input">,
DefaultFatal;
// Error generated by the backend.
def err_fe_inline_asm : Error<"%0">, CatInlineAsm;
-def note_fe_inline_asm_here : Note<"generated from here">;
+def note_fe_inline_asm_here : Note<"instantiated into assembly here">;
+
+
+
def err_fe_invalid_code_complete_file : Error<
"cannot locate code-completion file %0">, DefaultFatal;
def err_fe_stdout_binary : Error<"unable to change standard output to binary">,
@@ -186,9 +189,6 @@ def warn_pch_math_errno : Error<
"math functions %select{do not respect|respect}0 'errno' in PCH "
"file but they are currently set to %select{not respect|respect}1 "
"'errno'">;
-def warn_pch_overflow_checking : Error<
- "signed integer overflow checking was %select{disabled|enabled}0 in PCH "
- "file but is currently %select{disabled|enabled}1">;
def warn_pch_optimize : Error<
"the macro '__OPTIMIZE__' was %select{not defined|defined}0 in "
"the PCH file but is currently %select{undefined|defined}1">;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
index b79bf8e..4907751 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
@@ -17,11 +17,14 @@ def Implicit : DiagGroup<"implicit", [
]>;
// Empty DiagGroups are recognized by clang but ignored.
+def : DiagGroup<"abi">;
def : DiagGroup<"address">;
def AddressOfTemporary : DiagGroup<"address-of-temporary">;
def : DiagGroup<"aggregate-return">;
+def AmbigMemberTemplate : DiagGroup<"ambiguous-member-template">;
def : DiagGroup<"attributes">;
def : DiagGroup<"bad-function-cast">;
+def BoolConversions : DiagGroup<"bool-conversions">;
def : DiagGroup<"c++-compat">;
def : DiagGroup<"cast-align">;
def : DiagGroup<"cast-qual">;
@@ -45,12 +48,14 @@ def CXXHexFloats : DiagGroup<"c++-hex-floats">;
def : DiagGroup<"c++0x-compat", [CXXHexFloats]>;
def FourByteMultiChar : DiagGroup<"four-char-constants">;
def : DiagGroup<"idiomatic-parentheses">;
+def IgnoredQualifiers : DiagGroup<"ignored-qualifiers">;
def : DiagGroup<"import">;
def : DiagGroup<"init-self">;
def : DiagGroup<"inline">;
def : DiagGroup<"int-to-pointer-cast">;
def : DiagGroup<"invalid-pch">;
def LiteralRange : DiagGroup<"literal-range">;
+def : DiagGroup<"main">;
def MissingBraces : DiagGroup<"missing-braces">;
def : DiagGroup<"missing-declarations">;
def : DiagGroup<"missing-format-attribute">;
@@ -62,6 +67,7 @@ def : DiagGroup<"newline-eof">;
def LongLong : DiagGroup<"long-long">;
def MismatchedTags : DiagGroup<"mismatched-tags">;
def MissingFieldInitializers : DiagGroup<"missing-field-initializers">;
+def InitializerOverrides : DiagGroup<"initializer-overrides">;
def NonNull : DiagGroup<"nonnull">;
def : DiagGroup<"nonportable-cfstrings">;
def : DiagGroup<"non-virtual-dtor">;
@@ -70,13 +76,17 @@ def : DiagGroup<"overflow">;
def : DiagGroup<"overloaded-virtual">;
def : DiagGroup<"packed">;
def PointerArith : DiagGroup<"pointer-arith">;
+def PoundWarning : DiagGroup<"#warnings">,
+ DiagCategory<"#warning Directive">;
def : DiagGroup<"pointer-to-int-cast">;
def : DiagGroup<"redundant-decls">;
def ReturnType : DiagGroup<"return-type">;
+def BindToTemporaryCopy : DiagGroup<"bind-to-temporary-copy">;
def SemiBeforeMethodBody : DiagGroup<"semicolon-before-method-body">;
def : DiagGroup<"sequence-point">;
def Shadow : DiagGroup<"shadow">;
def : DiagGroup<"shorten-64-to-32">;
+def : DiagGroup<"sign-promo">;
def SignCompare : DiagGroup<"sign-compare">;
def : DiagGroup<"synth">;
@@ -108,6 +118,7 @@ def Trigraphs : DiagGroup<"trigraphs">;
def : DiagGroup<"type-limits">;
def Uninitialized : DiagGroup<"uninitialized">;
def UnknownPragmas : DiagGroup<"unknown-pragmas">;
+def UnknownAttributes : DiagGroup<"unknown-attributes">;
def UnusedArgument : DiagGroup<"unused-argument">;
def UnusedExceptionParameter : DiagGroup<"unused-exception-parameter">;
def UnusedFunction : DiagGroup<"unused-function">;
@@ -137,7 +148,7 @@ def Parentheses : DiagGroup<"parentheses", [DiagGroup<"idiomatic-parentheses">]>
// -Wconversion has its own warnings, but we split this one out for
// legacy reasons.
def Conversion : DiagGroup<"conversion",
- [DiagGroup<"shorten-64-to-32">]>,
+ [DiagGroup<"shorten-64-to-32">, BoolConversions]>,
DiagCategory<"Value Conversion Issue">;
def Unused : DiagGroup<"unused",
@@ -157,6 +168,8 @@ def Format2 : DiagGroup<"format=2",
def Extra : DiagGroup<"extra", [
MissingFieldInitializers,
+ IgnoredQualifiers,
+ InitializerOverrides,
SemiBeforeMethodBody,
SignCompare,
UnusedParameter
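
The new BoolConversions group now rides along with -Wconversion; the kind of initialization it flags (via warn_init_pointer_from_false, defined further down) is, illustratively:

    char *p = false;   // -Wbool-conversions: pointer initialized from 'false'
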
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
index 848e85c..21c93e7 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -106,7 +106,7 @@ def err_invalid_pth_file : Error<
//===----------------------------------------------------------------------===//
// Preprocessor Diagnostics
//===----------------------------------------------------------------------===//
-def pp_hash_warning : Warning<"#warning%0">, InGroup<DiagGroup<"#warnings">>;
+def pp_hash_warning : Warning<"#warning%0">, InGroup<PoundWarning>;
def pp_include_next_in_primary : Warning<
"#include_next in primary source file">;
def pp_include_macros_out_of_predefines : Error<
@@ -225,6 +225,9 @@ def err__Pragma_malformed : Error<
"_Pragma takes a parenthesized string literal">;
def err_pragma_comment_malformed : Error<
"pragma comment requires parenthesized identifier and optional string">;
+def err_pragma_message_malformed : Error<
+ "pragma message requires parenthesized string">;
+def warn_pragma_message : Warning<"%0">;
def warn_pragma_ignored : Warning<"unknown pragma ignored">,
InGroup<UnknownPragmas>, DefaultIgnore;
def ext_stdc_pragma_ignored : ExtWarn<"unknown pragma in STDC namespace">,
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
index 934bd0d..ca761f9 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -24,7 +24,9 @@ def ext_empty_source_file : Extension<"ISO C forbids an empty source file">;
def ext_top_level_semi : Extension<
"extra ';' outside of a function">;
def ext_extra_struct_semi : Extension<
- "extra ';' inside a struct or union">;
+ "extra ';' inside a %0">;
+def ext_extra_ivar_semi : Extension<
+ "extra ';' inside instance variable list">;
def ext_duplicate_declspec : Extension<"duplicate '%0' declaration specifier">;
def ext_plain_complex : ExtWarn<
@@ -35,6 +37,7 @@ def ext_thread_before : Extension<"'__thread' before 'static'">;
def ext_empty_struct_union_enum : Extension<"use of empty %0 extension">;
+def error_empty_enum : Error<"use of empty enum">;
def err_invalid_sign_spec : Error<"'%0' cannot be signed or unsigned">;
def err_invalid_short_spec : Error<"'short %0' is invalid">;
def err_invalid_long_spec : Error<"'long %0' is invalid">;
@@ -103,7 +106,7 @@ def err_expected_fn_body : Error<
"expected function body after function declarator">;
def err_expected_method_body : Error<"expected method body">;
def err_invalid_token_after_toplevel_declarator : Error<
- "invalid token after top level declarator">;
+ "expected ';' after top level declarator">;
def err_expected_statement : Error<"expected statement">;
def err_expected_lparen_after : Error<"expected '(' after '%0'">;
def err_expected_lparen_after_id : Error<"expected '(' after %0">;
@@ -168,11 +171,15 @@ def err_typename_invalid_functionspec : Error<
def err_invalid_decl_spec_combination : Error<
"cannot combine with previous '%0' declaration specifier">;
def err_invalid_vector_decl_spec_combination : Error<
- "cannot combine with previous '%0' declaration specifier. '__vector' must be first">;
+ "cannot combine with previous '%0' declaration specifier. "
+ "'__vector' must be first">;
def err_invalid_pixel_decl_spec_combination : Error<
- "'__pixel' must be preceded by '__vector'. '%0' declaration specifier not allowed here">;
-def err_invalid_vector_double_decl_spec_combination : Error<
- "cannot use 'double' with '__vector'">;
+ "'__pixel' must be preceded by '__vector'. "
+ "'%0' declaration specifier not allowed here">;
+def err_invalid_vector_decl_spec : Error<
+ "cannot use '%0' with '__vector'">;
+def err_invalid_vector_bool_decl_spec : Error<
+ "cannot use '%0' with '__vector bool'">;
def warn_vector_long_decl_spec_combination : Warning<
"Use of 'long' with '__vector' is deprecated">, InGroup<Deprecated>;
def err_friend_invalid_in_context : Error<
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 0ba31ae..01a37fb 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -21,12 +21,6 @@ def ext_expr_not_ice : Extension<
"expression is not integer constant expression "
"(but is allowed as an extension)">;
-def ext_null_pointer_expr_not_ice : Extension<
- "null pointer expression is not an integer constant expression "
- "(but is allowed as an extension)">;
-
-
-
// Semantic analysis of constant literals.
def ext_predef_outside_function : Warning<
"predefined identifier is only valid inside function">;
@@ -80,9 +74,10 @@ def err_designator_for_scalar_init : Error<
"designator in initializer for scalar type %0">;
def warn_subobject_initializer_overrides : Warning<
"subobject initialization overrides initialization of other fields "
- "within its enclosing subobject">;
+ "within its enclosing subobject">, InGroup<InitializerOverrides>;
def warn_initializer_overrides : Warning<
- "initializer overrides prior initialization of this subobject">;
+ "initializer overrides prior initialization of this subobject">,
+ InGroup<InitializerOverrides>;
def note_previous_initializer : Note<
"previous initialization %select{|with side effects }0is here"
"%select{| (side effects may not occur at run time)}0">;
@@ -125,6 +120,9 @@ def warn_use_out_of_scope_declaration : Warning<
"use of out-of-scope declaration of %0">;
def err_inline_non_function : Error<
"'inline' can only appear on functions">;
+def warn_qual_return_type : Warning<
+ "'%0' type qualifier%s1 on return type %plural{1:has|:have}1 no effect">,
+ InGroup<IgnoredQualifiers>, DefaultIgnore;
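
Illustratively, this off-by-default warning (folded into -Wextra via IgnoredQualifiers above) fires on declarations such as:

    const int f();   // the 'const' qualifier on the return type has no effect
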
def warn_decl_shadow :
Warning<"declaration shadows a %select{"
@@ -230,6 +228,7 @@ def err_main_arg_wrong : Error<"%select{first|second|third|fourth}0 "
/// parser diagnostics
def ext_typedef_without_a_name : ExtWarn<"typedef requires a name">;
+def err_typedef_not_identifier : Error<"typedef name must be an identifier">;
def err_statically_allocated_object : Error<
"interface type cannot be statically allocated">;
def err_object_cannot_be_passed_returned_by_value : Error<
@@ -458,6 +457,9 @@ def warn_weak_vtable : Warning<
"emitted in every translation unit">,
InGroup<DiagGroup<"weak-vtables">>, DefaultIgnore;
+def ext_using_undefined_std : ExtWarn<
+ "using directive refers to implicitly-defined namespace 'std'">;
+
// C++ exception specifications
def err_exception_spec_in_typedef : Error<
"exception specifications are not allowed in typedefs">;
@@ -486,6 +488,10 @@ def err_access : Error<
"%1 is a %select{private|protected}0 member of %3">, NoSFINAE;
def err_access_ctor : Error<
"calling a %select{private|protected}0 constructor of class %2">, NoSFINAE;
+def ext_rvalue_to_reference_access_ctor : ExtWarn<
+ "C++98 requires an accessible copy constructor for class %2 when binding "
+ "a reference to a temporary; was %select{private|protected}0">,
+ NoSFINAE, InGroup<BindToTemporaryCopy>;
def err_access_base : Error<
"%select{base class|inherited virtual base class}0 %1 has %select{private|"
"protected}3 %select{constructor|copy constructor|copy assignment operator|"
@@ -507,6 +513,9 @@ def err_access_dtor_vbase :
def err_access_dtor_temp :
Error<"temporary of type %0 has %select{private|protected}1 destructor">,
NoSFINAE;
+def err_access_dtor_exception :
+ Error<"exception object of type %0 has %select{private|protected}1 "
+ "destructor">, NoSFINAE;
def err_access_dtor_field :
Error<"field of type %1 has %select{private|protected}2 destructor">,
NoSFINAE;
@@ -549,6 +558,9 @@ def err_dependent_nested_name_spec : Error<
"parameter">;
def err_nested_name_member_ref_lookup_ambiguous : Error<
"lookup of %0 in member access expression is ambiguous">;
+def ext_nested_name_member_ref_lookup_ambiguous : ExtWarn<
+ "lookup of %0 in member access expression is ambiguous; using member of %1">,
+ InGroup<AmbigMemberTemplate>;
def note_ambig_member_ref_object_type : Note<
"lookup in the object type %0 refers here">;
def note_ambig_member_ref_scope : Note<
@@ -744,6 +756,13 @@ def err_temp_copy_no_viable : Error<
"returning object|throwing object|copying member subobject|copying array "
"element|allocating object|copying temporary|initializing base subobject|"
"initializing vector element}0 of type %1">;
+def ext_rvalue_to_reference_temp_copy_no_viable : ExtWarn<
+ "no viable constructor %select{copying variable|copying parameter|"
+ "returning object|throwing object|copying member subobject|copying array "
+ "element|allocating object|copying temporary|initializing base subobject|"
+ "initializing vector element}0 of type %1; C++98 requires a copy "
+ "constructor when binding a reference to a temporary">,
+ InGroup<BindToTemporaryCopy>;
def err_temp_copy_ambiguous : Error<
"ambiguous constructor call when %select{copying variable|copying "
"parameter|returning object|throwing object|copying member subobject|copying "
@@ -797,9 +816,15 @@ def err_attribute_wrong_number_arguments : Error<
"attribute requires %0 argument(s)">;
def err_attribute_missing_parameter_name : Error<
"attribute requires unquoted parameter">;
-def err_attribute_invalid_vector_type : Error<"invalid vector type %0">;
+def err_attribute_invalid_vector_type : Error<"invalid vector element type %0">;
def err_attribute_argument_not_int : Error<
"'%0' attribute requires integer constant">;
+def err_attribute_argument_outof_range : Error<
+ "init_priority attribute requires integer constant between "
+ "101 and 65535 inclusive">;
+def err_init_priority_object_attr : Error<
+ "can only use ‘init_priority’ attribute on file-scope definitions "
+ "of objects of class type">;
def err_attribute_argument_n_not_int : Error<
"'%0' attribute requires parameter %1 to be an integer constant">;
def err_attribute_argument_n_not_string : Error<
@@ -838,7 +863,8 @@ def err_attribute_address_space_too_high : Error<
def err_attribute_address_multiple_qualifiers : Error<
"multiple address spaces specified for type">;
def err_implicit_pointer_address_space_cast : Error<
- "illegal implicit cast between two pointers with different address spaces">;
+ "illegal implicit conversion between two pointers with different address "
+ "spaces">;
def err_as_qualified_auto_decl : Error<
"automatic variable qualified with an address space">;
def err_arg_with_address_space : Error<
@@ -854,6 +880,8 @@ def err_attribute_aligned_not_power_of_two : Error<
def warn_redeclaration_without_attribute_prev_attribute_ignored : Warning<
"'%0' redeclared without %1 attribute: previous %1 ignored">;
def warn_attribute_ignored : Warning<"%0 attribute ignored">;
+def warn_unknown_attribute_ignored : Warning<
+ "unknown attribute %0 ignored">, InGroup<UnknownAttributes>;
def warn_attribute_precede_definition : Warning<
"attribute declaration must precede definition">;
def warn_attribute_void_function_method : Warning<
@@ -899,30 +927,34 @@ def err_cconv_knr : Error<
"function with no prototype cannot use %0 calling convention">;
def err_cconv_varargs : Error<
"variadic function cannot use %0 calling convention">;
+def err_regparm_mismatch : Error<"function declared with regparm(%0) "
+ "attribute was previously declared %plural{0:without the regparm|1:"
+ "with the regparm(1)|2:with the regparm(2)|3:with the regparm(3)|:with the "
+ "regparm}1 attribute">;
def warn_impcast_vector_scalar : Warning<
- "implicit cast turns vector to scalar: %0 to %1">,
+ "implicit conversion turns vector to scalar: %0 to %1">,
InGroup<DiagGroup<"conversion">>, DefaultIgnore;
def warn_impcast_complex_scalar : Warning<
- "implicit cast discards imaginary component: %0 to %1">,
+ "implicit conversion discards imaginary component: %0 to %1">,
InGroup<DiagGroup<"conversion">>, DefaultIgnore;
def warn_impcast_float_precision : Warning<
- "implicit cast loses floating-point precision: %0 to %1">,
+ "implicit conversion loses floating-point precision: %0 to %1">,
InGroup<DiagGroup<"conversion">>, DefaultIgnore;
def warn_impcast_float_integer : Warning<
- "implicit cast turns floating-point number into integer: %0 to %1">,
+ "implicit conversion turns floating-point number into integer: %0 to %1">,
InGroup<DiagGroup<"conversion">>, DefaultIgnore;
def warn_impcast_integer_sign : Warning<
- "implicit cast changes signedness: %0 to %1">,
+ "implicit conversion changes signedness: %0 to %1">,
InGroup<DiagGroup<"conversion">>, DefaultIgnore;
def warn_impcast_integer_sign_conditional : Warning<
"operand of ? changes signedness: %0 to %1">,
InGroup<DiagGroup<"conversion">>, DefaultIgnore;
def warn_impcast_integer_precision : Warning<
- "implicit cast loses integer precision: %0 to %1">,
+ "implicit conversion loses integer precision: %0 to %1">,
InGroup<DiagGroup<"conversion">>, DefaultIgnore;
def warn_impcast_integer_64_32 : Warning<
- "implicit cast loses integer precision: %0 to %1">,
+ "implicit conversion loses integer precision: %0 to %1">,
InGroup<DiagGroup<"shorten-64-to-32">>, DefaultIgnore;
def warn_attribute_ignored_for_field_of_type : Warning<
@@ -937,8 +969,8 @@ def warn_transparent_union_attribute_not_definition : Warning<
"transparent_union attribute can only be applied to a union definition; "
"attribute ignored">;
def warn_transparent_union_attribute_floating : Warning<
- "first field of a transparent union cannot have floating point or vector "
- "type; transparent_union attribute ignored">;
+ "first field of a transparent union cannot have %select{floating point|"
+ "vector}0 type %1; transparent_union attribute ignored">;
def warn_transparent_union_attribute_zero_fields : Warning<
"transparent union definition must contain at least one field; "
"transparent_union attribute ignored">;
@@ -1077,6 +1109,9 @@ def err_ovl_ambiguous_member_call : Error<
"call to member function %0 is ambiguous">;
def err_ovl_deleted_member_call : Error<
"call to %select{unavailable|deleted}0 member function %1">;
+def note_ovl_too_many_candidates : Note<
+ "remaining %0 candidate%s0 omitted; "
+ "pass -fshow-overloads=all to show them">;
def note_ovl_candidate : Note<"candidate "
"%select{function|function|constructor|"
"function |function |constructor |"
@@ -1084,6 +1119,10 @@ def note_ovl_candidate : Note<"candidate "
"is the implicit copy constructor|"
"is the implicit copy assignment operator}0%1">;
+def warn_init_pointer_from_false : Warning<
+ "initialization of pointer of type %0 from literal 'false'">,
+ InGroup<BoolConversions>;
+
def note_ovl_candidate_bad_deduction : Note<
"candidate template ignored: failed template argument deduction">;
def note_ovl_candidate_incomplete_deduction : Note<"candidate template ignored: "
@@ -1168,6 +1207,17 @@ def note_ovl_candidate_bad_cvr : Note<"candidate "
"%select{const|volatile|const and volatile|restrict|const and restrict|"
"volatile and restrict|const, volatile, and restrict}3 qualifier"
"%select{||s||s|s|s}3">;
+def note_ovl_candidate_bad_base_to_derived_conv : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "function (the implicit copy assignment operator)}0%1"
+ " not viable: cannot %select{convert from|convert from|bind}2 "
+ "%select{base class pointer|superclass|base class object of type}2 %3 to "
+ "%select{derived class pointer|subclass|derived class reference}2 %4 for "
+ "%ordinal5 argument">;
+
def note_ambiguous_type_conversion: Note<
"because of ambiguity in conversion of %0 to %1">;
def note_ovl_builtin_binary_candidate : Note<
@@ -1234,6 +1284,7 @@ def err_template_param_different_kind : Error<
"%select{|template parameter }0redeclaration">;
def note_template_param_different_kind : Note<
"template parameter has a different kind in template argument">;
+
def err_template_nontype_parm_different_type : Error<
"template non-type parameter has a different type %0 in template "
"%select{|template parameter }1redeclaration">;
@@ -1528,6 +1579,8 @@ def err_explicit_instantiation_nontemplate_type : Error<
"explicit instantiation of non-templated type %0">;
def note_nontemplate_decl_here : Note<
"non-templated declaration is here">;
+def err_explicit_instantiation_in_class : Error<
+ "explicit instantiation of %0 in class scope">;
def err_explicit_instantiation_out_of_scope : Error<
"explicit instantiation of %0 not in a namespace enclosing %1">;
def err_explicit_instantiation_must_be_global : Error<
@@ -1560,10 +1613,9 @@ def note_explicit_instantiation_candidate : Note<
"explicit instantiation candidate function template here %0">;
def err_explicit_instantiation_inline : Error<
"explicit instantiation cannot be 'inline'">;
-def err_explicit_instantiation_without_qualified_id : Error<
- "qualifier in explicit instantiation of %q0 requires a template-id">;
-def err_explicit_instantiation_without_qualified_id_quals : Error<
- "qualifier in explicit instantiation of '%0%1' requires a template-id">;
+def ext_explicit_instantiation_without_qualified_id : ExtWarn<
+ "qualifier in explicit instantiation of %q0 requires a template-id "
+ "(a typedef is not permitted)">;
def err_explicit_instantiation_unqualified_wrong_namespace : Error<
"explicit instantiation of %q0 must occur in %1">;
def warn_explicit_instantiation_unqualified_wrong_namespace_0x : Warning<
@@ -1588,6 +1640,8 @@ def note_typename_refers_here : Note<
"referenced member %0 is declared here">;
def err_typename_missing : Error<
"missing 'typename' prior to dependent type name '%0%1'">;
+def ext_typename_outside_of_template : ExtWarn<
+ "'typename' occurs outside of a template">;
def err_template_kw_refers_to_non_template : Error<
"%0 following the 'template' keyword does not refer to a template">;
@@ -1599,6 +1653,8 @@ def note_referenced_class_template : Error<
"class template declared here">;
def err_template_kw_missing : Error<
"missing 'template' keyword prior to dependent template name '%0%1'">;
+def ext_template_outside_of_template : ExtWarn<
+ "'template' keyword outside of a template">;
// C++0x Variadic Templates
def err_template_param_pack_default_arg : Error<
@@ -1606,6 +1662,18 @@ def err_template_param_pack_default_arg : Error<
def err_template_param_pack_must_be_last_template_parameter : Error<
"template parameter pack must be the last template parameter">;
+def err_template_parameter_pack_non_pack : Error<
+ "template %select{type|non-type|template}0 parameter%select{| pack}1 "
+ "conflicts with previous template %select{type|non-type|template}0 "
+ "parameter%select{ pack|}1">;
+def note_template_parameter_pack_non_pack : Note<
+ "template %select{type|non-type|template}0 parameter%select{| pack}1 "
+ "does not match template %select{type|non-type|template}0 "
+ "parameter%select{ pack|}1 in template argument">;
+def note_template_parameter_pack_here : Note<
+ "previous template %select{type|non-type|template}0 "
+ "parameter%select{| pack}1 declared here">;
+
def err_unexpected_typedef : Error<
"unexpected type name %0: expected expression">;
def err_unexpected_namespace : Error<
@@ -1674,6 +1742,9 @@ def ext_forward_ref_enum : Extension<
"ISO C forbids forward references to 'enum' types">;
def err_forward_ref_enum : Error<
"ISO C++ forbids forward references to 'enum' types">;
+def ext_forward_ref_enum_def : Extension<
+ "redeclaration of already-defined enum %0 is a GNU extension">, InGroup<GNU>;
+
def err_redefinition_of_enumerator : Error<"redefinition of enumerator %0">;
def err_duplicate_member : Error<"duplicate member %0">;
def err_misplaced_ivar : Error<
@@ -1848,6 +1919,8 @@ def err_illegal_decl_array_of_functions : Error<
"'%0' declared as array of functions of type %1">;
def err_illegal_decl_array_incomplete_type : Error<
"array has incomplete element type %0">;
+def err_illegal_message_expr_incomplete_type : Error<
+ "objective-c message has incomplete result type %0">;
def err_illegal_decl_array_of_references : Error<
"'%0' declared as array of references of type %1">;
def err_array_star_outside_prototype : Error<
@@ -1914,6 +1987,10 @@ def note_precedence_bitwise_first : Note<
"place parentheses around the %0 expression to evaluate it first">;
def note_precedence_bitwise_silence : Note<
"place parentheses around the %0 expression to silence this warning">;
+
+def warn_logical_instead_of_bitwise : Warning<
+ "use of logical %0 with constant operand; switch to bitwise %1 or "
+ "remove constant">, InGroup<DiagGroup<"logical-bitwise-confusion">>;
def err_sizeof_nonfragile_interface : Error<
"invalid application of '%select{alignof|sizeof}1' to interface %0 in "
@@ -2028,6 +2105,11 @@ def err_typecheck_unary_expr : Error<
"invalid argument type %0 to unary expression">;
def err_typecheck_indirection_requires_pointer : Error<
"indirection requires pointer operand (%0 invalid)">;
+def warn_indirection_through_null : Warning<
+ "indirection of non-volatile null pointer will be deleted, not trap">;
+def note_indirection_through_null : Note<
+ "consider using __builtin_trap() or qualifying pointer with 'volatile'">;
+
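
This warning/note pair covers the deliberate-crash idiom, e.g.:

    *(int *)0 = 0;            // may be deleted by the optimizer instead of trapping
    *(volatile int *)0 = 0;   // the suggested form; or call __builtin_trap()
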
def err_indirection_requires_nonfragile_object : Error<
"indirection cannot be to an interface in non-fragile ABI (%0 invalid)">;
def err_direct_interface_unsupported : Error<
@@ -2046,8 +2128,12 @@ def ext_typecheck_ordered_comparison_of_function_pointers : ExtWarn<
"ordered comparison of function pointers (%0 and %1)">;
def ext_typecheck_comparison_of_fptr_to_void : Extension<
"equality comparison between function pointer and void pointer (%0 and %1)">;
+def err_typecheck_comparison_of_fptr_to_void : Error<
+ "equality comparison between function pointer and void pointer (%0 and %1)">;
def ext_typecheck_comparison_of_pointer_integer : ExtWarn<
"comparison between pointer and integer (%0 and %1)">;
+def err_typecheck_comparison_of_pointer_integer : Error<
+ "comparison between pointer and integer (%0 and %1)">;
def ext_typecheck_comparison_of_distinct_pointers : ExtWarn<
"comparison of distinct pointer types (%0 and %1)">;
def ext_typecheck_cond_incompatible_operands : ExtWarn<
@@ -2081,9 +2167,11 @@ def err_invalid_member_use_in_static_method : Error<
"invalid use of member %0 in static member function">;
def err_invalid_qualified_function_type : Error<
"type qualifier is not allowed on this function">;
+def err_invalid_qualified_function_pointer : Error<
+ "type qualifier is not allowed on this function %select{pointer|reference}0">;
def err_invalid_qualified_typedef_function_type_use : Error<
- "a qualified function type cannot be used to declare a nonmember function "
- "or a static member function">;
+ "a qualified function type cannot be used to declare a "
+ "%select{static member|nonmember}0 function">;
def err_invalid_non_static_member_use : Error<
"invalid use of nonstatic data member %0">;
@@ -2260,13 +2348,26 @@ def err_new_array_nonconst : Error<
"only the first dimension of an allocated array may have dynamic size">;
def err_new_array_init_args : Error<
"array 'new' cannot have initialization arguments">;
-def err_new_paren_array_nonconst : Error<
+def ext_new_paren_array_nonconst : ExtWarn<
"when type is in parentheses, array cannot have dynamic size">;
def err_placement_new_non_placement_delete : Error<
"'new' expression with placement arguments refers to non-placement "
"'operator delete'">;
def err_array_size_not_integral : Error<
"array size expression must have integral or enumerated type, not %0">;
+def err_array_size_incomplete_type : Error<
+ "array size expression has incomplete class type %0">;
+def err_array_size_explicit_conversion : Error<
+ "array size expression of type %0 requires explicit conversion to type %1">;
+def note_array_size_conversion : Note<
+ "conversion to %select{integral|enumeration}0 type %1 declared here">;
+def err_array_size_ambiguous_conversion : Error<
+ "ambiguous conversion of array size expression of type %0 to an integral or "
+ "enumeration type">;
+def ext_array_size_conversion : Extension<
+ "implicit conversion from array size expression of type %0 to "
+ "%select{integral|enumeration}1 type %2 is a C++0x extension">;
+
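A sketch of the C++0x array-size conversion these new diagnostics cover (hypothetical user code): a class type with a single implicit conversion to an integral type may now supply an array bound, flagged as an extension.

    struct Size {
      operator int() const { return 4; }   // single integral conversion
    };
    void alloc() {
      Size s;
      int *p = new int[s];   // ext_array_size_conversion: C++0x extension
      delete [] p;
    }
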
def err_default_init_const : Error<
"default initialization of an object of const type %0"
"%select{| requires a user-provided default constructor}1">;
@@ -2866,11 +2967,17 @@ def warn_printf_asterisk_missing_arg : Warning<
def warn_printf_asterisk_wrong_type : Warning<
"field %select{width|precision}0 should have type %1, but argument has type %2">,
InGroup<Format>;
-def warn_printf_nonsensical_precision: Warning<
- "precision used in '%0' conversion specifier (where it has no meaning)">,
+def warn_printf_nonsensical_optional_amount: Warning<
+ "%select{field width|precision}0 used with '%1' conversion specifier, resulting in undefined behavior">,
InGroup<Format>;
def warn_printf_nonsensical_flag: Warning<
- "flag '%0' results in undefined behavior in '%1' conversion specifier">,
+ "flag '%0' results in undefined behavior with '%1' conversion specifier">,
+ InGroup<Format>;
+def warn_printf_nonsensical_length: Warning<
+ "length modifier '%0' results in undefined behavior or no effect with '%1' conversion specifier">,
+ InGroup<Format>;
+def warn_printf_ignored_flag: Warning<
+ "flag '%0' is ignored when flag '%1' is present">,
InGroup<Format>;
// CHECK: returning address/reference of stack memory
@@ -2886,8 +2993,10 @@ def err_ret_local_block : Error<
// For non-floating point, expressions of the form x == x or x != x
// should result in a warning, since these always evaluate to a constant.
-def warn_selfcomparison : Warning<
- "self-comparison always results in a constant value">;
+// Array comparisons have similar warnings.
+def warn_comparison_always : Warning<
+ "%select{self-|array }0comparison always evaluates to %select{false|true|a constant}1">;
+
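Minimal triggers for the two arms of the reworded diagnostic (illustrative user code):

    bool self(int x) {
      return x == x;     // self-comparison: always evaluates to true
    }
    bool arrays() {
      int a[4], b[4];
      return a == b;     // distinct arrays decay to distinct pointers: always false
    }
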
def warn_stringcompare : Warning<
"result of comparison against %select{a string literal|@encode}0 is "
"unspecified (use strncmp instead)">;
@@ -2956,13 +3065,13 @@ def err_first_argument_to_va_arg_not_of_type_va_list : Error<
"first argument to 'va_arg' is of type %0 and not 'va_list'">;
def warn_return_missing_expr : Warning<
- "non-void %select{function|method}1 %0 should return a value">,
+ "non-void %select{function|method}1 %0 should return a value">, DefaultError,
InGroup<ReturnType>;
def ext_return_missing_expr : ExtWarn<
- "non-void %select{function|method}1 %0 should return a value">,
+ "non-void %select{function|method}1 %0 should return a value">, DefaultError,
InGroup<ReturnType>;
def ext_return_has_expr : ExtWarn<
- "void %select{function|method}1 %0 should not return a value">,
+ "void %select{function|method}1 %0 should not return a value">, DefaultError,
InGroup<ReturnType>;
def ext_return_has_void_expr : Extension<
"void %select{function|method}1 %0 should not return void expression">;
@@ -2993,6 +3102,8 @@ def err_vector_incorrect_num_initializers : Error<
"%select{too many|too few}0 elements in vector initialization (expected %1 elements, have %2)">;
def err_altivec_empty_initializer : Error<"expected initializer">;
+def err_invalid_neon_type_code : Error<
+ "incompatible constant for this __builtin_neon function">;
def err_argument_invalid_range : Error<
"argument should be a value from %0 to %1">;
@@ -3003,7 +3114,9 @@ def err_constant_integer_arg_type : Error<
"argument to %0 must be a constant integer">;
def ext_mixed_decls_code : Extension<
- "ISO C90 forbids mixing declarations and code">;
+ "ISO C90 forbids mixing declarations and code">,
+ InGroup<DiagGroup<"declaration-after-statement">>;
+
def err_non_variable_decl_in_for : Error<
"declaration of non-local variable in 'for' loop">;
def err_toomany_element_decls : Error<
@@ -3094,6 +3207,11 @@ def err_undeclared_protocol_suggest : Error<
"cannot find protocol declaration for %0; did you mean %1?">;
def note_base_class_specified_here : Note<
"base class %0 specified here">;
+def err_using_directive_suggest : Error<
+ "no namespace named %0; did you mean %1?">;
+def err_using_directive_member_suggest : Error<
+ "no namespace named %0 in %1; did you mean %2?">;
+def note_namespace_defined_here : Note<"namespace %0 defined here">;
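
The namespace typo-correction pairs with the new SpellChecking language option further down in this change; a minimal trigger (illustrative, ill-formed by design):

    namespace utils { void f(); }
    using namespace utilz;   // error: no namespace named 'utilz'; did you mean 'utils'?
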
} // end of sema category
} // end of sema component.
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h b/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
index d0e0118..e71f51a 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
@@ -189,7 +189,7 @@ public:
/// getDirectory - Lookup, cache, and verify the specified directory. This
/// returns null if the directory doesn't exist.
///
- const DirectoryEntry *getDirectory(const llvm::StringRef &Filename) {
+ const DirectoryEntry *getDirectory(llvm::StringRef Filename) {
return getDirectory(Filename.begin(), Filename.end());
}
const DirectoryEntry *getDirectory(const char *FileStart,const char *FileEnd);
@@ -197,7 +197,7 @@ public:
/// getFile - Lookup, cache, and verify the specified file. This returns null
/// if the file doesn't exist.
///
- const FileEntry *getFile(const llvm::StringRef &Filename) {
+ const FileEntry *getFile(llvm::StringRef Filename) {
return getFile(Filename.begin(), Filename.end());
}
const FileEntry *getFile(const char *FilenameStart,
@@ -206,8 +206,8 @@ public:
/// \brief Retrieve a file entry for a "virtual" file that acts as
/// if there were a file with the given name on disk. The file
/// itself is not accessed.
- const FileEntry *getVirtualFile(const llvm::StringRef &Filename,
- off_t Size, time_t ModificationTime);
+ const FileEntry *getVirtualFile(llvm::StringRef Filename, off_t Size,
+ time_t ModificationTime);
void PrintStats() const;
};
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
index 582d59c..6b8bcdc 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
@@ -89,7 +89,8 @@ public:
// The 'this' pointer really points to a
// std::pair<IdentifierInfo, const char*>, where internal pointer
// points to the external string data.
- return ((std::pair<IdentifierInfo, const char*>*) this)->second;
+ typedef std::pair<IdentifierInfo, const char*> actualtype;
+ return ((const actualtype*) this)->second;
}
/// getLength - Efficiently return the length of this identifier info.
@@ -101,7 +102,8 @@ public:
// The 'this' pointer really points to a
// std::pair<IdentifierInfo, const char*>, where internal pointer
// points to the external string data.
- const char* p = ((std::pair<IdentifierInfo, const char*>*) this)->second-2;
+ typedef std::pair<IdentifierInfo, const char*> actualtype;
+ const char* p = ((const actualtype*) this)->second - 2;
return (((unsigned) p[0]) | (((unsigned) p[1]) << 8)) - 1;
}
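
Both accessors above rely on the identifier table allocating each IdentifierInfo as the first member of a std::pair whose second member points at the key string, with the two bytes immediately before the key holding (length + 1) in little-endian order. A standalone sketch of the length decoding, under that layout assumption:

    unsigned decode_length(const char *key) {
      const unsigned char *p =
          reinterpret_cast<const unsigned char *>(key) - 2;
      return (unsigned(p[0]) | (unsigned(p[1]) << 8)) - 1;
    }
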
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
index bf44947..c18749d 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
@@ -67,9 +67,6 @@ public:
unsigned MathErrno : 1; // Math functions must respect errno
// (modulo the platform support).
- unsigned OverflowChecking : 1; // Extension to call a handler function when
- // signed integer arithmetic overflows.
-
unsigned HeinousExtensions : 1; // Extensions that we really don't like and
// may be ripped out at any time.
@@ -103,20 +100,21 @@ public:
unsigned DumpRecordLayouts : 1; /// Dump the layout of IRgen'd records.
unsigned DumpVTableLayouts : 1; /// Dump the layouts of emitted vtables.
unsigned NoConstantCFStrings : 1; // Do not do CF strings
+ unsigned InlineVisibilityHidden : 1; // Whether inline C++ methods have
+ // hidden visibility by default.
+ unsigned SpellChecking : 1; // Whether to perform spell-checking for error
+ // recovery.
// FIXME: This is just a temporary option, for testing purposes.
unsigned NoBitFieldTypeAlign : 1;
private:
- unsigned GC : 2; // Objective-C Garbage Collection modes. We
- // declare this enum as unsigned because MSVC
- // insists on making enums signed. Set/Query
- // this value using accessors.
+ // We declare multibit enums as unsigned because MSVC insists on making enums
+ // signed. Set/Query these values using accessors.
+ unsigned GC : 2; // Objective-C Garbage Collection modes.
unsigned SymbolVisibility : 3; // Symbol's visibility.
- unsigned StackProtector : 2; // Whether stack protectors are on. We declare
- // this enum as unsigned because MSVC insists
- // on making enums signed. Set/Query this
- // value using accessors.
+ unsigned StackProtector : 2; // Whether stack protectors are on.
+ unsigned SignedOverflowBehavior : 2; // How to handle signed integer overflow.
public:
unsigned InstantiationDepth; // Maximum template instantiation depth.
@@ -130,13 +128,19 @@ public:
Protected,
Hidden
};
+
+ enum SignedOverflowBehaviorTy {
+ SOB_Undefined, // Default C standard behavior.
+ SOB_Defined, // -fwrapv
+ SOB_Trapping // -ftrapv
+ };
LangOptions() {
Trigraphs = BCPLComment = Bool = DollarIdents = AsmPreprocessor = 0;
GNUMode = GNUKeywords = ImplicitInt = Digraphs = 0;
HexFloats = 0;
GC = ObjC1 = ObjC2 = ObjCNonFragileABI = ObjCNonFragileABI2 = 0;
- NoConstantCFStrings = 0;
+ NoConstantCFStrings = 0; InlineVisibilityHidden = 0;
C99 = Microsoft = CPlusPlus = CPlusPlus0x = 0;
CXXOperatorNames = PascalStrings = WritableStrings = ConstStrings = 0;
Exceptions = SjLjExceptions = Freestanding = NoBuiltin = 0;
@@ -147,20 +151,19 @@ public:
AltiVec = OpenCL = StackProtector = 0;
SymbolVisibility = (unsigned) Default;
-
+
ThreadsafeStatics = 1;
POSIXThreads = 0;
Blocks = 0;
EmitAllDecls = 0;
MathErrno = 1;
-
+ SignedOverflowBehavior = SOB_Undefined;
+
AssumeSaneOperatorNew = 1;
-
- // FIXME: The default should be 1.
- AccessControl = 0;
+ AccessControl = 1;
ElideConstructors = 1;
- OverflowChecking = 0;
+ SignedOverflowBehavior = SOB_Undefined;
ObjCGCBitmapPrint = 0;
InstantiationDepth = 1024;
@@ -179,6 +182,7 @@ public:
CatchUndefined = 0;
DumpRecordLayouts = 0;
DumpVTableLayouts = 0;
+ SpellChecking = 1;
NoBitFieldTypeAlign = 0;
}
@@ -196,6 +200,13 @@ public:
return (VisibilityMode) SymbolVisibility;
}
void setVisibilityMode(VisibilityMode v) { SymbolVisibility = (unsigned) v; }
+
+ SignedOverflowBehaviorTy getSignedOverflowBehavior() const {
+ return (SignedOverflowBehaviorTy)SignedOverflowBehavior;
+ }
+ void setSignedOverflowBehavior(SignedOverflowBehaviorTy V) {
+ SignedOverflowBehavior = (unsigned)V;
+ }
};
} // end namespace clang
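
A hypothetical driver-side sketch of the new overflow-behavior accessors; the flag-to-enum mapping follows the comments in the SignedOverflowBehaviorTy enum above:

    #include "clang/Basic/LangOptions.h"

    void applyOverflowFlags(clang::LangOptions &Opts, bool Wrapv, bool Trapv) {
      using clang::LangOptions;
      if (Trapv)
        Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);  // -ftrapv
      else if (Wrapv)
        Opts.setSignedOverflowBehavior(LangOptions::SOB_Defined);   // -fwrapv
      else
        Opts.setSignedOverflowBehavior(LangOptions::SOB_Undefined); // default
    }
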
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Makefile b/contrib/llvm/tools/clang/include/clang/Basic/Makefile
index 48f7f9d..7db3e29 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Makefile
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Makefile
@@ -1,16 +1,33 @@
-LEVEL = ../../../../..
-BUILT_SOURCES = DiagnosticAnalysisKinds.inc DiagnosticASTKinds.inc \
+CLANG_LEVEL := ../../..
+BUILT_SOURCES = \
+ DiagnosticAnalysisKinds.inc DiagnosticASTKinds.inc \
DiagnosticCommonKinds.inc DiagnosticDriverKinds.inc \
DiagnosticFrontendKinds.inc DiagnosticLexKinds.inc \
DiagnosticParseKinds.inc DiagnosticSemaKinds.inc \
- DiagnosticGroups.inc
+ DiagnosticGroups.inc AttrList.inc arm_neon.inc \
+ Version.inc
TABLEGEN_INC_FILES_COMMON = 1
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
INPUT_TDS = $(wildcard $(PROJ_SRC_DIR)/Diagnostic*.td)
+# Compute the Clang version from the LLVM version, unless specified explicitly.
+ifndef CLANG_VERSION
+CLANG_VERSION := $(subst svn,,$(LLVMVersion))
+endif
+
+CLANG_VERSION_COMPONENTS := $(subst ., ,$(CLANG_VERSION))
+CLANG_VERSION_MAJOR := $(word 1,$(CLANG_VERSION_COMPONENTS))
+CLANG_VERSION_MINOR := $(word 2,$(CLANG_VERSION_COMPONENTS))
+CLANG_VERSION_PATCHLEVEL := $(word 3,$(CLANG_VERSION_COMPONENTS))
+ifeq ($(CLANG_VERSION_PATCHLEVEL),)
+CLANG_HAS_VERSION_PATCHLEVEL := 0
+else
+CLANG_HAS_VERSION_PATCHLEVEL := 1
+endif
+
$(ObjDir)/Diagnostic%Kinds.inc.tmp : Diagnostic.td Diagnostic%Kinds.td $(TBLGEN) $(ObjDir)/.dir
$(Echo) "Building Clang $(patsubst Diagnostic%Kinds.inc.tmp,%,$(@F)) diagnostic tables with tblgen"
$(Verb) $(TableGen) -gen-clang-diags-defs -clang-component=$(patsubst Diagnostic%Kinds.inc.tmp,%,$(@F)) -o $(call SYSPATH, $@) $<
@@ -19,4 +36,20 @@ $(ObjDir)/DiagnosticGroups.inc.tmp : Diagnostic.td DiagnosticGroups.td $(INPUT_T
$(Echo) "Building Clang diagnostic groups with tblgen"
$(Verb) $(TableGen) -gen-clang-diag-groups -o $(call SYSPATH, $@) $<
+$(ObjDir)/AttrList.inc.tmp : Attr.td $(TBLGEN) $(ObjDir)/.dir
+ $(Echo) "Building Clang attribute list with tblgen"
+ $(Verb) $(TableGen) -gen-clang-attr-list -o $(call SYSPATH, $@) \
+ -I $(PROJ_SRC_DIR)/../.. $<
+
+$(ObjDir)/arm_neon.inc.tmp : arm_neon.td $(TBLGEN) $(ObjDir)/.dir
+ $(Echo) "Building Clang arm_neon.inc with tblgen"
+ $(Verb) $(TableGen) -gen-arm-neon-sema -o $(call SYSPATH, $@) $<
+$(ObjDir)/Version.inc.tmp : Version.inc.in Makefile $(LLVM_OBJ_ROOT)/Makefile.config $(ObjDir)/.dir
+ $(Echo) "Updating Clang version info."
+ $(Verb)sed -e "s#@CLANG_VERSION@#$(CLANG_VERSION)#g" \
+ -e "s#@CLANG_VERSION_MAJOR@#$(CLANG_VERSION_MAJOR)#g" \
+ -e "s#@CLANG_VERSION_MINOR@#$(CLANG_VERSION_MINOR)#g" \
+ -e "s#@CLANG_VERSION_PATCHLEVEL@#$(CLANG_VERSION_PATCHLEVEL)#g" \
+ -e "s#@CLANG_HAS_VERSION_PATCHLEVEL@#$(CLANG_HAS_VERSION_PATCHLEVEL)#g" \
+ $< > $@
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
index 89fae87..cd0da97 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
@@ -59,7 +59,7 @@ public:
/// DiagRanges - The list of ranges added to this diagnostic. It currently
/// only supports 10 ranges; it could easily be extended if needed.
- SourceRange DiagRanges[10];
+ CharSourceRange DiagRanges[10];
enum { MaxFixItHints = 3 };
@@ -142,7 +142,7 @@ private:
DiagStorage = 0;
}
- void AddSourceRange(const SourceRange &R) const {
+ void AddSourceRange(const CharSourceRange &R) const {
if (!DiagStorage)
DiagStorage = getStorage();
@@ -264,10 +264,16 @@ public:
friend inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
const SourceRange &R) {
- PD.AddSourceRange(R);
+ PD.AddSourceRange(CharSourceRange::getTokenRange(R));
return PD;
}
+ friend inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ const CharSourceRange &R) {
+ PD.AddSourceRange(R);
+ return PD;
+ }
+
friend const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
const FixItHint &Hint) {
PD.AddFixItHint(Hint);
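
With the two overloads above, both range flavors stream into a PartialDiagnostic, and a bare SourceRange is implicitly wrapped as a token range; a usage sketch under that assumption:

    #include "clang/Basic/PartialDiagnostic.h"

    void report(const clang::PartialDiagnostic &PD,
                clang::SourceLocation B, clang::SourceLocation E) {
      PD << clang::SourceRange(B, E);                    // wrapped as a token range
      PD << clang::CharSourceRange::getCharRange(B, E);  // explicit character range
    }
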
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
index 0bbeffe..35f27fb 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
@@ -172,6 +172,56 @@ public:
return B != X.B || E != X.E;
}
};
+
+/// CharSourceRange - This class represents a character granular source range.
+/// The underlying SourceRange can either specify the starting/ending character
+/// of the range, or it can specify the start of the range and the start of the
+/// last token of the range (a "token range"). In the token range case, the
+/// size of the last token must be measured to determine the actual end of the
+/// range.
+class CharSourceRange {
+ SourceRange Range;
+ bool IsTokenRange;
+public:
+ CharSourceRange() : IsTokenRange(false) {}
+ CharSourceRange(SourceRange R, bool ITR) : Range(R), IsTokenRange(ITR) {}
+
+ static CharSourceRange getTokenRange(SourceRange R) {
+ CharSourceRange Result;
+ Result.Range = R;
+ Result.IsTokenRange = true;
+ return Result;
+ }
+
+ static CharSourceRange getCharRange(SourceRange R) {
+ CharSourceRange Result;
+ Result.Range = R;
+ Result.IsTokenRange = false;
+ return Result;
+ }
+
+ static CharSourceRange getTokenRange(SourceLocation B, SourceLocation E) {
+ return getTokenRange(SourceRange(B, E));
+ }
+ static CharSourceRange getCharRange(SourceLocation B, SourceLocation E) {
+ return getCharRange(SourceRange(B, E));
+ }
+
+ /// isTokenRange - Return true if the end of this range specifies the start of
+ /// the last token. Return false if the end of this range specifies the last
+ /// character in the range.
+ bool isTokenRange() const { return IsTokenRange; }
+
+ SourceLocation getBegin() const { return Range.getBegin(); }
+ SourceLocation getEnd() const { return Range.getEnd(); }
+ const SourceRange &getAsRange() const { return Range; }
+
+ void setBegin(SourceLocation b) { Range.setBegin(b); }
+ void setEnd(SourceLocation e) { Range.setEnd(e); }
+
+ bool isValid() const { return Range.isValid(); }
+ bool isInvalid() const { return !isValid(); }
+};
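
A usage sketch of the two factory functions (the locations are assumed to come from a real lexer in practice):

    #include "clang/Basic/SourceLocation.h"

    void classify(clang::SourceLocation B, clang::SourceLocation E) {
      using clang::CharSourceRange;
      CharSourceRange Tok = CharSourceRange::getTokenRange(B, E);
      CharSourceRange Chr = CharSourceRange::getCharRange(B, E);
      // Tok.isTokenRange() is true: E names the start of the last token,
      // whose extent must still be measured. Chr.isTokenRange() is false:
      // E already names the last character of the range.
    }
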
/// FullSourceLoc - A SourceLocation and its associated SourceManager. Useful
/// for argument passing to functions that expect both objects.
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtNodes.td b/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
index 60c94a6..a2f6973 100644
--- a/contrib/llvm/tools/clang/include/clang/AST/StmtNodes.td
+++ b/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
@@ -1,4 +1,6 @@
-class Stmt<bit abstract = 0> {
+class AttrSubject;
+
+class Stmt<bit abstract = 0> : AttrSubject {
bit Abstract = abstract;
}
@@ -93,11 +95,10 @@ def CXXNullPtrLiteralExpr : DStmt<Expr>;
def CXXThisExpr : DStmt<Expr>;
def CXXThrowExpr : DStmt<Expr>;
def CXXDefaultArgExpr : DStmt<Expr>;
-def CXXZeroInitValueExpr : DStmt<Expr>;
+def CXXScalarValueInitExpr : DStmt<Expr>;
def CXXNewExpr : DStmt<Expr>;
def CXXDeleteExpr : DStmt<Expr>;
def CXXPseudoDestructorExpr : DStmt<Expr>;
-def UnresolvedLookupExpr : DStmt<Expr>;
def UnaryTypeTraitExpr : DStmt<Expr>;
def DependentScopeDeclRefExpr : DStmt<Expr>;
def CXXConstructExpr : DStmt<Expr>;
@@ -107,7 +108,9 @@ def CXXExprWithTemporaries : DStmt<Expr>;
def CXXTemporaryObjectExpr : DStmt<CXXConstructExpr>;
def CXXUnresolvedConstructExpr : DStmt<Expr>;
def CXXDependentScopeMemberExpr : DStmt<Expr>;
-def UnresolvedMemberExpr : DStmt<Expr>;
+def OverloadExpr : DStmt<Expr, 1>;
+def UnresolvedLookupExpr : DStmt<OverloadExpr>;
+def UnresolvedMemberExpr : DStmt<OverloadExpr>;
// Obj-C Expressions.
def ObjCStringLiteral : DStmt<Expr>;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
index 00fd9b9..9f7debf 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
@@ -51,14 +51,17 @@ protected:
unsigned char FloatWidth, FloatAlign;
unsigned char DoubleWidth, DoubleAlign;
unsigned char LongDoubleWidth, LongDoubleAlign;
+ unsigned char LargeArrayMinWidth, LargeArrayAlign;
unsigned char LongWidth, LongAlign;
unsigned char LongLongWidth, LongLongAlign;
const char *DescriptionString;
const char *UserLabelPrefix;
const llvm::fltSemantics *FloatFormat, *DoubleFormat, *LongDoubleFormat;
unsigned char RegParmMax, SSERegParmMax;
+ std::string CXXABI;
unsigned HasAlignMac68kSupport : 1;
+ unsigned RealTypeUsesObjCFPRet : 3;
// TargetInfo Constructor. Default initializes all fields.
TargetInfo(const std::string &T);
@@ -85,6 +88,13 @@ public:
SignedLongLong,
UnsignedLongLong
};
+
+ enum RealType {
+ Float = 0,
+ Double,
+ LongDouble
+ };
+
protected:
IntType SizeType, IntMaxType, UIntMaxType, PtrDiffType, IntPtrType, WCharType,
WIntType, Char16Type, Char32Type, Int64Type, SigAtomicType;
@@ -194,6 +204,11 @@ public:
return *LongDoubleFormat;
}
+ // getLargeArrayMinWidth/Align - Return the minimum array size that is
+ // 'large' and its alignment.
+ unsigned getLargeArrayMinWidth() const { return LargeArrayMinWidth; }
+ unsigned getLargeArrayAlign() const { return LargeArrayAlign; }
+
/// getIntMaxTWidth - Return the size of intmax_t and uintmax_t for this
/// target, in bits.
unsigned getIntMaxTWidth() const {
@@ -226,6 +241,12 @@ public:
/// integer type enum. For example, SignedLong -> "L".
static const char *getTypeConstantSuffix(IntType T);
+ /// \brief Check whether the given real type should use the "fpret" flavor of
+ /// Obj-C message passing on this target.
+ bool useObjCFPRetForRealType(RealType T) const {
+ return RealTypeUsesObjCFPRet & (1 << T);
+ }
+
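RealTypeUsesObjCFPRet is read as a bitmask indexed by the RealType enum; a standalone sketch of the encoding (the chosen bits are an assumption for illustration, not any real target's values):

    enum RealType { Float = 0, Double, LongDouble };   // mirrors the enum above
    // Example target: only 'float' and 'long double' return via fpret.
    unsigned RealTypeUsesObjCFPRet = (1 << Float) | (1 << LongDouble);
    bool useObjCFPRetForRealType(RealType T) {
      return RealTypeUsesObjCFPRet & (1 << T);
    }
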
///===---- Other target property query methods --------------------------===//
/// getTargetDefines - Appends the target-specific #define values for this
@@ -390,6 +411,11 @@ public:
return "";
}
+ /// getCXXABI - Get the C++ ABI in use.
+ virtual llvm::StringRef getCXXABI() const {
+ return CXXABI;
+ }
+
/// setCPU - Target the specific CPU.
///
/// \return - False on error (invalid CPU name).
@@ -406,6 +432,16 @@ public:
return false;
}
+ /// setCXXABI - Use this specific C++ ABI.
+ ///
+ /// \return - False on error (invalid ABI name).
+ virtual bool setCXXABI(const std::string &Name) {
+ if (Name != "itanium" && Name != "microsoft")
+ return false;
+ CXXABI = Name;
+ return true;
+ }
+
/// setFeatureEnabled - Enable or disable a specific target feature,
/// the feature name must be valid.
///
@@ -450,7 +486,12 @@ public:
return -1;
}
-
+ /// getStaticInitSectionSpecifier - Return the section to use for C++ static
+ /// initialization functions.
+ virtual const char *getStaticInitSectionSpecifier() const {
+ return 0;
+ }
+
protected:
virtual uint64_t getPointerWidthV(unsigned AddrSpace) const {
return PointerWidth;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h
index eeaab15..19b0cbb 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h
@@ -19,6 +19,10 @@ namespace clang {
class TargetOptions {
public:
+ TargetOptions() {
+ CXXABI = "itanium";
+ }
+
/// If given, the name of the target triple to compile for. If not given the
/// target will be selected to match the host.
std::string Triple;
@@ -29,6 +33,10 @@ public:
/// If given, the name of the target ABI to use.
std::string ABI;
+ /// If given, the name of the target C++ ABI to use. If not given, defaults
+ /// to "itanium".
+ std::string CXXABI;
+
/// The list of target specific features to enable or disable -- this should
/// be a list of strings starting with by '+' or '-'.
std::vector<std::string> Features;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Version.h b/contrib/llvm/tools/clang/include/clang/Basic/Version.h
index b3b6184..9948677 100644
--- a/contrib/llvm/tools/clang/include/clang/Basic/Version.h
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Version.h
@@ -17,15 +17,7 @@
#include "llvm/ADT/StringRef.h"
-/// \brief Clang major version
-#define CLANG_VERSION_MAJOR 2
-
-// FIXME: Updates to this file must also update CMakeLists.txt and VER.
-/// \brief Clang minor version
-#define CLANG_VERSION_MINOR 0
-
-/// \brief Clang patchlevel version
-// #define CLANG_VERSION_PATCHLEVEL 1
+#include "clang/Basic/Version.inc"
/// \brief Helper macro for CLANG_VERSION_STRING.
#define CLANG_MAKE_VERSION_STRING2(X) #X
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Version.inc.in b/contrib/llvm/tools/clang/include/clang/Basic/Version.inc.in
new file mode 100644
index 0000000..ccf8430
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Version.inc.in
@@ -0,0 +1,6 @@
+#define CLANG_VERSION @CLANG_VERSION@
+#define CLANG_VERSION_MAJOR @CLANG_VERSION_MAJOR@
+#define CLANG_VERSION_MINOR @CLANG_VERSION_MINOR@
+#if @CLANG_HAS_VERSION_PATCHLEVEL@
+#define CLANG_VERSION_PATCHLEVEL @CLANG_VERSION_PATCHLEVEL@
+#endif
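
Tracing the Makefile sed rule above through this template: assuming LLVMVersion is 2.8svn, CLANG_VERSION becomes 2.8, the patchlevel component is empty, and the generated Version.inc would read:

    #define CLANG_VERSION 2.8
    #define CLANG_VERSION_MAJOR 2
    #define CLANG_VERSION_MINOR 8
    #if 0
    #define CLANG_VERSION_PATCHLEVEL
    #endif
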
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td b/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
new file mode 100644
index 0000000..b42755c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
@@ -0,0 +1,341 @@
+//===--- arm_neon.td - ARM NEON compiler interface ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TableGen definitions from which the ARM NEON header
+// file will be generated. See ARM document DUI0348B.
+//
+//===----------------------------------------------------------------------===//
+
+class Op;
+
+def OP_NONE : Op;
+def OP_ADD : Op;
+def OP_SUB : Op;
+def OP_MUL : Op;
+def OP_MLA : Op;
+def OP_MLS : Op;
+def OP_MUL_N : Op;
+def OP_MLA_N : Op;
+def OP_MLS_N : Op;
+def OP_EQ : Op;
+def OP_GE : Op;
+def OP_LE : Op;
+def OP_GT : Op;
+def OP_LT : Op;
+def OP_NEG : Op;
+def OP_NOT : Op;
+def OP_AND : Op;
+def OP_OR : Op;
+def OP_XOR : Op;
+def OP_ANDN : Op;
+def OP_ORN : Op;
+def OP_CAST : Op;
+def OP_HI : Op;
+def OP_LO : Op;
+def OP_CONC : Op;
+def OP_DUP : Op;
+def OP_SEL : Op;
+def OP_REV64 : Op;
+def OP_REV32 : Op;
+def OP_REV16 : Op;
+
+class Inst <string p, string t, Op o> {
+ string Prototype = p;
+ string Types = t;
+ Op Operand = o;
+ bit isShift = 0;
+}
+
+// Used to generate Builtins.def
+class SInst<string p, string t> : Inst<p, t, OP_NONE> {}
+class IInst<string p, string t> : Inst<p, t, OP_NONE> {}
+class WInst<string p, string t> : Inst<p, t, OP_NONE> {}
+
+// prototype: return (arg, arg, ...)
+// v: void
+// t: best-fit integer (int/poly args)
+// x: signed integer (int/float args)
+// u: unsigned integer (int/float args)
+// f: float (int args)
+// d: default
+// w: double width elements, same num elts
+// n: double width elements, half num elts
+// h: half width elements, double num elts
+// e: half width elements, double num elts, unsigned
+// i: constant int
+// l: constant uint64
+// s: scalar of element type
+// a: scalar of element type (splat to vector type)
+// k: default elt width, double num elts
+// #: array of default vectors
+// p: pointer type
+// c: const pointer type
+
+// sizes:
+// c: char
+// s: short
+// i: int
+// l: long
+// f: float
+// h: half-float
+
+// size modifiers:
+// U: unsigned
+// Q: 128b
+// P: polynomial
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.1 Addition
+def VADD : Inst<"ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_ADD>;
+def VADDL : SInst<"wdd", "csiUcUsUi">;
+def VADDW : SInst<"wwd", "csiUcUsUi">;
+def VHADD : SInst<"ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VRHADD : SInst<"ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VQADD : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VADDHN : IInst<"dww", "csiUcUsUi">;
+def VRADDHN : IInst<"dww", "csiUcUsUi">;
+
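A worked decoding of one record under the legend above: VADDL's prototype "wdd" means a double-width result from two default-width operands, over signed and unsigned char/short/int, so tblgen emits widening intrinsics. A sketch of one of them in use (assumes a NEON-enabled target):

    #include <arm_neon.h>   // assumption: compiling with NEON support
    int16x8_t widening_add(int8x8_t a, int8x8_t b) {
      return vaddl_s8(a, b);   // from: def VADDL : SInst<"wdd", "csiUcUsUi">
    }
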
+////////////////////////////////////////////////////////////////////////////////
+// E.3.2 Multiplication
+def VMUL : Inst<"ddd", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_MUL>;
+def VMLA : Inst<"dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLA>;
+def VMLAL : SInst<"wwdd", "csiUcUsUi">;
+def VMLS : Inst<"dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLS>;
+def VMLSL : SInst<"wwdd", "csiUcUsUi">;
+def VQDMULH : SInst<"ddd", "siQsQi">;
+def VQRDMULH : SInst<"ddd", "siQsQi">;
+def VQDMLAL : SInst<"wwdd", "si">;
+def VQDMLSL : SInst<"wwdd", "si">;
+def VMULL : SInst<"wdd", "csiUcUsUiPc">;
+def VQDMULL : SInst<"wdd", "si">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.3 Subtraction
+def VSUB : Inst<"ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_SUB>;
+def VSUBL : SInst<"wdd", "csiUcUsUi">;
+def VSUBW : SInst<"wwd", "csiUcUsUi">;
+def VQSUB : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VHSUB : SInst<"ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VSUBHN : IInst<"dww", "csiUcUsUi">;
+def VRSUBHN : IInst<"dww", "csiUcUsUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.4 Comparison
+def VCEQ : Inst<"udd", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_EQ>;
+def VCGE : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GE>;
+def VCLE : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LE>;
+def VCGT : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GT>;
+def VCLT : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LT>;
+def VCAGE : IInst<"udd", "fQf">;
+def VCALE : IInst<"udd", "fQf">;
+def VCAGT : IInst<"udd", "fQf">;
+def VCALT : IInst<"udd", "fQf">;
+def VTST : WInst<"udd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.5 Absolute Difference
+def VABD : SInst<"ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
+def VABDL : SInst<"wdd", "csiUcUsUi">;
+def VABA : SInst<"dddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VABAL : SInst<"wwdd", "csiUcUsUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.6 Max/Min
+def VMAX : SInst<"ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
+def VMIN : SInst<"ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.7 Pairwise Addition
+def VPADD : IInst<"ddd", "csiUcUsUif">;
+def VPADDL : SInst<"nd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VPADAL : SInst<"nnd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.8-9 Folding Max/Min
+def VPMAX : SInst<"ddd", "csiUcUsUif">;
+def VPMIN : SInst<"ddd", "csiUcUsUif">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.10 Reciprocal/Sqrt
+def VRECPS : IInst<"ddd", "fQf">;
+def VRSQRTS : IInst<"ddd", "fQf">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.11 Shifts by signed variable
+def VSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VRSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQRSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.12 Shifts by constant
+let isShift = 1 in {
+def VSHR_N : SInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VSHL_N : IInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VRSHR_N : SInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VSRA_N : SInst<"dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VRSRA_N : SInst<"dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQSHL_N : SInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQSHLU_N : SInst<"udi", "csilQcQsQiQl">;
+def VSHRN_N : IInst<"hki", "silUsUiUl">;
+def VQSHRUN_N : SInst<"eki", "sil">;
+def VQRSHRUN_N : SInst<"eki", "sil">;
+def VQSHRN_N : SInst<"hki", "silUsUiUl">;
+def VRSHRN_N : IInst<"hki", "silUsUiUl">;
+def VQRSHRN_N : SInst<"hki", "silUsUiUl">;
+def VSHLL_N : SInst<"wdi", "csiUcUsUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.13 Shifts with insert
+def VSRI_N : WInst<"dddi", "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
+def VSLI_N : WInst<"dddi", "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.14 Loads and stores of a single vector
+def VLD1 : WInst<"dc", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD1_LANE : WInst<"dcdi", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD1_DUP : WInst<"dc", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST1 : WInst<"vpd", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST1_LANE : WInst<"vpdi", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.15 Loads and stores of an N-element structure
+def VLD2 : WInst<"2c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD3 : WInst<"3c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD4 : WInst<"4c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD2_DUP : WInst<"2c", "UcUsUiUlcsilhfPcPs">;
+def VLD3_DUP : WInst<"3c", "UcUsUiUlcsilhfPcPs">;
+def VLD4_DUP : WInst<"4c", "UcUsUiUlcsilhfPcPs">;
+def VLD2_LANE : WInst<"2c2i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VLD3_LANE : WInst<"3c3i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VLD4_LANE : WInst<"4c4i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VST2 : WInst<"vp2", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST3 : WInst<"vp3", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST4 : WInst<"vp4", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST2_LANE : WInst<"vp2i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VST3_LANE : WInst<"vp3i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VST4_LANE : WInst<"vp4i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.16 Extract lanes from a vector
+def VGET_LANE : IInst<"sdi", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.17 Set lanes within a vector
+def VSET_LANE : IInst<"dsdi", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.18 Initialize a vector from bit pattern
+def VCREATE: Inst<"dl", "csihfUcUsUiUlPcPsl", OP_CAST>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.19 Set all lanes to same value
+def VDUP_N : Inst<"ds", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", OP_DUP>;
+def VMOV_N : Inst<"ds", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", OP_DUP>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.20 Combining vectors
+def VCOMBINE : Inst<"kdd", "csilhfUcUsUiUlPcPs", OP_CONC>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.21 Splitting vectors
+def VGET_HIGH : Inst<"dk", "csilhfUcUsUiUlPcPs", OP_HI>;
+def VGET_LOW : Inst<"dk", "csilhfUcUsUiUlPcPs", OP_LO>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.22 Converting vectors
+def VCVT_S32 : SInst<"xd", "fQf">;
+def VCVT_U32 : SInst<"ud", "fQf">;
+def VCVT_F16 : SInst<"hk", "f">;
+def VCVT_N_S32 : SInst<"xdi", "fQf">;
+def VCVT_N_U32 : SInst<"udi", "fQf">;
+def VCVT_F32 : SInst<"fd", "iUiQiQUi">;
+def VCVT_F32_F16 : SInst<"kh", "f">;
+def VCVT_N_F32 : SInst<"fdi", "iUiQiQUi">;
+def VMOVN : IInst<"hk", "silUsUiUl">;
+def VMOVL : SInst<"wd", "csiUcUsUi">;
+def VQMOVN : SInst<"hk", "silUsUiUl">;
+def VQMOVUN : SInst<"ek", "sil">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.23-24 Table lookup, Extended table lookup
+def VTBL1 : WInst<"ddt", "UccPc">;
+def VTBL2 : WInst<"d2t", "UccPc">;
+def VTBL3 : WInst<"d3t", "UccPc">;
+def VTBL4 : WInst<"d4t", "UccPc">;
+def VTBX1 : WInst<"dddt", "UccPc">;
+def VTBX2 : WInst<"dd2t", "UccPc">;
+def VTBX3 : WInst<"dd3t", "UccPc">;
+def VTBX4 : WInst<"dd4t", "UccPc">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.25 Operations with a scalar value
+def VMLA_LANE : IInst<"ddddi", "siUsUifQsQiQUsQUiQf">;
+def VMLAL_LANE : SInst<"wwddi", "siUsUi">;
+def VQDMLAL_LANE : SInst<"wwddi", "si">;
+def VMLS_LANE : IInst<"ddddi", "siUsUifQsQiQUsQUiQf">;
+def VMLSL_LANE : SInst<"wwddi", "siUsUi">;
+def VQDMLSL_LANE : SInst<"wwddi", "si">;
+def VMUL_N : Inst<"dds", "sifUsUiQsQiQfQUsQUi", OP_MUL_N>;
+def VMULL_N : SInst<"wda", "siUsUi">;
+def VMULL_LANE : SInst<"wddi", "siUsUi">;
+def VQDMULL_N : SInst<"wda", "si">;
+def VQDMULL_LANE : SInst<"wddi", "si">;
+def VQDMULH_N : SInst<"dda", "siQsQi">;
+def VQDMULH_LANE : SInst<"dddi", "siQsQi">;
+def VQRDMULH_N : SInst<"dda", "siQsQi">;
+def VQRDMULH_LANE : SInst<"dddi", "siQsQi">;
+def VMLA_N : Inst<"ddda", "siUsUifQsQiQUsQUiQf", OP_MLA_N>;
+def VMLAL_N : SInst<"wwda", "siUsUi">;
+def VQDMLAL_N : SInst<"wwda", "si">;
+def VMLS_N : Inst<"ddds", "siUsUifQsQiQUsQUiQf", OP_MLS_N>;
+def VMLSL_N : SInst<"wwda", "siUsUi">;
+def VQDMLSL_N : SInst<"wwda", "si">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.26 Vector Extract
+def VEXT : WInst<"dddi", "cUcPcsUsPsiUilUlQcQUcQPcQsQUsQPsQiQUiQlQUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.27 Reverse vector elements (swap endianness)
+def VREV64 : Inst<"dd", "csiUcUsUiPcPsfQcQsQiQUcQUsQUiQPcQPsQf", OP_REV64>;
+def VREV32 : Inst<"dd", "csUcUsPcQcQsQUcQUsQPc", OP_REV32>;
+def VREV16 : Inst<"dd", "cUcPcQcQUcQPc", OP_REV16>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.28 Other single operand arithmetic
+def VABS : SInst<"dd", "csifQcQsQiQf">;
+def VQABS : SInst<"dd", "csiQcQsQi">;
+def VNEG : Inst<"dd", "csifQcQsQiQf", OP_NEG>;
+def VQNEG : SInst<"dd", "csiQcQsQi">;
+def VCLS : SInst<"dd", "csiQcQsQi">;
+def VCLZ : IInst<"dd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VCNT : WInst<"dd", "UccPcQUcQcQPc">;
+def VRECPE : SInst<"dd", "fUiQfQUi">;
+def VRSQRTE : SInst<"dd", "fUiQfQUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.29 Logical operations
+def VMVN : Inst<"dd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc", OP_NOT>;
+def VAND : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_AND>;
+def VORR : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_OR>;
+def VEOR : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_XOR>;
+def VBIC : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ANDN>;
+def VORN : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ORN>;
+def VBSL : Inst<"dudd", "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs", OP_SEL>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.30 Transposition operations
+def VTRN: WInst<"2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
+def VZIP: WInst<"2dd", "csUcUsfPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
+def VUZP: WInst<"2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.31 Vector reinterpret cast operations
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/AnalysisConsumer.h b/contrib/llvm/tools/clang/include/clang/Checker/AnalysisConsumer.h
new file mode 100644
index 0000000..c236766
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Checker/AnalysisConsumer.h
@@ -0,0 +1,35 @@
+//===--- AnalysisConsumer.h - Front-end Analysis Engine Hooks ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header contains the functions necessary for a front-end to run various
+// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CHECKER_ANALYSISCONSUMER_H
+#define LLVM_CLANG_CHECKER_ANALYSISCONSUMER_H
+
+#include <string>
+
+namespace clang {
+
+class AnalyzerOptions;
+class ASTConsumer;
+class Preprocessor;
+
+/// CreateAnalysisConsumer - Creates an ASTConsumer to run various code
+/// analysis passes. (The set of analyses run is controlled by command-line
+/// options.)
+ASTConsumer* CreateAnalysisConsumer(const Preprocessor &pp,
+ const std::string &output,
+ const AnalyzerOptions& Opts);
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/BugReporter/BugReporter.h b/contrib/llvm/tools/clang/include/clang/Checker/BugReporter/BugReporter.h
index 5b65d52..3749b43 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/BugReporter/BugReporter.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/BugReporter/BugReporter.h
@@ -70,6 +70,7 @@ protected:
virtual void Profile(llvm::FoldingSetNodeID& hash) const {
hash.AddInteger(getLocation().getRawEncoding());
+ hash.AddString(Description);
}
public:
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/Checker/FrontendActions.h
new file mode 100644
index 0000000..1c0bbb7
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Checker/FrontendActions.h
@@ -0,0 +1,29 @@
+//===-- FrontendActions.h - Useful Frontend Actions -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CHECKER_FRONTENDACTIONS_H
+#define LLVM_CLANG_CHECKER_FRONTENDACTIONS_H
+
+#include "clang/Frontend/FrontendAction.h"
+
+namespace clang {
+
+//===----------------------------------------------------------------------===//
+// AST Consumer Actions
+//===----------------------------------------------------------------------===//
+
+class AnalysisAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile);
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/PathDiagnosticClients.h b/contrib/llvm/tools/clang/include/clang/Checker/PathDiagnosticClients.h
index f8d2eeb..d3aa3b2 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/PathDiagnosticClients.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathDiagnosticClients.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FRONTEND_PATH_DIAGNOSTIC_CLIENTS_H
-#define LLVM_CLANG_FRONTEND_PATH_DIAGNOSTIC_CLiENTS_H
+#ifndef LLVM_CLANG_CHECKER_PATH_DIAGNOSTIC_CLIENTS_H
+#define LLVM_CLANG_CHECKER_PATH_DIAGNOSTIC_CLIENTS_H
#include <string>
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Checker.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Checker.h
index 8cb9cc8..49dc3fa 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Checker.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Checker.h
@@ -38,16 +38,20 @@ class CheckerContext {
const unsigned size;
bool DoneEvaluating; // FIXME: This is not a permanent API change.
public:
+ bool *respondsToCallback;
+public:
CheckerContext(ExplodedNodeSet &dst, GRStmtNodeBuilder &builder,
GRExprEngine &eng, ExplodedNode *pred,
const void *tag, ProgramPoint::Kind K,
+ bool *respondsToCB = 0,
const Stmt *stmt = 0, const GRState *st = 0)
: Dst(dst), B(builder), Eng(eng), Pred(pred),
OldSink(B.BuildSinks),
OldTag(B.Tag, tag),
OldPointKind(B.PointKind, K),
OldHasGen(B.HasGeneratedNode),
- ST(st), statement(stmt), size(Dst.size()) {}
+ ST(st), statement(stmt), size(Dst.size()),
+ respondsToCallback(respondsToCB) {}
~CheckerContext();
@@ -144,6 +148,7 @@ public:
// If the 'state' is not new, we need to check if the cached state 'ST'
// is new.
if (state != getState() || (ST && ST != B.GetState(Pred)))
+ // The state is new or equal to ST.
GenerateNode(state, true);
else
Dst.Add(Pred);
@@ -188,10 +193,11 @@ private:
GRStmtNodeBuilder &Builder,
GRExprEngine &Eng,
const Stmt *S,
- ExplodedNode *Pred, void *tag, bool isPrevisit) {
+ ExplodedNode *Pred, void *tag, bool isPrevisit,
+ bool& respondsToCallback) {
CheckerContext C(Dst, Builder, Eng, Pred, tag,
isPrevisit ? ProgramPoint::PreStmtKind :
- ProgramPoint::PostStmtKind, S);
+ ProgramPoint::PostStmtKind, &respondsToCallback, S);
if (isPrevisit)
_PreVisit(C, S);
else
@@ -202,7 +208,7 @@ private:
GRExprEngine &Eng, const ObjCMessageExpr *ME,
ExplodedNode *Pred, const GRState *state, void *tag) {
CheckerContext C(Dst, Builder, Eng, Pred, tag, ProgramPoint::PostStmtKind,
- ME, state);
+ 0, ME, state);
return EvalNilReceiver(C, ME);
}
@@ -210,7 +216,7 @@ private:
GRExprEngine &Eng, const CallExpr *CE,
ExplodedNode *Pred, void *tag) {
CheckerContext C(Dst, Builder, Eng, Pred, tag, ProgramPoint::PostStmtKind,
- CE);
+ 0, CE);
return EvalCallExpr(C, CE);
}
@@ -223,7 +229,7 @@ private:
bool isPrevisit) {
CheckerContext C(Dst, Builder, Eng, Pred, tag,
isPrevisit ? ProgramPoint::PreStmtKind :
- ProgramPoint::PostStmtKind, StoreE);
+ ProgramPoint::PostStmtKind, 0, StoreE);
assert(isPrevisit && "Only previsit supported for now.");
PreVisitBind(C, AssignE, StoreE, location, val);
}
@@ -238,7 +244,7 @@ private:
void *tag, bool isLoad) {
CheckerContext C(Dst, Builder, Eng, Pred, tag,
isLoad ? ProgramPoint::PreLoadKind :
- ProgramPoint::PreStoreKind, S, state);
+ ProgramPoint::PreStoreKind, 0, S, state);
VisitLocation(C, S, location);
}
@@ -246,8 +252,8 @@ private:
GRExprEngine &Eng, const Stmt *S, ExplodedNode *Pred,
SymbolReaper &SymReaper, void *tag) {
CheckerContext C(Dst, Builder, Eng, Pred, tag,
- ProgramPoint::PostPurgeDeadSymbolsKind, S);
- EvalDeadSymbols(C, S, SymReaper);
+ ProgramPoint::PostPurgeDeadSymbolsKind, 0, S);
+ EvalDeadSymbols(C, SymReaper);
}
public:
@@ -257,8 +263,7 @@ public:
virtual void VisitLocation(CheckerContext &C, const Stmt *S, SVal location) {}
virtual void PreVisitBind(CheckerContext &C, const Stmt *AssignE,
const Stmt *StoreE, SVal location, SVal val) {}
- virtual void EvalDeadSymbols(CheckerContext &C, const Stmt *S,
- SymbolReaper &SymReaper) {}
+ virtual void EvalDeadSymbols(CheckerContext &C, SymbolReaper &SymReaper) {}
virtual void EvalEndPath(GREndPathNodeBuilder &B, void *tag,
GRExprEngine &Eng) {}
@@ -278,6 +283,9 @@ public:
bool Assumption) {
return state;
}
+
+ virtual void VisitEndAnalysis(ExplodedGraph &G, BugReporter &B,
+ bool hasWorkRemaining) {}
};
} // end clang namespace
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerVisitor.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerVisitor.h
index 72f0ae1..e2ba89b 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerVisitor.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/CheckerVisitor.h
@@ -79,8 +79,13 @@ break;
}
}
- void PreVisitStmt(CheckerContext &C, const Stmt *S) {}
- void PostVisitStmt(CheckerContext &C, const Stmt *S) {}
+ void PreVisitStmt(CheckerContext &C, const Stmt *S) {
+ *C.respondsToCallback = false;
+ }
+
+ void PostVisitStmt(CheckerContext &C, const Stmt *S) {
+ *C.respondsToCallback = false;
+ }
void PreVisitCastExpr(CheckerContext &C, const CastExpr *E) {
static_cast<ImplClass*>(this)->PreVisitStmt(C, E);
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Environment.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Environment.h
index b9bbebc..2981731 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Environment.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Environment.h
@@ -86,7 +86,7 @@ public:
Environment BindExpr(Environment Env, const Stmt *S, SVal V,
bool Invalidate);
- Environment RemoveDeadBindings(Environment Env, const Stmt *S,
+ Environment RemoveDeadBindings(Environment Env,
SymbolReaper &SymReaper, const GRState *ST,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
};
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ExplodedGraph.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ExplodedGraph.h
index c09c893..c875a23 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ExplodedGraph.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/ExplodedGraph.h
@@ -36,7 +36,6 @@ namespace clang {
class GRState;
class CFG;
-class ASTContext;
class ExplodedGraph;
//===----------------------------------------------------------------------===//
@@ -240,9 +239,6 @@ protected:
/// and successor groups.
BumpVectorContext BVC;
- /// Ctx - The ASTContext used to "interpret" CodeDecl.
- ASTContext& Ctx;
-
/// NumNodes - The number of nodes in the graph.
unsigned NumNodes;
@@ -256,7 +252,7 @@ public:
bool* IsNew = 0);
ExplodedGraph* MakeEmptyGraph() const {
- return new ExplodedGraph(Ctx);
+ return new ExplodedGraph();
}
/// addRoot - Add an untyped node to the set of roots.
@@ -271,7 +267,7 @@ public:
return V;
}
- ExplodedGraph(ASTContext& ctx) : Ctx(ctx), NumNodes(0) {}
+ ExplodedGraph() : NumNodes(0) {}
~ExplodedGraph() {}
@@ -318,8 +314,6 @@ public:
llvm::BumpPtrAllocator & getAllocator() { return BVC.getAllocator(); }
BumpVectorContext &getNodeAllocator() { return BVC; }
- ASTContext& getContext() { return Ctx; }
-
typedef llvm::DenseMap<const ExplodedNode*, ExplodedNode*> NodeMap;
std::pair<ExplodedGraph*, InterExplodedGraphMap*>
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRCoreEngine.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRCoreEngine.h
index 2d8afee..7f101dc 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRCoreEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRCoreEngine.h
@@ -57,6 +57,10 @@ class GRCoreEngine {
/// These are used to record for key nodes in the ExplodedGraph the
/// number of times different CFGBlocks have been visited along a path.
GRBlockCounter::Factory BCounterFactory;
+
+ /// A flag that indicates whether paths were halted because
+ /// ProcessBlockEntrance returned false.
+ bool BlockAborted;
void GenerateNode(const ProgramPoint& Loc, const GRState* State,
ExplodedNode* Pred);
@@ -105,17 +109,19 @@ private:
public:
/// Construct a GRCoreEngine object to analyze the provided CFG using
/// a DFS exploration of the exploded graph.
- GRCoreEngine(ASTContext& ctx, GRSubEngine& subengine)
- : SubEngine(subengine), G(new ExplodedGraph(ctx)),
+ GRCoreEngine(GRSubEngine& subengine)
+ : SubEngine(subengine), G(new ExplodedGraph()),
WList(GRWorkList::MakeBFS()),
- BCounterFactory(G->getAllocator()) {}
+ BCounterFactory(G->getAllocator()),
+ BlockAborted(false) {}
/// Construct a GRCoreEngine object to analyze the provided CFG and to
/// use the provided worklist object to execute the worklist algorithm.
/// The GRCoreEngine object assumes ownership of 'wlist'.
- GRCoreEngine(ASTContext& ctx, GRWorkList* wlist, GRSubEngine& subengine)
- : SubEngine(subengine), G(new ExplodedGraph(ctx)), WList(wlist),
- BCounterFactory(G->getAllocator()) {}
+ GRCoreEngine(GRWorkList* wlist, GRSubEngine& subengine)
+ : SubEngine(subengine), G(new ExplodedGraph()), WList(wlist),
+ BCounterFactory(G->getAllocator()),
+ BlockAborted(false) {}
~GRCoreEngine() {
delete WList;
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRExprEngine.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRExprEngine.h
index ac407f6..8eaf3f4 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRExprEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRExprEngine.h
@@ -16,6 +16,7 @@
#ifndef LLVM_CLANG_ANALYSIS_GREXPRENGINE
#define LLVM_CLANG_ANALYSIS_GREXPRENGINE
+#include "clang/Checker/PathSensitive/AnalysisManager.h"
#include "clang/Checker/PathSensitive/GRSubEngine.h"
#include "clang/Checker/PathSensitive/GRCoreEngine.h"
#include "clang/Checker/PathSensitive/GRState.h"
@@ -75,14 +76,25 @@ class GRExprEngine : public GRSubEngine {
llvm::OwningPtr<GRSimpleAPICheck> BatchAuditor;
typedef llvm::DenseMap<void *, unsigned> CheckerMap;
- CheckerMap CheckerM;
-
typedef std::vector<std::pair<void *, Checker*> > CheckersOrdered;
+ typedef llvm::DenseMap<std::pair<unsigned, unsigned>, CheckersOrdered *>
+ CheckersOrderedCache;
+
+ /// A registration map from checker tag to the index into the
+ /// ordered checkers vector.
+ CheckerMap CheckerM;
+
+ /// An ordered vector of checkers that are called when evaluating
+ /// various expressions and statements.
CheckersOrdered Checkers;
- /// BR - The BugReporter associated with this engine. It is important that
- // this object be placed at the very end of member variables so that its
- // destructor is called before the rest of the GRExprEngine is destroyed.
+ /// A map used for caching the checkers that respond to the callback for
+ /// a particular statement and visitation order.
+ CheckersOrderedCache COCache;
+
+ /// The BugReporter associated with this engine. It is important that
+ /// this object be placed at the very end of member variables so that its
+ /// destructor is called before the rest of the GRExprEngine is destroyed.
GRBugReporter BR;
llvm::OwningPtr<GRTransferFuncs> TF;
@@ -106,7 +118,7 @@ public:
}
/// getContext - Return the ASTContext associated with this analysis.
- ASTContext& getContext() const { return G.getContext(); }
+ ASTContext& getContext() const { return AMgr.getASTContext(); }
AnalysisManager &getAnalysisManager() const { return AMgr; }
@@ -178,12 +190,15 @@ public:
/// nodes when the control reaches the end of a function.
void ProcessEndPath(GREndPathNodeBuilder& builder);
- // Generate the entry node of the callee.
+ /// Generate the entry node of the callee.
void ProcessCallEnter(GRCallEnterNodeBuilder &builder);
- // Generate the first post callsite node.
+ /// Generate the first post callsite node.
void ProcessCallExit(GRCallExitNodeBuilder &builder);
+ /// Called by GRCoreEngine when the analysis worklist has terminated.
+ void ProcessEndWorklist(bool hasWorkRemaining);
+
/// EvalAssume - Callback function invoked by the ConstraintManager when
/// making assumptions about state values.
const GRState *ProcessAssume(const GRState *state, SVal cond, bool assumption);
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRState.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRState.h
index 25ba1f8..67a2caf 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRState.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRState.h
@@ -211,16 +211,18 @@ public:
const GRState *bindLoc(SVal location, SVal V) const;
+ const GRState *bindDefault(SVal loc, SVal V) const;
+
const GRState *unbindLoc(Loc LV) const;
/// Get the lvalue for a variable reference.
- SVal getLValue(const VarDecl *D, const LocationContext *LC) const;
+ Loc getLValue(const VarDecl *D, const LocationContext *LC) const;
/// Get the lvalue for a StringLiteral.
- SVal getLValue(const StringLiteral *literal) const;
+ Loc getLValue(const StringLiteral *literal) const;
- SVal getLValue(const CompoundLiteralExpr *literal,
- const LocationContext *LC) const;
+ Loc getLValue(const CompoundLiteralExpr *literal,
+ const LocationContext *LC) const;
/// Get the lvalue for an ivar reference.
SVal getLValue(const ObjCIvarDecl *decl, SVal base) const;
@@ -446,7 +448,7 @@ public:
StoreManager& getStoreManager() { return *StoreMgr; }
ConstraintManager& getConstraintManager() { return *ConstraintMgr; }
- const GRState* RemoveDeadBindings(const GRState* St, Stmt* Loc,
+ const GRState* RemoveDeadBindings(const GRState* St,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper);
@@ -468,9 +470,6 @@ public:
const GRState* getPersistentState(GRState& Impl);
- bool isEqual(const GRState* state, const Expr* Ex, const llvm::APSInt& V);
- bool isEqual(const GRState* state, const Expr* Ex, uint64_t);
-
//==---------------------------------------------------------------------==//
// Generic Data Map methods.
//==---------------------------------------------------------------------==//
@@ -620,16 +619,22 @@ inline const GRState *GRState::bindLoc(SVal LV, SVal V) const {
return !isa<Loc>(LV) ? this : bindLoc(cast<Loc>(LV), V);
}
-inline SVal GRState::getLValue(const VarDecl* VD,
+inline const GRState *GRState::bindDefault(SVal loc, SVal V) const {
+ const MemRegion *R = cast<loc::MemRegionVal>(loc).getRegion();
+ Store new_store = getStateManager().StoreMgr->BindDefault(St, R, V);
+ return makeWithStore(new_store);
+}
+
+inline Loc GRState::getLValue(const VarDecl* VD,
const LocationContext *LC) const {
return getStateManager().StoreMgr->getLValueVar(VD, LC);
}
-inline SVal GRState::getLValue(const StringLiteral *literal) const {
+inline Loc GRState::getLValue(const StringLiteral *literal) const {
return getStateManager().StoreMgr->getLValueString(literal);
}
-inline SVal GRState::getLValue(const CompoundLiteralExpr *literal,
+inline Loc GRState::getLValue(const CompoundLiteralExpr *literal,
const LocationContext *LC) const {
return getStateManager().StoreMgr->getLValueCompoundLiteral(literal, LC);
}
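
  [Illustrative sketch, not part of the patch: with getLValue() now returning
  Loc instead of SVal, callers can bind directly without a Loc check. The
  function and parameter names below are invented for illustration.]

#include "clang/Checker/PathSensitive/GRState.h"

const clang::GRState *BindVarSketch(const clang::GRState *state,
                                    const clang::VarDecl *VD,
                                    const clang::LocationContext *LC,
                                    clang::SVal Init) {
  clang::Loc L = state->getLValue(VD, LC);  // statically a Loc after this patch
  return state->bindLoc(L, Init);           // no isa<Loc> test needed
}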
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSubEngine.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSubEngine.h
index d2e7457..90a41d7 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSubEngine.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRSubEngine.h
@@ -41,27 +41,27 @@ public:
virtual GRStateManager& getStateManager() = 0;
- /// ProcessStmt - Called by GRCoreEngine. Used to generate new successor
- /// nodes by processing the 'effects' of a block-level statement.
+ /// Called by GRCoreEngine. Used to generate new successor
+ /// nodes by processing the 'effects' of a block-level statement.
virtual void ProcessStmt(CFGElement E, GRStmtNodeBuilder& builder) = 0;
- /// ProcessBlockEntrance - Called by GRCoreEngine when start processing
- /// a CFGBlock. This method returns true if the analysis should continue
- /// exploring the given path, and false otherwise.
+ /// Called by GRCoreEngine when it starts processing
+ /// a CFGBlock. This method returns true if the analysis should continue
+ /// exploring the given path, and false otherwise.
virtual bool ProcessBlockEntrance(CFGBlock* B, const ExplodedNode *Pred,
GRBlockCounter BC) = 0;
- /// ProcessBranch - Called by GRCoreEngine. Used to generate successor
+ /// Called by GRCoreEngine. Used to generate successor
/// nodes by processing the 'effects' of a branch condition.
virtual void ProcessBranch(Stmt* Condition, Stmt* Term,
GRBranchNodeBuilder& builder) = 0;
- /// ProcessIndirectGoto - Called by GRCoreEngine. Used to generate successor
- /// nodes by processing the 'effects' of a computed goto jump.
+ /// Called by GRCoreEngine. Used to generate successor
+ /// nodes by processing the 'effects' of a computed goto jump.
virtual void ProcessIndirectGoto(GRIndirectGotoNodeBuilder& builder) = 0;
- /// ProcessSwitch - Called by GRCoreEngine. Used to generate successor
- /// nodes by processing the 'effects' of a switch statement.
+ /// Called by GRCoreEngine. Used to generate successor
+ /// nodes by processing the 'effects' of a switch statement.
virtual void ProcessSwitch(GRSwitchNodeBuilder& builder) = 0;
/// ProcessEndPath - Called by GRCoreEngine. Used to generate end-of-path
@@ -74,10 +74,14 @@ public:
// Generate the first post callsite node.
virtual void ProcessCallExit(GRCallExitNodeBuilder &builder) = 0;
- /// EvalAssume - Called by ConstraintManager. Used to call checker-specific
- /// logic for handling assumptions on symbolic values.
+ /// Called by ConstraintManager. Used to call checker-specific
+ /// logic for handling assumptions on symbolic values.
virtual const GRState* ProcessAssume(const GRState *state,
SVal cond, bool assumption) = 0;
+
+ /// Called by GRCoreEngine when the analysis worklist is either empty or the
+ /// maximum number of analysis steps has been reached.
+ virtual void ProcessEndWorklist(bool hasWorkRemaining) = 0;
};
}
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRTransferFuncs.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRTransferFuncs.h
index 13325ed..374f998 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRTransferFuncs.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/GRTransferFuncs.h
@@ -66,7 +66,7 @@ public:
GRExprEngine& Engine,
GRStmtNodeBuilder& Builder,
ExplodedNode* Pred,
- Stmt* S, const GRState* state,
+ const GRState* state,
SymbolReaper& SymReaper) {}
// Return statements.
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/MemRegion.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/MemRegion.h
index 2ab3b42..feb4b72 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/MemRegion.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/MemRegion.h
@@ -34,7 +34,9 @@ class MemRegionManager;
class MemSpaceRegion;
class LocationContext;
class StackFrameContext;
+class ValueManager;
class VarRegion;
+class CodeTextRegion;
//===----------------------------------------------------------------------===//
// Base region classes.
@@ -46,14 +48,17 @@ class MemRegion : public llvm::FoldingSetNode {
public:
enum Kind {
// Memory spaces.
- BEG_MEMSPACES,
- GenericMemSpaceRegionKind = BEG_MEMSPACES,
+ GenericMemSpaceRegionKind,
StackLocalsSpaceRegionKind,
StackArgumentsSpaceRegionKind,
HeapSpaceRegionKind,
UnknownSpaceRegionKind,
- GlobalsSpaceRegionKind,
- END_MEMSPACES = GlobalsSpaceRegionKind,
+ NonStaticGlobalSpaceRegionKind,
+ StaticGlobalSpaceRegionKind,
+ BEG_GLOBAL_MEMSPACES = NonStaticGlobalSpaceRegionKind,
+ END_GLOBAL_MEMSPACES = StaticGlobalSpaceRegionKind,
+ BEG_MEMSPACES = GenericMemSpaceRegionKind,
+ END_MEMSPACES = StaticGlobalSpaceRegionKind,
// Untyped regions.
SymbolicRegionKind,
AllocaRegionKind,
@@ -146,13 +151,48 @@ public:
};
class GlobalsSpaceRegion : public MemSpaceRegion {
+protected:
+ GlobalsSpaceRegion(MemRegionManager *mgr, Kind k)
+ : MemSpaceRegion(mgr, k) {}
+public:
+ static bool classof(const MemRegion *R) {
+ Kind k = R->getKind();
+ return k >= BEG_GLOBAL_MEMSPACES && k <= END_GLOBAL_MEMSPACES;
+ }
+};
+
+class StaticGlobalSpaceRegion : public GlobalsSpaceRegion {
friend class MemRegionManager;
- GlobalsSpaceRegion(MemRegionManager *mgr)
- : MemSpaceRegion(mgr, GlobalsSpaceRegionKind) {}
+ const CodeTextRegion *CR;
+
+ StaticGlobalSpaceRegion(MemRegionManager *mgr, const CodeTextRegion *cr)
+ : GlobalsSpaceRegion(mgr, StaticGlobalSpaceRegionKind), CR(cr) {}
+
+public:
+ void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ void dumpToStream(llvm::raw_ostream& os) const;
+
+ const CodeTextRegion *getCodeRegion() const { return CR; }
+
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == StaticGlobalSpaceRegionKind;
+ }
+};
+
+class NonStaticGlobalSpaceRegion : public GlobalsSpaceRegion {
+ friend class MemRegionManager;
+
+ NonStaticGlobalSpaceRegion(MemRegionManager *mgr)
+ : GlobalsSpaceRegion(mgr, NonStaticGlobalSpaceRegionKind) {}
+
public:
+
+ void dumpToStream(llvm::raw_ostream& os) const;
+
static bool classof(const MemRegion *R) {
- return R->getKind() == GlobalsSpaceRegionKind;
+ return R->getKind() == NonStaticGlobalSpaceRegionKind;
}
};
@@ -232,6 +272,11 @@ public:
return superRegion;
}
+ /// getExtent - Returns the size of the region in bytes.
+ virtual DefinedOrUnknownSVal getExtent(ValueManager& ValMgr) const {
+ return UnknownVal();
+ }
+
MemRegionManager* getMemRegionManager() const;
bool isSubRegionOf(const MemRegion* R) const;
@@ -288,6 +333,8 @@ public:
bool isBoundable() const { return true; }
+ DefinedOrUnknownSVal getExtent(ValueManager& ValMgr) const;
+
void Profile(llvm::FoldingSetNodeID& ID) const;
static void ProfileRegion(llvm::FoldingSetNodeID& ID, const Expr* Ex,
@@ -502,6 +549,8 @@ public:
bool isBoundable() const { return true; }
+ DefinedOrUnknownSVal getExtent(ValueManager& ValMgr) const;
+
void Profile(llvm::FoldingSetNodeID& ID) const;
static void ProfileRegion(llvm::FoldingSetNodeID& ID,
@@ -536,6 +585,8 @@ public:
return Str->getType();
}
+ DefinedOrUnknownSVal getExtent(ValueManager& ValMgr) const;
+
bool isBoundable() const { return false; }
void Profile(llvm::FoldingSetNodeID& ID) const {
@@ -595,6 +646,8 @@ public:
const Decl* getDecl() const { return D; }
void Profile(llvm::FoldingSetNodeID& ID) const;
+ DefinedOrUnknownSVal getExtent(ValueManager& ValMgr) const;
+
static bool classof(const MemRegion* R) {
unsigned k = R->getKind();
return k >= BEG_DECL_REGIONS && k <= END_DECL_REGIONS;
@@ -679,6 +732,8 @@ public:
return C.getCanonicalType(getDecl()->getType());
}
+ DefinedOrUnknownSVal getExtent(ValueManager& ValMgr) const;
+
static void ProfileRegion(llvm::FoldingSetNodeID& ID, const FieldDecl* FD,
const MemRegion* superRegion) {
DeclRegion::ProfileRegion(ID, FD, superRegion, FieldRegionKind);
@@ -793,12 +848,14 @@ class MemRegionManager {
llvm::BumpPtrAllocator& A;
llvm::FoldingSet<MemRegion> Regions;
- GlobalsSpaceRegion *globals;
+ NonStaticGlobalSpaceRegion *globals;
llvm::DenseMap<const StackFrameContext *, StackLocalsSpaceRegion *>
StackLocalsSpaceRegions;
llvm::DenseMap<const StackFrameContext *, StackArgumentsSpaceRegion *>
StackArgumentsSpaceRegions;
+ llvm::DenseMap<const CodeTextRegion *, StaticGlobalSpaceRegion *>
+ StaticsGlobalSpaceRegions;
HeapSpaceRegion *heap;
UnknownSpaceRegion *unknown;
@@ -825,8 +882,8 @@ public:
getStackArgumentsRegion(const StackFrameContext *STC);
/// getGlobalsRegion - Retrieve the memory region associated with
- /// all global variables.
- const GlobalsSpaceRegion *getGlobalsRegion();
+ /// global variables.
+ const GlobalsSpaceRegion *getGlobalsRegion(const CodeTextRegion *R = 0);
/// getHeapRegion - Retrieve the memory region associated with the
/// generic "heap".
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SVals.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SVals.h
index 040db83..55fd3ea 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SVals.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SVals.h
@@ -98,6 +98,8 @@ public:
bool isConstant() const;
+ bool isConstant(int I) const;
+
bool isZeroConstant() const;
/// hasConjuredSymbol - If this SVal wraps a conjured symbol, return true;
@@ -109,7 +111,7 @@ public:
const FunctionDecl* getAsFunctionDecl() const;
/// getAsLocSymbol - If this SVal is a location (subclasses Loc) and
- /// wraps a symbol, return that SymbolRef. Otherwise return a SymbolData*
+ /// wraps a symbol, return that SymbolRef. Otherwise return NULL.
SymbolRef getAsLocSymbol() const;
/// Get the symbol in the SVal or its base region.
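
  [Illustrative sketch, not part of the patch: the corrected doc comment has
  a practical consequence, namely that the result must be null-checked.]

#include "clang/Checker/PathSensitive/SVals.h"

bool HasLocSymbolSketch(clang::SVal V) {
  // getAsLocSymbol() returns NULL when V is not a symbolic location.
  if (clang::SymbolRef Sym = V.getAsLocSymbol()) {
    (void)Sym;  // safe to use here
    return true;
  }
  return false;
}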
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SValuator.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SValuator.h
index 9beb8cb..9192ca7 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SValuator.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SValuator.h
@@ -47,11 +47,15 @@ public:
virtual SVal EvalBinOpNN(const GRState *state, BinaryOperator::Opcode Op,
NonLoc lhs, NonLoc rhs, QualType resultTy) = 0;
- virtual SVal EvalBinOpLL(BinaryOperator::Opcode Op, Loc lhs, Loc rhs,
- QualType resultTy) = 0;
+ virtual SVal EvalBinOpLL(const GRState *state, BinaryOperator::Opcode Op,
+ Loc lhs, Loc rhs, QualType resultTy) = 0;
virtual SVal EvalBinOpLN(const GRState *state, BinaryOperator::Opcode Op,
Loc lhs, NonLoc rhs, QualType resultTy) = 0;
+
+ /// getKnownValue - Evaluates a given SVal. If the SVal has only one possible
+ /// (integer) value, that value is returned. Otherwise, returns NULL.
+ virtual const llvm::APSInt *getKnownValue(const GRState *state, SVal V) = 0;
SVal EvalBinOp(const GRState *ST, BinaryOperator::Opcode Op,
SVal L, SVal R, QualType T);
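
  [Illustrative sketch, not part of the patch: the getKnownValue() contract
  in practice; a non-NULL result means the state pins V to exactly one
  integer. Names are invented for illustration.]

#include "clang/Checker/PathSensitive/SValuator.h"
#include "llvm/ADT/APSInt.h"

bool IsKnownZeroSketch(clang::SValuator &SVator, const clang::GRState *state,
                       clang::SVal V) {
  if (const llvm::APSInt *I = SVator.getKnownValue(state, V))
    return *I == 0;  // uniquely constrained; compare the concrete value
  return false;      // unknown or multi-valued
}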
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Store.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Store.h
index f3155b9..7a60ebb 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Store.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/Store.h
@@ -64,6 +64,10 @@ public:
/// to the location given for \c loc.
virtual Store Bind(Store store, Loc loc, SVal val) = 0;
+ virtual Store BindDefault(Store store, const MemRegion *R, SVal V) {
+ return store;
+ }
+
virtual Store Remove(Store St, Loc L) = 0;
/// BindCompoundLiteral - Return the store that has the bindings currently
@@ -87,16 +91,16 @@ public:
// caller's responsibility to 'delete' the returned map.
virtual SubRegionMap *getSubRegionMap(Store store) = 0;
- virtual SVal getLValueVar(const VarDecl *VD, const LocationContext *LC) {
+ virtual Loc getLValueVar(const VarDecl *VD, const LocationContext *LC) {
return ValMgr.makeLoc(MRMgr.getVarRegion(VD, LC));
}
- virtual SVal getLValueString(const StringLiteral* S) {
+ virtual Loc getLValueString(const StringLiteral* S) {
return ValMgr.makeLoc(MRMgr.getStringRegion(S));
}
- SVal getLValueCompoundLiteral(const CompoundLiteralExpr* CL,
- const LocationContext *LC) {
+ Loc getLValueCompoundLiteral(const CompoundLiteralExpr* CL,
+ const LocationContext *LC) {
return loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL, LC));
}
@@ -110,7 +114,8 @@ public:
virtual SVal getLValueElement(QualType elementType, SVal offset, SVal Base);
- // FIXME: Make out-of-line.
+ // FIXME: This should soon be eliminated altogether; clients should deal with
+ // region extents directly.
virtual DefinedOrUnknownSVal getSizeInElements(const GRState *state,
const MemRegion *region,
QualType EleTy) {
@@ -144,7 +149,7 @@ public:
return UnknownVal();
}
- virtual const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
+ virtual const GRState *RemoveDeadBindings(GRState &state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots) = 0;
@@ -164,18 +169,8 @@ public:
const MemRegion * const *Begin,
const MemRegion * const *End,
const Expr *E, unsigned Count,
- InvalidatedSymbols *IS);
-
- // FIXME: Make out-of-line.
- virtual const GRState *setExtent(const GRState *state,
- const MemRegion *region, SVal extent) {
- return state;
- }
-
- virtual llvm::Optional<SVal> getExtent(const GRState *state,
- const MemRegion *R) {
- return llvm::Optional<SVal>();
- }
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals) = 0;
/// EnterStackFrame - Let the StoreManager to do something when execution
/// engine is about to execute into a callee.
diff --git a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SymbolManager.h b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SymbolManager.h
index dea877c..ffbd289 100644
--- a/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SymbolManager.h
+++ b/contrib/llvm/tools/clang/include/clang/Checker/PathSensitive/SymbolManager.h
@@ -31,6 +31,7 @@ namespace clang {
class ASTContext;
class BasicValueFactory;
class MemRegion;
+ class SubRegion;
class TypedRegion;
class VarRegion;
class StackFrameContext;
@@ -38,7 +39,7 @@ namespace clang {
class SymExpr : public llvm::FoldingSetNode {
public:
enum Kind { BEGIN_SYMBOLS,
- RegionValueKind, ConjuredKind, DerivedKind,
+ RegionValueKind, ConjuredKind, DerivedKind, ExtentKind,
END_SYMBOLS,
SymIntKind, SymSymKind };
private:
@@ -189,6 +190,34 @@ public:
}
};
+class SymbolExtent : public SymbolData {
+ const SubRegion *R;
+
+public:
+ SymbolExtent(SymbolID sym, const SubRegion *r)
+ : SymbolData(ExtentKind, sym), R(r) {}
+
+ const SubRegion *getRegion() const { return R; }
+
+ QualType getType(ASTContext&) const;
+
+ void dumpToStream(llvm::raw_ostream &os) const;
+
+ static void Profile(llvm::FoldingSetNodeID& profile, const SubRegion *R) {
+ profile.AddInteger((unsigned) ExtentKind);
+ profile.AddPointer(R);
+ }
+
+ virtual void Profile(llvm::FoldingSetNodeID& profile) {
+ Profile(profile, R);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr* SE) {
+ return SE->getKind() == ExtentKind;
+ }
+};
+
// SymIntExpr - Represents symbolic expression like 'x' + 3.
class SymIntExpr : public SymExpr {
const SymExpr *LHS;
@@ -305,6 +334,8 @@ public:
const SymbolDerived *getDerivedSymbol(SymbolRef parentSymbol,
const TypedRegion *R);
+ const SymbolExtent *getExtentSymbol(const SubRegion *R);
+
const SymIntExpr *getSymIntExpr(const SymExpr *lhs, BinaryOperator::Opcode op,
const llvm::APSInt& rhs, QualType t);
@@ -330,21 +361,23 @@ class SymbolReaper {
SetTy TheLiving;
SetTy TheDead;
const LocationContext *LCtx;
+ const Stmt *Loc;
SymbolManager& SymMgr;
public:
- SymbolReaper(const LocationContext *ctx, SymbolManager& symmgr)
- : LCtx(ctx), SymMgr(symmgr) {}
+ SymbolReaper(const LocationContext *ctx, const Stmt *s, SymbolManager& symmgr)
+ : LCtx(ctx), Loc(s), SymMgr(symmgr) {}
~SymbolReaper() {}
const LocationContext *getLocationContext() const { return LCtx; }
+ const Stmt *getCurrentStatement() const { return Loc; }
bool isLive(SymbolRef sym);
- bool isLive(const Stmt* Loc, const Stmt* ExprVal) const;
+ bool isLive(const Stmt *ExprVal) const;
- bool isLive(const Stmt* Loc, const VarRegion *VR) const;
+ bool isLive(const VarRegion *VR) const;
void markLive(SymbolRef sym);
bool maybeDead(SymbolRef sym);
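
  [Illustrative sketch, not part of the patch: how the new extent symbols are
  uniqued through the FoldingSet Profile() shown above.]

#include "clang/Checker/PathSensitive/SymbolManager.h"
#include <cassert>

void ExtentUniquingSketch(clang::SymbolManager &SymMgr,
                          const clang::SubRegion *R) {
  // Profile() hashes (ExtentKind, R), so repeated requests for the same
  // region return the same canonical SymbolExtent.
  const clang::SymbolExtent *E1 = SymMgr.getExtentSymbol(R);
  const clang::SymbolExtent *E2 = SymMgr.getExtentSymbol(R);
  assert(E1 == E2 && "extent symbols are uniqued per region");
}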
diff --git a/contrib/llvm/tools/clang/include/clang/CodeGen/BackendUtil.h b/contrib/llvm/tools/clang/include/clang/CodeGen/BackendUtil.h
new file mode 100644
index 0000000..abcef81
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/CodeGen/BackendUtil.h
@@ -0,0 +1,37 @@
+//===--- BackendUtil.h - LLVM Backend Utilities -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CODEGEN_BACKEND_UTIL_H
+#define LLVM_CLANG_CODEGEN_BACKEND_UTIL_H
+
+namespace llvm {
+ class Module;
+ class raw_ostream;
+}
+
+namespace clang {
+ class Diagnostic;
+ class CodeGenOptions;
+ class TargetOptions;
+
+ enum BackendAction {
+ Backend_EmitAssembly, ///< Emit native assembly files
+ Backend_EmitBC, ///< Emit LLVM bitcode files
+ Backend_EmitLL, ///< Emit human-readable LLVM assembly
+ Backend_EmitNothing, ///< Don't emit anything (benchmarking mode)
+ Backend_EmitMCNull, ///< Run CodeGen, but don't emit anything
+ Backend_EmitObj ///< Emit native object files
+ };
+
+ void EmitBackendOutput(Diagnostic &Diags, const CodeGenOptions &CGOpts,
+ const TargetOptions &TOpts, llvm::Module *M,
+ BackendAction Action, llvm::raw_ostream *OS);
+}
+
+#endif
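
  [Illustrative sketch, not part of the patch: a minimal caller of the new
  entry point; Diags, CGOpts, TOpts, the module, and the output stream are
  assumed to have been built elsewhere.]

#include "clang/CodeGen/BackendUtil.h"
#include "llvm/Support/raw_ostream.h"

void EmitBitcodeSketch(clang::Diagnostic &Diags,
                       const clang::CodeGenOptions &CGOpts,
                       const clang::TargetOptions &TOpts,
                       llvm::Module *M, llvm::raw_ostream &OS) {
  // Backend_EmitBC writes LLVM bitcode; Backend_EmitNothing skips output
  // entirely and is meant for benchmarking, per the enum comments above.
  clang::EmitBackendOutput(Diags, CGOpts, TOpts, M,
                           clang::Backend_EmitBC, &OS);
}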
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenAction.h b/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
index dfc117a..cecfcda 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenAction.h
+++ b/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
@@ -7,6 +7,9 @@
//
//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_CODEGEN_CODE_GEN_ACTION_H
+#define LLVM_CLANG_CODEGEN_CODE_GEN_ACTION_H
+
#include "clang/Frontend/FrontendAction.h"
#include "llvm/ADT/OwningPtr.h"
@@ -24,9 +27,13 @@ private:
protected:
CodeGenAction(unsigned _Act);
+ virtual bool hasIRSupport() const;
+
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
llvm::StringRef InFile);
+ virtual void ExecuteAction();
+
virtual void EndSourceFileAction();
public:
@@ -68,3 +75,5 @@ public:
};
}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h b/contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h
index 2a3aa6a..c45ad08 100644
--- a/contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h
+++ b/contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h
@@ -33,6 +33,9 @@ namespace clang {
virtual llvm::Module* ReleaseModule() = 0;
};
+ /// CreateLLVMCodeGen - Create a CodeGenerator instance.
+ /// It is the responsibility of the caller to call delete on
+ /// the allocated CodeGenerator instance.
CodeGenerator *CreateLLVMCodeGen(Diagnostic &Diags,
const std::string &ModuleName,
const CodeGenOptions &CGO,
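
  [Illustrative sketch, not part of the patch: honoring the ownership rule in
  the new comment. The hunk truncates the signature after CGO, so the
  trailing llvm::LLVMContext parameter is an assumption.]

#include "clang/CodeGen/ModuleBuilder.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/LLVMContext.h"

void BuildModuleSketch(clang::Diagnostic &Diags,
                       const clang::CodeGenOptions &CGO,
                       llvm::LLVMContext &Ctx) {
  // OwningPtr discharges the caller's delete obligation on scope exit.
  llvm::OwningPtr<clang::CodeGenerator> Gen(
      clang::CreateLLVMCodeGen(Diags, "sketch_module", CGO, Ctx));
  // ... hand top-level declarations to Gen here ...
}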
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Action.h b/contrib/llvm/tools/clang/include/clang/Driver/Action.h
index ab3162a..4b45c98 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Action.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Action.h
@@ -51,9 +51,10 @@ public:
AssembleJobClass,
LinkJobClass,
LipoJobClass,
+ DsymutilJobClass,
JobClassFirst=PreprocessJobClass,
- JobClassLast=LipoJobClass
+ JobClassLast=DsymutilJobClass
};
static const char *getClassName(ActionClass AC);
@@ -211,6 +212,16 @@ public:
static bool classof(const LipoJobAction *) { return true; }
};
+class DsymutilJobAction : public JobAction {
+public:
+ DsymutilJobAction(ActionList &Inputs, types::ID Type);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == DsymutilJobClass;
+ }
+ static bool classof(const DsymutilJobAction *) { return true; }
+};
+
} // end namespace driver
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Arg.h b/contrib/llvm/tools/clang/include/clang/Driver/Arg.h
index ebf40d4..a52789e 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Arg.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Arg.h
@@ -10,14 +10,9 @@
#ifndef CLANG_DRIVER_ARG_H_
#define CLANG_DRIVER_ARG_H_
-#include "llvm/Support/Casting.h"
-using llvm::isa;
-using llvm::cast;
-using llvm::cast_or_null;
-using llvm::dyn_cast;
-using llvm::dyn_cast_or_null;
-
#include "Util.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include <vector>
#include <string>
@@ -34,19 +29,10 @@ namespace driver {
/// ArgList to provide efficient iteration over all instances of a
/// particular option.
class Arg {
- public:
- enum ArgClass {
- FlagClass = 0,
- PositionalClass,
- JoinedClass,
- SeparateClass,
- CommaJoinedClass,
- JoinedAndSeparateClass
- };
+ Arg(const Arg &); // DO NOT IMPLEMENT
+ void operator=(const Arg &); // DO NOT IMPLEMENT
private:
- ArgClass Kind;
-
/// The option this argument is an instance of.
const Option *Opt;
@@ -58,20 +44,24 @@ namespace driver {
/// ArgList.
unsigned Index;
- /// Flag indicating whether this argument was used to effect
- /// compilation; used for generating "argument unused"
- /// diagnostics.
- mutable bool Claimed;
+ /// Was this argument used to effect compilation; used for generating
+ /// "argument unused" diagnostics.
+ mutable unsigned Claimed : 1;
+
+ /// Does this argument own its values?
+ mutable unsigned OwnsValues : 1;
- protected:
- Arg(ArgClass Kind, const Option *Opt, unsigned Index,
- const Arg *BaseArg = 0);
+ /// The argument values, as C strings.
+ llvm::SmallVector<const char *, 2> Values;
public:
- Arg(const Arg &);
- virtual ~Arg();
+ Arg(const Option *Opt, unsigned Index, const Arg *BaseArg = 0);
+ Arg(const Option *Opt, unsigned Index,
+ const char *Value0, const Arg *BaseArg = 0);
+ Arg(const Option *Opt, unsigned Index,
+ const char *Value0, const char *Value1, const Arg *BaseArg = 0);
+ ~Arg();
- ArgClass getKind() const { return Kind; }
const Option &getOption() const { return *Opt; }
unsigned getIndex() const { return Index; }
@@ -85,19 +75,32 @@ namespace driver {
BaseArg = _BaseArg;
}
+ bool getOwnsValues() const { return OwnsValues; }
+ void setOwnsValues(bool Value) const { OwnsValues = Value; }
+
bool isClaimed() const { return getBaseArg().Claimed; }
/// claim - Set the Arg claimed bit.
-
- // FIXME: We need to deal with derived arguments and set the bit
- // in the original argument; not the derived one.
void claim() const { getBaseArg().Claimed = true; }
- virtual unsigned getNumValues() const = 0;
- virtual const char *getValue(const ArgList &Args, unsigned N=0) const = 0;
+ unsigned getNumValues() const { return Values.size(); }
+ const char *getValue(const ArgList &Args, unsigned N=0) const {
+ return Values[N];
+ }
+
+ llvm::SmallVectorImpl<const char*> &getValues() {
+ return Values;
+ }
+
+ bool containsValue(llvm::StringRef Value) const {
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i)
+ if (Values[i] == Value)
+ return true;
+ return false;
+ }
/// render - Append the argument onto the given array as strings.
- virtual void render(const ArgList &Args, ArgStringList &Output) const = 0;
+ void render(const ArgList &Args, ArgStringList &Output) const;
/// renderAsInput - Append the argument, render as an input, onto
/// the given array as strings. The distinction is that some
@@ -114,116 +117,6 @@ namespace driver {
std::string getAsString(const ArgList &Args) const;
};
- /// FlagArg - An argument with no value.
- class FlagArg : public Arg {
- public:
- FlagArg(const Option *Opt, unsigned Index, const Arg *BaseArg = 0);
-
- virtual void render(const ArgList &Args, ArgStringList &Output) const;
-
- virtual unsigned getNumValues() const { return 0; }
- virtual const char *getValue(const ArgList &Args, unsigned N=0) const;
-
- static bool classof(const Arg *A) {
- return A->getKind() == Arg::FlagClass;
- }
- static bool classof(const FlagArg *) { return true; }
- };
-
- /// PositionalArg - A simple positional argument.
- class PositionalArg : public Arg {
- public:
- PositionalArg(const Option *Opt, unsigned Index, const Arg *BaseArg = 0);
-
- virtual void render(const ArgList &Args, ArgStringList &Output) const;
-
- virtual unsigned getNumValues() const { return 1; }
- virtual const char *getValue(const ArgList &Args, unsigned N=0) const;
-
- static bool classof(const Arg *A) {
- return A->getKind() == Arg::PositionalClass;
- }
- static bool classof(const PositionalArg *) { return true; }
- };
-
- /// JoinedArg - A single value argument where the value is joined
- /// (suffixed) to the option.
- class JoinedArg : public Arg {
- public:
- JoinedArg(const Option *Opt, unsigned Index, const Arg *BaseArg = 0);
-
- virtual void render(const ArgList &Args, ArgStringList &Output) const;
-
- virtual unsigned getNumValues() const { return 1; }
- virtual const char *getValue(const ArgList &Args, unsigned N=0) const;
-
- static bool classof(const Arg *A) {
- return A->getKind() == Arg::JoinedClass;
- }
- static bool classof(const JoinedArg *) { return true; }
- };
-
- /// SeparateArg - An argument where one or more values follow the
- /// option specifier immediately in the argument vector.
- class SeparateArg : public Arg {
- unsigned NumValues;
-
- public:
- SeparateArg(const Option *Opt, unsigned Index, unsigned NumValues,
- const Arg *BaseArg = 0);
-
- virtual void render(const ArgList &Args, ArgStringList &Output) const;
-
- virtual unsigned getNumValues() const { return NumValues; }
- virtual const char *getValue(const ArgList &Args, unsigned N=0) const;
-
- static bool classof(const Arg *A) {
- return A->getKind() == Arg::SeparateClass;
- }
- static bool classof(const SeparateArg *) { return true; }
- };
-
- /// CommaJoinedArg - An argument with multiple values joined by
- /// commas and joined (suffixed) to the option specifier.
- ///
- /// The key point of this arg is that it renders its values into
- /// separate arguments, which allows it to be used as a generic
- /// mechanism for passing arguments through to tools.
- class CommaJoinedArg : public Arg {
- std::vector<std::string> Values;
-
- public:
- CommaJoinedArg(const Option *Opt, unsigned Index, const char *Str,
- const Arg *BaseArg = 0);
-
- virtual void render(const ArgList &Args, ArgStringList &Output) const;
-
- virtual unsigned getNumValues() const { return Values.size(); }
- virtual const char *getValue(const ArgList &Args, unsigned N=0) const;
-
- static bool classof(const Arg *A) {
- return A->getKind() == Arg::CommaJoinedClass;
- }
- static bool classof(const CommaJoinedArg *) { return true; }
- };
-
- /// JoinedAndSeparateArg - An argument with both joined and separate
- /// values.
- class JoinedAndSeparateArg : public Arg {
- public:
- JoinedAndSeparateArg(const Option *Opt, unsigned Index,
- const Arg *BaseArg = 0);
-
- virtual void render(const ArgList &Args, ArgStringList &Output) const;
-
- virtual unsigned getNumValues() const { return 2; }
- virtual const char *getValue(const ArgList &Args, unsigned N=0) const;
-
- static bool classof(const Arg *A) {
- return A->getKind() == Arg::JoinedAndSeparateClass;
- }
- static bool classof(const JoinedAndSeparateArg *) { return true; }
- };
} // end namespace driver
} // end namespace clang
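
  [Illustrative sketch, not part of the patch: for contrast with the deleted
  hierarchy, a hypothetical use of the single value-carrying Arg that
  replaces it.]

#include "clang/Driver/Arg.h"

void ArgValueSketch(const clang::driver::Option *Opt) {
  // One class now covers flag/joined/separate forms; values are C strings
  // stored inline in a SmallVector instead of in per-kind subclasses.
  clang::driver::Arg A(Opt, /*Index=*/0, "foo");
  if (A.containsValue("foo")) {
    unsigned NumVals = A.getNumValues();  // non-virtual now; == 1 here
    (void)NumVals;
  }
}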
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h b/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
index 7a14ae8..257b653 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
@@ -52,9 +52,9 @@ namespace driver {
void SkipToNextArg();
public:
- typedef const Arg* value_type;
- typedef const Arg* reference;
- typedef const Arg* pointer;
+ typedef Arg * const * value_type;
+ typedef Arg * const & reference;
+ typedef Arg * const * pointer;
typedef std::forward_iterator_tag iterator_category;
typedef std::ptrdiff_t difference_type;
@@ -67,7 +67,7 @@ namespace driver {
operator const Arg*() { return *Current; }
reference operator*() const { return *Current; }
- pointer operator->() const { return *Current; }
+ pointer operator->() const { return Current; }
arg_iterator &operator++() {
++Current;
@@ -96,6 +96,10 @@ namespace driver {
/// check for the presence of Arg instances for a particular Option
/// and to iterate over groups of arguments.
class ArgList {
+ private:
+ ArgList(const ArgList &); // DO NOT IMPLEMENT
+ void operator=(const ArgList &); // DO NOT IMPLEMENT
+
public:
typedef llvm::SmallVector<Arg*, 16> arglist_type;
typedef arglist_type::iterator iterator;
@@ -104,11 +108,11 @@ namespace driver {
typedef arglist_type::const_reverse_iterator const_reverse_iterator;
private:
- /// The full list of arguments.
- arglist_type &Args;
+ /// The internal list of arguments.
+ arglist_type Args;
protected:
- ArgList(arglist_type &Args);
+ ArgList();
public:
virtual ~ArgList();
@@ -175,10 +179,17 @@ namespace driver {
Arg *getLastArg(OptSpecifier Id) const;
Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1) const;
Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3) const;
/// getArgString - Return the input argument string at \arg Index.
virtual const char *getArgString(unsigned Index) const = 0;
+ /// getNumInputArgStrings - Return the number of original argument strings,
+ /// which are guaranteed to be the first strings in the argument string
+ /// list.
+ virtual unsigned getNumInputArgStrings() const = 0;
+
/// @}
/// @name Argument Lookup Utilities
/// @{
@@ -249,14 +260,16 @@ namespace driver {
}
const char *MakeArgString(const llvm::Twine &Str) const;
+ /// \brief Create an arg string for (\arg LHS + \arg RHS), reusing the
+ /// string at \arg Index if possible.
+ const char *GetOrMakeJoinedArgString(unsigned Index, llvm::StringRef LHS,
+ llvm::StringRef RHS) const;
+
/// @}
};
class InputArgList : public ArgList {
private:
- /// The internal list of arguments.
- arglist_type ActualArgs;
-
/// List of argument strings used by the contained Args.
///
/// This is mutable since we treat the ArgList as being the list
@@ -276,16 +289,15 @@ namespace driver {
public:
InputArgList(const char **ArgBegin, const char **ArgEnd);
- InputArgList(const ArgList &);
~InputArgList();
virtual const char *getArgString(unsigned Index) const {
return ArgStrings[Index];
}
- /// getNumInputArgStrings - Return the number of original input
- /// argument strings.
- unsigned getNumInputArgStrings() const { return NumInputArgStrings; }
+ virtual unsigned getNumInputArgStrings() const {
+ return NumInputArgStrings;
+ }
/// @name Arg Synthesis
/// @{
@@ -303,34 +315,71 @@ namespace driver {
/// DerivedArgList - An ordered collection of driver arguments,
/// whose storage may be in another argument list.
class DerivedArgList : public ArgList {
- InputArgList &BaseArgs;
-
- /// The internal list of arguments.
- arglist_type ActualArgs;
+ const InputArgList &BaseArgs;
/// The list of arguments we synthesized.
mutable arglist_type SynthesizedArgs;
- /// Is this only a proxy for the base ArgList?
- bool OnlyProxy;
-
public:
/// Construct a new derived arg list from \arg BaseArgs.
- ///
- /// \param OnlyProxy - If true, this is only a proxy for the base
- /// list (to adapt the type), and it's Args list is unused.
- DerivedArgList(InputArgList &BaseArgs, bool OnlyProxy);
+ DerivedArgList(const InputArgList &BaseArgs);
~DerivedArgList();
virtual const char *getArgString(unsigned Index) const {
return BaseArgs.getArgString(Index);
}
+ virtual unsigned getNumInputArgStrings() const {
+ return BaseArgs.getNumInputArgStrings();
+ }
+
+ const InputArgList &getBaseArgs() const {
+ return BaseArgs;
+ }
+
/// @name Arg Synthesis
/// @{
+ /// AddSynthesizedArg - Add an argument to the list of synthesized arguments
+ /// (to be freed).
+ void AddSynthesizedArg(Arg *A) {
+ SynthesizedArgs.push_back(A);
+ }
+
virtual const char *MakeArgString(llvm::StringRef Str) const;
+ /// AddFlagArg - Construct a new FlagArg for the given option \arg Id and
+ /// append it to the argument list.
+ void AddFlagArg(const Arg *BaseArg, const Option *Opt) {
+ append(MakeFlagArg(BaseArg, Opt));
+ }
+
+ /// AddPositionalArg - Construct a new Positional arg for the given option
+ /// \arg Id, with the provided \arg Value and append it to the argument
+ /// list.
+ void AddPositionalArg(const Arg *BaseArg, const Option *Opt,
+ llvm::StringRef Value) {
+ append(MakePositionalArg(BaseArg, Opt, Value));
+ }
+
+
+ /// AddSeparateArg - Construct a new Separate arg for the given option
+ /// \arg Id, with the provided \arg Value and append it to the argument
+ /// list.
+ void AddSeparateArg(const Arg *BaseArg, const Option *Opt,
+ llvm::StringRef Value) {
+ append(MakeSeparateArg(BaseArg, Opt, Value));
+ }
+
+
+ /// AddJoinedArg - Construct a new Joined arg for the given option \arg
+ /// Id, with the provided \arg Value and append it to the argument list.
+ void AddJoinedArg(const Arg *BaseArg, const Option *Opt,
+ llvm::StringRef Value) {
+ append(MakeJoinedArg(BaseArg, Opt, Value));
+ }
+
+
/// MakeFlagArg - Construct a new FlagArg for the given option
/// \arg Id.
Arg *MakeFlagArg(const Arg *BaseArg, const Option *Opt) const;
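
  [Illustrative sketch, not part of the patch: the new synthesize-and-append
  helpers on DerivedArgList; the option used is hypothetical.]

#include "clang/Driver/ArgList.h"

void TranslateSketch(const clang::driver::InputArgList &BaseArgs,
                     const clang::driver::Option *SomeJoinedOpt) {
  // DerivedArgList no longer proxies the base list; it owns its own Args
  // and records synthesized ones so they can be freed.
  clang::driver::DerivedArgList DAL(BaseArgs);
  DAL.AddJoinedArg(/*BaseArg=*/0, SomeJoinedOpt, "value");
}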
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
index af52618..e9e5dd4 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
@@ -18,6 +18,8 @@ include "OptParser.td"
// Target Options
//===----------------------------------------------------------------------===//
+def cxx_abi : Separate<"-cxx-abi">,
+ HelpText<"Target a particular C++ ABI type">;
def target_abi : Separate<"-target-abi">,
HelpText<"Target a particular ABI type">;
def target_cpu : Separate<"-target-cpu">,
@@ -79,6 +81,8 @@ def analyzer_display_progress : Flag<"-analyzer-display-progress">,
HelpText<"Emit verbose output about the analyzer's progress">;
def analyzer_experimental_checks : Flag<"-analyzer-experimental-checks">,
HelpText<"Use experimental path-sensitive checks">;
+def analyzer_idempotent_operation : Flag<"-analyzer-idempotent-operation">,
+ HelpText<"Use experimental path-sensitive idempotent operation checker">;
def analyzer_experimental_internal_checks :
Flag<"-analyzer-experimental-internal-checks">,
HelpText<"Use new default path-sensitive checks currently in testing">;
@@ -121,6 +125,8 @@ def fno_common : Flag<"-fno-common">,
HelpText<"Compile common globals like normal definitions">;
def no_implicit_float : Flag<"-no-implicit-float">,
HelpText<"Don't generate implicit floating point instructions (x86-only)">;
+def finstrument_functions : Flag<"-finstrument-functions">,
+ HelpText<"Generate calls to instrument function entry and exit">;
def fno_merge_all_constants : Flag<"-fno-merge-all-constants">,
HelpText<"Disallow merging of constants.">;
def fno_threadsafe_statics : Flag<"-fno-threadsafe-statics">,
@@ -145,6 +151,8 @@ def mlimit_float_precision : Separate<"-mlimit-float-precision">,
HelpText<"Limit float precision to the given value">;
def mno_zero_initialized_in_bss : Flag<"-mno-zero-initialized-in-bss">,
HelpText<"Do not put zero initialized data in the BSS">;
+def momit_leaf_frame_pointer : Flag<"-momit-leaf-frame-pointer">,
+ HelpText<"Omit frame pointer setup for leaf functions.">;
def msoft_float : Flag<"-msoft-float">,
HelpText<"Use software floating point">;
def mrelax_all : Flag<"-mrelax-all">,
@@ -182,6 +190,9 @@ def fno_show_column : Flag<"-fno-show-column">,
HelpText<"Do not include column number on diagnostics">;
def fno_show_source_location : Flag<"-fno-show-source-location">,
HelpText<"Do not include source location information with diagnostics">;
+def fshow_overloads_EQ : Joined<"-fshow-overloads=">,
+ HelpText<"Which overload candidates to show when overload resolution fails: "
+ "best|all; defaults to all">;
def fno_caret_diagnostics : Flag<"-fno-caret-diagnostics">,
HelpText<"Do not include source line and caret with diagnostics">;
def fno_diagnostics_fixit_info : Flag<"-fno-diagnostics-fixit-info">,
@@ -259,8 +270,11 @@ def cxx_inheritance_view : Separate<"-cxx-inheritance-view">,
def o : Separate<"-o">, MetaVarName<"<path>">, HelpText<"Specify output file">;
def load : Separate<"-load">, MetaVarName<"<dsopath>">,
HelpText<"Load the named plugin (dynamic shared object)">;
-def plugin : Separate<"-plugin">,
+def plugin : Separate<"-plugin">, MetaVarName<"<name>">,
HelpText<"Use the named plugin action (use \"help\" to list available options)">;
+def plugin_arg : JoinedAndSeparate<"-plugin-arg-">,
+ MetaVarName<"<name> <arg>">,
+ HelpText<"Pass <arg> to plugin <name>">;
def resource_dir : Separate<"-resource-dir">,
HelpText<"The directory which holds the compiler resource files">;
def version : Flag<"-version">,
@@ -333,6 +347,8 @@ def rewrite_macros : Flag<"-rewrite-macros">,
def relocatable_pch : Flag<"-relocatable-pch">,
HelpText<"Whether to build a relocatable precompiled header">;
+def chained_pch : Flag<"-chained-pch">,
+ HelpText<"Whether to chain the new precompiled header to the old one.">;
def print_stats : Flag<"-print-stats">,
HelpText<"Print performance metrics and statistics">;
def ftime_report : Flag<"-ftime-report">,
@@ -397,6 +413,8 @@ def fno_operator_names : Flag<"-fno-operator-names">,
HelpText<"Do not treat C++ operator name keywords as synonyms for operators">;
def fno_signed_char : Flag<"-fno-signed-char">,
HelpText<"Char is unsigned">;
+def fno_spell_checking : Flag<"-fno-spell-checking">,
+ HelpText<"Disable spell-checking">;
def fno_use_cxa_atexit : Flag<"-fno-use-cxa-atexit">,
HelpText<"Don't use __cxa_atexit for calling destructors">;
def fconstant_string_class : Separate<"-fconstant-string-class">,
@@ -418,6 +436,8 @@ def fobjc_nonfragile_abi2 : Flag<"-fobjc-nonfragile-abi2">,
HelpText<"enable objective-c's enhanced nonfragile abi">;
def ftrapv : Flag<"-ftrapv">,
HelpText<"Trap on integer overflow">;
+def fwrapv : Flag<"-fwrapv">,
+ HelpText<"Treat signed integer overflow as two's complement">;
def pic_level : Separate<"-pic-level">,
HelpText<"Value for __PIC__">;
def pthread : Flag<"-pthread">,
@@ -434,6 +454,8 @@ def stack_protector : Separate<"-stack-protector">,
HelpText<"Enable stack protectors">;
def fvisibility : Separate<"-fvisibility">,
HelpText<"Default symbol visibility">;
+def fvisibility_inlines_hidden : Flag<"-fvisibility-inlines-hidden">,
+ HelpText<"Give inline C++ member functions default visibility by default">;
def ftemplate_depth : Separate<"-ftemplate-depth">,
HelpText<"Maximum depth of recursive template instantiation">;
def trigraphs : Flag<"-trigraphs">,
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h b/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
index 56786a7..5f062a1 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
@@ -40,13 +40,18 @@ class Compilation {
/// The original (untranslated) input argument list.
InputArgList *Args;
+ /// The driver translated arguments. Note that toolchains may perform their
+ /// own argument translation.
+ DerivedArgList *TranslatedArgs;
+
/// The list of actions.
ActionList Actions;
/// The root list of jobs.
JobList Jobs;
- /// Cache of translated arguments for a particular tool chain.
+ /// Cache of translated arguments for a particular tool chain and bound
+ /// architecture.
llvm::DenseMap<std::pair<const ToolChain*, const char*>,
DerivedArgList*> TCArgs;
@@ -58,14 +63,16 @@ class Compilation {
public:
Compilation(const Driver &D, const ToolChain &DefaultToolChain,
- InputArgList *Args);
+ InputArgList *Args, DerivedArgList *TranslatedArgs);
~Compilation();
const Driver &getDriver() const { return TheDriver; }
const ToolChain &getDefaultToolChain() const { return DefaultToolChain; }
- const InputArgList &getArgs() const { return *Args; }
+ const InputArgList &getInputArgs() const { return *Args; }
+
+ const DerivedArgList &getArgs() const { return *TranslatedArgs; }
ActionList &getActions() { return Actions; }
const ActionList &getActions() const { return Actions; }
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Driver.h b/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
index 90c3a0d..bb578b5 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
@@ -31,6 +31,7 @@ namespace driver {
class Action;
class ArgList;
class Compilation;
+ class DerivedArgList;
class HostInfo;
class InputArgList;
class InputInfo;
@@ -61,6 +62,9 @@ public:
/// command line.
std::string Dir;
+ /// The original path to the clang executable.
+ std::string ClangExecutable;
+
/// The path to the compiler resource directory.
std::string ResourceDir;
@@ -135,6 +139,11 @@ private:
std::list<std::string> TempFiles;
std::list<std::string> ResultFiles;
+private:
+ /// TranslateInputArgs - Create a new derived argument list from the input
+ /// arguments, after applying the standard argument translations.
+ DerivedArgList *TranslateInputArgs(const InputArgList &Args) const;
+
public:
Driver(llvm::StringRef _Name, llvm::StringRef _Dir,
llvm::StringRef _DefaultHostTriple,
@@ -157,6 +166,11 @@ public:
const std::string &getTitle() { return DriverTitle; }
void setTitle(std::string Value) { DriverTitle = Value; }
+ /// \brief Get the path to the main clang executable.
+ std::string getClangProgramPath() const {
+ return ClangExecutable;
+ }
+
/// @}
/// @name Primary Functionality
/// @{
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h b/contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h
index ca1ee9a..1b99a44 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/HostInfo.h
@@ -76,6 +76,8 @@ const HostInfo *createOpenBSDHostInfo(const Driver &D,
const llvm::Triple& Triple);
const HostInfo *createFreeBSDHostInfo(const Driver &D,
const llvm::Triple& Triple);
+const HostInfo *createMinixHostInfo(const Driver &D,
+ const llvm::Triple& Triple);
const HostInfo *createDragonFlyHostInfo(const Driver &D,
const llvm::Triple& Triple);
const HostInfo *createLinuxHostInfo(const Driver &D,
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Makefile b/contrib/llvm/tools/clang/include/clang/Driver/Makefile
index b462aaa..d829166 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Makefile
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Makefile
@@ -1,9 +1,9 @@
-LEVEL = ../../../../..
+CLANG_LEVEL := ../../..
BUILT_SOURCES = Options.inc CC1Options.inc CC1AsOptions.inc
TABLEGEN_INC_FILES_COMMON = 1
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
$(ObjDir)/Options.inc.tmp : Options.td OptParser.td $(TBLGEN) $(ObjDir)/.dir
$(Echo) "Building Clang Driver Option tables with tblgen"
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h b/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
index edae75c..e4a2eba 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
@@ -33,6 +33,7 @@ namespace options {
}
class Arg;
+ class ArgList;
class InputArgList;
class Option;
@@ -150,7 +151,7 @@ namespace options {
/// \return - The parsed argument, or 0 if the argument is missing values
/// (in which case Index still points at the conceptual next argument string
/// to parse).
- Arg *ParseOneArg(const InputArgList &Args, unsigned &Index) const;
+ Arg *ParseOneArg(const ArgList &Args, unsigned &Index) const;
/// ParseArgs - Parse an list of arguments into an InputArgList.
///
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Option.h b/contrib/llvm/tools/clang/include/clang/Driver/Option.h
index 08b94b1..0864382 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Option.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Option.h
@@ -21,7 +21,7 @@ using llvm::dyn_cast_or_null;
namespace clang {
namespace driver {
class Arg;
- class InputArgList;
+ class ArgList;
class OptionGroup;
/// Option - Abstract representation for a single form of driver
@@ -50,6 +50,13 @@ namespace driver {
JoinedAndSeparateClass
};
+ enum RenderStyleKind {
+ RenderCommaJoinedStyle,
+ RenderJoinedStyle,
+ RenderSeparateStyle,
+ RenderValuesStyle
+ };
+
private:
OptionClass Kind;
@@ -65,7 +72,7 @@ namespace driver {
/// Option that this is an alias for, if any.
const Option *Alias;
- /// Unsupported options will not be rejected.
+ /// Unsupported options will be rejected.
bool Unsupported : 1;
/// Treat this option like a linker input?
@@ -76,11 +83,8 @@ namespace driver {
// FIXME: We should ditch the render/renderAsInput distinction.
bool NoOptAsInput : 1;
- /// Always render this option as separate form its value.
- bool ForceSeparateRender : 1;
-
- /// Always render this option joined with its value.
- bool ForceJoinedRender : 1;
+ /// The style to use when rendering arguments parsed by this option.
+ unsigned RenderStyle : 2;
/// This option is only consumed by the driver.
bool DriverOption : 1;
@@ -109,11 +113,10 @@ namespace driver {
bool hasNoOptAsInput() const { return NoOptAsInput; }
void setNoOptAsInput(bool Value) { NoOptAsInput = Value; }
- bool hasForceSeparateRender() const { return ForceSeparateRender; }
- void setForceSeparateRender(bool Value) { ForceSeparateRender = Value; }
-
- bool hasForceJoinedRender() const { return ForceJoinedRender; }
- void setForceJoinedRender(bool Value) { ForceJoinedRender = Value; }
+ RenderStyleKind getRenderStyle() const {
+ return RenderStyleKind(RenderStyle);
+ }
+ void setRenderStyle(RenderStyleKind Value) { RenderStyle = Value; }
bool isDriverOption() const { return DriverOption; }
void setDriverOption(bool Value) { DriverOption = Value; }
@@ -151,7 +154,7 @@ namespace driver {
/// If the option accepts the current argument, accept() sets
/// Index to the position where argument parsing should resume
/// (even if the argument is missing values).
- virtual Arg *accept(const InputArgList &Args, unsigned &Index) const = 0;
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const = 0;
void dump() const;
@@ -164,7 +167,7 @@ namespace driver {
public:
OptionGroup(OptSpecifier ID, const char *Name, const OptionGroup *Group);
- virtual Arg *accept(const InputArgList &Args, unsigned &Index) const;
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
static bool classof(const Option *O) {
return O->getKind() == Option::GroupClass;
@@ -179,7 +182,7 @@ namespace driver {
public:
InputOption(OptSpecifier ID);
- virtual Arg *accept(const InputArgList &Args, unsigned &Index) const;
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
static bool classof(const Option *O) {
return O->getKind() == Option::InputClass;
@@ -192,7 +195,7 @@ namespace driver {
public:
UnknownOption(OptSpecifier ID);
- virtual Arg *accept(const InputArgList &Args, unsigned &Index) const;
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
static bool classof(const Option *O) {
return O->getKind() == Option::UnknownClass;
@@ -207,7 +210,7 @@ namespace driver {
FlagOption(OptSpecifier ID, const char *Name, const OptionGroup *Group,
const Option *Alias);
- virtual Arg *accept(const InputArgList &Args, unsigned &Index) const;
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
static bool classof(const Option *O) {
return O->getKind() == Option::FlagClass;
@@ -220,7 +223,7 @@ namespace driver {
JoinedOption(OptSpecifier ID, const char *Name, const OptionGroup *Group,
const Option *Alias);
- virtual Arg *accept(const InputArgList &Args, unsigned &Index) const;
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
static bool classof(const Option *O) {
return O->getKind() == Option::JoinedClass;
@@ -233,7 +236,7 @@ namespace driver {
SeparateOption(OptSpecifier ID, const char *Name,
const OptionGroup *Group, const Option *Alias);
- virtual Arg *accept(const InputArgList &Args, unsigned &Index) const;
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
static bool classof(const Option *O) {
return O->getKind() == Option::SeparateClass;
@@ -246,7 +249,7 @@ namespace driver {
CommaJoinedOption(OptSpecifier ID, const char *Name,
const OptionGroup *Group, const Option *Alias);
- virtual Arg *accept(const InputArgList &Args, unsigned &Index) const;
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
static bool classof(const Option *O) {
return O->getKind() == Option::CommaJoinedClass;
@@ -267,7 +270,7 @@ namespace driver {
unsigned getNumArgs() const { return NumArgs; }
- virtual Arg *accept(const InputArgList &Args, unsigned &Index) const;
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
static bool classof(const Option *O) {
return O->getKind() == Option::MultiArgClass;
@@ -282,7 +285,7 @@ namespace driver {
JoinedOrSeparateOption(OptSpecifier ID, const char *Name,
const OptionGroup *Group, const Option *Alias);
- virtual Arg *accept(const InputArgList &Args, unsigned &Index) const;
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
static bool classof(const Option *O) {
return O->getKind() == Option::JoinedOrSeparateClass;
@@ -297,7 +300,7 @@ namespace driver {
JoinedAndSeparateOption(OptSpecifier ID, const char *Name,
const OptionGroup *Group, const Option *Alias);
- virtual Arg *accept(const InputArgList &Args, unsigned &Index) const;
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
static bool classof(const Option *O) {
return O->getKind() == Option::JoinedAndSeparateClass;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Options.td b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
index 181cce5..1cc5ddc 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
@@ -124,11 +124,11 @@ def C : Flag<"-C">;
def D : JoinedOrSeparate<"-D">, Group<CompileOnly_Group>;
def E : Flag<"-E">, Flags<[DriverOption]>,
HelpText<"Only run the preprocessor">;
-def F : JoinedOrSeparate<"-F">;
+def F : JoinedOrSeparate<"-F">, Flags<[RenderJoined]>;
def H : Flag<"-H">;
def I_ : Flag<"-I-">, Group<I_Group>;
def I : JoinedOrSeparate<"-I">, Group<I_Group>;
-def L : JoinedOrSeparate<"-L">;
+def L : JoinedOrSeparate<"-L">, Flags<[RenderJoined]>;
def MD : Flag<"-MD">, Group<M_Group>;
def MF : JoinedOrSeparate<"-MF">, Group<M_Group>;
def MG : Flag<"-MG">, Group<M_Group>;
@@ -230,6 +230,7 @@ def exported__symbols__list : Separate<"-exported_symbols_list">;
def e : JoinedOrSeparate<"-e">;
def fPIC : Flag<"-fPIC">, Group<f_Group>;
def fPIE : Flag<"-fPIE">, Group<f_Group>;
+def fno_PIE : Flag<"-fno-PIE">, Group<f_Group>;
def faccess_control : Flag<"-faccess-control">, Group<f_Group>;
def fapple_kext : Flag<"-fapple-kext">, Group<f_Group>;
def fasm : Flag<"-fasm">, Group<f_Group>;
@@ -244,6 +245,7 @@ def fbootclasspath_EQ : Joined<"-fbootclasspath=">, Group<f_Group>;
def fbuiltin_strcat : Flag<"-fbuiltin-strcat">, Group<f_Group>;
def fbuiltin_strcpy : Flag<"-fbuiltin-strcpy">, Group<f_Group>;
def fbuiltin : Flag<"-fbuiltin">, Group<f_Group>;
+def fcaret_diagnostics : Flag<"-fcaret-diagnostics">, Group<f_Group>;
def fcatch_undefined_behavior : Flag<"-fcatch-undefined-behavior">,
Group<f_Group>, HelpText<"Generate runtime checks for undefined behavior.">;
def fclasspath_EQ : Joined<"-fclasspath=">, Group<f_Group>;
@@ -267,6 +269,8 @@ def fencoding_EQ : Joined<"-fencoding=">, Group<f_Group>;
def fexceptions : Flag<"-fexceptions">, Group<f_Group>;
def fextdirs_EQ : Joined<"-fextdirs=">, Group<f_Group>;
def fhosted : Flag<"-fhosted">, Group<f_Group>;
+def ffast_math : Flag<"-ffast-math">, Group<clang_ignored_f_Group>;
+def ffinite_math_only : Flag<"-ffinite-math-only">, Group<clang_ignored_f_Group>;
def ffreestanding : Flag<"-ffreestanding">, Group<f_Group>;
def fformat_extensions: Flag<"-fformat-extensions">;
def fgnu_keywords : Flag<"-fgnu-keywords">, Group<f_Group>;
@@ -276,6 +280,7 @@ def filelist : Separate<"-filelist">, Flags<[LinkerInput]>;
def findirect_virtual_calls : Flag<"-findirect-virtual-calls">, Group<f_Group>;
def finline_functions : Flag<"-finline-functions">, Group<clang_ignored_f_Group>;
def finline : Flag<"-finline">, Group<clang_ignored_f_Group>;
+def finstrument_functions : Flag<"-finstrument-functions">, Group<f_Group>;
def fkeep_inline_functions : Flag<"-fkeep-inline-functions">, Group<clang_ignored_f_Group>;
def flat__namespace : Flag<"-flat_namespace">;
def flax_vector_conversions : Flag<"-flax-vector-conversions">, Group<f_Group>;
@@ -308,6 +313,7 @@ def fno_diagnostics_show_option : Flag<"-fno-diagnostics-show-option">, Group<f_
def fno_dollars_in_identifiers : Flag<"-fno-dollars-in-identifiers">, Group<f_Group>;
def fno_eliminate_unused_debug_symbols : Flag<"-fno-eliminate-unused-debug-symbols">, Group<f_Group>;
def fno_exceptions : Flag<"-fno-exceptions">, Group<f_Group>;
+def fno_finite_math_only : Flag<"-fno-finite-math-only">, Group<clang_ignored_f_Group>;
def fno_gnu_keywords : Flag<"-fno-gnu-keywords">, Group<f_Group>;
def fno_inline_functions : Flag<"-fno-inline-functions">, Group<clang_ignored_f_Group>;
def fno_inline : Flag<"-fno-inline">, Group<clang_ignored_f_Group>;
@@ -322,6 +328,7 @@ def fno_pascal_strings : Flag<"-fno-pascal-strings">, Group<f_Group>;
def fno_rtti : Flag<"-fno-rtti">, Group<f_Group>;
def fno_show_column : Flag<"-fno-show-column">, Group<f_Group>;
def fno_show_source_location : Flag<"-fno-show-source-location">, Group<f_Group>;
+def fno_spell_checking : Flag<"-fno-spell-checking">, Group<f_Group>;
def fno_stack_protector : Flag<"-fno-stack-protector">, Group<f_Group>;
def fno_strict_aliasing : Flag<"-fno-strict-aliasing">, Group<clang_ignored_f_Group>;
def fno_threadsafe_statics : Flag<"-fno-threadsafe-statics">, Group<f_Group>;
@@ -351,6 +358,7 @@ def fpascal_strings : Flag<"-fpascal-strings">, Group<f_Group>;
def fpch_preprocess : Flag<"-fpch-preprocess">, Group<f_Group>;
def fpic : Flag<"-fpic">, Group<f_Group>;
def fpie : Flag<"-fpie">, Group<f_Group>;
+def fno_pie : Flag<"-fno-pie">, Group<f_Group>;
def fprofile_arcs : Flag<"-fprofile-arcs">, Group<f_Group>;
def fprofile_generate : Flag<"-fprofile-generate">, Group<f_Group>;
def framework : Separate<"-framework">, Flags<[LinkerInput]>;
@@ -359,7 +367,9 @@ def fsched_interblock : Flag<"-fsched-interblock">, Group<clang_ignored_f_Group>
def fshort_enums : Flag<"-fshort-enums">, Group<clang_ignored_f_Group>;
def freorder_blocks : Flag<"-freorder-blocks">, Group<clang_ignored_f_Group>;
def fshort_wchar : Flag<"-fshort-wchar">, Group<f_Group>;
+def fshow_overloads_EQ : Joined<"-fshow-overloads=">, Group<f_Group>;
def fshow_source_location : Flag<"-fshow-source-location">, Group<f_Group>;
+def fspell_checking : Flag<"-fspell-checking">, Group<f_Group>;
def fsigned_bitfields : Flag<"-fsigned-bitfields">, Group<f_Group>;
def fsigned_char : Flag<"-fsigned-char">, Group<f_Group>;
def fstack_protector_all : Flag<"-fstack-protector-all">, Group<f_Group>;
@@ -382,6 +392,8 @@ def funwind_tables : Flag<"-funwind-tables">, Group<f_Group>;
def fuse_cxa_atexit : Flag<"-fuse-cxa-atexit">, Group<f_Group>;
def fverbose_asm : Flag<"-fverbose-asm">, Group<f_Group>;
def fvisibility_EQ : Joined<"-fvisibility=">, Group<f_Group>;
+def fvisibility_inlines_hidden : Flag<"-fvisibility-inlines-hidden">, Group<f_Group>;
+def fwrapv : Flag<"-fwrapv">, Group<f_Group>;
def fwritable_strings : Flag<"-fwritable-strings">, Group<f_Group>;
def fzero_initialized_in_bss : Flag<"-fzero-initialized-in-bss">, Group<f_Group>;
def ffunction_sections: Flag <"-ffunction-sections">, Group<f_Group>;
@@ -427,10 +439,11 @@ def mfix_and_continue : Flag<"-mfix-and-continue">, Group<clang_ignored_m_Group>
def mfloat_abi_EQ : Joined<"-mfloat-abi=">, Group<m_Group>;
def mfpu_EQ : Joined<"-mfpu=">, Group<m_Group>;
def mhard_float : Flag<"-mhard-float">, Group<m_Group>;
-def miphoneos_version_min_EQ : Joined<"-miphoneos-version-min=">, Group<m_Group>, Flags<[DriverOption]>;
+def miphoneos_version_min_EQ : Joined<"-miphoneos-version-min=">, Group<m_Group>;
+def mios_version_min_EQ : Joined<"-mios-version-min=">, Alias<miphoneos_version_min_EQ>;
def mkernel : Flag<"-mkernel">, Group<m_Group>;
def mllvm : Separate<"-mllvm">;
-def mmacosx_version_min_EQ : Joined<"-mmacosx-version-min=">, Group<m_Group>, Flags<[DriverOption]>;
+def mmacosx_version_min_EQ : Joined<"-mmacosx-version-min=">, Group<m_Group>;
def mmmx : Flag<"-mmmx">, Group<m_x86_Features_Group>;
def mno_3dnowa : Flag<"-mno-3dnowa">, Group<m_x86_Features_Group>;
def mno_3dnow : Flag<"-mno-3dnow">, Group<m_x86_Features_Group>;
@@ -454,6 +467,8 @@ def mno_thumb : Flag<"-mno-thumb">, Group<m_Group>;
def marm : Flag<"-marm">, Alias<mno_thumb>;
def mno_warn_nonportable_cfstrings : Flag<"-mno-warn-nonportable-cfstrings">, Group<m_Group>;
+def mno_omit_leaf_frame_pointer : Flag<"-mno-omit-leaf-frame-pointer">, Group<f_Group>;
+def momit_leaf_frame_pointer : Flag<"-momit-leaf-frame-pointer">, Group<f_Group>;
def mpascal_strings : Flag<"-mpascal-strings">, Group<m_Group>;
def mred_zone : Flag<"-mred-zone">, Group<m_Group>;
def mrelax_all : Flag<"-mrelax-all">, Group<m_Group>;
@@ -586,7 +601,7 @@ def y : Joined<"-y">;
// options.
def _CLASSPATH_EQ : Joined<"--CLASSPATH=">, Alias<fclasspath_EQ>;
-def _CLASSPATH : Separate<"--CLASSPATH">, Alias<fclasspath_EQ>, Flags<[RenderJoined]>;
+def _CLASSPATH : Separate<"--CLASSPATH">, Alias<fclasspath_EQ>;
def _all_warnings : Flag<"--all-warnings">, Alias<Wall>;
def _analyze_auto : Flag<"--analyze-auto">, Flags<[DriverOption]>;
def _analyzer_no_default_checks : Flag<"--analyzer-no-default-checks">, Flags<[DriverOption]>;
@@ -595,80 +610,80 @@ def _analyze : Flag<"--analyze">, Flags<[DriverOption]>,
HelpText<"Run the static analyzer">;
def _ansi : Flag<"--ansi">, Alias<ansi>;
def _assemble : Flag<"--assemble">, Alias<S>;
-def _assert_EQ : Joined<"--assert=">, Alias<A>, Flags<[RenderSeparate]>;
+def _assert_EQ : Joined<"--assert=">, Alias<A>;
def _assert : Separate<"--assert">, Alias<A>;
def _bootclasspath_EQ : Joined<"--bootclasspath=">, Alias<fbootclasspath_EQ>;
-def _bootclasspath : Separate<"--bootclasspath">, Alias<fbootclasspath_EQ>, Flags<[RenderJoined]>;
+def _bootclasspath : Separate<"--bootclasspath">, Alias<fbootclasspath_EQ>;
def _classpath_EQ : Joined<"--classpath=">, Alias<fclasspath_EQ>;
-def _classpath : Separate<"--classpath">, Alias<fclasspath_EQ>, Flags<[RenderJoined]>;
-def _combine : Flag<"--combine">, Alias<combine>, Flags<[Unsupported]>;
+def _classpath : Separate<"--classpath">, Alias<fclasspath_EQ>;
+def _combine : Flag<"--combine">, Alias<combine>;
def _comments_in_macros : Flag<"--comments-in-macros">, Alias<CC>;
def _comments : Flag<"--comments">, Alias<C>;
def _compile : Flag<"--compile">, Alias<c>;
def _constant_cfstrings : Flag<"--constant-cfstrings">;
def _coverage : Flag<"--coverage">, Alias<coverage>;
-def _debug_EQ : Joined<"--debug=">, Alias<g_Flag>, Flags<[Unsupported]>;
-def _debug : Flag<"--debug">, Alias<g_Flag>, Flags<[Unsupported]>;
+def _debug_EQ : Joined<"--debug=">, Alias<g_Flag>;
+def _debug : Flag<"--debug">, Alias<g_Flag>;
def _define_macro_EQ : Joined<"--define-macro=">, Alias<D>;
-def _define_macro : Separate<"--define-macro">, Alias<D>, Flags<[RenderJoined]>;
+def _define_macro : Separate<"--define-macro">, Alias<D>;
def _dependencies : Flag<"--dependencies">, Alias<M>;
def _encoding_EQ : Joined<"--encoding=">, Alias<fencoding_EQ>;
-def _encoding : Separate<"--encoding">, Alias<fencoding_EQ>, Flags<[RenderJoined]>;
+def _encoding : Separate<"--encoding">, Alias<fencoding_EQ>;
def _entry : Flag<"--entry">, Alias<e>;
def _extdirs_EQ : Joined<"--extdirs=">, Alias<fextdirs_EQ>;
-def _extdirs : Separate<"--extdirs">, Alias<fextdirs_EQ>, Flags<[RenderJoined]>;
+def _extdirs : Separate<"--extdirs">, Alias<fextdirs_EQ>;
def _extra_warnings : Flag<"--extra-warnings">, Alias<W_Joined>;
-def _for_linker_EQ : Joined<"--for-linker=">, Alias<Xlinker>, Flags<[LinkerInput, RenderAsInput, RenderSeparate]>;
-def _for_linker : Separate<"--for-linker">, Alias<Xlinker>, Flags<[LinkerInput, RenderAsInput]>;
-def _force_link_EQ : Joined<"--force-link=">, Alias<u>, Flags<[RenderSeparate]>;
+def _for_linker_EQ : Joined<"--for-linker=">, Alias<Xlinker>;
+def _for_linker : Separate<"--for-linker">, Alias<Xlinker>;
+def _force_link_EQ : Joined<"--force-link=">, Alias<u>;
def _force_link : Separate<"--force-link">, Alias<u>;
def _help_hidden : Flag<"--help-hidden">;
def _help : Flag<"--help">,
HelpText<"Display available options">;
-def _imacros_EQ : Joined<"--imacros=">, Alias<imacros>, Flags<[RenderSeparate]>;
+def _imacros_EQ : Joined<"--imacros=">, Alias<imacros>;
def _imacros : Separate<"--imacros">, Alias<imacros>;
def _include_barrier : Flag<"--include-barrier">, Alias<I_>;
-def _include_directory_after_EQ : Joined<"--include-directory-after=">, Alias<idirafter>, Flags<[RenderSeparate]>;
+def _include_directory_after_EQ : Joined<"--include-directory-after=">, Alias<idirafter>;
def _include_directory_after : Separate<"--include-directory-after">, Alias<idirafter>;
def _include_directory_EQ : Joined<"--include-directory=">, Alias<I>;
-def _include_directory : Separate<"--include-directory">, Alias<I>, Flags<[RenderJoined]>;
-def _include_prefix_EQ : Joined<"--include-prefix=">, Alias<iprefix>, Flags<[RenderSeparate]>;
+def _include_directory : Separate<"--include-directory">, Alias<I>;
+def _include_prefix_EQ : Joined<"--include-prefix=">, Alias<iprefix>;
def _include_prefix : Separate<"--include-prefix">, Alias<iprefix>;
-def _include_with_prefix_after_EQ : Joined<"--include-with-prefix-after=">, Alias<iwithprefix>, Flags<[RenderSeparate]>;
+def _include_with_prefix_after_EQ : Joined<"--include-with-prefix-after=">, Alias<iwithprefix>;
def _include_with_prefix_after : Separate<"--include-with-prefix-after">, Alias<iwithprefix>;
-def _include_with_prefix_before_EQ : Joined<"--include-with-prefix-before=">, Alias<iwithprefixbefore>, Flags<[RenderSeparate]>;
+def _include_with_prefix_before_EQ : Joined<"--include-with-prefix-before=">, Alias<iwithprefixbefore>;
def _include_with_prefix_before : Separate<"--include-with-prefix-before">, Alias<iwithprefixbefore>;
-def _include_with_prefix_EQ : Joined<"--include-with-prefix=">, Alias<iwithprefix>, Flags<[RenderSeparate]>;
+def _include_with_prefix_EQ : Joined<"--include-with-prefix=">, Alias<iwithprefix>;
def _include_with_prefix : Separate<"--include-with-prefix">, Alias<iwithprefix>;
-def _include_EQ : Joined<"--include=">, Alias<include_>, Flags<[RenderSeparate]>;
+def _include_EQ : Joined<"--include=">, Alias<include_>;
def _include : Separate<"--include">, Alias<include_>;
-def _language_EQ : Joined<"--language=">, Alias<x>, Flags<[RenderSeparate]>;
+def _language_EQ : Joined<"--language=">, Alias<x>;
def _language : Separate<"--language">, Alias<x>;
-def _library_directory_EQ : Joined<"--library-directory=">, Alias<L>, Flags<[RenderSeparate]>;
+def _library_directory_EQ : Joined<"--library-directory=">, Alias<L>;
def _library_directory : Separate<"--library-directory">, Alias<L>;
-def _machine__EQ : Joined<"--machine-=">, Alias<m_Joined>, Flags<[Unsupported]>;
-def _machine_ : Joined<"--machine-">, Alias<m_Joined>, Flags<[Unsupported]>;
+def _machine__EQ : Joined<"--machine-=">, Alias<m_Joined>;
+def _machine_ : Joined<"--machine-">, Alias<m_Joined>;
def _machine_EQ : Joined<"--machine=">, Alias<m_Joined>;
-def _machine : Separate<"--machine">, Alias<m_Joined>, Flags<[RenderJoined]>;
+def _machine : Separate<"--machine">, Alias<m_Joined>;
def _no_integrated_cpp : Flag<"--no-integrated-cpp">, Alias<no_integrated_cpp>;
def _no_line_commands : Flag<"--no-line-commands">, Alias<P>;
def _no_standard_includes : Flag<"--no-standard-includes">, Alias<nostdinc>;
def _no_standard_libraries : Flag<"--no-standard-libraries">, Alias<nostdlib>;
def _no_undefined : Flag<"--no-undefined">, Flags<[LinkerInput]>;
def _no_warnings : Flag<"--no-warnings">, Alias<w>;
-def _optimize_EQ : Joined<"--optimize=">, Alias<O>, Flags<[Unsupported]>;
-def _optimize : Flag<"--optimize">, Alias<O>, Flags<[Unsupported]>;
+def _optimize_EQ : Joined<"--optimize=">, Alias<O>;
+def _optimize : Flag<"--optimize">, Alias<O>;
def _output_class_directory_EQ : Joined<"--output-class-directory=">, Alias<foutput_class_dir_EQ>;
-def _output_class_directory : Separate<"--output-class-directory">, Alias<foutput_class_dir_EQ>, Flags<[RenderJoined]>;
-def _output_EQ : Joined<"--output=">, Alias<o>, Flags<[RenderSeparate]>;
+def _output_class_directory : Separate<"--output-class-directory">, Alias<foutput_class_dir_EQ>;
+def _output_EQ : Joined<"--output=">, Alias<o>;
def _output : Separate<"--output">, Alias<o>;
def _param : Separate<"--param">;
-def _param_EQ : Joined<"--param=">, Alias<_param>, Flags<[RenderSeparate]>;
+def _param_EQ : Joined<"--param=">, Alias<_param>;
def _pass_exit_codes : Flag<"--pass-exit-codes">, Alias<pass_exit_codes>;
def _pedantic_errors : Flag<"--pedantic-errors">, Alias<pedantic_errors>;
def _pedantic : Flag<"--pedantic">, Alias<pedantic>;
-def _pipe : Flag<"--pipe">, Alias<pipe>, Flags<[DriverOption]>;
-def _prefix_EQ : Joined<"--prefix=">, Alias<B>, Flags<[RenderSeparate]>;
+def _pipe : Flag<"--pipe">, Alias<pipe>;
+def _prefix_EQ : Joined<"--prefix=">, Alias<B>;
def _prefix : Separate<"--prefix">, Alias<B>;
def _preprocess : Flag<"--preprocess">, Alias<E>;
def _print_diagnostic_categories : Flag<"--print-diagnostic-categories">;
@@ -687,30 +702,33 @@ def _profile : Flag<"--profile">, Alias<p>;
def _relocatable_pch : Flag<"--relocatable-pch">,
HelpText<"Build a relocatable precompiled header">;
def _resource_EQ : Joined<"--resource=">, Alias<fcompile_resource_EQ>;
-def _resource : Separate<"--resource">, Alias<fcompile_resource_EQ>, Flags<[RenderJoined]>;
+def _resource : Separate<"--resource">, Alias<fcompile_resource_EQ>;
def _save_temps : Flag<"--save-temps">, Alias<save_temps>;
def _shared : Flag<"--shared">, Alias<shared>;
def _signed_char : Flag<"--signed-char">, Alias<fsigned_char>;
-def _specs_EQ : Joined<"--specs=">, Alias<specs_EQ>, Flags<[Unsupported]>;
-def _specs : Separate<"--specs">, Alias<specs_EQ>, Flags<[RenderJoined, Unsupported]>;
+def _specs_EQ : Joined<"--specs=">, Alias<specs_EQ>;
+def _specs : Separate<"--specs">, Alias<specs_EQ>;
def _static : Flag<"--static">, Alias<static>;
def _std_EQ : Joined<"--std=">, Alias<std_EQ>;
-def _std : Separate<"--std">, Alias<std_EQ>, Flags<[RenderJoined]>;
+def _std : Separate<"--std">, Alias<std_EQ>;
def _sysroot_EQ : Joined<"--sysroot=">;
-def _sysroot : Separate<"--sysroot">, Alias<_sysroot_EQ>, Flags<[RenderJoined]>;
+def _sysroot : Separate<"--sysroot">, Alias<_sysroot_EQ>;
def _target_help : Flag<"--target-help">;
def _trace_includes : Flag<"--trace-includes">, Alias<H>;
def _traditional_cpp : Flag<"--traditional-cpp">, Alias<traditional_cpp>;
def _traditional : Flag<"--traditional">, Alias<traditional>;
def _trigraphs : Flag<"--trigraphs">, Alias<trigraphs>;
def _undefine_macro_EQ : Joined<"--undefine-macro=">, Alias<U>;
-def _undefine_macro : Separate<"--undefine-macro">, Alias<U>, Flags<[RenderJoined]>;
+def _undefine_macro : Separate<"--undefine-macro">, Alias<U>;
def _unsigned_char : Flag<"--unsigned-char">, Alias<funsigned_char>;
def _user_dependencies : Flag<"--user-dependencies">, Alias<MM>;
def _verbose : Flag<"--verbose">, Alias<v>;
def _version : Flag<"--version">;
-def _warn__EQ : Joined<"--warn-=">, Alias<W_Joined>, Flags<[Unsupported]>;
-def _warn_ : Joined<"--warn-">, Alias<W_Joined>, Flags<[Unsupported]>;
+def _warn__EQ : Joined<"--warn-=">, Alias<W_Joined>;
+def _warn_ : Joined<"--warn-">, Alias<W_Joined>;
def _write_dependencies : Flag<"--write-dependencies">, Alias<MD>;
def _write_user_dependencies : Flag<"--write-user-dependencies">, Alias<MMD>;
-def _ : Joined<"--">, Alias<f>, Flags<[Unsupported]>;
+def _ : Joined<"--">, Flags<[Unsupported]>;
+
+// Special internal option to handle -Xlinker --no-demangle.
+def Z_Xlinker__no_demangle : Flag<"-Z-Xlinker-no-demangle">, Flags<[Unsupported]>;
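
Of the options added above, -finstrument-functions is the one with directly observable codegen semantics (the matching InstrumentFunctions bit appears in the CodeGenOptions hunk below): under the usual GCC contract, the compiler wraps every function body in calls to __cyg_profile_func_enter/__cyg_profile_func_exit. A minimal sketch of the hook side, assuming that standard contract:

// tracer.cpp - sketch of the standard -finstrument-functions hooks.
// Build (with a clang carrying this patch): clang++ -finstrument-functions tracer.cpp
#include <cstdio>

extern "C" {
// no_instrument_function keeps the hooks from instrumenting themselves.
__attribute__((no_instrument_function))
void __cyg_profile_func_enter(void *fn, void *call_site) {
  std::printf("enter %p (called from %p)\n", fn, call_site);
}

__attribute__((no_instrument_function))
void __cyg_profile_func_exit(void *fn, void *call_site) {
  std::printf("exit  %p (called from %p)\n", fn, call_site);
}
} // extern "C"

static int work(int x) { return 2 * x; } // gets enter/exit calls inserted

int main() { return work(21) == 42 ? 0 : 1; }
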
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h b/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
index 1a8ae77..11a153c 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
@@ -70,20 +70,22 @@ public:
// Tool access.
/// TranslateArgs - Create a new derived argument list for any argument
- /// translations this ToolChain may wish to perform.
+ /// translations this ToolChain may wish to perform, or 0 if no tool chain
+ /// specific translations are needed.
///
/// \param BoundArch - The bound architecture name, or 0.
- virtual DerivedArgList *TranslateArgs(InputArgList &Args,
- const char *BoundArch) const = 0;
+ virtual DerivedArgList *TranslateArgs(const DerivedArgList &Args,
+ const char *BoundArch) const {
+ return 0;
+ }
/// SelectTool - Choose a tool to use to handle the action \arg JA.
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const = 0;
// Helper methods
- std::string GetFilePath(const Compilation &C, const char *Name) const;
- std::string GetProgramPath(const Compilation &C, const char *Name,
- bool WantFile = false) const;
+ std::string GetFilePath(const char *Name) const;
+ std::string GetProgramPath(const char *Name, bool WantFile = false) const;
// Platform defaults information
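
TranslateArgs changes twice in this hunk: it now receives the already-derived argument list by const reference, and it is no longer pure virtual, with a 0 return meaning "nothing to translate". GetFilePath/GetProgramPath also drop the Compilation parameter, so path lookups no longer need a live compilation. A sketch of a subclass that keeps the default behavior explicit (class name invented, base-class constructor details omitted):

// Hypothetical tool chain relying on the new default; only the
// TranslateArgs signature comes from the header above.
class NullToolChain : public clang::driver::ToolChain {
public:
  virtual clang::driver::DerivedArgList *
  TranslateArgs(const clang::driver::DerivedArgList &Args,
                const char *BoundArch) const {
    return 0; // "no tool chain specific translations needed"
  }
  // SelectTool remains pure virtual and still has to be implemented.
};
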
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Types.def b/contrib/llvm/tools/clang/include/clang/Driver/Types.def
index 61a5043..06a8690 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Types.def
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Types.def
@@ -67,15 +67,21 @@ TYPE("f95", PP_Fortran, INVALID, 0, "u")
TYPE("f95-cpp-input", Fortran, PP_Fortran, 0, "u")
TYPE("java", Java, INVALID, 0, "u")
+// LLVM IR/LTO types. We define separate types for IR and LTO because LTO
+// outputs should use the standard suffixes.
+TYPE("ir", LLVM_IR, INVALID, "ll", "u")
+TYPE("ir", LLVM_BC, INVALID, "bc", "u")
+TYPE("lto-ir", LTO_IR, INVALID, "s", "")
+TYPE("lto-bc", LTO_BC, INVALID, "o", "")
+
// Misc.
TYPE("ast", AST, INVALID, "ast", "u")
-TYPE("llvm-asm", LLVMAsm, INVALID, "s", "")
-TYPE("llvm-bc", LLVMBC, INVALID, "o", "")
TYPE("plist", Plist, INVALID, "plist", "")
TYPE("rewritten-objc", RewrittenObjC,INVALID, "cpp", "")
TYPE("precompiled-header", PCH, INVALID, "gch", "A")
TYPE("object", Object, INVALID, "o", "")
TYPE("treelang", Treelang, INVALID, 0, "u")
TYPE("image", Image, INVALID, "out", "")
+TYPE("dSYM", dSYM, INVALID, "dSYM", "A")
TYPE("dependencies", Dependencies, INVALID, "d", "")
TYPE("none", Nothing, INVALID, 0, "u")
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Types.h b/contrib/llvm/tools/clang/include/clang/Driver/Types.h
index d933230..9187529 100644
--- a/contrib/llvm/tools/clang/include/clang/Driver/Types.h
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Types.h
@@ -59,6 +59,10 @@ namespace types {
/// isAcceptedByClang - Can clang handle this input type.
bool isAcceptedByClang(ID Id);
+ /// isOnlyAcceptedByClang - Is clang the only compiler that can handle this
+ /// input type.
+ bool isOnlyAcceptedByClang(ID Id);
+
/// isCXX - Is this a "C++" input (C++ and Obj-C++ sources and headers).
bool isCXX(ID Id);
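
isOnlyAcceptedByClang pairs with the IR types added to Types.def above: a gcc fallback cannot consume LLVM IR, so the driver needs a predicate for inputs that must stay with clang. A hypothetical call site (the fallback logic around it is invented):

// Sketch: never fall back to an external compiler for IR inputs.
bool canFallBackToGCC(clang::driver::types::ID Id) {
  return !clang::driver::types::isOnlyAcceptedByClang(Id);
}
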
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h b/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h
index 9163a20..2d1df44 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h
@@ -29,6 +29,7 @@ class CodeGenOptions;
class Diagnostic;
class FileManager;
class LangOptions;
+class PCHReader;
class Preprocessor;
class TargetOptions;
@@ -57,25 +58,12 @@ ASTConsumer *CreateASTViewer();
// to stderr; this is intended for debugging.
ASTConsumer *CreateDeclContextPrinter();
-// ObjC rewriter: attempts to rewrite ObjC constructs into pure C code.
-// This is considered experimental, and only works with Apple's ObjC runtime.
-ASTConsumer *CreateObjCRewriter(const std::string &InFile,
- llvm::raw_ostream *OS,
- Diagnostic &Diags,
- const LangOptions &LOpts,
- bool SilenceRewriteMacroWarning);
-
-/// CreateHTMLPrinter - Create an AST consumer which rewrites source code to
-/// HTML with syntax highlighting suitable for viewing in a web-browser.
-ASTConsumer *CreateHTMLPrinter(llvm::raw_ostream *OS, Preprocessor &PP,
- bool SyntaxHighlight = true,
- bool HighlightMacros = true);
-
// PCH generator: generates a precompiled header file; this file can be used
// later with the PCHReader (clang -cc1 option -include-pch) to speed up compile
// times.
ASTConsumer *CreatePCHGenerator(const Preprocessor &PP,
llvm::raw_ostream *OS,
+ PCHReader *Chain,
const char *isysroot = 0);
// Inheritance viewer: for C++ code, creates a graph of the inheritance
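
CreatePCHGenerator gains a Chain parameter: pass the PCHReader the new PCH should chain from, or 0 for a classic stand-alone PCH. A sketch of the call, assuming a configured CompilerInstance CI and an llvm::raw_ostream *OS set up elsewhere (the getPCHReader() accessor is added in the CompilerInstance hunk below):

// Sketch: emit a chained PCH when a PCH was already loaded,
// a stand-alone one otherwise.
clang::PCHReader *Chain = CI.getPCHReader(); // 0 if nothing was loaded
clang::ASTConsumer *Gen =
    clang::CreatePCHGenerator(CI.getPreprocessor(), OS, Chain);
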
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/AnalysisConsumer.h b/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
index 2cbdf36..ab4aed9 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/AnalysisConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
@@ -1,4 +1,4 @@
-//===--- AnalysisConsumer.h - Front-end Analysis Engine Hooks ---*- C++ -*-===//
+//===--- AnalyzerOptions.h - Analysis Engine Options ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,13 +7,13 @@
//
//===----------------------------------------------------------------------===//
//
-// This header contains the functions necessary for a front-end to run various
-// analyses.
+// This header contains the structures necessary for a front-end to specify
+// various analyses.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FRONTEND_ANALYSISCONSUMER_H
-#define LLVM_CLANG_FRONTEND_ANALYSISCONSUMER_H
+#ifndef LLVM_CLANG_FRONTEND_ANALYZEROPTIONS_H
+#define LLVM_CLANG_FRONTEND_ANALYZEROPTIONS_H
#include <string>
#include <vector>
@@ -72,6 +72,7 @@ public:
unsigned VisualizeEGUbi : 1;
unsigned EnableExperimentalChecks : 1;
unsigned EnableExperimentalInternalChecks : 1;
+ unsigned EnableIdempotentOperationChecker : 1;
unsigned InlineCall : 1;
public:
@@ -92,13 +93,6 @@ public:
}
};
-/// CreateAnalysisConsumer - Creates an ASTConsumer to run various code
-/// analysis passes. (The set of analyses run is controlled by command-line
-/// options.)
-ASTConsumer* CreateAnalysisConsumer(const Preprocessor &pp,
- const std::string &output,
- const AnalyzerOptions& Opts);
-
}
#endif
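
With the consumer-creation hook gone, this header is pure configuration: front ends fill in the bitfields and hand the struct to whatever builds the analysis consumer. A sketch using only fields declared above:

// Sketch: enable the new idempotent-operation checker together with the
// experimental checks.
clang::AnalyzerOptions Opts;
Opts.EnableExperimentalChecks = 1;
Opts.EnableIdempotentOperationChecker = 1;
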
diff --git a/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
index 6241230..2918f4e 100644
--- a/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_CODEGEN_CODEGENOPTIONS_H
-#define LLVM_CLANG_CODEGEN_CODEGENOPTIONS_H
+#ifndef LLVM_CLANG_FRONTEND_CODEGENOPTIONS_H
+#define LLVM_CLANG_FRONTEND_CODEGENOPTIONS_H
#include <string>
@@ -39,7 +39,7 @@ public:
unsigned CXXCtorDtorAliases: 1; /// Emit complete ctors/dtors as linker
/// aliases to base ctors when possible.
unsigned DataSections : 1; /// Set when -fdata-sections is enabled
- unsigned DebugInfo : 1; /// Should generate deubg info (-g).
+ unsigned DebugInfo : 1; /// Should generate debug info (-g).
unsigned DisableFPElim : 1; /// Set when -fomit-frame-pointer is enabled.
unsigned DisableLLVMOpts : 1; /// Don't run any optimizations, for use in
/// getting .bc files that correspond to the
@@ -47,14 +47,18 @@ public:
/// done.
unsigned DisableRedZone : 1; /// Set when -mno-red-zone is enabled.
unsigned FunctionSections : 1; /// Set when -ffunction-sections is enabled
+ unsigned InstrumentFunctions : 1; /// Set when -finstrument-functions is enabled
unsigned MergeAllConstants : 1; /// Merge identical constants.
unsigned NoCommon : 1; /// Set when -fno-common or C++ is enabled.
unsigned NoImplicitFloat : 1; /// Set when -mno-implicit-float is enabled.
unsigned NoZeroInitializedInBSS : 1; /// -fno-zero-initialized-in-bss
unsigned ObjCDispatchMethod : 2; /// Method of Objective-C dispatch to use.
+ unsigned OmitLeafFramePointer : 1; /// Set when -momit-leaf-frame-pointer is
+ /// enabled.
unsigned OptimizationLevel : 3; /// The -O[0-4] option specified.
unsigned OptimizeSize : 1; /// If -Os is specified.
unsigned RelaxAll : 1; /// Relax all machine code instructions.
+ unsigned SimplifyLibCalls : 1; /// Set when -fbuiltin is enabled.
unsigned SoftFloat : 1; /// -soft-float.
unsigned TimePasses : 1; /// Set when -ftime-report is enabled.
unsigned UnitAtATime : 1; /// Unused. For mirroring GCC optimization
@@ -63,6 +67,9 @@ public:
unsigned UnwindTables : 1; /// Emit unwind tables.
unsigned VerifyModule : 1; /// Control whether the module should be run
/// through the LLVM Verifier.
+ unsigned EmitDeclMetadata : 1; /// Emit special metadata indicating what Decl*
+ /// various IR entities came from. Only useful
+ /// when running CodeGen as a subroutine.
/// The code model to use (-mcmodel).
std::string CodeModel;
@@ -107,15 +114,18 @@ public:
NoImplicitFloat = 0;
NoZeroInitializedInBSS = 0;
ObjCDispatchMethod = Legacy;
+ OmitLeafFramePointer = 0;
OptimizationLevel = 0;
OptimizeSize = 0;
RelaxAll = 0;
+ SimplifyLibCalls = 1;
SoftFloat = 0;
TimePasses = 0;
UnitAtATime = 1;
UnrollLoops = 0;
UnwindTables = 0;
VerifyModule = 1;
+ EmitDeclMetadata = 0;
Inlining = NoInlining;
RelocationModel = "pic";
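
Note the asymmetric defaults set in the constructor: SimplifyLibCalls starts at 1 (so -fno-builtin has to clear it), while InstrumentFunctions, OmitLeafFramePointer, and EmitDeclMetadata start at 0. A sketch of the corresponding flag-to-option mapping; the boolean parameters stand in for parsed driver flags:

// Sketch: map parsed driver flags onto the new CodeGenOptions bits.
void applyCodeGenFlags(clang::CodeGenOptions &Opts, bool FNoBuiltin,
                       bool MOmitLeafFramePointer, bool FInstrumentFunctions) {
  if (FNoBuiltin)
    Opts.SimplifyLibCalls = 0;          // default is on
  if (MOmitLeafFramePointer)
    Opts.OmitLeafFramePointer = 1;      // -momit-leaf-frame-pointer
  if (FInstrumentFunctions)
    Opts.InstrumentFunctions = 1;       // -finstrument-functions
}
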
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
index 06dc800..54ce8bf 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
@@ -34,6 +34,7 @@ class DiagnosticClient;
class ExternalASTSource;
class FileManager;
class FrontendAction;
+class PCHReader;
class Preprocessor;
class SourceManager;
class TargetInfo;
@@ -96,6 +97,9 @@ class CompilerInstance {
/// The list of active output files.
std::list< std::pair<std::string, llvm::raw_ostream*> > OutputFiles;
+ /// The PCH reader. Not owned; the ASTContext owns this.
+ PCHReader *Reader;
+
void operator=(const CompilerInstance &); // DO NOT IMPLEMENT
CompilerInstance(const CompilerInstance&); // DO NOT IMPLEMENT
public:
@@ -507,6 +511,9 @@ public:
createPCHExternalASTSource(llvm::StringRef Path, const std::string &Sysroot,
Preprocessor &PP, ASTContext &Context);
+ /// Get the PCH reader, if any.
+ PCHReader *getPCHReader() { return Reader; }
+
/// Create a code completion consumer using the invocation; note that this
/// will cause the source manager to truncate the input source file at the
/// completion point.
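
The non-owning Reader pointer is the hook that makes chained PCH work: the PCHWriter hunk below makes the writer a PCHDeserializationListener, so it can record which types and decls came from the base PCH while reading. A sketch of the wiring, assuming Writer is an already-constructed PCHWriter:

// Sketch: let the PCH writer observe everything the loaded PCH deserializes.
if (clang::PCHReader *Reader = CI.getPCHReader())
  Reader->setDeserializationListener(&Writer);
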
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
index f5a9053..d558ad3 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
@@ -12,8 +12,8 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetOptions.h"
-#include "clang/CodeGen/CodeGenOptions.h"
-#include "clang/Frontend/AnalysisConsumer.h"
+#include "clang/Frontend/AnalyzerOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/DependencyOutputOptions.h"
#include "clang/Frontend/DiagnosticOptions.h"
#include "clang/Frontend/FrontendOptions.h"
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
index 8eb66e5..516dc67 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
@@ -10,6 +10,8 @@
#ifndef LLVM_CLANG_FRONTEND_DIAGNOSTICOPTIONS_H
#define LLVM_CLANG_FRONTEND_DIAGNOSTICOPTIONS_H
+#include "clang/Basic/Diagnostic.h"
+
#include <string>
#include <vector>
@@ -33,6 +35,8 @@ public:
unsigned ShowCategories : 2; /// Show categories: 0 -> none, 1 -> Number,
/// 2 -> Full Name.
unsigned ShowColors : 1; /// Show diagnostics with ANSI color sequences.
+ unsigned ShowOverloads : 1; /// Overload candidates to show. Values from
+ /// Diagnostic::OverloadsShown
unsigned VerifyDiagnostics: 1; /// Check that diagnostics match the expected
/// diagnostics, indicated by markers in the
/// input source file.
@@ -72,6 +76,7 @@ public:
PedanticErrors = 0;
ShowCarets = 1;
ShowColors = 0;
+ ShowOverloads = Diagnostic::Ovl_All;
ShowColumn = 1;
ShowFixits = 1;
ShowLocation = 1;
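
ShowOverloads holds a Diagnostic::OverloadsShown value; the constructor default is Ovl_All. A sketch of parsing the new -fshow-overloads= spelling; the Ovl_Best enumerator name is an assumption, since only Ovl_All appears in this diff:

// Sketch: -fshow-overloads=best|all -> DiagnosticOptions::ShowOverloads.
void parseShowOverloads(clang::DiagnosticOptions &Opts, llvm::StringRef V) {
  if (V == "all")
    Opts.ShowOverloads = clang::Diagnostic::Ovl_All;
  else if (V == "best")
    Opts.ShowOverloads = clang::Diagnostic::Ovl_Best; // assumed name
}
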
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
index 7b7db37..f6a68bf 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
@@ -13,17 +13,40 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/OwningPtr.h"
#include <string>
+#include <vector>
+
+namespace llvm {
+ class raw_ostream;
+}
namespace clang {
-class ASTUnit;
class ASTConsumer;
-class CompilerInstance;
class ASTMergeAction;
+class ASTUnit;
+class CompilerInstance;
+
+enum InputKind {
+ IK_None,
+ IK_Asm,
+ IK_C,
+ IK_CXX,
+ IK_ObjC,
+ IK_ObjCXX,
+ IK_PreprocessedC,
+ IK_PreprocessedCXX,
+ IK_PreprocessedObjC,
+ IK_PreprocessedObjCXX,
+ IK_OpenCL,
+ IK_AST,
+ IK_LLVM_IR
+};
+
/// FrontendAction - Abstract base class for actions which can be performed by
/// the frontend.
class FrontendAction {
std::string CurrentFile;
+ InputKind CurrentFileKind;
llvm::OwningPtr<ASTUnit> CurrentASTUnit;
CompilerInstance *Instance;
friend class ASTMergeAction;
@@ -101,6 +124,11 @@ public:
return CurrentFile;
}
+ InputKind getCurrentFileKind() const {
+ assert(!CurrentFile.empty() && "No current file!");
+ return CurrentFileKind;
+ }
+
ASTUnit &getCurrentASTUnit() const {
assert(CurrentASTUnit && "No current AST unit!");
return *CurrentASTUnit;
@@ -110,7 +138,7 @@ public:
return CurrentASTUnit.take();
}
- void setCurrentFile(llvm::StringRef Value, ASTUnit *AST = 0);
+ void setCurrentFile(llvm::StringRef Value, InputKind Kind, ASTUnit *AST = 0);
/// @}
/// @name Supported Modes
@@ -128,8 +156,11 @@ public:
/// hasPCHSupport - Does this action support use with PCH?
virtual bool hasPCHSupport() const { return !usesPreprocessorOnly(); }
- /// hasASTSupport - Does this action support use with AST files?
- virtual bool hasASTSupport() const { return !usesPreprocessorOnly(); }
+ /// hasASTFileSupport - Does this action support use with AST files?
+ virtual bool hasASTFileSupport() const { return !usesPreprocessorOnly(); }
+
+ /// hasIRSupport - Does this action support use with IR files?
+ virtual bool hasIRSupport() const { return false; }
/// hasCodeCompletionSupport - Does this action support use with code
/// completion?
@@ -150,17 +181,18 @@ public:
/// \param Filename - The input filename, which will be made available to
/// clients via \see getCurrentFile().
///
- /// \param IsAST - Indicates whether this is an AST input. AST inputs require
- /// special handling, since the AST file itself contains several objects which
- /// would normally be owned by the CompilerInstance. When processing AST input
- /// files, these objects should generally not be initialized in the
- /// CompilerInstance -- they will automatically be shared with the AST file in
- /// between \see BeginSourceFile() and \see EndSourceFile().
+ /// \param InputKind - The type of input. Some input kinds are handled
+ /// specially, for example AST inputs, since the AST file itself contains
+ /// several objects which would normally be owned by the
+ /// CompilerInstance. When processing AST input files, these objects should
+ /// generally not be initialized in the CompilerInstance -- they will
+ /// automatically be shared with the AST file in between \see
+ /// BeginSourceFile() and \see EndSourceFile().
///
/// \return True on success; the compilation of this file should be aborted
/// and neither Execute nor EndSourceFile should be called.
bool BeginSourceFile(CompilerInstance &CI, llvm::StringRef Filename,
- bool IsAST = false);
+ InputKind Kind);
/// Execute - Set the source managers main input file, and run the action.
void Execute();
@@ -175,6 +207,7 @@ public:
/// ASTFrontendAction - Abstract base class to use for AST consumer based
/// frontend actions.
class ASTFrontendAction : public FrontendAction {
+protected:
/// ExecuteAction - Implement the ExecuteAction interface by running Sema on
/// the already initialized AST consumer.
///
@@ -186,6 +219,16 @@ public:
virtual bool usesPreprocessorOnly() const { return false; }
};
+class PluginASTAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) = 0;
+
+public:
+ virtual bool ParseArgs(const std::vector<std::string>& arg) = 0;
+ virtual void PrintHelp(llvm::raw_ostream&) = 0;
+};
+
/// PreprocessorFrontendAction - Abstract base class to use for preprocessor
/// based frontend actions.
class PreprocessorFrontendAction : public FrontendAction {
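
PluginASTAction is the new required base for loadable plugins: CreateASTConsumer stays abstract, and every plugin must additionally accept its own arguments (PluginArgs in the FrontendOptions hunk below) and print help. A minimal sketch; the class names are invented, and registration is shown in the FrontendPluginRegistry hunk further down:

#include "clang/AST/ASTConsumer.h"
#include "clang/Frontend/FrontendAction.h"
#include "llvm/Support/raw_ostream.h"

namespace {
class NullConsumer : public clang::ASTConsumer {}; // deliberately does nothing

class DemoPluginAction : public clang::PluginASTAction {
protected:
  virtual clang::ASTConsumer *CreateASTConsumer(clang::CompilerInstance &CI,
                                                llvm::StringRef InFile) {
    return new NullConsumer();
  }
public:
  virtual bool ParseArgs(const std::vector<std::string> &args) {
    return args.empty(); // this demo takes no arguments
  }
  virtual void PrintHelp(llvm::raw_ostream &OS) {
    OS << "demo-plugin: does nothing, demonstrates the interface\n";
  }
};
} // namespace
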
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
index cee1c1d..26262cf 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
@@ -15,8 +15,6 @@
#include <vector>
namespace clang {
-class FixItRewriter;
-class FixItPathRewriter;
//===----------------------------------------------------------------------===//
// Custom Consumer Actions
@@ -38,12 +36,6 @@ public:
// AST Consumer Actions
//===----------------------------------------------------------------------===//
-class AnalysisAction : public ASTFrontendAction {
-protected:
- virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile);
-};
-
class ASTPrintAction : public ASTFrontendAction {
protected:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
@@ -74,26 +66,6 @@ protected:
llvm::StringRef InFile);
};
-class FixItAction : public ASTFrontendAction {
-protected:
- llvm::OwningPtr<FixItRewriter> Rewriter;
- llvm::OwningPtr<FixItPathRewriter> PathRewriter;
-
- virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile);
-
- virtual bool BeginSourceFileAction(CompilerInstance &CI,
- llvm::StringRef Filename);
-
- virtual void EndSourceFileAction();
-
- virtual bool hasASTSupport() const { return false; }
-
-public:
- FixItAction();
- ~FixItAction();
-};
-
class GeneratePCHAction : public ASTFrontendAction {
protected:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
@@ -101,13 +73,7 @@ protected:
virtual bool usesCompleteTranslationUnit() { return false; }
- virtual bool hasASTSupport() const { return false; }
-};
-
-class HTMLPrintAction : public ASTFrontendAction {
-protected:
- virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile);
+ virtual bool hasASTFileSupport() const { return false; }
};
class InheritanceViewAction : public ASTFrontendAction {
@@ -116,12 +82,6 @@ protected:
llvm::StringRef InFile);
};
-class RewriteObjCAction : public ASTFrontendAction {
-protected:
- virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile);
-};
-
class SyntaxOnlyAction : public ASTFrontendAction {
protected:
virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
@@ -170,7 +130,7 @@ public:
virtual bool usesPreprocessorOnly() const;
virtual bool usesCompleteTranslationUnit();
virtual bool hasPCHSupport() const;
- virtual bool hasASTSupport() const;
+ virtual bool hasASTFileSupport() const;
virtual bool hasCodeCompletionSupport() const;
};
@@ -215,16 +175,6 @@ protected:
virtual bool hasPCHSupport() const { return true; }
};
-class RewriteMacrosAction : public PreprocessorFrontendAction {
-protected:
- void ExecuteAction();
-};
-
-class RewriteTestAction : public PreprocessorFrontendAction {
-protected:
- void ExecuteAction();
-};
-
} // end namespace clang
#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
index c43e680..4010ea6 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
@@ -11,6 +11,7 @@
#define LLVM_CLANG_FRONTEND_FRONTENDOPTIONS_H
#include "clang/Frontend/CommandLineSourceLoc.h"
+#include "clang/Frontend/FrontendAction.h"
#include "llvm/ADT/StringRef.h"
#include <string>
#include <vector>
@@ -55,27 +56,15 @@ namespace frontend {
/// FrontendOptions - Options for controlling the behavior of the frontend.
class FrontendOptions {
public:
- enum InputKind {
- IK_None,
- IK_Asm,
- IK_C,
- IK_CXX,
- IK_ObjC,
- IK_ObjCXX,
- IK_PreprocessedC,
- IK_PreprocessedCXX,
- IK_PreprocessedObjC,
- IK_PreprocessedObjCXX,
- IK_OpenCL,
- IK_AST
- };
-
unsigned DebugCodeCompletionPrinter : 1; ///< Use the debug printer for code
/// completion results.
unsigned DisableFree : 1; ///< Disable memory freeing on exit.
unsigned RelocatablePCH : 1; ///< When generating PCH files,
/// instruct the PCH writer to create
/// relocatable PCH files.
+ unsigned ChainedPCH : 1; ///< When generating PCH files,
+ /// instruct the PCH writer to create
+ /// chained PCH files.
unsigned ShowHelp : 1; ///< Show the -help text.
unsigned ShowMacrosInCodeCompletion : 1; ///< Show macros in code completion
/// results.
@@ -108,6 +97,9 @@ public:
/// The name of the action to run when using a plugin action.
std::string ActionName;
+ /// Args to pass to the plugin
+ std::vector<std::string> PluginArgs;
+
/// The list of plugins to load.
std::vector<std::string> Plugins;
@@ -125,6 +117,7 @@ public:
ProgramAction = frontend::ParseSyntaxOnly;
ActionName = "";
RelocatablePCH = 0;
+ ChainedPCH = 0;
ShowHelp = 0;
ShowMacrosInCodeCompletion = 0;
ShowCodePatternsInCodeCompletion = 0;
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendPluginRegistry.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendPluginRegistry.h
index 8341492..ec925ad 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendPluginRegistry.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendPluginRegistry.h
@@ -16,7 +16,7 @@
namespace clang {
/// The frontend plugin registry.
-typedef llvm::Registry<FrontendAction> FrontendPluginRegistry;
+typedef llvm::Registry<PluginASTAction> FrontendPluginRegistry;
} // end namespace clang
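
Registration keeps the usual llvm::Registry pattern, just with the tighter PluginASTAction type; with the DemoPluginAction sketch from the FrontendAction hunk above, the plugin's static entry would be:

// Sketch: static registration entry the plugin shared object would contain.
static clang::FrontendPluginRegistry::Add<DemoPluginAction>
    X("demo-plugin", "does nothing, demonstrates the interface");
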
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/PCHBitCodes.h b/contrib/llvm/tools/clang/include/clang/Frontend/PCHBitCodes.h
index 2493cfd..27a2b7d 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/PCHBitCodes.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/PCHBitCodes.h
@@ -30,10 +30,10 @@ namespace clang {
/// designed for the previous version could not support reading
/// the new version), this number should be increased.
///
- /// Version 3 of PCH files also requires that the version control branch and
+ /// Version 4 of PCH files also requires that the version control branch and
/// revision match exactly, since there is no backward compatibility of
/// PCH files at this time.
- const unsigned VERSION_MAJOR = 3;
+ const unsigned VERSION_MAJOR = 4;
/// \brief PCH minor version number supported by this version of
/// Clang.
@@ -47,7 +47,7 @@ namespace clang {
/// \brief An ID number that refers to a declaration in a PCH file.
///
- /// The ID numbers of types are consecutive (in order of
+ /// The ID numbers of declarations are consecutive (in order of
/// discovery) and start at 2. 0 is reserved for NULL, and 1 is
/// reserved for the translation unit declaration.
typedef uint32_t DeclID;
@@ -226,7 +226,18 @@ namespace clang {
/// \brief Record code for the table of offsets to macro definition
/// entries in the preprocessing record.
- MACRO_DEFINITION_OFFSETS = 23
+ MACRO_DEFINITION_OFFSETS = 23,
+
+ /// \brief Record code for the array of VTable uses.
+ VTABLE_USES = 24,
+
+ /// \brief Record code for the array of dynamic classes.
+ DYNAMIC_CLASSES = 25,
+
+ /// \brief Record code for the chained PCH metadata, including the
+ /// PCH version and the name of the PCH this is chained to.
+ CHAINED_METADATA = 26
+
};
/// \brief Record types used within a source manager block.
@@ -417,7 +428,17 @@ namespace clang {
/// \brief An InjectedClassNameType record.
TYPE_INJECTED_CLASS_NAME = 27,
/// \brief An ObjCObjectType record.
- TYPE_OBJC_OBJECT = 28
+ TYPE_OBJC_OBJECT = 28,
+ /// \brief A TemplateTypeParmType record.
+ TYPE_TEMPLATE_TYPE_PARM = 29,
+ /// \brief A TemplateSpecializationType record.
+ TYPE_TEMPLATE_SPECIALIZATION = 30,
+ /// \brief A DependentNameType record.
+ TYPE_DEPENDENT_NAME = 31,
+ /// \brief A DependentTemplateSpecializationType record.
+ TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION = 32,
+ /// \brief A DependentSizedArrayType record.
+ TYPE_DEPENDENT_SIZED_ARRAY = 33
};
/// \brief The type IDs for special types constructed by semantic
@@ -457,7 +478,9 @@ namespace clang {
/// \brief Objective-C "SEL" redefinition type
SPECIAL_TYPE_OBJC_SEL_REDEFINITION = 14,
/// \brief NSConstantString type
- SPECIAL_TYPE_NS_CONSTANT_STRING = 15
+ SPECIAL_TYPE_NS_CONSTANT_STRING = 15,
+ /// \brief Whether the __[u]int128_t identifier is installed.
+ SPECIAL_TYPE_INT128_INSTALLED = 16
};
/// \brief Record codes for each kind of declaration.
@@ -562,12 +585,13 @@ namespace clang {
DECL_CXX_DESTRUCTOR,
/// \brief A CXXConversionDecl record.
DECL_CXX_CONVERSION,
+ /// \brief An AccessSpecDecl record.
+ DECL_ACCESS_SPEC,
// FIXME: Implement serialization for these decl types. This just
// allocates the order in which
DECL_FRIEND,
DECL_FRIEND_TEMPLATE,
- DECL_TEMPLATE,
DECL_CLASS_TEMPLATE,
DECL_CLASS_TEMPLATE_SPECIALIZATION,
DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION,
@@ -641,6 +665,8 @@ namespace clang {
EXPR_CHARACTER_LITERAL,
/// \brief A ParenExpr record.
EXPR_PAREN,
+ /// \brief A ParenListExpr record.
+ EXPR_PAREN_LIST,
/// \brief A UnaryOperator record.
EXPR_UNARY_OPERATOR,
/// \brief An OffsetOfExpr record.
@@ -736,6 +762,8 @@ namespace clang {
EXPR_CXX_MEMBER_CALL,
/// \brief A CXXConstructExpr record.
EXPR_CXX_CONSTRUCT,
+ /// \brief A CXXTemporaryObjectExpr record.
+ EXPR_CXX_TEMPORARY_OBJECT,
// \brief A CXXStaticCastExpr record.
EXPR_CXX_STATIC_CAST,
// \brief A CXXDynamicCastExpr record.
@@ -755,11 +783,22 @@ namespace clang {
EXPR_CXX_THROW, // CXXThrowExpr
EXPR_CXX_DEFAULT_ARG, // CXXDefaultArgExpr
EXPR_CXX_BIND_TEMPORARY, // CXXBindTemporaryExpr
- //
- EXPR_CXX_ZERO_INIT_VALUE, // CXXZeroInitValueExpr
+ EXPR_CXX_BIND_REFERENCE, // CXXBindReferenceExpr
+
+ EXPR_CXX_SCALAR_VALUE_INIT, // CXXScalarValueInitExpr
EXPR_CXX_NEW, // CXXNewExpr
+ EXPR_CXX_DELETE, // CXXDeleteExpr
+ EXPR_CXX_PSEUDO_DESTRUCTOR, // CXXPseudoDestructorExpr
+
- EXPR_CXX_EXPR_WITH_TEMPORARIES // CXXExprWithTemporaries
+ EXPR_CXX_EXPR_WITH_TEMPORARIES, // CXXExprWithTemporaries
+ EXPR_CXX_DEPENDENT_SCOPE_MEMBER, // CXXDependentScopeMemberExpr
+ EXPR_CXX_DEPENDENT_SCOPE_DECL_REF, // DependentScopeDeclRefExpr
+ EXPR_CXX_UNRESOLVED_CONSTRUCT, // CXXUnresolvedConstructExpr
+ EXPR_CXX_UNRESOLVED_MEMBER, // UnresolvedMemberExpr
+ EXPR_CXX_UNRESOLVED_LOOKUP, // UnresolvedLookupExpr
+
+ EXPR_CXX_UNARY_TYPE_TRAIT // UnaryTypeTraitExpr
};
/// \brief The kinds of designators that can occur in a
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/PCHDeserializationListener.h b/contrib/llvm/tools/clang/include/clang/Frontend/PCHDeserializationListener.h
new file mode 100644
index 0000000..c9b90e2
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/PCHDeserializationListener.h
@@ -0,0 +1,36 @@
+//===- PCHDeserializationListener.h - Decl/Type PCH Read Events -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PCHDeserializationListener class, which is notified
+// by the PCHReader whenever a type or declaration is deserialized.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_PCH_DESERIALIZATION_LISTENER_H
+#define LLVM_CLANG_FRONTEND_PCH_DESERIALIZATION_LISTENER_H
+
+#include "clang/Frontend/PCHBitCodes.h"
+
+namespace clang {
+
+class Decl;
+class QualType;
+
+class PCHDeserializationListener {
+protected:
+ ~PCHDeserializationListener() {}
+
+public:
+ virtual void TypeRead(pch::TypeID ID, QualType T) = 0;
+ virtual void DeclRead(pch::DeclID ID, const Decl *D) = 0;
+};
+
+}
+
+#endif
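
Implementing the listener takes only the two callbacks; the protected non-virtual destructor means listeners are never destroyed through this interface. A counting listener as a sketch:

#include "clang/AST/Type.h" // complete QualType for the by-value parameter
#include "clang/Frontend/PCHDeserializationListener.h"

// Sketch: tally what the PCHReader deserializes.
class CountingListener : public clang::PCHDeserializationListener {
public:
  unsigned NumTypes, NumDecls;
  CountingListener() : NumTypes(0), NumDecls(0) {}

  virtual void TypeRead(clang::pch::TypeID ID, clang::QualType T) {
    ++NumTypes;
  }
  virtual void DeclRead(clang::pch::DeclID ID, const clang::Decl *D) {
    ++NumDecls;
  }
};
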
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/PCHReader.h b/contrib/llvm/tools/clang/include/clang/Frontend/PCHReader.h
index e144738..47e871f 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/PCHReader.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/PCHReader.h
@@ -59,12 +59,22 @@ class GotoStmt;
class LabelStmt;
class MacroDefinition;
class NamedDecl;
+class PCHDeserializationListener;
class Preprocessor;
class Sema;
class SwitchCase;
class PCHReader;
struct HeaderFileInfo;
+struct PCHPredefinesBlock {
+ /// \brief The file ID for this predefines buffer in a PCH file.
+ FileID BufferID;
+
+ /// \brief This predefines buffer in a PCH file.
+ llvm::StringRef Data;
+};
+typedef llvm::SmallVector<PCHPredefinesBlock, 2> PCHPredefinesBlocks;
+
/// \brief Abstract interface for callback invocations by the PCHReader.
///
/// While reading a PCH file, the PCHReader will call the methods of the
@@ -91,10 +101,7 @@ public:
/// \brief Receives the contents of the predefines buffer.
///
- /// \param PCHPredef The start of the predefines buffer in the PCH
- /// file.
- ///
- /// \param PCHBufferID The FileID for the PCH predefines buffer.
+ /// \param Buffers Information about the predefines buffers.
///
/// \param OriginalFileName The original file name for the PCH, which will
/// appear as an entry in the predefines buffer.
@@ -103,8 +110,7 @@ public:
/// here.
///
/// \returns true to indicate the predefines are invalid or false otherwise.
- virtual bool ReadPredefinesBuffer(llvm::StringRef PCHPredef,
- FileID PCHBufferID,
+ virtual bool ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
llvm::StringRef OriginalFileName,
std::string &SuggestedPredefines) {
return false;
@@ -131,8 +137,7 @@ public:
virtual bool ReadLanguageOptions(const LangOptions &LangOpts);
virtual bool ReadTargetTriple(llvm::StringRef Triple);
- virtual bool ReadPredefinesBuffer(llvm::StringRef PCHPredef,
- FileID PCHBufferID,
+ virtual bool ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
llvm::StringRef OriginalFileName,
std::string &SuggestedPredefines);
virtual void ReadHeaderFileInfo(const HeaderFileInfo &HFI, unsigned ID);
@@ -165,9 +170,12 @@ public:
enum PCHReadResult { Success, Failure, IgnorePCH };
friend class PCHValidator;
private:
- /// \ brief The receiver of some callbacks invoked by PCHReader.
+ /// \brief The receiver of some callbacks invoked by PCHReader.
llvm::OwningPtr<PCHReaderListener> Listener;
+ /// \brief The receiver of deserialization events.
+ PCHDeserializationListener *DeserializationListener;
+
SourceManager &SourceMgr;
FileManager &FileMgr;
Diagnostic &Diags;
@@ -321,7 +329,7 @@ private:
/// file.
llvm::SmallVector<uint64_t, 16> TentativeDefinitions;
- /// \brief The set of tentative definitions stored in the the PCH
+ /// \brief The set of unused static functions stored in the PCH
/// file.
llvm::SmallVector<uint64_t, 16> UnusedStaticFuncs;
@@ -333,6 +341,12 @@ private:
/// PCH file.
llvm::SmallVector<uint64_t, 4> ExtVectorDecls;
+ /// \brief The set of VTable uses of CXXRecordDecls stored in the PCH file.
+ llvm::SmallVector<uint64_t, 64> VTableUses;
+
+ /// \brief The set of dynamic CXXRecord declarations stored in the PCH file.
+ llvm::SmallVector<uint64_t, 16> DynamicClasses;
+
/// \brief The set of Objective-C category definitions stored in the
/// the PCH file.
llvm::SmallVector<uint64_t, 4> ObjCCategoryImpls;
@@ -447,17 +461,39 @@ private:
/// "Interesting" declarations are those that have data that may
/// need to be emitted, such as inline function definitions or
/// Objective-C protocols.
- llvm::SmallVector<Decl *, 16> InterestingDecls;
+ std::deque<Decl *> InterestingDecls;
- /// \brief The file ID for the predefines buffer in the PCH file.
- FileID PCHPredefinesBufferID;
+ /// \brief When reading a Stmt tree, Stmt operands are placed in this stack.
+ llvm::SmallVector<Stmt *, 16> StmtStack;
- /// \brief Pointer to the beginning of the predefines buffer in the
- /// PCH file.
- const char *PCHPredefines;
+ /// \brief What kind of records we are reading.
+ enum ReadingKind {
+ Read_Decl, Read_Type, Read_Stmt
+ };
- /// \brief Length of the predefines buffer in the PCH file.
- unsigned PCHPredefinesLen;
+ /// \brief What kind of records we are reading.
+ ReadingKind ReadingKind;
+
+ /// \brief RAII object to change the reading kind.
+ class ReadingKindTracker {
+ PCHReader &Reader;
+ enum ReadingKind PrevKind;
+
+ ReadingKindTracker(const ReadingKindTracker&); // do not implement
+ ReadingKindTracker &operator=(const ReadingKindTracker&);// do not implement
+
+ public:
+ ReadingKindTracker(enum ReadingKind newKind, PCHReader &reader)
+ : Reader(reader), PrevKind(Reader.ReadingKind) {
+ Reader.ReadingKind = newKind;
+ }
+
+ ~ReadingKindTracker() { Reader.ReadingKind = PrevKind; }
+ };
+
+ /// \brief All predefines buffers in all PCH files, to be treated as if
+ /// concatenated.
+ PCHPredefinesBlocks PCHPredefinesBuffers;
/// \brief Suggested contents of the predefines buffer, after this
/// PCH file has been processed.
@@ -469,10 +505,13 @@ private:
/// predefines buffer may contain additional definitions.
std::string SuggestedPredefines;
+ /// \brief Reads a statement from the specified cursor.
+ Stmt *ReadStmtFromStream(llvm::BitstreamCursor &Cursor);
+
void MaybeAddSystemRootToFilename(std::string &Filename);
PCHReadResult ReadPCHBlock();
- bool CheckPredefinesBuffer(llvm::StringRef PCHPredef, FileID PCHBufferID);
+ bool CheckPredefinesBuffers();
bool ParseLineTable(llvm::SmallVectorImpl<uint64_t> &Record);
PCHReadResult ReadSourceManagerBlock();
PCHReadResult ReadSLocEntryRecord(unsigned ID);
@@ -482,6 +521,8 @@ private:
void LoadedDecl(unsigned Index, Decl *D);
Decl *ReadDeclRecord(uint64_t Offset, unsigned Index);
+ void PassInterestingDeclsToConsumer();
+
/// \brief Produce an error diagnostic and return true.
///
/// This routine should only be used for fatal errors that have to
@@ -537,6 +578,10 @@ public:
Listener.reset(listener);
}
+ void setDeserializationListener(PCHDeserializationListener *Listener) {
+ DeserializationListener = Listener;
+ }
+
/// \brief Set the Preprocessor to use.
void setPreprocessor(Preprocessor &pp);
@@ -544,7 +589,7 @@ public:
void InitializeContext(ASTContext &Context);
/// \brief Retrieve the name of the PCH file
- const std::string &getFileName() { return FileName; }
+ const std::string &getFileName() const { return FileName; }
/// \brief Retrieve the name of the original source file name
const std::string &getOriginalSourceFile() { return OriginalFileName; }
@@ -563,36 +608,61 @@ public:
/// \brief Read preprocessed entities into the
virtual void ReadPreprocessedEntities();
+ /// \brief Returns the number of types found in this file.
+ unsigned getTotalNumTypes() const {
+ return static_cast<unsigned>(TypesLoaded.size());
+ }
+
+ /// \brief Returns the number of declarations found in this file.
+ unsigned getTotalNumDecls() const {
+ return static_cast<unsigned>(DeclsLoaded.size());
+ }
+
/// \brief Reads a TemplateArgumentLocInfo appropriate for the
/// given TemplateArgument kind.
TemplateArgumentLocInfo
GetTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
const RecordData &Record, unsigned &Idx);
+ /// \brief Reads a TemplateArgumentLoc.
+ TemplateArgumentLoc ReadTemplateArgumentLoc(const RecordData &Record,
+ unsigned &Idx);
+
/// \brief Reads a declarator info from the given record.
- virtual TypeSourceInfo *GetTypeSourceInfo(const RecordData &Record,
- unsigned &Idx);
+ TypeSourceInfo *GetTypeSourceInfo(const RecordData &Record,
+ unsigned &Idx);
+
+ /// \brief Resolve and return the translation unit declaration.
+ TranslationUnitDecl *GetTranslationUnitDecl();
/// \brief Resolve a type ID into a type, potentially building a new
/// type.
- virtual QualType GetType(pch::TypeID ID);
+ QualType GetType(pch::TypeID ID);
/// \brief Resolve a declaration ID into a declaration, potentially
/// building a new declaration.
- virtual Decl *GetDecl(pch::DeclID ID);
+ Decl *GetDecl(pch::DeclID ID);
+ virtual Decl *GetExternalDecl(uint32_t ID);
/// \brief Resolve the offset of a statement into a statement.
///
/// This operation will read a new statement from the external
/// source each time it is called, and is meant to be used via a
/// LazyOffsetPtr (which is used by Decls for the body of functions, etc).
- virtual Stmt *GetDeclStmt(uint64_t Offset);
+ virtual Stmt *GetExternalDeclStmt(uint64_t Offset);
/// ReadBlockAbbrevs - Enter a subblock of the specified BlockID with the
/// specified cursor. Read the abbreviations that are at the top of the block
/// and then leave the cursor pointing into the block.
bool ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor, unsigned BlockID);
+ /// \brief Finds all the visible declarations with a given name.
+ /// The current implementation of this method just loads the entire
+ /// lookup table as unmaterialized references.
+ virtual DeclContext::lookup_result
+ FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name);
+
/// \brief Read all of the declarations lexically stored in a
/// declaration context.
///
@@ -606,27 +676,8 @@ public:
///
/// \returns true if there was an error while reading the
/// declarations for this declaration context.
- virtual bool ReadDeclsLexicallyInContext(DeclContext *DC,
- llvm::SmallVectorImpl<pch::DeclID> &Decls);
-
- /// \brief Read all of the declarations visible from a declaration
- /// context.
- ///
- /// \param DC The declaration context whose visible declarations
- /// will be read.
- ///
- /// \param Decls A vector of visible declaration structures,
- /// providing the mapping from each name visible in the declaration
- /// context to the declaration IDs of declarations with that name.
- ///
- /// \returns true if there was an error while reading the
- /// declarations for this declaration context.
- ///
- /// FIXME: Using this intermediate data structure results in an
- /// extraneous copying of the data. Could we pass in a reference to
- /// the StoredDeclsMap instead?
- virtual bool ReadDeclsVisibleInContext(DeclContext *DC,
- llvm::SmallVectorImpl<VisibleDeclaration> & Decls);
+ virtual bool FindExternalLexicalDecls(const DeclContext *DC,
+ llvm::SmallVectorImpl<Decl*> &Decls);
/// \brief Function that will be invoked when we begin parsing a new
/// translation unit involving this external AST source.
@@ -691,8 +742,8 @@ public:
Selector DecodeSelector(unsigned Idx);
- virtual Selector GetSelector(uint32_t ID);
- virtual uint32_t GetNumKnownSelectors();
+ virtual Selector GetExternalSelector(uint32_t ID);
+ uint32_t GetNumExternalSelectors();
Selector GetSelector(const RecordData &Record, unsigned &Idx) {
return DecodeSelector(Record[Idx++]);
@@ -704,6 +755,28 @@ public:
NestedNameSpecifier *ReadNestedNameSpecifier(const RecordData &Record,
unsigned &Idx);
+ /// \brief Read a template name.
+ TemplateName ReadTemplateName(const RecordData &Record, unsigned &Idx);
+
+ /// \brief Read a template argument.
+ TemplateArgument ReadTemplateArgument(const RecordData &Record,unsigned &Idx);
+
+ /// \brief Read a template parameter list.
+ TemplateParameterList *ReadTemplateParameterList(const RecordData &Record,
+ unsigned &Idx);
+
+ /// \brief Read a template argument array.
+ void
+ ReadTemplateArgumentList(llvm::SmallVector<TemplateArgument, 8> &TemplArgs,
+ const RecordData &Record, unsigned &Idx);
+
+ /// \brief Read a UnresolvedSet structure.
+ void ReadUnresolvedSet(UnresolvedSetImpl &Set,
+ const RecordData &Record, unsigned &Idx);
+
+ /// \brief Read a C++ base specifier.
+ CXXBaseSpecifier ReadCXXBaseSpecifier(const RecordData &Record,unsigned &Idx);
+
/// \brief Read a source location.
SourceLocation ReadSourceLocation(const RecordData &Record, unsigned& Idx) {
return SourceLocation::getFromRawEncoding(Record[Idx++]);
@@ -729,20 +802,25 @@ public:
/// \brief Reads attributes from the current stream position.
Attr *ReadAttributes();
- /// \brief ReadDeclExpr - Reads an expression from the current decl cursor.
- Expr *ReadDeclExpr();
+ /// \brief Reads a statement.
+ Stmt *ReadStmt();
- /// \brief ReadTypeExpr - Reads an expression from the current type cursor.
- Expr *ReadTypeExpr();
+ /// \brief Reads an expression.
+ Expr *ReadExpr();
- /// \brief Reads a statement from the specified cursor.
- Stmt *ReadStmt(llvm::BitstreamCursor &Cursor);
-
- /// \brief Read a statement from the current DeclCursor.
- Stmt *ReadDeclStmt() {
- return ReadStmt(DeclsCursor);
+ /// \brief Reads a sub-statement operand during statement reading.
+ Stmt *ReadSubStmt() {
+ assert(ReadingKind == Read_Stmt &&
+ "Should be called only during statement reading!");
+ // Subexpressions are stored from last to first, so the next Stmt we need
+ // is at the back of the stack.
+ assert(!StmtStack.empty() && "Read too many sub statements!");
+ return StmtStack.pop_back_val();
}
+ /// \brief Reads a sub-expression operand during statement reading.
+ Expr *ReadSubExpr();
+
/// \brief Reads the macro record located at the given offset.
void ReadMacroRecord(uint64_t Offset);
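
The reworked statement machinery replaces per-cursor recursion with an explicit operand stack: ReadStmtFromStream builds each statement in the record and pushes it onto StmtStack, and the node that consumes operands pops them with ReadSubStmt()/ReadSubExpr(). Because operands are written last-to-first, pops come back in source order. A sketch of what a statement-reader visitor does under this discipline; the visitor and its Reader member are illustrative, while ReadSubExpr() and the BinaryOperator setters are real:

// Sketch: inside a hypothetical statement-reader visitor holding a
// PCHReader &Reader.
void VisitBinaryOperator(clang::BinaryOperator *E) {
  // Operands were emitted last-to-first, so the LHS is on top of the stack.
  E->setLHS(Reader.ReadSubExpr());
  E->setRHS(Reader.ReadSubExpr());
}
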
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/PCHWriter.h b/contrib/llvm/tools/clang/include/clang/Frontend/PCHWriter.h
index 85f53b9..70ad1d7 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/PCHWriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/PCHWriter.h
@@ -17,7 +17,9 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/AST/TemplateBase.h"
#include "clang/Frontend/PCHBitCodes.h"
+#include "clang/Frontend/PCHDeserializationListener.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include <map>
@@ -38,6 +40,7 @@ class CXXBaseOrMemberInitializer;
class LabelStmt;
class MacroDefinition;
class MemorizeStatCalls;
+class PCHReader;
class Preprocessor;
class Sema;
class SourceManager;
@@ -69,7 +72,7 @@ struct UnsafeQualTypeDenseMapInfo {
/// representation of a given abstract syntax tree and its supporting
/// data structures. This bitstream can be de-serialized via an
/// instance of the PCHReader class.
-class PCHWriter {
+class PCHWriter : public PCHDeserializationListener {
public:
typedef llvm::SmallVector<uint64_t, 64> RecordData;
@@ -77,6 +80,9 @@ private:
/// \brief The bitstream writer used to emit this precompiled header.
llvm::BitstreamWriter &Stream;
+ /// \brief The reader of existing PCH files, if we're chaining.
+ PCHReader *Chain;
+
/// \brief Stores a declaration or a type to be written to the PCH file.
class DeclOrType {
public:
@@ -188,7 +194,11 @@ private:
/// \brief Statements that we've encountered while serializing a
/// declaration or type.
- llvm::SmallVector<Stmt *, 8> StmtsToEmit;
+ llvm::SmallVector<Stmt *, 16> StmtsToEmit;
+
+ /// \brief The collection of statements to use for PCHWriter::AddStmt().
+ /// It will point to StmtsToEmit unless it is overridden.
+ llvm::SmallVector<Stmt *, 16> *CollectedStmts;
/// \brief Mapping from SwitchCase statements to IDs.
std::map<SwitchCase *, unsigned> SwitchCaseIDs;
@@ -210,10 +220,13 @@ private:
/// file.
unsigned NumVisibleDeclContexts;
+ /// \brief Write the given subexpression to the bitstream.
+ void WriteSubStmt(Stmt *S);
+
void WriteBlockInfoBlock();
void WriteMetadata(ASTContext &Context, const char *isysroot);
void WriteLanguageOptions(const LangOptions &LangOpts);
- void WriteStatCache(MemorizeStatCalls &StatCalls, const char* isysroot);
+ void WriteStatCache(MemorizeStatCalls &StatCalls);
void WriteSourceManagerBlock(SourceManager &SourceMgr,
const Preprocessor &PP,
const char* isysroot);
@@ -229,11 +242,16 @@ private:
unsigned ParmVarDeclAbbrev;
void WriteDeclsBlockAbbrevs();
void WriteDecl(ASTContext &Context, Decl *D);
+
+ void WritePCHCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+ const char* isysroot);
+ void WritePCHChain(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+ const char* isysroot);
public:
/// \brief Create a new precompiled header writer that outputs to
/// the given bitstream.
- PCHWriter(llvm::BitstreamWriter &Stream);
+ PCHWriter(llvm::BitstreamWriter &Stream, PCHReader *Chain);
/// \brief Write a precompiled header for the given semantic analysis.
///
@@ -299,6 +317,11 @@ public:
/// \brief Emits a reference to a declarator info.
void AddTypeSourceInfo(TypeSourceInfo *TInfo, RecordData &Record);
+ /// \brief Emits a template argument location info.
+ void AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
+ const TemplateArgumentLocInfo &Arg,
+ RecordData &Record);
+
/// \brief Emits a template argument location.
void AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg,
RecordData &Record);
@@ -315,6 +338,26 @@ public:
/// \brief Emit a nested name specifier.
void AddNestedNameSpecifier(NestedNameSpecifier *NNS, RecordData &Record);
+
+ /// \brief Emit a template name.
+ void AddTemplateName(TemplateName Name, RecordData &Record);
+
+ /// \brief Emit a template argument.
+ void AddTemplateArgument(const TemplateArgument &Arg, RecordData &Record);
+
+ /// \brief Emit a template parameter list.
+ void AddTemplateParameterList(const TemplateParameterList *TemplateParams,
+ RecordData &Record);
+
+ /// \brief Emit a template argument list.
+ void AddTemplateArgumentList(const TemplateArgumentList *TemplateArgs,
+ RecordData &Record);
+
+ /// \brief Emit a UnresolvedSet structure.
+ void AddUnresolvedSet(const UnresolvedSetImpl &Set, RecordData &Record);
+
+ /// \brief Emit a C++ base specifier.
+ void AddCXXBaseSpecifier(const CXXBaseSpecifier &Base, RecordData &Record);
/// \brief Add a string to the given record.
void AddString(const std::string &Str, RecordData &Record);
@@ -335,10 +378,9 @@ public:
/// type or declaration has been written, call FlushStmts() to write
/// the corresponding statements just after the type or
/// declaration.
- void AddStmt(Stmt *S) { StmtsToEmit.push_back(S); }
-
- /// \brief Write the given subexpression to the bitstream.
- void WriteSubStmt(Stmt *S);
+ void AddStmt(Stmt *S) {
+ CollectedStmts->push_back(S);
+ }
/// \brief Flush all of the statements and expressions that have
/// been added to the queue via AddStmt().
@@ -355,6 +397,10 @@ public:
unsigned GetLabelID(LabelStmt *S);
unsigned getParmVarDeclAbbrev() const { return ParmVarDeclAbbrev; }
+
+ // PCHDeserializationListener implementation
+ void TypeRead(pch::TypeID ID, QualType T);
+ void DeclRead(pch::DeclID ID, const Decl *D);
};
} // end namespace clang
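
With the new constructor, setting up a writer that chains from an existing PCH might look like this (a minimal sketch; Buffer and Reader are assumed to be provided by the caller, and Reader may be 0 when not chaining):

    std::vector<unsigned char> Buffer;
    llvm::BitstreamWriter Stream(Buffer);
    PCHWriter Writer(Stream, Reader);  // Reader: the PCHReader for the base PCH
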
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
index ec4392f..f530294 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
@@ -18,14 +18,9 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceLocation.h"
-namespace llvm {
- class raw_ostream;
-}
-
namespace clang {
class DiagnosticOptions;
class LangOptions;
-class SourceManager;
class TextDiagnosticPrinter : public DiagnosticClient {
llvm::raw_ostream &OS;
@@ -60,14 +55,14 @@ public:
void PrintIncludeStack(SourceLocation Loc, const SourceManager &SM);
- void HighlightRange(const SourceRange &R,
+ void HighlightRange(const CharSourceRange &R,
const SourceManager &SrcMgr,
unsigned LineNo, FileID FID,
std::string &CaretLine,
const std::string &SourceLine);
void EmitCaretDiagnostic(SourceLocation Loc,
- SourceRange *Ranges, unsigned NumRanges,
+ CharSourceRange *Ranges, unsigned NumRanges,
const SourceManager &SM,
const FixItHint *Hints,
unsigned NumHints,
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/TypeXML.def b/contrib/llvm/tools/clang/include/clang/Frontend/TypeXML.def
index 069d718..e8cb4a6 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/TypeXML.def
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/TypeXML.def
@@ -253,6 +253,11 @@ NODE_XML(DependentNameType, "DependentNameType")
ID_ATTRIBUTE_XML
END_NODE_XML
+NODE_XML(DependentTemplateSpecializationType,
+ "DependentTemplateSpecializationType")
+ ID_ATTRIBUTE_XML
+END_NODE_XML
+
NODE_XML(ObjCInterfaceType, "ObjCInterfaceType")
ID_ATTRIBUTE_XML
END_NODE_XML
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h b/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h
index c1d4831..f37cc01 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h
@@ -65,12 +65,6 @@ void ProcessWarningOptions(Diagnostic &Diags, const DiagnosticOptions &Opts);
void DoPrintPreprocessedInput(Preprocessor &PP, llvm::raw_ostream* OS,
const PreprocessorOutputOptions &Opts);
-/// RewriteMacrosInInput - Implement -rewrite-macros mode.
-void RewriteMacrosInInput(Preprocessor &PP, llvm::raw_ostream* OS);
-
-/// RewriteMacrosInInput - A simple test for the TokenRewriter class.
-void DoRewriteTest(Preprocessor &PP, llvm::raw_ostream* OS);
-
/// CreatePrintParserActionsAction - Return the actions implementation that
/// implements the -parse-print-callbacks option.
MinimalAction *CreatePrintParserActionsAction(Preprocessor &PP,
diff --git a/contrib/llvm/tools/clang/include/clang/Index/CallGraph.h b/contrib/llvm/tools/clang/include/clang/Index/CallGraph.h
index 5edfe6f..336bf47 100644
--- a/contrib/llvm/tools/clang/include/clang/Index/CallGraph.h
+++ b/contrib/llvm/tools/clang/include/clang/Index/CallGraph.h
@@ -54,7 +54,7 @@ public:
class CallGraph {
/// Program manages all Entities.
- idx::Program Prog;
+ idx::Program &Prog;
typedef std::map<idx::Entity, CallGraphNode *> FunctionMapTy;
@@ -71,7 +71,7 @@ class CallGraph {
CallGraphNode *ExternalCallingNode;
public:
- CallGraph();
+ CallGraph(idx::Program &P);
~CallGraph();
typedef FunctionMapTy::iterator iterator;
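
Since the graph now borrows a Program rather than owning one, construction changes along these lines (illustrative sketch):

    idx::Program Prog;   // owned by the caller and shareable with an Indexer
    CallGraph CG(Prog);  // previously: CallGraph CG; with an internal Program
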
diff --git a/contrib/llvm/tools/clang/include/clang/Index/Entity.h b/contrib/llvm/tools/clang/include/clang/Index/Entity.h
index c2aab62..9863963 100644
--- a/contrib/llvm/tools/clang/include/clang/Index/Entity.h
+++ b/contrib/llvm/tools/clang/include/clang/Index/Entity.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
#include <string>
namespace clang {
@@ -71,6 +72,9 @@ public:
/// \returns invalid Entity if an Entity cannot refer to this Decl.
static Entity get(Decl *D, Program &Prog);
+ /// \brief Get an Entity associated with a name in the global namespace.
+ static Entity get(llvm::StringRef Name, Program &Prog);
+
/// \brief true if the Entity is not visible outside the translation unit.
bool isInternalToTU() const {
assert(isValid() && "This Entity is not valid!");
diff --git a/contrib/llvm/tools/clang/include/clang/Index/Indexer.h b/contrib/llvm/tools/clang/include/clang/Index/Indexer.h
index 361e729..96c585d 100644
--- a/contrib/llvm/tools/clang/include/clang/Index/Indexer.h
+++ b/contrib/llvm/tools/clang/include/clang/Index/Indexer.h
@@ -23,6 +23,7 @@
namespace clang {
class ASTContext;
+ class FunctionDecl;
namespace idx {
class Program;
@@ -35,6 +36,7 @@ public:
typedef llvm::DenseMap<ASTContext *, TranslationUnit *> CtxTUMapTy;
typedef std::map<Entity, TUSetTy> MapTy;
typedef std::map<GlobalSelector, TUSetTy> SelMapTy;
+ typedef std::map<Entity, std::pair<FunctionDecl*, TranslationUnit*> > DefMapTy;
explicit Indexer(Program &prog) :
Prog(prog) { }
@@ -49,10 +51,15 @@ public:
virtual void GetTranslationUnitsFor(GlobalSelector Sel,
TranslationUnitHandler &Handler);
+ std::pair<FunctionDecl*, TranslationUnit*> getDefinitionFor(Entity Ent);
+
private:
Program &Prog;
MapTy Map;
+ // Map a function Entity to its definition.
+ DefMapTy DefMap;
+
CtxTUMapTy CtxTUMap;
SelMapTy SelMap;
};
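
Together with Entity::get(llvm::StringRef, Program&), the new definition map enables a cross-translation-unit lookup like the following (hypothetical names, not from this diff):

    idx::Entity Ent = idx::Entity::get("main", Prog);
    std::pair<FunctionDecl*, TranslationUnit*> Def = Idxer.getDefinitionFor(Ent);
    if (Def.first)
      Def.first->dump();  // the definition, paired with its owning TU
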
diff --git a/contrib/llvm/tools/clang/include/clang/Index/TranslationUnit.h b/contrib/llvm/tools/clang/include/clang/Index/TranslationUnit.h
index bf9e78f..b86ba3e 100644
--- a/contrib/llvm/tools/clang/include/clang/Index/TranslationUnit.h
+++ b/contrib/llvm/tools/clang/include/clang/Index/TranslationUnit.h
@@ -16,6 +16,7 @@
namespace clang {
class ASTContext;
+ class Preprocessor;
namespace idx {
class DeclReferenceMap;
@@ -26,6 +27,7 @@ class TranslationUnit {
public:
virtual ~TranslationUnit();
virtual ASTContext &getASTContext() = 0;
+ virtual Preprocessor &getPreprocessor() = 0;
virtual DeclReferenceMap &getDeclReferenceMap() = 0;
virtual SelectorMap &getSelectorMap() = 0;
};
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h b/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
index d74124e..99fe29b 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
@@ -16,6 +16,7 @@
#include "clang/Lex/DirectoryLookup.h"
#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/StringRef.h"
#include <string>
namespace clang {
@@ -70,6 +71,12 @@ public:
const std::string &Str) {
}
+ /// PragmaMessage - This callback is invoked when a #pragma message directive
+ /// is read.
+ ///
+ virtual void PragmaMessage(SourceLocation Loc, llvm::StringRef Str) {
+ }
+
/// MacroExpands - This is called by
/// Preprocessor::HandleMacroExpandedIdentifier when a macro invocation is
/// found.
@@ -127,6 +134,11 @@ public:
Second->PragmaComment(Loc, Kind, Str);
}
+ virtual void PragmaMessage(SourceLocation Loc, llvm::StringRef Str) {
+ First->PragmaMessage(Loc, Str);
+ Second->PragmaMessage(Loc, Str);
+ }
+
virtual void MacroExpands(const Token &Id, const MacroInfo* MI) {
First->MacroExpands(Id, MI);
Second->MacroExpands(Id, MI);
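
A client that wants to observe "#pragma message" directives overrides the new callback; for example (illustrative only, not part of this patch):

    class PragmaMessageLogger : public clang::PPCallbacks {
    public:
      virtual void PragmaMessage(clang::SourceLocation Loc, llvm::StringRef Str) {
        // Str carries the message text from the directive.
        llvm::errs() << "#pragma message: " << Str << "\n";
      }
    };
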
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h b/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h
index ef367fe..c68555b 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h
@@ -14,6 +14,8 @@
#ifndef LLVM_CLANG_PRAGMA_H
#define LLVM_CLANG_PRAGMA_H
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <vector>
@@ -33,12 +35,13 @@ namespace clang {
/// we treat "#pragma STDC" and "#pragma GCC" as namespaces that contain other
/// pragmas.
class PragmaHandler {
- const IdentifierInfo *Name;
+ std::string Name;
public:
- PragmaHandler(const IdentifierInfo *name) : Name(name) {}
+ explicit PragmaHandler(llvm::StringRef name) : Name(name) {}
+ PragmaHandler() {}
virtual ~PragmaHandler();
- const IdentifierInfo *getName() const { return Name; }
+ llvm::StringRef getName() const { return Name; }
virtual void HandlePragma(Preprocessor &PP, Token &FirstToken) = 0;
/// getIfNamespace - If this is a namespace, return it. This is equivalent to
@@ -46,30 +49,38 @@ public:
virtual PragmaNamespace *getIfNamespace() { return 0; }
};
+/// EmptyPragmaHandler - A pragma handler which takes no action; it can be
+/// used to ignore particular pragmas.
+class EmptyPragmaHandler : public PragmaHandler {
+public:
+ EmptyPragmaHandler();
+
+ virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
+};
+
/// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas,
/// allowing hierarchical pragmas to be defined. Common examples of namespaces
/// are "#pragma GCC", "#pragma STDC", and "#pragma omp", but any namespaces may
/// be (potentially recursively) defined.
class PragmaNamespace : public PragmaHandler {
- /// Handlers - This is the list of handlers in this namespace.
+ /// Handlers - This is a map of the handlers in this namespace with their
+ /// names as keys.
///
- std::vector<PragmaHandler*> Handlers;
+ llvm::StringMap<PragmaHandler*> Handlers;
public:
- PragmaNamespace(const IdentifierInfo *Name) : PragmaHandler(Name) {}
+ explicit PragmaNamespace(llvm::StringRef Name) : PragmaHandler(Name) {}
virtual ~PragmaNamespace();
/// FindHandler - Check to see if there is already a handler for the
- /// specified name. If not, return the handler for the null identifier if it
+ /// specified name. If not, return the handler for the null name if it
/// exists, otherwise return null. If IgnoreNull is true (the default) then
/// the null handler isn't returned on failure to match.
- PragmaHandler *FindHandler(const IdentifierInfo *Name,
+ PragmaHandler *FindHandler(llvm::StringRef Name,
bool IgnoreNull = true) const;
/// AddPragma - Add a pragma to this namespace.
///
- void AddPragma(PragmaHandler *Handler) {
- Handlers.push_back(Handler);
- }
+ void AddPragma(PragmaHandler *Handler);
/// RemovePragmaHandler - Remove the given handler from the
/// namespace.
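
A handler written against the new StringRef-based interface no longer needs an IdentifierInfo; a hypothetical example (not part of this patch):

    class MarkHandler : public clang::PragmaHandler {
    public:
      MarkHandler() : PragmaHandler("mark") {}  // name is now a plain string
      virtual void HandlePragma(clang::Preprocessor &PP,
                                clang::Token &FirstToken) {
        // Deliberately ignore "#pragma mark ..." section markers.
      }
    };
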
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
index f01b3af..1ee4bb6 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
@@ -340,13 +340,19 @@ public:
/// AddPragmaHandler - Add the specified pragma handler to the preprocessor.
/// If 'Namespace' is non-null, then it is a token required to exist on the
/// pragma line before the pragma string starts, e.g. "STDC" or "GCC".
- void AddPragmaHandler(const char *Namespace, PragmaHandler *Handler);
+ void AddPragmaHandler(llvm::StringRef Namespace, PragmaHandler *Handler);
+ void AddPragmaHandler(PragmaHandler *Handler) {
+ AddPragmaHandler(llvm::StringRef(), Handler);
+ }
/// RemovePragmaHandler - Remove the specific pragma handler from
/// the preprocessor. If \arg Namespace is non-null, then it should
/// be the namespace that \arg Handler was added to. It is an error
/// to remove a handler that has not been registered.
- void RemovePragmaHandler(const char *Namespace, PragmaHandler *Handler);
+ void RemovePragmaHandler(llvm::StringRef Namespace, PragmaHandler *Handler);
+ void RemovePragmaHandler(PragmaHandler *Handler) {
+ RemovePragmaHandler(llvm::StringRef(), Handler);
+ }
/// \brief Add the specified comment handler to the preprocessor.
void AddCommentHandler(CommentHandler *Handler);
@@ -871,7 +877,11 @@ private:
//===--------------------------------------------------------------------===//
// Caching stuff.
void CachingLex(Token &Result);
- bool InCachingLexMode() const { return CurPPLexer == 0 && CurTokenLexer == 0;}
+ bool InCachingLexMode() const {
+ // If the Lexer pointers are 0 and IncludeMacroStack is empty, it means
+ // that we are past EOF, not that we are in CachingLex mode.
+ return CurPPLexer == 0 && CurTokenLexer == 0 && !IncludeMacroStack.empty();
+ }
void EnterCachingLexMode();
void ExitCachingLexMode() {
if (InCachingLexMode())
@@ -918,6 +928,7 @@ public:
void HandlePragmaSystemHeader(Token &SysHeaderTok);
void HandlePragmaDependency(Token &DependencyTok);
void HandlePragmaComment(Token &CommentTok);
+ void HandlePragmaMessage(Token &MessageTok);
// Return true and store the first token only if any CommentHandler
// has inserted some tokens and getCommentRetentionState() is false.
bool HandleComment(Token &Token, SourceRange Comment);
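
Registration then uses the new StringRef overloads; e.g., reusing the hypothetical MarkHandler sketched above:

    PP.AddPragmaHandler(new MarkHandler());           // matches "#pragma mark"
    PP.AddPragmaHandler("clang", new MarkHandler());  // matches "#pragma clang mark"
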
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Token.h b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
index b5dde9a..bd9b468 100644
--- a/contrib/llvm/tools/clang/include/clang/Lex/Token.h
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
@@ -148,6 +148,7 @@ public:
Kind = tok::unknown;
Flags = 0;
PtrData = 0;
+ UintData = 0;
Loc = SourceLocation();
}
@@ -169,7 +170,7 @@ public:
}
void setLiteralData(const char *Ptr) {
assert(isLiteral() && "Cannot set literal data of non-literal");
- PtrData = (void*)Ptr;
+ PtrData = const_cast<char*>(Ptr);
}
void *getAnnotationValue() const {
@@ -254,4 +255,9 @@ struct PPConditionalInfo {
} // end namespace clang
+namespace llvm {
+ template <>
+ struct isPodLike<clang::Token> { static const bool value = true; };
+} // end namespace llvm
+
#endif
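
The isPodLike specialization lets LLVM's containers relocate Tokens with memcpy rather than element-wise copying; for instance (illustrative):

    llvm::SmallVector<clang::Token, 4> Toks;
    Toks.push_back(Tok);  // growth can now memcpy the stored Tokens
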
diff --git a/contrib/llvm/tools/clang/include/clang/Makefile b/contrib/llvm/tools/clang/include/clang/Makefile
index 6abe375..e366e4e 100644
--- a/contrib/llvm/tools/clang/include/clang/Makefile
+++ b/contrib/llvm/tools/clang/include/clang/Makefile
@@ -1,7 +1,7 @@
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
DIRS := AST Basic Driver
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
install-local::
$(Echo) Installing Clang include files
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/Action.h b/contrib/llvm/tools/clang/include/clang/Parse/Action.h
index e21da81..9cb47aa 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/Action.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/Action.h
@@ -64,7 +64,21 @@ namespace clang {
/// parse to complete accurately. The MinimalAction class does this
/// bare-minimum of tracking to implement this functionality.
class Action : public ActionBase {
+ /// \brief The parser's current scope.
+ ///
/// The parser maintains this state here so that it is accessible to \c Action
+ /// subclasses via \c getCurScope().
+ Scope *CurScope;
+
+protected:
+ friend class Parser;
+
+ /// \brief Retrieve the parser's current scope.
+ Scope *getCurScope() const { return CurScope; }
+
public:
+ Action() : CurScope(0) { }
+
/// Out-of-line virtual destructor to provide home for this class.
virtual ~Action();
@@ -1637,16 +1651,39 @@ public:
return move(SubExpr);
}
- /// ActOnCXXNew - Parsed a C++ 'new' expression. UseGlobal is true if the
- /// new was qualified (::new). In a full new like
- /// @code new (p1, p2) type(c1, c2) @endcode
- /// the p1 and p2 expressions will be in PlacementArgs and the c1 and c2
- /// expressions in ConstructorArgs. The type is passed as a declarator.
+ /// \brief Parsed a C++ 'new' expression.
+ ///
+ /// \param StartLoc The start of the new expression, which is either the
+ /// "new" keyword or the "::" preceding it, depending on \p UseGlobal.
+ ///
+ /// \param UseGlobal True if the "new" was qualified with "::".
+ ///
+ /// \param PlacementLParen The location of the opening parenthesis ('(') for
+ /// the placement arguments, if any.
+ ///
+ /// \param PlacementArgs The placement arguments, if any.
+ ///
+ /// \param PlacementRParen The location of the closing parenthesis (')') for
+ /// the placement arguments, if any.
+ ///
+ /// \param TypeIdParens If the type was expressed as a type-id in parentheses,
+ /// the source range covering the parenthesized type-id.
+ ///
+ /// \param D The parsed declarator, which may include an array size (for
+ /// array new) as the first declarator.
+ ///
+ /// \param ConstructorLParen The location of the opening parenthesis ('(') for
+ /// the constructor arguments, if any.
+ ///
+ /// \param ConstructorArgs The constructor arguments, if any.
+ ///
+ /// \param ConstructorRParen The location of the closing parenthesis (')') for
+ /// the constructor arguments, if any.
virtual OwningExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
- bool ParenTypeId, Declarator &D,
+ SourceRange TypeIdParens, Declarator &D,
SourceLocation ConstructorLParen,
MultiExprArg ConstructorArgs,
SourceLocation ConstructorRParen) {
@@ -1769,6 +1806,15 @@ public:
unsigned NumBases) {
}
+ /// ActOnAccessSpecifier - This is invoked when an access specifier
+ /// (and the colon following it) is found during the parsing of a
+ /// C++ class member declarator.
+ virtual DeclPtrTy ActOnAccessSpecifier(AccessSpecifier AS,
+ SourceLocation ASLoc,
+ SourceLocation ColonLoc) {
+ return DeclPtrTy();
+ }
+
/// ActOnCXXMemberDeclarator - This is invoked when a C++ class member
/// declarator is parsed. 'AS' is the access specifier, 'BitfieldWidth'
/// specifies the bitfield width if there is one and 'Init' specifies the
@@ -1824,46 +1870,87 @@ public:
//===---------------------------C++ Templates----------------------------===//
- /// ActOnTypeParameter - Called when a C++ template type parameter
- /// (e.g., "typename T") has been parsed. Typename specifies whether
- /// the keyword "typename" was used to declare the type parameter
- /// (otherwise, "class" was used), ellipsis specifies whether this is a
- /// C++0x parameter pack, EllipsisLoc specifies the start of the ellipsis,
- /// and KeyLoc is the location of the "class" or "typename" keyword.
- // ParamName is the name of the parameter (NULL indicates an unnamed template
- // parameter) and ParamNameLoc is the location of the parameter name (if any)
- /// If the type parameter has a default argument, it will be added
- /// later via ActOnTypeParameterDefault. Depth and Position provide
- /// the number of enclosing templates (see
- /// ActOnTemplateParameterList) and the number of previous
- /// parameters within this template parameter list.
+ /// \brief Called when a C++ template type parameter (e.g., "typename T") has
+ /// been parsed.
+ ///
+ /// Given
+ ///
+ /// \code
+ /// template<typename T, typename U = T> struct pair;
+ /// \endcode
+ ///
+ /// this callback will be invoked twice: once for the type parameter \c T
+ /// with \p Depth=0 and \p Position=0, and once for the type parameter \c U
+ /// with \p Depth=0 and \p Position=1.
+ ///
+ /// \param Typename Specifies whether the keyword "typename" was used to
+ /// declare the type parameter (otherwise, "class" was used).
+ ///
+ /// \param Ellipsis Specifies whether this is a C++0x parameter pack.
+ ///
+ /// \param EllipsisLoc Specifies the start of the ellipsis.
+ ///
+ /// \param KeyLoc The location of the "class" or "typename" keyword.
+ ///
+ /// \param ParamName The name of the parameter, where NULL indicates an
+ /// unnamed template parameter.
+ ///
+ /// \param ParamNameLoc The location of the parameter name (if any).
+ ///
+ /// \param Depth The depth of this template parameter, e.g., the number of
+ /// template parameter lists that occurred outside the template parameter
+ /// list in which this template type parameter occurs.
+ ///
+ /// \param Position The zero-based position of this template parameter within
+ /// its template parameter list, which is also the number of template
+ /// parameters that precede this parameter in the template parameter list.
+ ///
+ /// \param EqualLoc The location of the '=' sign for the default template
+ /// argument, if any.
+ ///
+ /// \param DefaultArg The default argument, if provided.
virtual DeclPtrTy ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
- unsigned Depth, unsigned Position) {
+ unsigned Depth, unsigned Position,
+ SourceLocation EqualLoc,
+ TypeTy *DefaultArg) {
return DeclPtrTy();
}
- /// ActOnTypeParameterDefault - Adds a default argument (the type
- /// Default) to the given template type parameter (TypeParam).
- virtual void ActOnTypeParameterDefault(DeclPtrTy TypeParam,
- SourceLocation EqualLoc,
- SourceLocation DefaultLoc,
- TypeTy *Default) {
- }
-
- /// ActOnNonTypeTemplateParameter - Called when a C++ non-type
- /// template parameter (e.g., "int Size" in "template<int Size>
- /// class Array") has been parsed. S is the current scope and D is
- /// the parsed declarator. Depth and Position provide the number of
- /// enclosing templates (see
- /// ActOnTemplateParameterList) and the number of previous
- /// parameters within this template parameter list.
+ /// \brief Called when a C++ non-type template parameter has been parsed.
+ ///
+ /// Given
+ ///
+ /// \code
+ /// template<int Size> class Array;
+ /// \endcode
+ ///
+ /// this callback will be invoked for the 'Size' non-type template parameter.
+ ///
+ /// \param S The current scope.
+ ///
+ /// \param D The parsed declarator.
+ ///
+ /// \param Depth The depth of this template parameter, e.g., the number of
+ /// template parameter lists that occurred outside the template parameter
+ /// list in which this template type parameter occurs.
+ ///
+ /// \param Position The zero-based position of this template parameter within
+ /// its template parameter list, which is also the number of template
+ /// parameters that precede this parameter in the template parameter list.
+ ///
+ /// \param EqualLoc The location of the '=' sign for the default template
+ /// argument, if any.
+ ///
+ /// \param DefaultArg The default argument, if provided.
virtual DeclPtrTy ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
- unsigned Position) {
+ unsigned Position,
+ SourceLocation EqualLoc,
+ ExprArg DefaultArg) {
return DeclPtrTy();
}
@@ -1874,29 +1961,50 @@ public:
ExprArg Default) {
}
- /// ActOnTemplateTemplateParameter - Called when a C++ template template
- /// parameter (e.g., "int T" in "template<template <typename> class T> class
- /// Array") has been parsed. TmpLoc is the location of the "template" keyword,
- /// TemplateParams is the sequence of parameters required by the template,
- /// ParamName is the name of the parameter (null if unnamed), and ParamNameLoc
- /// is the source location of the identifier (if given).
+ /// \brief Called when a C++ template template parameter has been parsed.
+ ///
+ /// Given
+ ///
+ /// \code
+ /// template<template <typename> class T> class X;
+ /// \endcode
+ ///
+ /// this callback will be invoked for the template template parameter \c T.
+ ///
+ /// \param S The scope in which this template template parameter occurs.
+ ///
+ /// \param TmpLoc The location of the "template" keyword.
+ ///
+ /// \param TemplateParams The template parameters required by the template.
+ ///
+ /// \param ParamName The name of the parameter, or NULL if unnamed.
+ ///
+ /// \param ParamNameLoc The source location of the parameter name (if given).
+ ///
+ /// \param Depth The depth of this template parameter, e.g., the number of
+ /// template parameter lists that occurred outside the template parameter
+ /// list in which this template parameter occurs.
+ ///
+ /// \param Position The zero-based position of this template parameter within
+ /// its template parameter list, which is also the number of template
+ /// parameters that precede this parameter in the template parameter list.
+ ///
+ /// \param EqualLoc The location of the '=' sign for the default template
+ /// argument, if any.
+ ///
+ /// \param DefaultArg The default argument, if provided.
virtual DeclPtrTy ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParamsTy *Params,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
- unsigned Position) {
+ unsigned Position,
+ SourceLocation EqualLoc,
+ const ParsedTemplateArgument &DefaultArg) {
return DeclPtrTy();
}
- /// \brief Adds a default argument to the given template template
- /// parameter.
- virtual void ActOnTemplateTemplateParameterDefault(DeclPtrTy TemplateParam,
- SourceLocation EqualLoc,
- const ParsedTemplateArgument &Default) {
- }
-
/// ActOnTemplateParameterList - Called when a complete template
/// parameter list has been parsed, e.g.,
///
@@ -1980,6 +2088,8 @@ public:
/// SS will be "MetaFun::", \p TemplateKWLoc contains the location
/// of the "template" keyword, and "apply" is the \p Name.
///
+ /// \param S The scope in which the dependent template name was parsed.
+ ///
/// \param TemplateKWLoc the location of the "template" keyword (if any).
///
/// \param SS the nested-name-specifier that precedes the "template" keyword
@@ -1995,12 +2105,21 @@ public:
///
/// \param EnteringContext whether we are entering the context of this
/// template.
- virtual TemplateTy ActOnDependentTemplateName(SourceLocation TemplateKWLoc,
+ ///
+ /// \param Template Will be set to the dependent template name, on success.
+ ///
+ /// \returns The kind of template name that was produced. Generally, this will
+ /// be \c TNK_Dependent_template_name. However, if the nested-name-specifier
+ /// is not dependent, or refers to the current instantiation, then we may
+ /// be able to resolve the template kind more specifically.
+ virtual TemplateNameKind ActOnDependentTemplateName(Scope *S,
+ SourceLocation TemplateKWLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
TypeTy *ObjectType,
- bool EnteringContext) {
- return TemplateTy();
+ bool EnteringContext,
+ TemplateTy &Template) {
+ return TNK_Non_template;
}
/// \brief Process the declaration or definition of an explicit
@@ -2237,8 +2356,9 @@ public:
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
virtual TypeResult
- ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS,
- const IdentifierInfo &II, SourceLocation IdLoc) {
+ ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, const IdentifierInfo &II,
+ SourceLocation IdLoc) {
return TypeResult();
}
@@ -2251,11 +2371,22 @@ public:
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param Ty the type that the typename specifier refers to.
virtual TypeResult
- ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS,
- SourceLocation TemplateLoc, TypeTy *Ty) {
+ ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, SourceLocation TemplateLoc,
+ TypeTy *Ty) {
return TypeResult();
}
+ /// \brief Called when the parser begins parsing a construct which should not
+ /// have access control applied to it.
+ virtual void ActOnStartSuppressingAccessChecks() {
+ }
+
+ /// \brief Called when the parser finishes parsing a construct which should
+ /// not have access control applied to it.
+ virtual void ActOnStopSuppressingAccessChecks() {
+ }
+
//===----------------------- Obj-C Declarations -------------------------===//
// ActOnStartClassInterface - this action is called immediately after parsing
@@ -2565,7 +2696,9 @@ public:
//===---------------------------- Pragmas -------------------------------===//
enum PragmaOptionsAlignKind {
+ POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
+ POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
@@ -2727,7 +2860,27 @@ public:
/// \param NumArgs the number of arguments in \p Args.
virtual void CodeCompleteCall(Scope *S, ExprTy *Fn,
ExprTy **Args, unsigned NumArgs) { }
-
+
+ /// \brief Code completion for the initializer of a variable declaration.
+ ///
+ /// \param S The scope in which the initializer occurs.
+ ///
+ /// \param D The declaration being initialized.
+ virtual void CodeCompleteInitializer(Scope *S, DeclPtrTy D) { }
+
+ /// \brief Code completion after the "return" keyword within a function.
+ ///
+ /// \param S The scope in which the return statement occurs.
+ virtual void CodeCompleteReturn(Scope *S) { }
+
+ /// \brief Code completion for the right-hand side of an assignment or
+ /// compound assignment operator.
+ ///
+ /// \param S The scope in which the assignment occurs.
+ ///
+ /// \param LHS The left-hand side of the assignment expression.
+ virtual void CodeCompleteAssignmentRHS(Scope *S, ExprTy *LHS) { }
+
/// \brief Code completion for a C++ nested-name-specifier that precedes a
/// qualified-id of some form.
///
@@ -2851,6 +3004,14 @@ public:
unsigned NumMethods) {
}
+ /// \brief Code completion for the receiver in an Objective-C message send.
+ ///
+ /// This code completion action is invoked when we see a '[' that indicates
+ /// the start of an Objective-C message send.
+ ///
+ /// \param S The scope in which the Objective-C message send occurs.
+ virtual void CodeCompleteObjCMessageReceiver(Scope *S) { }
+
/// \brief Code completion for an ObjC message expression that sends
/// a message to the superclass.
///
@@ -2905,7 +3066,7 @@ public:
/// parsed.
virtual void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
unsigned NumProtocols) { }
-
+
/// \brief Code completion for a protocol declaration or definition, after
/// the @protocol but before any identifier.
///
@@ -2995,6 +3156,32 @@ public:
TypeTy *ReturnType,
DeclPtrTy IDecl) {
}
+
+ /// \brief Code completion for a selector identifier or argument name within
+ /// an Objective-C method declaration.
+ ///
+ /// \param S The scope in which this code completion occurs.
+ ///
+ /// \param IsInstanceMethod Whether we are parsing an instance method (or,
+ /// if false, a class method).
+ ///
+ /// \param AtParameterName Whether the actual code completion point is at the
+ /// argument name.
+ ///
+ /// \param ReturnType If non-NULL, the specified return type of the method
+ /// being declared or defined.
+ ///
+ /// \param SelIdents The identifiers that occurred in the selector for the
+ /// method declaration prior to the code completion point.
+ ///
+ /// \param NumSelIdents The number of identifiers provided by SelIdents.
+ virtual void CodeCompleteObjCMethodDeclSelector(Scope *S,
+ bool IsInstanceMethod,
+ bool AtParameterName,
+ TypeTy *ReturnType,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents) { }
+
//@}
};
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/AttributeList.h b/contrib/llvm/tools/clang/include/clang/Parse/AttributeList.h
index 1e6d3ab..b60a940 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/AttributeList.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/AttributeList.h
@@ -115,6 +115,7 @@ public:
AT_weakref,
AT_weak_import,
AT_reqd_wg_size,
+ AT_init_priority,
IgnoredAttribute,
UnknownAttribute
};
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/DeclSpec.h b/contrib/llvm/tools/clang/include/clang/Parse/DeclSpec.h
index 9c19a67..0e6dbec 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/DeclSpec.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/DeclSpec.h
@@ -170,6 +170,7 @@ private:
/*TST*/unsigned TypeSpecType : 5;
bool TypeAltiVecVector : 1;
bool TypeAltiVecPixel : 1;
+ bool TypeAltiVecBool : 1;
bool TypeSpecOwned : 1;
// type-qualifiers
@@ -237,6 +238,7 @@ public:
TypeSpecType(TST_unspecified),
TypeAltiVecVector(false),
TypeAltiVecPixel(false),
+ TypeAltiVecBool(false),
TypeSpecOwned(false),
TypeQualifiers(TSS_unspecified),
FS_inline_specified(false),
@@ -278,6 +280,7 @@ public:
TST getTypeSpecType() const { return (TST)TypeSpecType; }
bool isTypeAltiVecVector() const { return TypeAltiVecVector; }
bool isTypeAltiVecPixel() const { return TypeAltiVecPixel; }
+ bool isTypeAltiVecBool() const { return TypeAltiVecBool; }
bool isTypeSpecOwned() const { return TypeSpecOwned; }
void *getTypeRep() const { return TypeRep; }
CXXScopeSpec &getTypeSpecScope() { return TypeScope; }
@@ -885,6 +888,13 @@ struct DeclaratorChunk {
delete[] Exceptions;
}
+ /// isKNRPrototype - Return true if this is a K&R style identifier list,
+ /// like "void foo(a,b,c)". In a function definition, this will be followed
+ /// by the argument type definitions.
+ bool isKNRPrototype() const {
+ return !hasPrototype && NumArgs != 0;
+ }
+
SourceLocation getEllipsisLoc() const {
return SourceLocation::getFromRawEncoding(EllipsisLoc);
}
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
index 8081c24..b8c294a 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
@@ -72,6 +72,7 @@ namespace prec {
class Parser {
friend class PragmaUnusedHandler;
friend class ColonProtectionRAIIObject;
+ friend class ParenBraceBracketBalancer;
PrettyStackTraceParserEntry CrashInfo;
Preprocessor &PP;
@@ -93,7 +94,6 @@ class Parser {
/// and SemaActions for those uses that don't matter.
Action &Actions;
- Scope *CurScope;
Diagnostic &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
@@ -140,7 +140,8 @@ public:
Action &getActions() const { return Actions; }
const Token &getCurToken() const { return Tok; }
-
+ Scope *getCurScope() const { return Actions.getCurScope(); }
+
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef Action::ExprTy ExprTy;
@@ -832,8 +833,8 @@ private:
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
DeclGroupPtrTy ParseExternalDeclaration(CXX0XAttributeList Attr);
- bool isDeclarationAfterDeclarator();
- bool isStartOfFunctionDefinition();
+ bool isDeclarationAfterDeclarator() const;
+ bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(AttributeList *Attr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
@@ -1059,6 +1060,7 @@ private:
OwningExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
OwningExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
OwningExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
+ bool isSimpleObjCMessageExpression();
OwningExprResult ParseObjCMessageExpression();
OwningExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
@@ -1345,14 +1347,14 @@ private:
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
- if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.CurScope, SS))
+ if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
- P.Actions.ActOnCXXExitDeclaratorScope(P.CurScope, SS);
+ P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/Template.h b/contrib/llvm/tools/clang/include/clang/Parse/Template.h
index 1f8ccfb..84f4ed9 100644
--- a/contrib/llvm/tools/clang/include/clang/Parse/Template.h
+++ b/contrib/llvm/tools/clang/include/clang/Parse/Template.h
@@ -58,7 +58,7 @@ namespace clang {
Loc(TemplateLoc), SS(SS) { }
/// \brief Determine whether the given template argument is invalid.
- bool isInvalid() { return Arg == 0; }
+ bool isInvalid() const { return Arg == 0; }
/// \brief Determine what kind of template argument we have.
KindType getKind() const { return Kind; }
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h b/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h
new file mode 100644
index 0000000..5fb107c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h
@@ -0,0 +1,45 @@
+//===--- ASTConsumers.h - ASTConsumer implementations -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// AST Consumers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef REWRITE_ASTCONSUMERS_H
+#define REWRITE_ASTCONSUMERS_H
+
+#include <string>
+
+namespace llvm {
+ class raw_ostream;
+}
+namespace clang {
+
+class ASTConsumer;
+class Diagnostic;
+class LangOptions;
+class Preprocessor;
+
+// ObjC rewriter: attempts to rewrite ObjC constructs into pure C code.
+// This is considered experimental, and only works with Apple's ObjC runtime.
+ASTConsumer *CreateObjCRewriter(const std::string &InFile,
+ llvm::raw_ostream *OS,
+ Diagnostic &Diags,
+ const LangOptions &LOpts,
+ bool SilenceRewriteMacroWarning);
+
+/// CreateHTMLPrinter - Create an AST consumer which rewrites source code to
+/// HTML with syntax highlighting suitable for viewing in a web-browser.
+ASTConsumer *CreateHTMLPrinter(llvm::raw_ostream *OS, Preprocessor &PP,
+ bool SyntaxHighlight = true,
+ bool HighlightMacros = true);
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FixItRewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h
index b432d74..4ebcef0 100644
--- a/contrib/llvm/tools/clang/include/clang/Frontend/FixItRewriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h
@@ -12,8 +12,8 @@
// then forwards any diagnostics to the adapted diagnostic client.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FRONTEND_FIX_IT_REWRITER_H
-#define LLVM_CLANG_FRONTEND_FIX_IT_REWRITER_H
+#ifndef LLVM_CLANG_REWRITE_FIX_IT_REWRITER_H
+#define LLVM_CLANG_REWRITE_FIX_IT_REWRITER_H
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceLocation.h"
@@ -101,4 +101,4 @@ public:
}
-#endif // LLVM_CLANG_FRONTEND_FIX_IT_REWRITER_H
+#endif // LLVM_CLANG_REWRITE_FIX_IT_REWRITER_H
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h
new file mode 100644
index 0000000..2ff8d0a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h
@@ -0,0 +1,69 @@
+//===-- FrontendActions.h - Useful Frontend Actions -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_REWRITE_FRONTENDACTIONS_H
+#define LLVM_CLANG_REWRITE_FRONTENDACTIONS_H
+
+#include "clang/Frontend/FrontendAction.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+class FixItRewriter;
+class FixItPathRewriter;
+
+//===----------------------------------------------------------------------===//
+// AST Consumer Actions
+//===----------------------------------------------------------------------===//
+
+class HTMLPrintAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile);
+};
+
+class FixItAction : public ASTFrontendAction {
+protected:
+ llvm::OwningPtr<FixItRewriter> Rewriter;
+ llvm::OwningPtr<FixItPathRewriter> PathRewriter;
+
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile);
+
+ virtual bool BeginSourceFileAction(CompilerInstance &CI,
+ llvm::StringRef Filename);
+
+ virtual void EndSourceFileAction();
+
+ virtual bool hasASTFileSupport() const { return false; }
+
+public:
+ FixItAction();
+ ~FixItAction();
+};
+
+class RewriteObjCAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile);
+};
+
+class RewriteMacrosAction : public PreprocessorFrontendAction {
+protected:
+ void ExecuteAction();
+};
+
+class RewriteTestAction : public PreprocessorFrontendAction {
+protected:
+ void ExecuteAction();
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/RewriteRope.h b/contrib/llvm/tools/clang/include/clang/Rewrite/RewriteRope.h
index c0bd741..cb3f8a8 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/RewriteRope.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/RewriteRope.h
@@ -16,6 +16,7 @@
#include <cstring>
#include <cassert>
+#include <cstddef>
#include <iterator>
namespace clang {
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h
index adda866..b3d4035 100644
--- a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h
@@ -64,7 +64,7 @@ public:
/// the buffer is specified relative to the original SourceBuffer. The
/// text is inserted after the specified location.
///
- void InsertText(unsigned OrigOffset, const llvm::StringRef &Str,
+ void InsertText(unsigned OrigOffset, llvm::StringRef Str,
bool InsertAfter = true);
@@ -72,14 +72,14 @@ public:
/// offset in the buffer is specified relative to the original
/// SourceBuffer. The text is inserted before the specified location. This
/// method is the same as InsertText with "InsertAfter == false".
- void InsertTextBefore(unsigned OrigOffset, const llvm::StringRef &Str) {
+ void InsertTextBefore(unsigned OrigOffset, llvm::StringRef Str) {
InsertText(OrigOffset, Str, false);
}
/// InsertTextAfter - Insert some text at the specified point, where the
/// offset in the buffer is specified relative to the original SourceBuffer.
/// The text is inserted after the specified location.
- void InsertTextAfter(unsigned OrigOffset, const llvm::StringRef &Str) {
+ void InsertTextAfter(unsigned OrigOffset, llvm::StringRef Str) {
InsertText(OrigOffset, Str);
}
@@ -87,7 +87,7 @@ public:
/// buffer with a new string. This is effectively a combined "remove/insert"
/// operation.
void ReplaceText(unsigned OrigOffset, unsigned OrigLength,
- const llvm::StringRef &NewStr);
+ llvm::StringRef NewStr);
private: // Methods only usable by Rewriter.
@@ -151,6 +151,7 @@ public:
/// getRangeSize - Return the size in bytes of the specified range if they
/// are in the same file. If not, this returns -1.
int getRangeSize(SourceRange Range) const;
+ int getRangeSize(const CharSourceRange &Range) const;
/// getRewrittenText - Return the rewritten form of the text in the specified
/// range. If the start or end of the range was unrewritable or if they are
@@ -163,7 +164,7 @@ public:
/// InsertText - Insert the specified string at the specified location in the
/// original buffer. This method returns true (and does nothing) if the input
/// location was not rewritable, false otherwise.
- bool InsertText(SourceLocation Loc, const llvm::StringRef &Str,
+ bool InsertText(SourceLocation Loc, llvm::StringRef Str,
bool InsertAfter = true);
/// InsertTextAfter - Insert the specified string at the specified location in
@@ -171,7 +172,7 @@ public:
/// the input location was not rewritable, false otherwise. Text is
/// inserted after any other text that has been previously inserted
/// at the same point (the default behavior for InsertText).
- bool InsertTextAfter(SourceLocation Loc, const llvm::StringRef &Str) {
+ bool InsertTextAfter(SourceLocation Loc, llvm::StringRef Str) {
return InsertText(Loc, Str);
}
@@ -180,7 +181,7 @@ public:
/// location was not rewritable, false otherwise. Text is
/// inserted before any other text that has been previously inserted
/// at the same point.
- bool InsertTextBefore(SourceLocation Loc, const llvm::StringRef &Str) {
+ bool InsertTextBefore(SourceLocation Loc, llvm::StringRef Str) {
return InsertText(Loc, Str, false);
}
@@ -191,7 +192,7 @@ public:
/// buffer with a new string. This is effectively a combined "remove/insert"
/// operation.
bool ReplaceText(SourceLocation Start, unsigned OrigLength,
- const llvm::StringRef &NewStr);
+ llvm::StringRef NewStr);
/// ReplaceStmt - This replaces a Stmt/Expr with another, using the pretty
/// printer to generate the replacement code. This returns true if the input
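
The StringRef parameters are now taken by value, which is the idiomatic (and equally cheap) way to pass them; a short usage sketch, with SM, LangOpts, and Loc assumed to come from the surrounding setup:

    Rewriter Rewrite(SM, LangOpts);
    Rewrite.InsertTextBefore(Loc, "/* begin */ ");
    Rewrite.ReplaceText(Loc, /*OrigLength=*/3, "fixed");
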
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h
new file mode 100644
index 0000000..669cf8c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h
@@ -0,0 +1,31 @@
+//===--- Rewriters.h - Rewriter implementations -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header contains miscellaneous utilities for various front-end actions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_REWRITE_REWRITERS_H
+#define LLVM_CLANG_REWRITE_REWRITERS_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+class Preprocessor;
+
+/// RewriteMacrosInInput - Implement -rewrite-macros mode.
+void RewriteMacrosInInput(Preprocessor &PP, llvm::raw_ostream* OS);
+
+/// DoRewriteTest - A simple test for the TokenRewriter class.
+void DoRewriteTest(Preprocessor &PP, llvm::raw_ostream* OS);
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
index 1f1c0cc..1d9d250 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
@@ -59,6 +59,18 @@ enum {
CCD_InBaseClass = 2
};
+/// \brief Priority value factors by which we will divide or multiply the
+/// priority of a code-completion result.
+enum {
+ /// \brief Divide by this factor when a code-completion result's type exactly
+ /// matches the type we expect.
+ CCF_ExactTypeMatch = 4,
+ /// \brief Divide by this factor when a code-completion result's type is
+ /// similar to the type we expect (e.g., both arithmetic types, both
+ /// Objective-C object pointer types).
+ CCF_SimilarTypeMatch = 2
+};
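
As a worked example (illustrative): a completion result with priority 40 whose type exactly matches the expected type is adjusted to 40 / CCF_ExactTypeMatch = 10; since smaller priority values rank earlier, the exact match is promoted accordingly.
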
+
class FunctionDecl;
class FunctionType;
class FunctionTemplateDecl;
@@ -343,6 +355,10 @@ public:
/// method, etc.) should be considered "informative".
bool AllParametersAreInformative : 1;
+ /// \brief Whether we're completing a declaration of the given entity,
+ /// rather than a use of that entity.
+ bool DeclaringEntity : 1;
+
/// \brief If the result should have a nested-name-specifier, this is it.
/// When \c QualifierIsInformative, the nested-name-specifier is
/// informative rather than required.
@@ -356,7 +372,7 @@ public:
Priority(getPriorityFromDecl(Declaration)), StartParameter(0),
Hidden(false), QualifierIsInformative(QualifierIsInformative),
StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- Qualifier(Qualifier) {
+ DeclaringEntity(false), Qualifier(Qualifier) {
}
/// \brief Build a result that refers to a keyword or symbol.
@@ -364,21 +380,21 @@ public:
: Kind(RK_Keyword), Keyword(Keyword), Priority(Priority),
StartParameter(0), Hidden(false), QualifierIsInformative(0),
StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- Qualifier(0) { }
+ DeclaringEntity(false), Qualifier(0) { }
/// \brief Build a result that refers to a macro.
Result(IdentifierInfo *Macro, unsigned Priority = CCP_Macro)
: Kind(RK_Macro), Macro(Macro), Priority(Priority), StartParameter(0),
Hidden(false), QualifierIsInformative(0),
StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- Qualifier(0) { }
+ DeclaringEntity(false), Qualifier(0) { }
/// \brief Build a result that refers to a pattern.
Result(CodeCompletionString *Pattern, unsigned Priority = CCP_CodePattern)
: Kind(RK_Pattern), Pattern(Pattern), Priority(Priority),
StartParameter(0), Hidden(false), QualifierIsInformative(0),
StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- Qualifier(0) { }
+ DeclaringEntity(false), Qualifier(0) { }
/// \brief Retrieve the declaration stored in this result.
NamedDecl *getDeclaration() const {
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h b/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
index d27e292..ad42a84 100644
--- a/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
@@ -29,6 +29,8 @@ public:
ExternalASTSource::SemaSource = true;
}
+ ~ExternalSemaSource();
+
/// \brief Initialize the semantic source with the Sema instance
/// being used to perform semantic analysis on the abstract syntax
/// tree.
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
index 851f8d1..d41051f 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
@@ -31,25 +31,128 @@
using namespace clang;
+unsigned ASTContext::NumImplicitDefaultConstructors;
+unsigned ASTContext::NumImplicitDefaultConstructorsDeclared;
+unsigned ASTContext::NumImplicitCopyConstructors;
+unsigned ASTContext::NumImplicitCopyConstructorsDeclared;
+unsigned ASTContext::NumImplicitCopyAssignmentOperators;
+unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
+unsigned ASTContext::NumImplicitDestructors;
+unsigned ASTContext::NumImplicitDestructorsDeclared;
+
enum FloatingRank {
FloatRank, DoubleRank, LongDoubleRank
};
+void
+ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateTemplateParmDecl *Parm) {
+ ID.AddInteger(Parm->getDepth());
+ ID.AddInteger(Parm->getPosition());
+ // FIXME: Parameter pack
+
+ TemplateParameterList *Params = Parm->getTemplateParameters();
+ ID.AddInteger(Params->size());
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
+ ID.AddInteger(0);
+ ID.AddBoolean(TTP->isParameterPack());
+ continue;
+ }
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ ID.AddInteger(1);
+ // FIXME: Parameter pack
+ ID.AddPointer(NTTP->getType().getAsOpaquePtr());
+ continue;
+ }
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
+ ID.AddInteger(2);
+ Profile(ID, TTP);
+ }
+}
+
+TemplateTemplateParmDecl *
+ASTContext::getCanonicalTemplateTemplateParmDecl(
+ TemplateTemplateParmDecl *TTP) {
+ // Check if we already have a canonical template template parameter.
+ llvm::FoldingSetNodeID ID;
+ CanonicalTemplateTemplateParm::Profile(ID, TTP);
+ void *InsertPos = 0;
+ CanonicalTemplateTemplateParm *Canonical
+ = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
+ if (Canonical)
+ return Canonical->getParam();
+
+ // Build a canonical template parameter list.
+ TemplateParameterList *Params = TTP->getTemplateParameters();
+ llvm::SmallVector<NamedDecl *, 4> CanonParams;
+ CanonParams.reserve(Params->size());
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
+ CanonParams.push_back(
+ TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(), TTP->getDepth(),
+ TTP->getIndex(), 0, false,
+ TTP->isParameterPack()));
+ else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*P))
+ CanonParams.push_back(
+ NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(), NTTP->getDepth(),
+ NTTP->getPosition(), 0,
+ getCanonicalType(NTTP->getType()),
+ 0));
+ else
+ CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
+ cast<TemplateTemplateParmDecl>(*P)));
+ }
+
+ TemplateTemplateParmDecl *CanonTTP
+ = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(), TTP->getDepth(),
+ TTP->getPosition(), 0,
+ TemplateParameterList::Create(*this, SourceLocation(),
+ SourceLocation(),
+ CanonParams.data(),
+ CanonParams.size(),
+ SourceLocation()));
+
+ // Get the new insert position for the node we care about.
+ Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
+ assert(Canonical == 0 && "Shouldn't be in the map!");
+ (void)Canonical;
+
+ // Create the canonical template template parameter entry.
+ Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
+ CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
+ return CanonTTP;
+}
+
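Note the second FindNodeOrInsertPos call: building the canonical parameter list can recurse into this same function and insert other nodes, invalidating the original InsertPos. This is the standard LLVM FoldingSet uniquing idiom; a generic sketch, where MyNode, Key, and Set are hypothetical placeholders:

    llvm::FoldingSetNodeID ID;
    MyNode::Profile(ID, Key);
    void *InsertPos = 0;
    if (MyNode *Existing = Set.FindNodeOrInsertPos(ID, InsertPos))
      return Existing;                      // already uniqued
    MyNode *N = new MyNode(Key);            // may insert other nodes ...
    Set.FindNodeOrInsertPos(ID, InsertPos); // ... so refresh InsertPos
    Set.InsertNode(N, InsertPos);
    return N;
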
ASTContext::ASTContext(const LangOptions& LOpts, SourceManager &SM,
const TargetInfo &t,
IdentifierTable &idents, SelectorTable &sels,
Builtin::Context &builtins,
bool FreeMem, unsigned size_reserve) :
- GlobalNestedNameSpecifier(0), CFConstantStringTypeDecl(0),
- NSConstantStringTypeDecl(0),
+ TemplateSpecializationTypes(this_()),
+ DependentTemplateSpecializationTypes(this_()),
+ GlobalNestedNameSpecifier(0), IsInt128Installed(false),
+ CFConstantStringTypeDecl(0), NSConstantStringTypeDecl(0),
ObjCFastEnumerationStateTypeDecl(0), FILEDecl(0), jmp_bufDecl(0),
sigjmp_bufDecl(0), BlockDescriptorType(0), BlockDescriptorExtendedType(0),
+ NullTypeSourceInfo(QualType()),
SourceMgr(SM), LangOpts(LOpts), FreeMemory(FreeMem), Target(t),
Idents(idents), Selectors(sels),
BuiltinInfo(builtins),
DeclarationNames(*this),
ExternalSource(0), PrintingPolicy(LOpts),
- LastSDM(0, 0) {
+ LastSDM(0, 0),
+ UniqueBlockByRefTypeID(0), UniqueBlockParmTypeID(0) {
ObjCIdRedefinitionType = QualType();
ObjCClassRedefinitionType = QualType();
ObjCSelRedefinitionType = QualType();
@@ -88,13 +191,6 @@ ASTContext::~ASTContext() {
Deallocate(&*I++);
}
- for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
- I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
- // Increment in loop to prevent using deallocated memory.
- if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
- R->Destroy(*this);
- }
-
for (llvm::DenseMap<const ObjCContainerDecl*,
const ASTRecordLayout*>::iterator
I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; ) {
@@ -104,6 +200,16 @@ ASTContext::~ASTContext() {
}
}
+ // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
+ // even when using the BumpPtrAllocator because they can contain
+ // DenseMaps.
+ for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
+ I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
+ // Increment in loop to prevent using deallocated memory.
+ if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
+ R->Destroy(*this);
+ }
+
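The relocated loop matters because BumpPtrAllocator frees its slabs wholesale and never runs per-object destructors; anything that owns side allocations (here, the DenseMaps inside ASTRecordLayout) must be destroyed by hand. A sketch, with Widget as a hypothetical type:

    #include "llvm/Support/Allocator.h"
    #include <new>

    struct Widget { ~Widget() {} /* owns heap allocations of its own */ };

    void demo() {
      llvm::BumpPtrAllocator Alloc;
      Widget *W = new (Alloc.Allocate<Widget>()) Widget(); // placement-new
      W->~Widget();              // explicit: the allocator never calls it
    }                            // slabs reclaimed here, destructors or not
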
// Destroy nested-name-specifiers.
for (llvm::FoldingSet<NestedNameSpecifier>::iterator
NNS = NestedNameSpecifiers.begin(),
@@ -155,11 +261,30 @@ void ASTContext::PrintStats() const {
#include "clang/AST/TypeNodes.def"
fprintf(stderr, "Total bytes = %d\n", int(TotalBytes));
-
+
+ // Implicit special member functions.
+ fprintf(stderr, " %u/%u implicit default constructors created\n",
+ NumImplicitDefaultConstructorsDeclared,
+ NumImplicitDefaultConstructors);
+ fprintf(stderr, " %u/%u implicit copy constructors created\n",
+ NumImplicitCopyConstructorsDeclared,
+ NumImplicitCopyConstructors);
+ fprintf(stderr, " %u/%u implicit copy assignment operators created\n",
+ NumImplicitCopyAssignmentOperatorsDeclared,
+ NumImplicitCopyAssignmentOperators);
+ fprintf(stderr, " %u/%u implicit destructors created\n",
+ NumImplicitDestructorsDeclared, NumImplicitDestructors);
+
if (ExternalSource.get()) {
fprintf(stderr, "\n");
ExternalSource->PrintStats();
}
+
+ if (!FreeMemory)
+ BumpAlloc.PrintStats();
}
@@ -273,13 +398,14 @@ ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
- TemplateSpecializationKind TSK) {
+ TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
assert(Inst->isStaticDataMember() && "Not a static data member");
assert(Tmpl->isStaticDataMember() && "Not a static data member");
assert(!InstantiatedFromStaticDataMember[Inst] &&
"Already noted what static data member was instantiated from");
InstantiatedFromStaticDataMember[Inst]
- = new (*this) MemberSpecializationInfo(Tmpl, TSK);
+ = new (*this) MemberSpecializationInfo(Tmpl, TSK, PointOfInstantiation);
}
NamedDecl *
@@ -358,6 +484,16 @@ ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
return Pos->second.end();
}
+unsigned
+ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method);
+ if (Pos == OverriddenMethods.end())
+ return 0;
+
+ return Pos->second.size();
+}
+
void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
const CXXMethodDecl *Overridden) {
OverriddenMethods[Method].push_back(Overridden);
@@ -414,6 +550,15 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) {
T = getPointerType(RT->getPointeeType());
}
if (!T->isIncompleteType() && !T->isFunctionType()) {
+ unsigned MinWidth = Target.getLargeArrayMinWidth();
+ unsigned ArrayAlign = Target.getLargeArrayAlign();
+ if (isa<VariableArrayType>(T) && MinWidth != 0)
+ Align = std::max(Align, ArrayAlign);
+ if (ConstantArrayType *CT = dyn_cast<ConstantArrayType>(T)) {
+ unsigned Size = getTypeSize(CT);
+ if (MinWidth != 0 && MinWidth <= Size)
+ Align = std::max(Align, ArrayAlign);
+ }
// Incomplete or function types default to 1.
while (isa<VariableArrayType>(T) || isa<IncompleteArrayType>(T))
T = cast<ArrayType>(T)->getElementType();
@@ -762,7 +907,8 @@ void ASTContext::ShallowCollectObjCIvars(const ObjCInterfaceDecl *OI,
void ASTContext::CollectNonClassIvars(const ObjCInterfaceDecl *OI,
llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) {
// Find ivars declared in class extension.
- if (const ObjCCategoryDecl *CDecl = OI->getClassExtension()) {
+ for (const ObjCCategoryDecl *CDecl = OI->getFirstClassExtension(); CDecl;
+ CDecl = CDecl->getNextClassExtension()) {
for (ObjCCategoryDecl::ivar_iterator I = CDecl->ivar_begin(),
E = CDecl->ivar_end(); I != E; ++I) {
Ivars.push_back(*I);
@@ -827,7 +973,8 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) {
unsigned count = 0;
// Count ivars declared in class extension.
- if (const ObjCCategoryDecl *CDecl = OI->getClassExtension())
+ for (const ObjCCategoryDecl *CDecl = OI->getFirstClassExtension(); CDecl;
+ CDecl = CDecl->getNextClassExtension())
count += CDecl->ivar_size();
// Count ivar defined in this class's implementation. This
@@ -1406,7 +1553,7 @@ QualType ASTContext::getIncompleteArrayType(QualType EltTy,
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
- bool IsAltiVec, bool IsPixel) {
+ VectorType::AltiVecSpecific AltiVecSpec) {
BuiltinType *baseType;
baseType = dyn_cast<BuiltinType>(getCanonicalType(vecType).getTypePtr());
@@ -1414,8 +1561,8 @@ QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
// Check if we've already instantiated a vector of this type.
llvm::FoldingSetNodeID ID;
- VectorType::Profile(ID, vecType, NumElts, Type::Vector,
- IsAltiVec, IsPixel);
+ VectorType::Profile(ID, vecType, NumElts, Type::Vector, AltiVecSpec);
+
void *InsertPos = 0;
if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(VTP, 0);
@@ -1423,16 +1570,19 @@ QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
// If the element type isn't canonical, this won't be a canonical type either,
// so fill in the canonical type field.
QualType Canonical;
- if (!vecType.isCanonical() || IsAltiVec || IsPixel) {
- Canonical = getVectorType(getCanonicalType(vecType),
- NumElts, false, false);
+ if (!vecType.isCanonical() || (AltiVecSpec == VectorType::AltiVec)) {
+ // Pass VectorType::NotAltiVec for AltiVecSpec so that the canonical
+ // type of an AltiVec vector (other than 'vector bool ...' and
+ // 'vector pixel') is the same as the equivalent GCC vector type.
+ Canonical = getVectorType(getCanonicalType(vecType), NumElts,
+ VectorType::NotAltiVec);
// Get the new insert position for the node we care about.
VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
}
VectorType *New = new (*this, TypeAlignment)
- VectorType(vecType, NumElts, Canonical, IsAltiVec, IsPixel);
+ VectorType(vecType, NumElts, Canonical, AltiVecSpec);
VectorTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
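The upshot of the new AltiVecSpec handling, illustrated (a sketch; gcc_v4si is a hypothetical typedef):

    typedef int gcc_v4si __attribute__((vector_size(16))); // GCC-style
    // '__vector int' (AltiVec) now canonicalizes to the same VectorType
    // as gcc_v4si, so the two are one canonical type; 'vector pixel' and
    // 'vector bool int' keep distinct canonical types via AltiVecSpec.
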
@@ -1448,7 +1598,8 @@ QualType ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) {
// Check if we've already instantiated a vector of this type.
llvm::FoldingSetNodeID ID;
- VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, false, false);
+ VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
+ VectorType::NotAltiVec);
void *InsertPos = 0;
if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(VTP, 0);
@@ -1629,8 +1780,7 @@ QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
assert(NeedsInjectedClassNameType(Decl));
if (Decl->TypeForDecl) {
assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
- } else if (CXXRecordDecl *PrevDecl
- = cast_or_null<CXXRecordDecl>(Decl->getPreviousDeclaration())) {
+ } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDeclaration()) {
assert(PrevDecl->TypeForDecl && "previous declaration has no type");
Decl->TypeForDecl = PrevDecl->TypeForDecl;
assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
@@ -1658,11 +1808,11 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) {
assert(!Record->getPreviousDeclaration() &&
"struct/union has previous declaration");
assert(!NeedsInjectedClassNameType(Record));
- Decl->TypeForDecl = new (*this, TypeAlignment) RecordType(Record);
+ return getRecordType(Record);
} else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) {
assert(!Enum->getPreviousDeclaration() &&
"enum has previous declaration");
- Decl->TypeForDecl = new (*this, TypeAlignment) EnumType(Enum);
+ return getEnumType(Enum);
} else if (const UnresolvedUsingTypenameDecl *Using =
dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
Decl->TypeForDecl = new (*this, TypeAlignment) UnresolvedUsingType(Using);
@@ -1675,16 +1825,42 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) {
/// getTypedefType - Return the unique reference to the type for the
/// specified typename decl.
-QualType ASTContext::getTypedefType(const TypedefDecl *Decl) {
+QualType
+ASTContext::getTypedefType(const TypedefDecl *Decl, QualType Canonical) {
if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
- QualType Canonical = getCanonicalType(Decl->getUnderlyingType());
+ if (Canonical.isNull())
+ Canonical = getCanonicalType(Decl->getUnderlyingType());
Decl->TypeForDecl = new(*this, TypeAlignment)
TypedefType(Type::Typedef, Decl, Canonical);
Types.push_back(Decl->TypeForDecl);
return QualType(Decl->TypeForDecl, 0);
}
+QualType ASTContext::getRecordType(const RecordDecl *Decl) {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (const RecordDecl *PrevDecl = Decl->getPreviousDeclaration())
+ if (PrevDecl->TypeForDecl)
+ return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+
+ Decl->TypeForDecl = new (*this, TypeAlignment) RecordType(Decl);
+ Types.push_back(Decl->TypeForDecl);
+ return QualType(Decl->TypeForDecl, 0);
+}
+
+QualType ASTContext::getEnumType(const EnumDecl *Decl) {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (const EnumDecl *PrevDecl = Decl->getPreviousDeclaration())
+ if (PrevDecl->TypeForDecl)
+ return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+
+ Decl->TypeForDecl = new (*this, TypeAlignment) EnumType(Decl);
+ Types.push_back(Decl->TypeForDecl);
+ return QualType(Decl->TypeForDecl, 0);
+}
+
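getRecordType and getEnumType cache the node in TypeForDecl and propagate it from the first redeclaration that built one; roughly (sketch):

    // struct S;              -> getRecordType(S) builds RecordType #1
    // struct S { int x; };   -> the redeclaration finds the previous
    //                           decl's TypeForDecl and reuses #1 rather
    //                           than allocating a second RecordType.
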
/// \brief Retrieve a substitution-result type.
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
@@ -1763,8 +1939,7 @@ ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
const TemplateArgumentListInfo &Args,
- QualType Canon,
- bool IsCurrentInstantiation) {
+ QualType Canon) {
unsigned NumArgs = Args.size();
llvm::SmallVector<TemplateArgument, 4> ArgVec;
@@ -1773,56 +1948,18 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
ArgVec.push_back(Args[i].getArgument());
return getTemplateSpecializationType(Template, ArgVec.data(), NumArgs,
- Canon, IsCurrentInstantiation);
+ Canon);
}
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
const TemplateArgument *Args,
unsigned NumArgs,
- QualType Canon,
- bool IsCurrentInstantiation) {
+ QualType Canon) {
if (!Canon.isNull())
Canon = getCanonicalType(Canon);
- else {
- assert(!IsCurrentInstantiation &&
- "current-instantiation specializations should always "
- "have a canonical type");
-
- // Build the canonical template specialization type.
- TemplateName CanonTemplate = getCanonicalTemplateName(Template);
- llvm::SmallVector<TemplateArgument, 4> CanonArgs;
- CanonArgs.reserve(NumArgs);
- for (unsigned I = 0; I != NumArgs; ++I)
- CanonArgs.push_back(getCanonicalTemplateArgument(Args[I]));
-
- // Determine whether this canonical template specialization type already
- // exists.
- llvm::FoldingSetNodeID ID;
- TemplateSpecializationType::Profile(ID, CanonTemplate, false,
- CanonArgs.data(), NumArgs, *this);
-
- void *InsertPos = 0;
- TemplateSpecializationType *Spec
- = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
-
- if (!Spec) {
- // Allocate a new canonical template specialization type.
- void *Mem = Allocate((sizeof(TemplateSpecializationType) +
- sizeof(TemplateArgument) * NumArgs),
- TypeAlignment);
- Spec = new (Mem) TemplateSpecializationType(*this, CanonTemplate, false,
- CanonArgs.data(), NumArgs,
- Canon);
- Types.push_back(Spec);
- TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
- }
-
- if (Canon.isNull())
- Canon = QualType(Spec, 0);
- assert(Canon->isDependentType() &&
- "Non-dependent template-id type must have a canonical type");
- }
+ else
+ Canon = getCanonicalTemplateSpecializationType(Template, Args, NumArgs);
// Allocate the (non-canonical) template specialization type, but don't
// try to unique it: these types typically have location information that
@@ -1831,8 +1968,7 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
sizeof(TemplateArgument) * NumArgs),
TypeAlignment);
TemplateSpecializationType *Spec
- = new (Mem) TemplateSpecializationType(*this, Template,
- IsCurrentInstantiation,
+ = new (Mem) TemplateSpecializationType(Template,
Args, NumArgs,
Canon);
@@ -1841,6 +1977,44 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
}
QualType
+ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ // Build the canonical template specialization type.
+ TemplateName CanonTemplate = getCanonicalTemplateName(Template);
+ llvm::SmallVector<TemplateArgument, 4> CanonArgs;
+ CanonArgs.reserve(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I)
+ CanonArgs.push_back(getCanonicalTemplateArgument(Args[I]));
+
+ // Determine whether this canonical template specialization type already
+ // exists.
+ llvm::FoldingSetNodeID ID;
+ TemplateSpecializationType::Profile(ID, CanonTemplate,
+ CanonArgs.data(), NumArgs, *this);
+
+ void *InsertPos = 0;
+ TemplateSpecializationType *Spec
+ = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Spec) {
+ // Allocate a new canonical template specialization type.
+ void *Mem = Allocate((sizeof(TemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs),
+ TypeAlignment);
+ Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
+ CanonArgs.data(), NumArgs,
+ QualType());
+ Types.push_back(Spec);
+ TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
+ }
+
+ assert(Spec->isDependentType() &&
+ "Non-dependent template-id type must have a canonical type");
+ return QualType(Spec, 0);
+}
+
+QualType
ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
QualType NamedType) {
@@ -1898,44 +2072,69 @@ QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
}
QualType
-ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
+ASTContext::getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
- const TemplateSpecializationType *TemplateId,
- QualType Canon) {
+ const IdentifierInfo *Name,
+ const TemplateArgumentListInfo &Args) {
+ // TODO: avoid this copy
+ llvm::SmallVector<TemplateArgument, 16> ArgCopy;
+ for (unsigned I = 0, E = Args.size(); I != E; ++I)
+ ArgCopy.push_back(Args[I].getArgument());
+ return getDependentTemplateSpecializationType(Keyword, NNS, Name,
+ ArgCopy.size(),
+ ArgCopy.data());
+}
+
+QualType
+ASTContext::getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args) {
assert(NNS->isDependent() && "nested-name-specifier must be dependent");
llvm::FoldingSetNodeID ID;
- DependentNameType::Profile(ID, Keyword, NNS, TemplateId);
+ DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
+ Name, NumArgs, Args);
void *InsertPos = 0;
- DependentNameType *T
- = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
+ DependentTemplateSpecializationType *T
+ = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
if (T)
return QualType(T, 0);
- if (Canon.isNull()) {
- NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
- QualType CanonType = getCanonicalType(QualType(TemplateId, 0));
- ElaboratedTypeKeyword CanonKeyword = Keyword;
- if (Keyword == ETK_None)
- CanonKeyword = ETK_Typename;
- if (CanonNNS != NNS || CanonKeyword != Keyword ||
- CanonType != QualType(TemplateId, 0)) {
- const TemplateSpecializationType *CanonTemplateId
- = CanonType->getAs<TemplateSpecializationType>();
- assert(CanonTemplateId &&
- "Canonical type must also be a template specialization type");
- Canon = getDependentNameType(CanonKeyword, CanonNNS, CanonTemplateId);
- }
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+
+ ElaboratedTypeKeyword CanonKeyword = Keyword;
+ if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
- DependentNameType *CheckT
- = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(!CheckT && "Typename canonical type is broken"); (void)CheckT;
+ bool AnyNonCanonArgs = false;
+ llvm::SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
+ if (!CanonArgs[I].structurallyEquals(Args[I]))
+ AnyNonCanonArgs = true;
}
- T = new (*this) DependentNameType(Keyword, NNS, TemplateId, Canon);
+ QualType Canon;
+ if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
+ Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
+ Name, NumArgs,
+ CanonArgs.data());
+
+ // Find the insert position again.
+ DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs),
+ TypeAlignment);
+ T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
+ Name, NumArgs, Args, Canon);
Types.push_back(T);
- DependentNameTypes.InsertNode(T, InsertPos);
+ DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
return QualType(T, 0);
}
@@ -2326,6 +2525,48 @@ QualType ASTContext::getUnqualifiedArrayType(QualType T,
SourceRange());
}
+/// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that
+/// may be similar (C++ 4.4), replaces T1 and T2 with the type that
+/// they point to and returns true. If T1 and T2 aren't pointer types
+/// or pointer-to-member types, or if they are not similar at this
+/// level, returns false and leaves T1 and T2 unchanged. Top-level
+/// qualifiers on T1 and T2 are ignored. This function will typically
+/// be called in a loop that successively "unwraps" pointer and
+/// pointer-to-member types to compare them at each level.
+bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
+ const PointerType *T1PtrType = T1->getAs<PointerType>(),
+ *T2PtrType = T2->getAs<PointerType>();
+ if (T1PtrType && T2PtrType) {
+ T1 = T1PtrType->getPointeeType();
+ T2 = T2PtrType->getPointeeType();
+ return true;
+ }
+
+ const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
+ *T2MPType = T2->getAs<MemberPointerType>();
+ if (T1MPType && T2MPType &&
+ hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
+ QualType(T2MPType->getClass(), 0))) {
+ T1 = T1MPType->getPointeeType();
+ T2 = T2MPType->getPointeeType();
+ return true;
+ }
+
+ if (getLangOptions().ObjC1) {
+ const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(),
+ *T2OPType = T2->getAs<ObjCObjectPointerType>();
+ if (T1OPType && T2OPType) {
+ T1 = T1OPType->getPointeeType();
+ T2 = T2OPType->getPointeeType();
+ return true;
+ }
+ }
+
+ // FIXME: Block pointers, too?
+
+ return false;
+}
+
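The doc comment above describes the intended calling pattern; a sketch of that loop, where checkQualifiersAtLevel is a hypothetical stand-in for the per-level cv-qualifier comparison of C++ [conv.qual]:

    bool pointeesCompatible(ASTContext &Ctx, QualType T1, QualType T2) {
      while (Ctx.UnwrapSimilarPointerTypes(T1, T2)) {
        if (!checkQualifiersAtLevel(T1, T2)) // hypothetical helper
          return false;
      }
      return Ctx.hasSameUnqualifiedType(T1, T2);
    }
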
DeclarationName ASTContext::getNameForTemplate(TemplateName Name) {
if (TemplateDecl *TD = Name.getAsTemplateDecl())
return TD->getDeclName();
@@ -2344,10 +2585,14 @@ DeclarationName ASTContext::getNameForTemplate(TemplateName Name) {
}
TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) {
- // If this template name refers to a template, the canonical
- // template name merely stores the template itself.
- if (TemplateDecl *Template = Name.getAsTemplateDecl())
+ if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Template))
+ Template = getCanonicalTemplateTemplateParmDecl(TTP);
+
+ // The canonical template name is the canonical template declaration.
return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
+ }
assert(!Name.getAsOverloadedTemplate());
@@ -2856,6 +3101,10 @@ QualType ASTContext::getObjCFastEnumerationStateType() {
Field->setAccess(AS_public);
ObjCFastEnumerationStateTypeDecl->addDecl(Field);
}
+ if (getLangOptions().CPlusPlus)
+ if (CXXRecordDecl *CXXRD =
+ dyn_cast<CXXRecordDecl>(ObjCFastEnumerationStateTypeDecl))
+ CXXRD->setEmpty(false);
ObjCFastEnumerationStateTypeDecl->completeDefinition();
}
@@ -2981,7 +3230,6 @@ QualType ASTContext::BuildByRefType(const char *DeclName, QualType Ty) {
bool HasCopyAndDispose = BlockRequiresCopying(Ty);
// FIXME: Move up
- static unsigned int UniqueBlockByRefTypeID = 0;
llvm::SmallString<36> Name;
llvm::raw_svector_ostream(Name) << "__Block_byref_" <<
++UniqueBlockByRefTypeID << '_' << DeclName;
@@ -3033,7 +3281,6 @@ QualType ASTContext::getBlockParmType(
llvm::SmallVectorImpl<const Expr *> &Layout) {
// FIXME: Move up
- static unsigned int UniqueBlockParmTypeID = 0;
llvm::SmallString<36> Name;
llvm::raw_svector_ostream(Name) << "__block_literal_"
<< ++UniqueBlockParmTypeID;
@@ -3122,7 +3369,7 @@ CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) {
CharUnits sz = getTypeSizeInChars(type);
// Make all integer and enum types at least as large as an int
- if (sz.isPositive() && type->isIntegralType())
+ if (sz.isPositive() && type->isIntegralOrEnumerationType())
sz = std::max(sz, getTypeSizeInChars(IntTy));
// Treat arrays as pointers, since that's how they're passed in.
else if (type->isArrayType())
@@ -3143,7 +3390,7 @@ void ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr,
QualType BlockTy =
Expr->getType()->getAs<BlockPointerType>()->getPointeeType();
// Encode result type.
- getObjCEncodingForType(cast<FunctionType>(BlockTy)->getResultType(), S);
+ getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getResultType(), S);
// Compute size of all parameters.
// Start with computing size of a pointer in number of bytes.
// FIXME: There might(should) be a better way of doing this computation!
@@ -3376,13 +3623,74 @@ void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
true /* outermost type */);
}
+static char ObjCEncodingForPrimitiveKind(const ASTContext *C, QualType T) {
+ switch (T->getAs<BuiltinType>()->getKind()) {
+ default: assert(0 && "Unhandled builtin type kind");
+ case BuiltinType::Void: return 'v';
+ case BuiltinType::Bool: return 'B';
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar: return 'C';
+ case BuiltinType::UShort: return 'S';
+ case BuiltinType::UInt: return 'I';
+ case BuiltinType::ULong:
+ return
+ (const_cast<ASTContext *>(C))->getIntWidth(T) == 32 ? 'L' : 'Q';
+ case BuiltinType::UInt128: return 'T';
+ case BuiltinType::ULongLong: return 'Q';
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar: return 'c';
+ case BuiltinType::Short: return 's';
+ case BuiltinType::WChar:
+ case BuiltinType::Int: return 'i';
+ case BuiltinType::Long:
+ return
+ (const_cast<ASTContext *>(C))->getIntWidth(T) == 32 ? 'l' : 'q';
+ case BuiltinType::LongLong: return 'q';
+ case BuiltinType::Int128: return 't';
+ case BuiltinType::Float: return 'f';
+ case BuiltinType::Double: return 'd';
+ case BuiltinType::LongDouble: return 'd';
+ }
+}
+
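These are the primitive legs of Objective-C's @encode strings; through the public entry point the mapping looks like this (sketch, with Ctx an ASTContext):

    std::string S;
    Ctx.getObjCEncodingForType(Ctx.IntTy, S);              // S == "i"
    S.clear();
    Ctx.getObjCEncodingForType(Ctx.UnsignedLongLongTy, S); // S == "Q"
    // 'l'/'q' and 'L'/'Q' for long depend on the target's width, which
    // is why the helper consults getIntWidth(T) above.
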
static void EncodeBitField(const ASTContext *Context, std::string& S,
- const FieldDecl *FD) {
+ QualType T, const FieldDecl *FD) {
const Expr *E = FD->getBitWidth();
assert(E && "bitfield width not there - getObjCEncodingForTypeImpl");
ASTContext *Ctx = const_cast<ASTContext*>(Context);
- unsigned N = E->EvaluateAsInt(*Ctx).getZExtValue();
S += 'b';
+ // The NeXT runtime encodes bit fields as b followed by the number of bits.
+ // The GNU runtime requires more information; bitfields are encoded as b,
+ // then the offset (in bits) of the first element, then the type of the
+ // bitfield, then the size in bits. For example, in this structure:
+ //
+ // struct
+ // {
+ // int integer;
+ // int flags:2;
+ // };
+ // On a 32-bit system, the encoding for flags would be b2 for the NeXT
+ // runtime, but b32i2 for the GNU runtime. The reason for this extra
+ // information is not especially sensible, but we're stuck with it for
+ // compatibility with GCC, although providing it breaks anything that
+ // actually uses runtime introspection and wants to work on both runtimes...
+ if (!Ctx->getLangOptions().NeXTRuntime) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
+ // FIXME: This same linear search is also used in ExprConstant - it might
+ // be better if the FieldDecl stored its offset. We'd be increasing the
+ // size of the object slightly, but saving some time every time it is used.
+ unsigned i = 0;
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end();
+ Field != FieldEnd; (void)++Field, ++i) {
+ if (*Field == FD)
+ break;
+ }
+ S += llvm::utostr(RL.getFieldOffset(i));
+ S += ObjCEncodingForPrimitiveKind(Context, T);
+ }
+ unsigned N = E->EvaluateAsInt(*Ctx).getZExtValue();
S += llvm::utostr(N);
}
@@ -3393,40 +3701,10 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
const FieldDecl *FD,
bool OutermostType,
bool EncodingProperty) {
- if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
+ if (T->getAs<BuiltinType>()) {
if (FD && FD->isBitField())
- return EncodeBitField(this, S, FD);
- char encoding;
- switch (BT->getKind()) {
- default: assert(0 && "Unhandled builtin type kind");
- case BuiltinType::Void: encoding = 'v'; break;
- case BuiltinType::Bool: encoding = 'B'; break;
- case BuiltinType::Char_U:
- case BuiltinType::UChar: encoding = 'C'; break;
- case BuiltinType::UShort: encoding = 'S'; break;
- case BuiltinType::UInt: encoding = 'I'; break;
- case BuiltinType::ULong:
- encoding =
- (const_cast<ASTContext *>(this))->getIntWidth(T) == 32 ? 'L' : 'Q';
- break;
- case BuiltinType::UInt128: encoding = 'T'; break;
- case BuiltinType::ULongLong: encoding = 'Q'; break;
- case BuiltinType::Char_S:
- case BuiltinType::SChar: encoding = 'c'; break;
- case BuiltinType::Short: encoding = 's'; break;
- case BuiltinType::Int: encoding = 'i'; break;
- case BuiltinType::Long:
- encoding =
- (const_cast<ASTContext *>(this))->getIntWidth(T) == 32 ? 'l' : 'q';
- break;
- case BuiltinType::LongLong: encoding = 'q'; break;
- case BuiltinType::Int128: encoding = 't'; break;
- case BuiltinType::Float: encoding = 'f'; break;
- case BuiltinType::Double: encoding = 'd'; break;
- case BuiltinType::LongDouble: encoding = 'd'; break;
- }
-
- S += encoding;
+ return EncodeBitField(this, S, T, FD);
+ S += ObjCEncodingForPrimitiveKind(this, T);
return;
}
@@ -3585,7 +3863,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
if (T->isEnumeralType()) {
if (FD && FD->isBitField())
- EncodeBitField(this, S, FD);
+ EncodeBitField(this, S, T, FD);
else
S += 'i';
return;
@@ -4728,7 +5006,7 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) {
// Turn <4 x signed int> -> <4 x unsigned int>
if (const VectorType *VTy = T->getAs<VectorType>())
return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
- VTy->getNumElements(), VTy->isAltiVec(), VTy->isPixel());
+ VTy->getNumElements(), VTy->getAltiVecSpecific());
// For enums, we return the unsigned version of the base type.
if (const EnumType *ETy = T->getAs<EnumType>())
@@ -4886,7 +5164,8 @@ static QualType DecodeTypeFromStr(const char *&Str, ASTContext &Context,
QualType ElementType = DecodeTypeFromStr(Str, Context, Error, false);
// FIXME: Don't know what to do about AltiVec.
- Type = Context.getVectorType(ElementType, NumElements, false, false);
+ Type = Context.getVectorType(ElementType, NumElements,
+ VectorType::NotAltiVec);
break;
}
case 'X': {
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
index 6ed08d1..8d347d1 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
@@ -73,6 +73,7 @@ namespace {
// FIXME: TemplateSpecializationType
QualType VisitElaboratedType(ElaboratedType *T);
// FIXME: DependentNameType
+ // FIXME: DependentTemplateSpecializationType
QualType VisitObjCInterfaceType(ObjCInterfaceType *T);
QualType VisitObjCObjectType(ObjCObjectType *T);
QualType VisitObjCObjectPointerType(ObjCObjectPointerType *T);
@@ -439,9 +440,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
if (Vec1->getNumElements() != Vec2->getNumElements())
return false;
- if (Vec1->isAltiVec() != Vec2->isAltiVec())
- return false;
- if (Vec1->isPixel() != Vec2->isPixel())
+ if (Vec1->getAltiVecSpecific() != Vec2->getAltiVecSpecific())
return false;
break;
}
@@ -619,14 +618,32 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!IsStructurallyEquivalent(Typename1->getIdentifier(),
Typename2->getIdentifier()))
return false;
- if (!IsStructurallyEquivalent(Context,
- QualType(Typename1->getTemplateId(), 0),
- QualType(Typename2->getTemplateId(), 0)))
- return false;
break;
}
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateSpecializationType *Spec1 =
+ cast<DependentTemplateSpecializationType>(T1);
+ const DependentTemplateSpecializationType *Spec2 =
+ cast<DependentTemplateSpecializationType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getQualifier(),
+ Spec2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Spec1->getIdentifier(),
+ Spec2->getIdentifier()))
+ return false;
+ if (Spec1->getNumArgs() != Spec2->getNumArgs())
+ return false;
+ for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getArg(I), Spec2->getArg(I)))
+ return false;
+ }
+ break;
+ }
+
case Type::ObjCInterface: {
const ObjCInterfaceType *Iface1 = cast<ObjCInterfaceType>(T1);
const ObjCInterfaceType *Iface2 = cast<ObjCInterfaceType>(T2);
@@ -1172,8 +1189,7 @@ QualType ASTNodeImporter::VisitVectorType(VectorType *T) {
return Importer.getToContext().getVectorType(ToElementType,
T->getNumElements(),
- T->isAltiVec(),
- T->isPixel());
+ T->getAltiVecSpecific());
}
QualType ASTNodeImporter::VisitExtVectorType(ExtVectorType *T) {
@@ -1687,7 +1703,7 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
// Create the record declaration.
RecordDecl *D2 = AdoptDecl;
if (!D2) {
- if (CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(D)) {
+ if (isa<CXXRecordDecl>(D)) {
CXXRecordDecl *D2CXX = CXXRecordDecl::Create(Importer.getToContext(),
D->getTagKind(),
DC, Loc,
@@ -1695,30 +1711,6 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
Importer.Import(D->getTagKeywordLoc()));
D2 = D2CXX;
D2->setAccess(D->getAccess());
-
- if (D->isDefinition()) {
- // Add base classes.
- llvm::SmallVector<CXXBaseSpecifier *, 4> Bases;
- for (CXXRecordDecl::base_class_iterator
- Base1 = D1CXX->bases_begin(),
- FromBaseEnd = D1CXX->bases_end();
- Base1 != FromBaseEnd;
- ++Base1) {
- QualType T = Importer.Import(Base1->getType());
- if (T.isNull())
- return 0;
-
- Bases.push_back(
- new (Importer.getToContext())
- CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()),
- Base1->isVirtual(),
- Base1->isBaseOfClass(),
- Base1->getAccessSpecifierAsWritten(),
- T));
- }
- if (!Bases.empty())
- D2CXX->setBases(Bases.data(), Bases.size());
- }
} else {
D2 = RecordDecl::Create(Importer.getToContext(), D->getTagKind(),
DC, Loc,
@@ -1739,6 +1731,33 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (D->isDefinition()) {
D2->startDefinition();
+
+ // Add base classes.
+ if (CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
+ CXXRecordDecl *D1CXX = cast<CXXRecordDecl>(D);
+
+ llvm::SmallVector<CXXBaseSpecifier *, 4> Bases;
+ for (CXXRecordDecl::base_class_iterator
+ Base1 = D1CXX->bases_begin(),
+ FromBaseEnd = D1CXX->bases_end();
+ Base1 != FromBaseEnd;
+ ++Base1) {
+ QualType T = Importer.Import(Base1->getType());
+ if (T.isNull())
+ return 0;
+
+ Bases.push_back(
+ new (Importer.getToContext())
+ CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()),
+ Base1->isVirtual(),
+ Base1->isBaseOfClass(),
+ Base1->getAccessSpecifierAsWritten(),
+ T));
+ }
+ if (!Bases.empty())
+ D2CXX->setBases(Bases.data(), Bases.size());
+ }
+
ImportDeclContext(D);
D2->completeDefinition();
}
@@ -2598,8 +2617,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
}
// Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
+ TypeSourceInfo *T = Importer.Import(D->getTypeSourceInfo());
+ if (!T)
return 0;
// Create the new property.
@@ -2614,6 +2633,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
LexicalDC->addDecl(ToProperty);
ToProperty->setPropertyAttributes(D->getPropertyAttributes());
+ ToProperty->setPropertyAttributesAsWritten(
+ D->getPropertyAttributesAsWritten());
ToProperty->setGetterName(Importer.Import(D->getGetterName()));
ToProperty->setSetterName(Importer.Import(D->getSetterName()));
ToProperty->setGetterMethodDecl(
diff --git a/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
index 0fab22c..b09ba895 100644
--- a/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
@@ -24,7 +24,7 @@ void Attr::Destroy(ASTContext &C) {
C.Deallocate((void*)this);
}
-AttrWithString::AttrWithString(Attr::Kind AK, ASTContext &C, llvm::StringRef s)
+AttrWithString::AttrWithString(attr::Kind AK, ASTContext &C, llvm::StringRef s)
: Attr(AK) {
assert(!s.empty());
StrLen = s.size();
@@ -51,7 +51,7 @@ void FormatAttr::setType(ASTContext &C, llvm::StringRef type) {
}
NonNullAttr::NonNullAttr(ASTContext &C, unsigned* arg_nums, unsigned size)
- : Attr(NonNull), ArgNums(0), Size(0) {
+ : Attr(attr::NonNull), ArgNums(0), Size(0) {
if (size == 0)
return;
assert(arg_nums);
@@ -93,6 +93,7 @@ DEF_SIMPLE_ATTR_CLONE(NSReturnsNotRetained)
DEF_SIMPLE_ATTR_CLONE(NSReturnsRetained)
DEF_SIMPLE_ATTR_CLONE(NoDebug)
DEF_SIMPLE_ATTR_CLONE(NoInline)
+DEF_SIMPLE_ATTR_CLONE(NoInstrumentFunction)
DEF_SIMPLE_ATTR_CLONE(NoReturn)
DEF_SIMPLE_ATTR_CLONE(NoThrow)
DEF_SIMPLE_ATTR_CLONE(ObjCException)
@@ -200,6 +201,10 @@ Attr *ReqdWorkGroupSizeAttr::clone(ASTContext &C) const {
return ::new (C) ReqdWorkGroupSizeAttr(X, Y, Z);
}
+Attr *InitPriorityAttr::clone(ASTContext &C) const {
+ return ::new (C) InitPriorityAttr(Priority);
+}
+
Attr *MSP430InterruptAttr::clone(ASTContext &C) const {
return ::new (C) MSP430InterruptAttr(Number);
}
diff --git a/contrib/llvm/tools/clang/lib/AST/CMakeLists.txt b/contrib/llvm/tools/clang/lib/AST/CMakeLists.txt
index bce3646..407ed95 100644
--- a/contrib/llvm/tools/clang/lib/AST/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/lib/AST/CMakeLists.txt
@@ -18,6 +18,7 @@ add_clang_library(clangAST
DeclPrinter.cpp
DeclTemplate.cpp
Expr.cpp
+ ExprClassification.cpp
ExprConstant.cpp
ExprCXX.cpp
FullExpr.cpp
@@ -39,4 +40,5 @@ add_clang_library(clangAST
TypePrinter.cpp
)
-add_dependencies(clangAST ClangDiagnosticAST ClangStmtNodes)
+add_dependencies(clangAST ClangARMNeon ClangAttrClasses ClangAttrList
+ ClangDiagnosticAST ClangDeclNodes ClangStmtNodes)
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
index d616e42..c563c37 100644
--- a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
@@ -90,6 +90,9 @@ bool CXXRecordDecl::isDerivedFrom(CXXRecordDecl *Base, CXXBasePaths &Paths) cons
}
bool CXXRecordDecl::isVirtuallyDerivedFrom(CXXRecordDecl *Base) const {
+ if (!getNumVBases())
+ return false;
+
CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
/*DetectVirtual=*/false);
@@ -559,22 +562,23 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
for (; OverMethods.first != OverMethods.second; ++OverMethods.first) {
const CXXMethodDecl *CanonOM
= cast<CXXMethodDecl>((*OverMethods.first)->getCanonicalDecl());
+
+ // C++ [class.virtual]p2:
+ // A virtual member function C::vf of a class object S is
+ // a final overrider unless the most derived class (1.8)
+ // of which S is a base class subobject (if any) declares
+ // or inherits another member function that overrides vf.
+ //
+ // Treating this object like the most derived class, we
+ // replace any overrides from base classes with this
+ // overriding virtual function.
+ Overriders[CanonOM].replaceAll(
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+
if (CanonOM->begin_overridden_methods()
- == CanonOM->end_overridden_methods()) {
- // C++ [class.virtual]p2:
- // A virtual member function C::vf of a class object S is
- // a final overrider unless the most derived class (1.8)
- // of which S is a base class subobject (if any) declares
- // or inherits another member function that overrides vf.
- //
- // Treating this object like the most derived class, we
- // replace any overrides from base classes with this
- // overriding virtual function.
- Overriders[CanonOM].replaceAll(
- UniqueVirtualMethod(CanonM, SubobjectNumber,
- InVirtualSubobject));
+ == CanonOM->end_overridden_methods())
continue;
- }
// Continue recursion to the methods that this virtual method
// overrides.
@@ -582,6 +586,12 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
CanonOM->end_overridden_methods()));
}
}
+
+ // C++ [class.virtual]p2:
+ // For convenience we say that any virtual function overrides itself.
+ Overriders[CanonM].add(SubobjectNumber,
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
}
}
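A two-class hierarchy makes the quoted [class.virtual]p2 rule concrete (illustrative only):

    struct A { virtual void f(); };
    struct B : A { virtual void f(); };  // B::f overrides A::f
    // Within B, replaceAll() swaps the inherited entry for A::f over to
    // B::f, and the trailing add() records that B::f (like every virtual
    // function) is also an overrider of itself.
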
diff --git a/contrib/llvm/tools/clang/lib/AST/Decl.cpp b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
index ffdcb47..149938f 100644
--- a/contrib/llvm/tools/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
@@ -523,6 +523,14 @@ bool NamedDecl::isCXXInstanceMember() const {
// DeclaratorDecl Implementation
//===----------------------------------------------------------------------===//
+template <typename DeclT>
+static SourceLocation getTemplateOrInnerLocStart(const DeclT *decl) {
+ if (decl->getNumTemplateParameterLists() > 0)
+ return decl->getTemplateParameterList(0)->getTemplateLoc();
+ else
+ return decl->getInnerLocStart();
+}
+
DeclaratorDecl::~DeclaratorDecl() {}
void DeclaratorDecl::Destroy(ASTContext &C) {
if (hasExtInfo())
@@ -531,15 +539,8 @@ void DeclaratorDecl::Destroy(ASTContext &C) {
}
SourceLocation DeclaratorDecl::getTypeSpecStartLoc() const {
- if (DeclInfo) {
- TypeLoc TL = getTypeSourceInfo()->getTypeLoc();
- while (true) {
- TypeLoc NextTL = TL.getNextTypeLoc();
- if (!NextTL)
- return TL.getLocalSourceRange().getBegin();
- TL = NextTL;
- }
- }
+ TypeSourceInfo *TSI = getTypeSourceInfo();
+ if (TSI) return TSI->getTypeLoc().getBeginLoc();
return SourceLocation();
}
@@ -573,6 +574,40 @@ void DeclaratorDecl::setQualifierInfo(NestedNameSpecifier *Qualifier,
}
}
+SourceLocation DeclaratorDecl::getOuterLocStart() const {
+ return getTemplateOrInnerLocStart(this);
+}
+
+void
+QualifierInfo::setTemplateParameterListsInfo(ASTContext &Context,
+ unsigned NumTPLists,
+ TemplateParameterList **TPLists) {
+ assert((NumTPLists == 0 || TPLists != 0) &&
+ "Empty array of template parameters with positive size!");
+ assert((NumTPLists == 0 || NNS) &&
+ "Nonempty array of template parameters with no qualifier!");
+
+ // Free previous template parameters (if any).
+ if (NumTemplParamLists > 0) {
+ Context.Deallocate(TemplParamLists);
+ TemplParamLists = 0;
+ NumTemplParamLists = 0;
+ }
+ // Set info on matched template parameter lists (if any).
+ if (NumTPLists > 0) {
+ TemplParamLists = new (Context) TemplateParameterList*[NumTPLists];
+ NumTemplParamLists = NumTPLists;
+ for (unsigned i = NumTPLists; i-- > 0; )
+ TemplParamLists[i] = TPLists[i];
+ }
+}
+
+void QualifierInfo::Destroy(ASTContext &Context) {
+ // FIXME: Deallocate template parameter lists themselves!
+ if (TemplParamLists)
+ Context.Deallocate(TemplParamLists);
+}
+
//===----------------------------------------------------------------------===//
// VarDecl Implementation
//===----------------------------------------------------------------------===//
@@ -613,14 +648,17 @@ void VarDecl::Destroy(ASTContext& C) {
VarDecl::~VarDecl() {
}
-SourceRange VarDecl::getSourceRange() const {
+SourceLocation VarDecl::getInnerLocStart() const {
SourceLocation Start = getTypeSpecStartLoc();
if (Start.isInvalid())
Start = getLocation();
-
+ return Start;
+}
+
+SourceRange VarDecl::getSourceRange() const {
if (getInit())
- return SourceRange(Start, getInit()->getLocEnd());
- return SourceRange(Start, getLocation());
+ return SourceRange(getOuterLocStart(), getInit()->getLocEnd());
+ return SourceRange(getOuterLocStart(), getLocation());
}
bool VarDecl::isExternC() const {
@@ -678,7 +716,15 @@ VarDecl::DefinitionKind VarDecl::isThisDeclarationADefinition() const {
// AST for 'extern "C" int foo;' is annotated with 'extern'.
if (hasExternalStorage())
return DeclarationOnly;
-
+
+ if (getStorageClassAsWritten() == Extern ||
+ getStorageClassAsWritten() == PrivateExtern) {
+ for (const VarDecl *PrevVar = getPreviousDeclaration();
+ PrevVar; PrevVar = PrevVar->getPreviousDeclaration()) {
+ if (PrevVar->getLinkage() == InternalLinkage && PrevVar->hasInit())
+ return DeclarationOnly;
+ }
+ }
// C99 6.9.2p2:
// A declaration of an object that has file scope without an initializer,
// and without a storage class specifier or the scs 'static', constitutes
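The new loop handles redeclaring an internal-linkage definition with extern, which must remain a mere declaration (sketch):

    static int x = 1;  // internal linkage, and a definition with an init
    extern int x;      // same x; the loop above finds the initialized
                       // internal-linkage predecessor and classifies this
                       // redeclaration as DeclarationOnly.
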
@@ -697,7 +743,7 @@ VarDecl *VarDecl::getActingDefinition() {
if (Kind != TentativeDefinition)
return 0;
- VarDecl *LastTentative = false;
+ VarDecl *LastTentative = 0;
VarDecl *First = getFirstDeclaration();
for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
I != E; ++I) {
@@ -907,6 +953,17 @@ bool FunctionDecl::isVariadic() const {
return false;
}
+bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if (I->Body) {
+ Definition = *I;
+ return true;
+ }
+ }
+
+ return false;
+}
+
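hasBody answers "does any redeclaration have a definition?" without the getBody side effect of deserializing that body from a PCH; the callers switched over below only branch on existence. Typical use (sketch):

    const FunctionDecl *Definition = 0;
    if (FD->hasBody(Definition))   // cheap existence test, PCH-friendly
      return Definition->isOutOfLine();
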
Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const {
for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
if (I->Body) {
@@ -1107,11 +1164,11 @@ bool FunctionDecl::isInlined() const {
}
const FunctionDecl *PatternDecl = getTemplateInstantiationPattern();
- Stmt *Pattern = 0;
+ bool HasPattern = false;
if (PatternDecl)
- Pattern = PatternDecl->getBody(PatternDecl);
+ HasPattern = PatternDecl->hasBody(PatternDecl);
- if (Pattern && PatternDecl)
+ if (HasPattern && PatternDecl)
return PatternDecl->isInlined();
return false;
@@ -1197,6 +1254,23 @@ const IdentifierInfo *FunctionDecl::getLiteralIdentifier() const {
return 0;
}
+FunctionDecl::TemplatedKind FunctionDecl::getTemplatedKind() const {
+ if (TemplateOrSpecialization.isNull())
+ return TK_NonTemplate;
+ if (TemplateOrSpecialization.is<FunctionTemplateDecl *>())
+ return TK_FunctionTemplate;
+ if (TemplateOrSpecialization.is<MemberSpecializationInfo *>())
+ return TK_MemberSpecialization;
+ if (TemplateOrSpecialization.is<FunctionTemplateSpecializationInfo *>())
+ return TK_FunctionTemplateSpecialization;
+ if (TemplateOrSpecialization.is
+ <DependentFunctionTemplateSpecializationInfo*>())
+ return TK_DependentFunctionTemplateSpecialization;
+
+ assert(false && "Did we miss a TemplateOrSpecialization type?");
+ return TK_NonTemplate;
+}
+
FunctionDecl *FunctionDecl::getInstantiatedFromMemberFunction() const {
if (MemberSpecializationInfo *Info = getMemberSpecializationInfo())
return cast<FunctionDecl>(Info->getInstantiatedFrom());
@@ -1239,15 +1313,15 @@ bool FunctionDecl::isImplicitlyInstantiable() const {
// Find the actual template from which we will instantiate.
const FunctionDecl *PatternDecl = getTemplateInstantiationPattern();
- Stmt *Pattern = 0;
+ bool HasPattern = false;
if (PatternDecl)
- Pattern = PatternDecl->getBody(PatternDecl);
+ HasPattern = PatternDecl->hasBody(PatternDecl);
// C++0x [temp.explicit]p9:
// Except for inline functions, other explicit instantiation declarations
// have the effect of suppressing the implicit instantiation of the entity
// to which they refer.
- if (!Pattern || !PatternDecl)
+ if (!HasPattern || !PatternDecl)
return true;
return PatternDecl->isInlined();
@@ -1304,7 +1378,8 @@ FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
const TemplateArgumentList *TemplateArgs,
void *InsertPos,
TemplateSpecializationKind TSK,
- const TemplateArgumentListInfo *TemplateArgsAsWritten) {
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation PointOfInstantiation) {
assert(TSK != TSK_Undeclared &&
"Must specify the type of function template specialization");
FunctionTemplateSpecializationInfo *Info
@@ -1317,6 +1392,7 @@ FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
Info->Template.setInt(TSK - 1);
Info->TemplateArguments = TemplateArgs;
Info->TemplateArgumentsAsWritten = TemplateArgsAsWritten;
+ Info->PointOfInstantiation = PointOfInstantiation;
TemplateOrSpecialization = Info;
// Insert this function template specialization into the set of known
@@ -1336,6 +1412,28 @@ FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
}
void
+FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
+ unsigned NumTemplateArgs,
+ const TemplateArgument *TemplateArgs,
+ TemplateSpecializationKind TSK,
+ unsigned NumTemplateArgsAsWritten,
+ TemplateArgumentLoc *TemplateArgsAsWritten,
+ SourceLocation LAngleLoc,
+ SourceLocation RAngleLoc,
+ SourceLocation PointOfInstantiation) {
+ ASTContext &Ctx = getASTContext();
+ TemplateArgumentList *TemplArgs
+ = new (Ctx) TemplateArgumentList(Ctx, TemplateArgs, NumTemplateArgs);
+ TemplateArgumentListInfo *TemplArgsInfo
+ = new (Ctx) TemplateArgumentListInfo(LAngleLoc, RAngleLoc);
+ for (unsigned i=0; i != NumTemplateArgsAsWritten; ++i)
+ TemplArgsInfo->addArgument(TemplateArgsAsWritten[i]);
+
+ setFunctionTemplateSpecialization(Template, TemplArgs, /*InsertPos=*/0, TSK,
+ TemplArgsInfo, PointOfInstantiation);
+}
+
+void
FunctionDecl::setDependentTemplateSpecialization(ASTContext &Context,
const UnresolvedSetImpl &Templates,
const TemplateArgumentListInfo &TemplateArgs) {
@@ -1427,7 +1525,7 @@ bool FunctionDecl::isOutOfLine() const {
// class template, check whether that member function was defined out-of-line.
if (FunctionDecl *FD = getInstantiatedFromMemberFunction()) {
const FunctionDecl *Definition;
- if (FD->getBody(Definition))
+ if (FD->hasBody(Definition))
return Definition->isOutOfLine();
}
@@ -1435,7 +1533,7 @@ bool FunctionDecl::isOutOfLine() const {
// check whether that function template was defined out-of-line.
if (FunctionTemplateDecl *FunTmpl = getPrimaryTemplate()) {
const FunctionDecl *Definition;
- if (FunTmpl->getTemplatedDecl()->getBody(Definition))
+ if (FunTmpl->getTemplatedDecl()->hasBody(Definition))
return Definition->isOutOfLine();
}
@@ -1472,9 +1570,13 @@ void TagDecl::Destroy(ASTContext &C) {
TypeDecl::Destroy(C);
}
+SourceLocation TagDecl::getOuterLocStart() const {
+ return getTemplateOrInnerLocStart(this);
+}
+
SourceRange TagDecl::getSourceRange() const {
SourceLocation E = RBraceLoc.isValid() ? RBraceLoc : getLocation();
- return SourceRange(TagKeywordLoc, E);
+ return SourceRange(getOuterLocStart(), E);
}
TagDecl* TagDecl::getCanonicalDecl() {
@@ -1569,6 +1671,10 @@ EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
return Enum;
}
+EnumDecl *EnumDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) EnumDecl(0, SourceLocation(), 0, 0, SourceLocation());
+}
+
void EnumDecl::Destroy(ASTContext& C) {
TagDecl::Destroy(C);
}
@@ -1608,6 +1714,11 @@ RecordDecl *RecordDecl::Create(ASTContext &C, TagKind TK, DeclContext *DC,
return R;
}
+RecordDecl *RecordDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) RecordDecl(Record, TTK_Struct, 0, SourceLocation(), 0, 0,
+ SourceLocation());
+}
+
RecordDecl::~RecordDecl() {
}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
index 42a3726..d4f997d 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
@@ -35,16 +35,18 @@ using namespace clang;
// Statistics
//===----------------------------------------------------------------------===//
-#define DECL(Derived, Base) static int n##Derived##s = 0;
-#include "clang/AST/DeclNodes.def"
+#define DECL(DERIVED, BASE) static int n##DERIVED##s = 0;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
static bool StatSwitch = false;
const char *Decl::getDeclKindName() const {
switch (DeclKind) {
- default: assert(0 && "Declaration not in DeclNodes.def!");
-#define DECL(Derived, Base) case Derived: return #Derived;
-#include "clang/AST/DeclNodes.def"
+ default: assert(0 && "Declaration not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case DERIVED: return #DERIVED;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
}
}
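DeclNodes.def gives way to the tablegen-generated DeclNodes.inc, and every client now stubs out ABSTRACT_DECL since abstract nodes carry no enum value. The X-macro pattern in isolation (sketch):

    #define DECL(DERIVED, BASE) case DERIVED: return #DERIVED;
    #define ABSTRACT_DECL(DECL)          // abstract: expand to nothing
    #include "clang/AST/DeclNodes.inc"
    // expands to:  case Var: return "Var";  case Record: return "Record"; ...
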
@@ -60,9 +62,10 @@ void Decl::setInvalidDecl(bool Invalid) {
const char *DeclContext::getDeclKindName() const {
switch (DeclKind) {
- default: assert(0 && "Declaration context not in DeclNodes.def!");
-#define DECL(Derived, Base) case Decl::Derived: return #Derived;
-#include "clang/AST/DeclNodes.def"
+ default: assert(0 && "Declaration context not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case Decl::DERIVED: return #DERIVED;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
}
}
@@ -75,28 +78,31 @@ void Decl::PrintStats() {
fprintf(stderr, "*** Decl Stats:\n");
int totalDecls = 0;
-#define DECL(Derived, Base) totalDecls += n##Derived##s;
-#include "clang/AST/DeclNodes.def"
+#define DECL(DERIVED, BASE) totalDecls += n##DERIVED##s;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
fprintf(stderr, " %d decls total.\n", totalDecls);
int totalBytes = 0;
-#define DECL(Derived, Base) \
- if (n##Derived##s > 0) { \
- totalBytes += (int)(n##Derived##s * sizeof(Derived##Decl)); \
- fprintf(stderr, " %d " #Derived " decls, %d each (%d bytes)\n", \
- n##Derived##s, (int)sizeof(Derived##Decl), \
- (int)(n##Derived##s * sizeof(Derived##Decl))); \
+#define DECL(DERIVED, BASE) \
+ if (n##DERIVED##s > 0) { \
+ totalBytes += (int)(n##DERIVED##s * sizeof(DERIVED##Decl)); \
+ fprintf(stderr, " %d " #DERIVED " decls, %d each (%d bytes)\n", \
+ n##DERIVED##s, (int)sizeof(DERIVED##Decl), \
+ (int)(n##DERIVED##s * sizeof(DERIVED##Decl))); \
}
-#include "clang/AST/DeclNodes.def"
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
fprintf(stderr, "Total bytes = %d\n", totalBytes);
}
-void Decl::addDeclKind(Kind k) {
+void Decl::add(Kind k) {
switch (k) {
- default: assert(0 && "Declaration not in DeclNodes.def!");
-#define DECL(Derived, Base) case Derived: ++n##Derived##s; break;
-#include "clang/AST/DeclNodes.def"
+ default: assert(0 && "Declaration not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case DERIVED: ++n##DERIVED##s; break;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
}
}
@@ -206,17 +212,17 @@ ASTContext &Decl::getASTContext() const {
return getTranslationUnitDecl()->getASTContext();
}
-bool Decl::isUsed() const {
+bool Decl::isUsed(bool CheckUsedAttr) const {
if (Used)
return true;
// Check for used attribute.
- if (hasAttr<UsedAttr>())
+ if (CheckUsedAttr && hasAttr<UsedAttr>())
return true;
// Check redeclarations for used attribute.
for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
- if (I->hasAttr<UsedAttr>() || I->Used)
+ if ((CheckUsedAttr && I->hasAttr<UsedAttr>()) || I->Used)
return true;
}
@@ -285,6 +291,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
// Never have names.
case Friend:
case FriendTemplate:
+ case AccessSpec:
case LinkageSpec:
case FileScopeAsm:
case StaticAssert:
@@ -307,9 +314,20 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
return 0;
}
+void Decl::initAttrs(Attr *attrs) {
+ assert(!HasAttrs && "Decl already contains attrs.");
+
+ Attr *&AttrBlank = getASTContext().getDeclAttrs(this);
+ assert(AttrBlank == 0 && "HasAttrs was wrong?");
+
+ AttrBlank = attrs;
+ HasAttrs = true;
+}
+
void Decl::addAttr(Attr *NewAttr) {
Attr *&ExistingAttr = getASTContext().getDeclAttrs(this);
+ assert(NewAttr->getNext() == 0 && "Chain of attributes will be truncated!");
NewAttr->setNext(ExistingAttr);
ExistingAttr = NewAttr;
@@ -354,7 +372,6 @@ void Decl::swapAttrs(Decl *RHS) {
RHS->HasAttrs = true;
}
-
void Decl::Destroy(ASTContext &C) {
// Free attributes for this decl.
if (HasAttrs) {
@@ -392,16 +409,18 @@ void Decl::Destroy(ASTContext &C) {
Decl *Decl::castFromDeclContext (const DeclContext *D) {
Decl::Kind DK = D->getDeclKind();
switch(DK) {
-#define DECL_CONTEXT(Name) \
- case Decl::Name: \
- return static_cast<Name##Decl*>(const_cast<DeclContext*>(D));
-#define DECL_CONTEXT_BASE(Name)
-#include "clang/AST/DeclNodes.def"
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) \
+ case Decl::NAME: \
+ return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
default:
-#define DECL_CONTEXT_BASE(Name) \
- if (DK >= Decl::Name##First && DK <= Decl::Name##Last) \
- return static_cast<Name##Decl*>(const_cast<DeclContext*>(D));
-#include "clang/AST/DeclNodes.def"
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (DK >= first##NAME && DK <= last##NAME) \
+ return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
+#include "clang/AST/DeclNodes.inc"
assert(false && "a decl that inherits DeclContext isn't handled");
return 0;
}
@@ -410,46 +429,51 @@ Decl *Decl::castFromDeclContext (const DeclContext *D) {
DeclContext *Decl::castToDeclContext(const Decl *D) {
Decl::Kind DK = D->getKind();
switch(DK) {
-#define DECL_CONTEXT(Name) \
- case Decl::Name: \
- return static_cast<Name##Decl*>(const_cast<Decl*>(D));
-#define DECL_CONTEXT_BASE(Name)
-#include "clang/AST/DeclNodes.def"
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) \
+ case Decl::NAME: \
+ return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
default:
-#define DECL_CONTEXT_BASE(Name) \
- if (DK >= Decl::Name##First && DK <= Decl::Name##Last) \
- return static_cast<Name##Decl*>(const_cast<Decl*>(D));
-#include "clang/AST/DeclNodes.def"
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (DK >= first##NAME && DK <= last##NAME) \
+ return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
+#include "clang/AST/DeclNodes.inc"
assert(false && "a decl that inherits DeclContext isn't handled");
return 0;
}
}
-CompoundStmt* Decl::getCompoundBody() const {
- return dyn_cast_or_null<CompoundStmt>(getBody());
-}
-
SourceLocation Decl::getBodyRBrace() const {
- Stmt *Body = getBody();
- if (!Body)
+ // Special handling of FunctionDecl to avoid de-serializing the body from PCH.
+ // FunctionDecl stores EndRangeLoc for this purpose.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
+ const FunctionDecl *Definition;
+ if (FD->hasBody(Definition))
+ return Definition->getSourceRange().getEnd();
return SourceLocation();
- if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Body))
- return CS->getRBracLoc();
- assert(isa<CXXTryStmt>(Body) &&
- "Body can only be CompoundStmt or CXXTryStmt");
- return cast<CXXTryStmt>(Body)->getSourceRange().getEnd();
+ }
+
+ if (Stmt *Body = getBody())
+ return Body->getSourceRange().getEnd();
+
+ return SourceLocation();
}
#ifndef NDEBUG
void Decl::CheckAccessDeclContext() const {
+ // FIXME: Disable this until rdar://8146294 "access specifier for inner class
+ // templates is not set or checked" is fixed.
+ return;
// Suppress this check if any of the following hold:
// 1. this is the translation unit (and thus has no parent)
// 2. this is a template parameter (and thus doesn't belong to its context)
- // 3. this is a ParmVarDecl (which can be in a record context during
- // the brief period between its creation and the creation of the
- // FunctionDecl)
- // 4. the context is not a record
+ // 3. the context is not a record
+ // 4. it's invalid
if (isa<TranslationUnitDecl>(this) ||
+ isa<TemplateTypeParmDecl>(this) ||
!isa<CXXRecordDecl>(getDeclContext()) ||
isInvalidDecl())
return;
@@ -466,16 +490,18 @@ void Decl::CheckAccessDeclContext() const {
bool DeclContext::classof(const Decl *D) {
switch (D->getKind()) {
-#define DECL_CONTEXT(Name) case Decl::Name:
-#define DECL_CONTEXT_BASE(Name)
-#include "clang/AST/DeclNodes.def"
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) case Decl::NAME:
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
return true;
default:
-#define DECL_CONTEXT_BASE(Name) \
- if (D->getKind() >= Decl::Name##First && \
- D->getKind() <= Decl::Name##Last) \
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (D->getKind() >= Decl::first##NAME && \
+ D->getKind() <= Decl::last##NAME) \
return true;
-#include "clang/AST/DeclNodes.def"
+#include "clang/AST/DeclNodes.inc"
return false;
}
}
@@ -537,7 +563,7 @@ bool DeclContext::isTransparentContext() const {
return true; // FIXME: Check for C++0x scoped enums
else if (DeclKind == Decl::LinkageSpec)
return true;
- else if (DeclKind >= Decl::RecordFirst && DeclKind <= Decl::RecordLast)
+ else if (DeclKind >= Decl::firstRecord && DeclKind <= Decl::lastRecord)
return cast<RecordDecl>(this)->isAnonymousStructOrUnion();
else if (DeclKind == Decl::Namespace)
return false; // FIXME: Check for C++0x inline namespaces
@@ -581,7 +607,7 @@ DeclContext *DeclContext::getPrimaryContext() {
return this;
default:
- if (DeclKind >= Decl::TagFirst && DeclKind <= Decl::TagLast) {
+ if (DeclKind >= Decl::firstTag && DeclKind <= Decl::lastTag) {
// If this is a tag type that has a definition or is currently
// being defined, that definition is our primary context.
TagDecl *Tag = cast<TagDecl>(this);
@@ -602,7 +628,7 @@ DeclContext *DeclContext::getPrimaryContext() {
return Tag;
}
- assert(DeclKind >= Decl::FunctionFirst && DeclKind <= Decl::FunctionLast &&
+ assert(DeclKind >= Decl::firstFunction && DeclKind <= Decl::lastFunction &&
"Unknown DeclContext kind");
return this;
}
@@ -626,9 +652,8 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
ExternalASTSource *Source = getParentASTContext().getExternalSource();
assert(hasExternalLexicalStorage() && Source && "No external storage?");
- llvm::SmallVector<uint32_t, 64> Decls;
- if (Source->ReadDeclsLexicallyInContext(const_cast<DeclContext *>(this),
- Decls))
+ llvm::SmallVector<Decl*, 64> Decls;
+ if (Source->FindExternalLexicalDecls(this, Decls))
return;
// There is no longer any lexical storage in this context
@@ -642,7 +667,7 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
Decl *FirstNewDecl = 0;
Decl *PrevDecl = 0;
for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
- Decl *D = Source->GetDecl(Decls[I]);
+ Decl *D = Decls[I];
if (PrevDecl)
PrevDecl->NextDeclInContext = D;
else
@@ -659,28 +684,83 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
LastDecl = PrevDecl;
}
-void
-DeclContext::LoadVisibleDeclsFromExternalStorage() const {
- DeclContext *This = const_cast<DeclContext *>(this);
- ExternalASTSource *Source = getParentASTContext().getExternalSource();
- assert(hasExternalVisibleStorage() && Source && "No external storage?");
+DeclContext::lookup_result
+ExternalASTSource::SetNoExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name) {
+ ASTContext &Context = DC->getParentASTContext();
+ StoredDeclsMap *Map;
+ if (!(Map = DC->LookupPtr))
+ Map = DC->CreateStoredDeclsMap(Context);
+
+ StoredDeclsList &List = (*Map)[Name];
+ assert(List.isNull());
+ (void) List;
+
+ return DeclContext::lookup_result();
+}
- llvm::SmallVector<VisibleDeclaration, 64> Decls;
- if (Source->ReadDeclsVisibleInContext(This, Decls))
- return;
+DeclContext::lookup_result
+ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
+ const VisibleDeclaration &VD) {
+ ASTContext &Context = DC->getParentASTContext();
+ StoredDeclsMap *Map;
+ if (!(Map = DC->LookupPtr))
+ Map = DC->CreateStoredDeclsMap(Context);
+
+ StoredDeclsList &List = (*Map)[VD.Name];
+ List.setFromDeclIDs(VD.Declarations);
+ return List.getLookupResult(Context);
+}
- // There is no longer any visible storage in this context
- ExternalVisibleStorage = false;
+DeclContext::lookup_result
+ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name,
+ llvm::SmallVectorImpl<NamedDecl*> &Decls) {
+ ASTContext &Context = DC->getParentASTContext();
- // Load the declaration IDs for all of the names visible in this
- // context.
- assert(!LookupPtr && "Have a lookup map before de-serialization?");
- StoredDeclsMap *Map = CreateStoredDeclsMap(getParentASTContext());
+ StoredDeclsMap *Map;
+ if (!(Map = DC->LookupPtr))
+ Map = DC->CreateStoredDeclsMap(Context);
+
+ StoredDeclsList &List = (*Map)[Name];
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
+ if (List.isNull())
+ List.setOnlyValue(Decls[I]);
+ else
+ List.AddSubsequentDecl(Decls[I]);
+ }
+
+ return List.getLookupResult(Context);
+}
+
+void ExternalASTSource::SetExternalVisibleDecls(const DeclContext *DC,
+ const llvm::SmallVectorImpl<VisibleDeclaration> &Decls) {
+ // There is no longer any visible storage in this context.
+ DC->ExternalVisibleStorage = false;
+
+ assert(!DC->LookupPtr && "Have a lookup map before de-serialization?");
+ StoredDeclsMap *Map = DC->CreateStoredDeclsMap(DC->getParentASTContext());
for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
(*Map)[Decls[I].Name].setFromDeclIDs(Decls[I].Declarations);
}
}
+void ExternalASTSource::SetExternalVisibleDecls(const DeclContext *DC,
+ const llvm::SmallVectorImpl<NamedDecl*> &Decls) {
+ // There is no longer any visible storage in this context.
+ DC->ExternalVisibleStorage = false;
+
+ assert(!DC->LookupPtr && "Have a lookup map before de-serialization?");
+ StoredDeclsMap &Map = *DC->CreateStoredDeclsMap(DC->getParentASTContext());
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
+ StoredDeclsList &List = Map[Decls[I]->getDeclName()];
+ if (List.isNull())
+ List.setOnlyValue(Decls[I]);
+ else
+ List.AddSubsequentDecl(Decls[I]);
+ }
+}
+
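The SetExternalVisibleDecls* helpers above populate one StoredDeclsList per name, calling setOnlyValue for the first decl and AddSubsequentDecl for the rest. A rough stand-in for that one-value-or-vector list, assuming (as the loops above do) that the caller checks isNull() before the first insertion:

    #include <variant>
    #include <vector>

    struct NamedDecl;

    class TinyDeclList {
      std::variant<std::monostate, NamedDecl *,
                   std::vector<NamedDecl *>> Data;

    public:
      bool isNull() const {
        return std::holds_alternative<std::monostate>(Data);
      }

      void setOnlyValue(NamedDecl *D) { Data = D; }

      void AddSubsequentDecl(NamedDecl *D) {
        if (auto *Single = std::get_if<NamedDecl *>(&Data)) {
          std::vector<NamedDecl *> V{*Single, D}; // spill the inline value
          Data = std::move(V);
        } else {
          std::get<std::vector<NamedDecl *>>(Data).push_back(D);
        }
      }
    };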
DeclContext::decl_iterator DeclContext::decls_begin() const {
if (hasExternalLexicalStorage())
LoadLexicalDeclsFromExternalStorage();
@@ -801,8 +881,17 @@ DeclContext::lookup(DeclarationName Name) {
if (PrimaryContext != this)
return PrimaryContext->lookup(Name);
- if (hasExternalVisibleStorage())
- LoadVisibleDeclsFromExternalStorage();
+ if (hasExternalVisibleStorage()) {
+ // Check to see if we've already cached the lookup results.
+ if (LookupPtr) {
+ StoredDeclsMap::iterator I = LookupPtr->find(Name);
+ if (I != LookupPtr->end())
+ return I->second.getLookupResult(getParentASTContext());
+ }
+
+ ExternalASTSource *Source = getParentASTContext().getExternalSource();
+ return Source->FindExternalVisibleDeclsByName(this, Name);
+ }
/// If there is no lookup data structure, build one now by walking
/// all of the linked DeclContexts (in declaration order!) and
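With this change, DeclContext::lookup first consults the lookup table as a cache of already de-serialized names and only falls back to FindExternalVisibleDeclsByName on a miss. A simplified model of that flow, with placeholder types in place of StoredDeclsMap and ExternalASTSource:

    #include <map>
    #include <string>
    #include <vector>

    using Result = std::vector<int>; // stands in for a list of decls

    struct ExternalSource {
      // In clang this also primes the context's lookup table; stubbed here.
      Result findByName(const std::string &) { return {}; }
    };

    struct Scope {
      std::map<std::string, Result> Cache; // stands in for StoredDeclsMap
      ExternalSource *Source = nullptr;
      bool HasExternalStorage = false;

      Result lookup(const std::string &Name) {
        if (HasExternalStorage) {
          auto It = Cache.find(Name);
          if (It != Cache.end())
            return It->second;             // cached: already de-serialized
          return Source->findByName(Name); // miss: ask the external source
        }
        auto It = Cache.find(Name);
        return It == Cache.end() ? Result() : It->second;
      }
    };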
@@ -858,9 +947,10 @@ void DeclContext::makeDeclVisibleInContext(NamedDecl *D, bool Recoverable) {
}
// If we already have a lookup data structure, perform the insertion
- // into it. Otherwise, be lazy and don't build that structure until
- // someone asks for it.
- if (LookupPtr || !Recoverable)
+ // into it. If we haven't deserialized externally stored decls, deserialize
+ // them so we can add the decl. Otherwise, be lazy and don't build that
+ // structure until someone asks for it.
+ if (LookupPtr || !Recoverable || hasExternalVisibleStorage())
makeDeclVisibleInContextImpl(D);
// If we are a transparent context, insert into our parent context,
@@ -880,6 +970,12 @@ void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D) {
if (isa<ClassTemplateSpecializationDecl>(D))
return;
+ // If there is an external AST source, load any declarations it knows about
+ // with this declaration's name.
+ if (ExternalASTSource *Source = getParentASTContext().getExternalSource())
+ if (hasExternalVisibleStorage())
+ Source->FindExternalVisibleDeclsByName(this, D->getDeclName());
+
ASTContext *C = 0;
if (!LookupPtr) {
C = &getParentASTContext();
@@ -932,7 +1028,7 @@ void StoredDeclsList::materializeDecls(ASTContext &Context) {
ExternalASTSource *Source = Context.getExternalSource();
assert(Source && "No external AST source available!");
- Data = reinterpret_cast<uintptr_t>(Source->GetDecl(DeclID));
+ Data = reinterpret_cast<uintptr_t>(Source->GetExternalDecl(DeclID));
break;
}
@@ -944,7 +1040,7 @@ void StoredDeclsList::materializeDecls(ASTContext &Context) {
assert(Source && "No external AST source available!");
for (unsigned I = 0, N = Vector.size(); I != N; ++I)
- Vector[I] = reinterpret_cast<uintptr_t>(Source->GetDecl(Vector[I]));
+ Vector[I] = reinterpret_cast<uintptr_t>(Source->GetExternalDecl(Vector[I]));
Data = (Data & ~0x03) | DK_Decl_Vector;
break;
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
index cd7afd9..dd0fe08 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
@@ -32,6 +32,8 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
Abstract(false), HasTrivialConstructor(true),
HasTrivialCopyConstructor(true), HasTrivialCopyAssignment(true),
HasTrivialDestructor(true), ComputedVisibleConversions(false),
+ DeclaredDefaultConstructor(false), DeclaredCopyConstructor(false),
+ DeclaredCopyAssignment(false), DeclaredDestructor(false),
Bases(0), NumBases(0), VBases(0), NumVBases(0),
Definition(D), FirstFriend(0) {
}
@@ -58,6 +60,11 @@ CXXRecordDecl *CXXRecordDecl::Create(ASTContext &C, TagKind TK, DeclContext *DC,
return R;
}
+CXXRecordDecl *CXXRecordDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) CXXRecordDecl(CXXRecord, TTK_Struct, 0, SourceLocation(), 0, 0,
+ SourceLocation());
+}
+
CXXRecordDecl::~CXXRecordDecl() {
}
@@ -159,6 +166,29 @@ bool CXXRecordDecl::hasConstCopyConstructor(ASTContext &Context) const {
return getCopyConstructor(Context, Qualifiers::Const) != 0;
}
+/// \brief Perform a simplistic form of overload resolution that only considers
+/// cv-qualifiers on a single parameter, and return the best overload candidate
+/// (if there is one).
+static CXXMethodDecl *
+GetBestOverloadCandidateSimple(
+ const llvm::SmallVectorImpl<std::pair<CXXMethodDecl *, Qualifiers> > &Cands) {
+ if (Cands.empty())
+ return 0;
+ if (Cands.size() == 1)
+ return Cands[0].first;
+
+ unsigned Best = 0, N = Cands.size();
+ for (unsigned I = 1; I != N; ++I)
+ if (Cands[Best].second.isSupersetOf(Cands[I].second))
+ Best = I;
+
+ for (unsigned I = 1; I != N; ++I)
+ if (Cands[Best].second.isSupersetOf(Cands[I].second))
+ return 0;
+
+ return Cands[Best].first;
+}
+
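GetBestOverloadCandidateSimple picks the candidate with the smallest qualifier set and then verifies that the minimum is unique. One generic way to write that two-pass selection; note the explicit I != Best guard in the verification pass, since a qualifier test that counts a set as a superset of itself would otherwise reject any winner other than the first candidate:

    #include <cstddef>
    #include <vector>

    // Covers(A, B) == true means A's qualifier set includes B's.
    template <typename T, typename Pred>
    const T *uniqueMinimum(const std::vector<T> &Cands, Pred Covers) {
      if (Cands.empty())
        return nullptr;
      std::size_t Best = 0, N = Cands.size();
      for (std::size_t I = 1; I != N; ++I)
        if (Covers(Cands[Best], Cands[I])) // I needs no more than Best
          Best = I;
      for (std::size_t I = 0; I != N; ++I)
        if (I != Best && Covers(Cands[Best], Cands[I]))
          return nullptr;                  // minimum not unique: ambiguous
      return &Cands[Best];
    }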
CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(ASTContext &Context,
unsigned TypeQuals) const{
QualType ClassType
@@ -167,6 +197,7 @@ CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(ASTContext &Context,
= Context.DeclarationNames.getCXXConstructorName(
Context.getCanonicalType(ClassType));
unsigned FoundTQs;
+ llvm::SmallVector<std::pair<CXXMethodDecl *, Qualifiers>, 4> Found;
DeclContext::lookup_const_iterator Con, ConEnd;
for (llvm::tie(Con, ConEnd) = this->lookup(ConstructorName);
Con != ConEnd; ++Con) {
@@ -175,61 +206,68 @@ CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(ASTContext &Context,
if (isa<FunctionTemplateDecl>(*Con))
continue;
- if (cast<CXXConstructorDecl>(*Con)->isCopyConstructor(FoundTQs)) {
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ if (Constructor->isCopyConstructor(FoundTQs)) {
if (((TypeQuals & Qualifiers::Const) == (FoundTQs & Qualifiers::Const)) ||
(!(TypeQuals & Qualifiers::Const) && (FoundTQs & Qualifiers::Const)))
- return cast<CXXConstructorDecl>(*Con);
-
+ Found.push_back(std::make_pair(
+ const_cast<CXXConstructorDecl *>(Constructor),
+ Qualifiers::fromCVRMask(FoundTQs)));
}
}
- return 0;
+
+ return cast_or_null<CXXConstructorDecl>(
+ GetBestOverloadCandidateSimple(Found));
}
-bool CXXRecordDecl::hasConstCopyAssignment(ASTContext &Context,
- const CXXMethodDecl *& MD) const {
- QualType ClassType = Context.getCanonicalType(Context.getTypeDeclType(
- const_cast<CXXRecordDecl*>(this)));
- DeclarationName OpName =Context.DeclarationNames.getCXXOperatorName(OO_Equal);
-
+CXXMethodDecl *CXXRecordDecl::getCopyAssignmentOperator(bool ArgIsConst) const {
+ ASTContext &Context = getASTContext();
+ QualType Class = Context.getTypeDeclType(const_cast<CXXRecordDecl *>(this));
+ DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+
+ llvm::SmallVector<std::pair<CXXMethodDecl *, Qualifiers>, 4> Found;
DeclContext::lookup_const_iterator Op, OpEnd;
- for (llvm::tie(Op, OpEnd) = this->lookup(OpName);
- Op != OpEnd; ++Op) {
+ for (llvm::tie(Op, OpEnd) = this->lookup(Name); Op != OpEnd; ++Op) {
// C++ [class.copy]p9:
// A user-declared copy assignment operator is a non-static non-template
// member function of class X with exactly one parameter of type X, X&,
// const X&, volatile X& or const volatile X&.
const CXXMethodDecl* Method = dyn_cast<CXXMethodDecl>(*Op);
- if (!Method)
+ if (!Method || Method->isStatic() || Method->getPrimaryTemplate())
continue;
-
- if (Method->isStatic())
- continue;
- if (Method->getPrimaryTemplate())
- continue;
- const FunctionProtoType *FnType =
- Method->getType()->getAs<FunctionProtoType>();
+
+ const FunctionProtoType *FnType
+ = Method->getType()->getAs<FunctionProtoType>();
assert(FnType && "Overloaded operator has no prototype.");
// Don't assert on this; an invalid decl might have been left in the AST.
if (FnType->getNumArgs() != 1 || FnType->isVariadic())
continue;
- bool AcceptsConst = true;
+
QualType ArgType = FnType->getArgType(0);
+ Qualifiers Quals;
if (const LValueReferenceType *Ref = ArgType->getAs<LValueReferenceType>()) {
ArgType = Ref->getPointeeType();
- // Is it a non-const lvalue reference?
- if (!ArgType.isConstQualified())
- AcceptsConst = false;
+ // If we have a const argument and we have a reference to a non-const,
+ // this function does not match.
+ if (ArgIsConst && !ArgType.isConstQualified())
+ continue;
+
+ Quals = ArgType.getQualifiers();
+ } else {
+ // By-value copy-assignment operators are treated like const X&
+ // copy-assignment operators.
+ Quals = Qualifiers::fromCVRMask(Qualifiers::Const);
}
- if (!Context.hasSameUnqualifiedType(ArgType, ClassType))
+
+ if (!Context.hasSameUnqualifiedType(ArgType, Class))
continue;
- MD = Method;
- // We have a single argument of type cv X or cv X&, i.e. we've found the
- // copy assignment operator. Return whether it accepts const arguments.
- return AcceptsConst;
+
+ // Save this copy-assignment operator. It might be "the one".
+ Found.push_back(std::make_pair(const_cast<CXXMethodDecl *>(Method), Quals));
}
- assert(isInvalidDecl() &&
- "No copy assignment operator declared in valid code.");
- return false;
+
+ // Use a simplistic form of overload resolution to find the candidate.
+ return GetBestOverloadCandidateSimple(Found);
}
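For reference, the parameter shapes getCopyAssignmentOperator now recognizes, per C++ [class.copy]p9; a by-value parameter is folded into the const X& bucket, as the else branch above does. This is a catalogue of declarations, not a class one would actually write:

    struct X {
      X &operator=(X);            // by value: bucketed with "const X&" above
      X &operator=(X &);          // matches non-const lvalue arguments only
      X &operator=(const X &);    // the classic copy-assignment signature
      X &operator=(volatile X &); // volatile qualifiers recorded in Quals
    };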
void
@@ -239,6 +277,9 @@ CXXRecordDecl::addedConstructor(ASTContext &Context,
// Note that we have a user-declared constructor.
data().UserDeclaredConstructor = true;
+ // Note that we have no need of an implicitly-declared default constructor.
+ data().DeclaredDefaultConstructor = true;
+
// C++ [dcl.init.aggr]p1:
// An aggregate is an array or a class (clause 9) with no
// user-declared constructors (12.1) [...].
@@ -258,11 +299,13 @@ CXXRecordDecl::addedConstructor(ASTContext &Context,
// suppress the implicit declaration of a copy constructor.
if (ConDecl->isCopyConstructor()) {
data().UserDeclaredCopyConstructor = true;
-
+ data().DeclaredCopyConstructor = true;
+
// C++ [class.copy]p6:
// A copy constructor is trivial if it is implicitly declared.
// FIXME: C++0x: don't do this for "= default" copy constructors.
data().HasTrivialCopyConstructor = false;
+
}
}
@@ -294,7 +337,8 @@ void CXXRecordDecl::addedAssignmentOperator(ASTContext &Context,
// Suppress the implicit declaration of a copy constructor.
data().UserDeclaredCopyAssignment = true;
-
+ data().DeclaredCopyAssignment = true;
+
// C++ [class.copy]p11:
// A copy assignment operator is trivial if it is implicitly declared.
// FIXME: C++0x: don't do this for "= default" copy operators.
@@ -546,7 +590,8 @@ CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
}
CXXConstructorDecl *
-CXXRecordDecl::getDefaultConstructor(ASTContext &Context) {
+CXXRecordDecl::getDefaultConstructor() {
+ ASTContext &Context = getASTContext();
QualType ClassType = Context.getTypeDeclType(this);
DeclarationName ConstructorName
= Context.DeclarationNames.getCXXConstructorName(
@@ -566,7 +611,8 @@ CXXRecordDecl::getDefaultConstructor(ASTContext &Context) {
return 0;
}
-CXXDestructorDecl *CXXRecordDecl::getDestructor(ASTContext &Context) const {
+CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
+ ASTContext &Context = getASTContext();
QualType ClassType = Context.getTypeDeclType(this);
DeclarationName Name
@@ -670,6 +716,10 @@ CXXMethodDecl::method_iterator CXXMethodDecl::end_overridden_methods() const {
return getASTContext().overridden_methods_end(this);
}
+unsigned CXXMethodDecl::size_overridden_methods() const {
+ return getASTContext().overridden_methods_size(this);
+}
+
QualType CXXMethodDecl::getThisType(ASTContext &C) const {
// C++ 9.3.2p1: The type of this in a member function of a class X is X*.
// If the member function is declared const, the type of this is const X*,
@@ -693,7 +743,7 @@ bool CXXMethodDecl::hasInlineBody() const {
CheckFn = this;
const FunctionDecl *fn;
- return CheckFn->getBody(fn) && !fn->isOutOfLine();
+ return CheckFn->hasBody(fn) && !fn->isOutOfLine();
}
CXXBaseOrMemberInitializer::
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
index ab3552d..99bfe40 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
@@ -39,3 +39,7 @@ FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
cast<CXXRecordDecl>(DC)->pushFriendDecl(FD);
return FD;
}
+
+FriendDecl *FriendDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) FriendDecl(Empty);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
index dc4aacd..adb0e7d 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
@@ -223,17 +223,24 @@ void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
setProtocolList(ProtocolRefs.data(), NumProtoRefs, ProtocolLocs.data(), C);
}
-/// getClassExtension - Find class extension of the given class.
-// FIXME. can speed it up, if need be.
-ObjCCategoryDecl* ObjCInterfaceDecl::getClassExtension() const {
- const ObjCInterfaceDecl* ClassDecl = this;
- for (ObjCCategoryDecl *CDecl = ClassDecl->getCategoryList(); CDecl;
+/// getFirstClassExtension - Find first class extension of the given class.
+ObjCCategoryDecl* ObjCInterfaceDecl::getFirstClassExtension() const {
+ for (ObjCCategoryDecl *CDecl = getCategoryList(); CDecl;
CDecl = CDecl->getNextClassCategory())
if (CDecl->IsClassExtension())
return CDecl;
return 0;
}
+/// getNextClassExtension - Find next class extension in the list of categories.
+const ObjCCategoryDecl* ObjCCategoryDecl::getNextClassExtension() const {
+ for (const ObjCCategoryDecl *CDecl = getNextClassCategory(); CDecl;
+ CDecl = CDecl->getNextClassCategory())
+ if (CDecl->IsClassExtension())
+ return CDecl;
+ return 0;
+}
+
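Both getters above are filtered walks over the singly-linked category list, skipping ordinary categories until a class extension is found. The same idiom in miniature (hypothetical types):

    struct Category {
      Category *NextCategory = nullptr;
      bool IsExtension = false;

      const Category *nextExtension() const {
        for (const Category *C = NextCategory; C; C = C->NextCategory)
          if (C->IsExtension)
            return C;
        return nullptr;
      }
    };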
ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
ObjCInterfaceDecl *&clsDeclared) {
ObjCInterfaceDecl* ClassDecl = this;
@@ -242,11 +249,13 @@ ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
clsDeclared = ClassDecl;
return I;
}
- if (const ObjCCategoryDecl *CDecl = ClassDecl->getClassExtension())
+ for (const ObjCCategoryDecl *CDecl = ClassDecl->getFirstClassExtension();
+ CDecl; CDecl = CDecl->getNextClassExtension()) {
if (ObjCIvarDecl *I = CDecl->getIvarDecl(ID)) {
clsDeclared = ClassDecl;
return I;
}
+ }
ClassDecl = ClassDecl->getSuperClass();
}
@@ -887,7 +896,7 @@ ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
IdentifierInfo *Id,
SourceLocation AtLoc,
- QualType T,
+ TypeSourceInfo *T,
PropertyControl propControl) {
return new (C) ObjCPropertyDecl(DC, L, Id, AtLoc, T);
}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
index 53949247..765772d 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
@@ -183,7 +183,7 @@ void DeclPrinter::Print(AccessSpecifier AS) {
case AS_none: assert(0 && "No access specifier!"); break;
case AS_public: Out << "public"; break;
case AS_protected: Out << "protected"; break;
- case AS_private: Out << " private"; break;
+ case AS_private: Out << "private"; break;
}
}
@@ -195,9 +195,6 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
if (Indent)
Indentation += Policy.Indentation;
- bool PrintAccess = isa<CXXRecordDecl>(DC);
- AccessSpecifier CurAS = AS_none;
-
llvm::SmallVector<Decl*, 2> Decls;
for (DeclContext::decl_iterator D = DC->decls_begin(), DEnd = DC->decls_end();
D != DEnd; ++D) {
@@ -205,21 +202,14 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
// Skip over implicit declarations in pretty-printing mode.
if (D->isImplicit()) continue;
// FIXME: Ugly hack so we don't pretty-print the builtin declaration
- // of __builtin_va_list. There should be some other way to check that.
- if (isa<NamedDecl>(*D) && cast<NamedDecl>(*D)->getNameAsString() ==
- "__builtin_va_list")
- continue;
- }
-
- if (PrintAccess) {
- AccessSpecifier AS = D->getAccess();
-
- if (AS != CurAS) {
- if (Indent)
- this->Indent(Indentation - Policy.Indentation);
- Print(AS);
- Out << ":\n";
- CurAS = AS;
+ // of __builtin_va_list or __[u]int128_t. There should be some other way
+ // to check that.
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*D)) {
+ if (IdentifierInfo *II = ND->getIdentifier()) {
+ if (II->isStr("__builtin_va_list") ||
+ II->isStr("__int128_t") || II->isStr("__uint128_t"))
+ continue;
+ }
}
}
@@ -251,6 +241,16 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
Decls.push_back(*D);
continue;
}
+
+ if (isa<AccessSpecDecl>(*D)) {
+ Indentation -= Policy.Indentation;
+ this->Indent();
+ Print(D->getAccess());
+ Out << ":\n";
+ Indentation += Policy.Indentation;
+ continue;
+ }
+
this->Indent();
Visit(*D);
@@ -406,7 +406,8 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
FieldDecl *FD = BMInitializer->getMember();
Out << FD;
} else {
- Out << QualType(BMInitializer->getBaseClass(), 0).getAsString();
+ Out << QualType(BMInitializer->getBaseClass(),
+ 0).getAsString(Policy);
}
Out << "(";
@@ -653,7 +654,11 @@ void DeclPrinter::VisitTemplateDecl(TemplateDecl *D) {
Out << "> ";
- Visit(D->getTemplatedDecl());
+ if (isa<TemplateTemplateParmDecl>(D)) {
+ Out << "class " << D->getName();
+ } else {
+ Visit(D->getTemplatedDecl());
+ }
}
//----------------------------------------------------------------------------
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
index 26e291c..9e1d79d 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
@@ -162,37 +162,19 @@ ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
TemplateParameterList *Params,
NamedDecl *Decl,
ClassTemplateDecl *PrevDecl) {
- Common *CommonPtr;
- if (PrevDecl)
- CommonPtr = PrevDecl->CommonPtr;
- else {
- CommonPtr = new (C) Common;
- C.AddDeallocation(DeallocateCommon, CommonPtr);
- }
-
- return new (C) ClassTemplateDecl(DC, L, Name, Params, Decl, PrevDecl,
- CommonPtr);
-}
-
-ClassTemplateDecl::~ClassTemplateDecl() {
- assert(CommonPtr == 0 && "ClassTemplateDecl must be explicitly destroyed");
+ ClassTemplateDecl *New = new (C) ClassTemplateDecl(DC, L, Name, Params, Decl);
+ New->setPreviousDeclaration(PrevDecl);
+ return New;
}
void ClassTemplateDecl::Destroy(ASTContext& C) {
- if (!PreviousDeclaration) {
- CommonPtr->~Common();
- C.Deallocate((void*)CommonPtr);
- }
- CommonPtr = 0;
-
- this->~ClassTemplateDecl();
- C.Deallocate((void*)this);
+ Decl::Destroy(C);
}
void ClassTemplateDecl::getPartialSpecializations(
llvm::SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) {
llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &PartialSpecs
- = CommonPtr->PartialSpecializations;
+ = getPartialSpecializations();
PS.clear();
PS.resize(PartialSpecs.size());
for (llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
@@ -219,7 +201,8 @@ ClassTemplateDecl::findPartialSpecialization(QualType T) {
}
QualType
-ClassTemplateDecl::getInjectedClassNameSpecialization(ASTContext &Context) {
+ClassTemplateDecl::getInjectedClassNameSpecialization() {
+ Common *CommonPtr = getCommonPtr();
if (!CommonPtr->InjectedClassNameType.isNull())
return CommonPtr->InjectedClassNameType;
@@ -227,7 +210,7 @@ ClassTemplateDecl::getInjectedClassNameSpecialization(ASTContext &Context) {
// corresponding to template parameter packs should be pack
// expansions. We already say that in 14.6.2.1p2, so it would be
// better to fix that redundancy.
-
+ ASTContext &Context = getASTContext();
TemplateParameterList *Params = getTemplateParameters();
llvm::SmallVector<TemplateArgument, 16> TemplateArgs;
TemplateArgs.reserve(Params->size());
@@ -240,7 +223,7 @@ ClassTemplateDecl::getInjectedClassNameSpecialization(ASTContext &Context) {
} else if (NonTypeTemplateParmDecl *NTTP =
dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
Expr *E = new (Context) DeclRefExpr(NTTP,
- NTTP->getType().getNonReferenceType(),
+ NTTP->getType().getNonLValueExprType(Context),
NTTP->getLocation());
TemplateArgs.push_back(TemplateArgument(E));
} else {
@@ -256,6 +239,20 @@ ClassTemplateDecl::getInjectedClassNameSpecialization(ASTContext &Context) {
return CommonPtr->InjectedClassNameType;
}
+ClassTemplateDecl::Common *ClassTemplateDecl::getCommonPtr() {
+ // Find the first declaration of this class template.
+ ClassTemplateDecl *First = this;
+ while (First->getPreviousDeclaration())
+ First = First->getPreviousDeclaration();
+
+ if (First->CommonOrPrev.isNull()) {
+ Common *CommonPtr = new (getASTContext()) Common;
+ getASTContext().AddDeallocation(DeallocateCommon, CommonPtr);
+ First->CommonOrPrev = CommonPtr;
+ }
+ return First->CommonOrPrev.get<Common*>();
+}
+
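getCommonPtr replaces the eagerly shared CommonPtr: it walks the redeclaration chain back to the first declaration and lazily allocates the shared Common block there, so every redeclaration sees the same state. A sketch of that chain-walking trick; clang allocates the block in the ASTContext and registers DeallocateCommon, while this sketch uses a raw new for brevity:

    struct Common { /* shared, lazily built state */ };

    struct TemplateNode {
      TemplateNode *Prev = nullptr; // previous declaration, if any
      Common *Shared = nullptr;     // only ever set on the first declaration

      Common *getCommonPtr() {
        TemplateNode *First = this;
        while (First->Prev)             // walk to the first declaration
          First = First->Prev;
        if (!First->Shared)
          First->Shared = new Common(); // allocated once, shared by the chain
        return First->Shared;
      }
    };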
//===----------------------------------------------------------------------===//
// TemplateTypeParm Allocation/Deallocation Method Implementations
//===----------------------------------------------------------------------===//
@@ -269,6 +266,12 @@ TemplateTypeParmDecl::Create(ASTContext &C, DeclContext *DC,
return new (C) TemplateTypeParmDecl(DC, L, Id, Typename, Type, ParameterPack);
}
+TemplateTypeParmDecl *
+TemplateTypeParmDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) TemplateTypeParmDecl(0, SourceLocation(), 0, false,
+ QualType(), false);
+}
+
SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
return DefaultArgument->getTypeLoc().getSourceRange().getBegin();
}
@@ -294,8 +297,9 @@ NonTypeTemplateParmDecl::Create(ASTContext &C, DeclContext *DC,
}
SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
- return DefaultArgument? DefaultArgument->getSourceRange().getBegin()
- : SourceLocation();
+ return hasDefaultArgument()
+ ? getDefaultArgument()->getSourceRange().getBegin()
+ : SourceLocation();
}
//===----------------------------------------------------------------------===//
@@ -393,6 +397,13 @@ TemplateArgumentList::TemplateArgumentList(ASTContext &Context,
}
}
+TemplateArgumentList::TemplateArgumentList(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs)
+ : NumFlatArguments(0), NumStructuredArguments(0) {
+ init(Context, Args, NumArgs);
+}
+
/// Produces a shallow copy of the given template argument list. This
/// assumes that the input argument list outlives it. This takes the list as
/// a pointer to avoid looking like a copy constructor, since this really
@@ -403,6 +414,23 @@ TemplateArgumentList::TemplateArgumentList(const TemplateArgumentList *Other)
StructuredArguments(Other->StructuredArguments.getPointer(), false),
NumStructuredArguments(Other->NumStructuredArguments) { }
+void TemplateArgumentList::init(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+  assert(NumFlatArguments == 0 && NumStructuredArguments == 0 &&
+         "Already initialized!");
+
+  NumFlatArguments = NumStructuredArguments = NumArgs;
+  TemplateArgument *NewArgs = new (Context) TemplateArgument[NumArgs];
+  std::copy(Args, Args+NumArgs, NewArgs);
+  FlatArguments.setPointer(NewArgs);
+  FlatArguments.setInt(1); // Owns the pointer.
+
+  // Just reuse the flat arguments array.
+  StructuredArguments.setPointer(NewArgs);
+  StructuredArguments.setInt(0); // Doesn't own the pointer.
+}
+
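init copies the arguments once and points both the flat and the structured views at the same buffer, recording ownership in the PointerIntPair bit so Destroy frees it exactly once. The same shared-buffer/ownership-flag layout with a plain pointer and bool (illustrative, not the clang types):

    #include <algorithm>
    #include <cstddef>

    struct Arg { int Value; };

    struct ArgList {
      Arg *Flat = nullptr;
      bool OwnsFlat = false;
      Arg *Structured = nullptr;
      bool OwnsStructured = false;
      std::size_t Size = 0;

      void init(const Arg *Args, std::size_t N) {
        Arg *Copy = new Arg[N];
        std::copy(Args, Args + N, Copy);
        Flat = Copy;       OwnsFlat = true;        // flat view owns the buffer
        Structured = Copy; OwnsStructured = false; // structured view aliases it
        Size = N;
      }

      void destroy() {
        if (OwnsFlat)
          delete[] Flat;
        // Never delete Structured: it aliases Flat here.
      }
    };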
void TemplateArgumentList::Destroy(ASTContext &C) {
if (FlatArguments.getInt())
C.Deallocate((void*)FlatArguments.getPointer());
@@ -425,11 +453,17 @@ ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK,
SpecializedTemplate->getIdentifier(),
PrevDecl),
SpecializedTemplate(SpecializedTemplate),
- TypeAsWritten(0),
+ ExplicitInfo(0),
TemplateArgs(Context, Builder, /*TakeArgs=*/true),
SpecializationKind(TSK_Undeclared) {
}
+ClassTemplateSpecializationDecl::ClassTemplateSpecializationDecl(Kind DK)
+ : CXXRecordDecl(DK, TTK_Struct, 0, SourceLocation(), 0, 0),
+ ExplicitInfo(0),
+ SpecializationKind(TSK_Undeclared) {
+}
+
ClassTemplateSpecializationDecl *
ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
DeclContext *DC, SourceLocation L,
@@ -447,7 +481,15 @@ ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
return Result;
}
+ClassTemplateSpecializationDecl *
+ClassTemplateSpecializationDecl::Create(ASTContext &Context, EmptyShell Empty) {
+ return
+ new (Context) ClassTemplateSpecializationDecl(ClassTemplateSpecialization);
+}
+
void ClassTemplateSpecializationDecl::Destroy(ASTContext &C) {
+ delete ExplicitInfo;
+
if (SpecializedPartialSpecialization *PartialSpec
= SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>())
C.Deallocate(PartialSpec);
@@ -508,6 +550,25 @@ Create(ASTContext &Context, TagKind TK,DeclContext *DC, SourceLocation L,
return Result;
}
+ClassTemplatePartialSpecializationDecl *
+ClassTemplatePartialSpecializationDecl::Create(ASTContext &Context,
+ EmptyShell Empty) {
+ return new (Context) ClassTemplatePartialSpecializationDecl();
+}
+
+void ClassTemplatePartialSpecializationDecl::
+initTemplateArgsAsWritten(const TemplateArgumentListInfo &ArgInfos) {
+ assert(ArgsAsWritten == 0 && "ArgsAsWritten already set");
+ unsigned N = ArgInfos.size();
+ TemplateArgumentLoc *ClonedArgs
+ = new (getASTContext()) TemplateArgumentLoc[N];
+ for (unsigned I = 0; I != N; ++I)
+ ClonedArgs[I] = ArgInfos[I];
+
+ ArgsAsWritten = ClonedArgs;
+ NumArgsAsWritten = N;
+}
+
//===----------------------------------------------------------------------===//
// FriendTemplateDecl Implementation
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/AST/Expr.cpp b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
index c38cec3..6524a31 100644
--- a/contrib/llvm/tools/clang/lib/AST/Expr.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
@@ -37,7 +37,7 @@ bool Expr::isKnownToHaveBooleanValue() const {
// If this value has _Bool type, it is obvious 0/1.
if (getType()->isBooleanType()) return true;
// If this is a non-scalar-integer type, we don't care enough to try.
- if (!getType()->isIntegralType()) return false;
+ if (!getType()->isIntegralOrEnumerationType()) return false;
if (const ParenExpr *PE = dyn_cast<ParenExpr>(this))
return PE->getSubExpr()->isKnownToHaveBooleanValue();
@@ -52,7 +52,9 @@ bool Expr::isKnownToHaveBooleanValue() const {
}
}
- if (const CastExpr *CE = dyn_cast<CastExpr>(this))
+ // Only look through implicit casts. If the user writes
+ // '(int) (a && b)', treat it as an arbitrary int.
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(this))
return CE->getSubExpr()->isKnownToHaveBooleanValue();
if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(this)) {
@@ -111,10 +113,14 @@ void ExplicitTemplateArgumentList::copyInto(
Info.addArgument(getTemplateArgs()[I]);
}
+std::size_t ExplicitTemplateArgumentList::sizeFor(unsigned NumTemplateArgs) {
+ return sizeof(ExplicitTemplateArgumentList) +
+ sizeof(TemplateArgumentLoc) * NumTemplateArgs;
+}
+
std::size_t ExplicitTemplateArgumentList::sizeFor(
const TemplateArgumentListInfo &Info) {
- return sizeof(ExplicitTemplateArgumentList) +
- sizeof(TemplateArgumentLoc) * Info.size();
+ return sizeFor(Info.size());
}
void DeclRefExpr::computeDependence() {
@@ -158,7 +164,7 @@ void DeclRefExpr::computeDependence() {
// (VD) - a constant with integral or enumeration type and is
// initialized with an expression that is value-dependent.
else if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
- if (Var->getType()->isIntegralType() &&
+ if (Var->getType()->isIntegralOrEnumerationType() &&
Var->getType().getCVRQualifiers() == Qualifiers::Const) {
if (const Expr *Init = Var->getAnyInitializer())
if (Init->isValueDependent())
@@ -222,6 +228,19 @@ DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
TemplateArgs, T);
}
+DeclRefExpr *DeclRefExpr::CreateEmpty(ASTContext &Context, bool HasQualifier,
+ unsigned NumTemplateArgs) {
+ std::size_t Size = sizeof(DeclRefExpr);
+ if (HasQualifier)
+ Size += sizeof(NameQualifier);
+
+ if (NumTemplateArgs)
+ Size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
+
+ void *Mem = Context.Allocate(Size, llvm::alignof<DeclRefExpr>());
+ return new (Mem) DeclRefExpr(EmptyShell());
+}
+
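CreateEmpty is the deserialization-side counterpart of Create: it sizes the allocation up front for the node plus any trailing template-argument storage, then placement-news an EmptyShell object that the PCH reader fills in field by field. The same pattern recurs below for UnresolvedLookupExpr, DependentScopeDeclRefExpr, CXXUnresolvedConstructExpr, CXXDependentScopeMemberExpr, and UnresolvedMemberExpr. A reduced model of the allocation scheme (hypothetical names):

    #include <cstddef>
    #include <new>

    struct Loc { int Raw = 0; };

    struct RefNode {
      unsigned NumTemplateArgs = 0;

      // The trailing array lives in the same allocation, right after the node.
      Loc *templateArgs() { return reinterpret_cast<Loc *>(this + 1); }

      static RefNode *createEmpty(unsigned NumTemplateArgs) {
        std::size_t Size = sizeof(RefNode);
        if (NumTemplateArgs)
          Size += sizeof(Loc) * NumTemplateArgs; // trailing storage, one block
        void *Mem = ::operator new(Size);
        RefNode *N = new (Mem) RefNode();        // the "empty shell"
        N->NumTemplateArgs = NumTemplateArgs;    // reader fills the rest later
        return N;
      }
    };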
SourceRange DeclRefExpr::getSourceRange() const {
// FIXME: Does not handle multi-token names well, e.g., operator[].
SourceRange R(Loc);
@@ -557,7 +576,10 @@ QualType CallExpr::getCallReturnType() const {
CalleeType = FnTypePtr->getPointeeType();
else if (const BlockPointerType *BPT = CalleeType->getAs<BlockPointerType>())
CalleeType = BPT->getPointeeType();
-
+ else if (const MemberPointerType *MPT
+ = CalleeType->getAs<MemberPointerType>())
+ CalleeType = MPT->getPointeeType();
+
const FunctionType *FnType = CalleeType->getAs<FunctionType>();
return FnType->getResultType();
}
@@ -662,6 +684,8 @@ const char *CastExpr::getCastKindName() const {
return "Unknown";
case CastExpr::CK_BitCast:
return "BitCast";
+ case CastExpr::CK_LValueBitCast:
+ return "LValueBitCast";
case CastExpr::CK_NoOp:
return "NoOp";
case CastExpr::CK_BaseToDerived:
@@ -968,7 +992,8 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
switch (BO->getOpcode()) {
default:
break;
- // Consider ',', '||', '&&' to have side effects if the LHS or RHS does.
+ // Consider the RHS of comma for side effects. LHS was checked by
+ // Sema::CheckCommaOperands.
case BinaryOperator::Comma:
// ((foo = <blah>), 0) is an idiom for hiding the result (and
// lvalue-ness) of an assignment written in a macro.
@@ -976,10 +1001,14 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
dyn_cast<IntegerLiteral>(BO->getRHS()->IgnoreParens()))
if (IE->getValue() == 0)
return false;
+ return BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ // Consider '||', '&&' to have side effects if the LHS or RHS does.
case BinaryOperator::LAnd:
case BinaryOperator::LOr:
- return (BO->getLHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx) ||
- BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+ if (!BO->getLHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx) ||
+ !BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx))
+ return false;
+ break;
}
if (BO->isAssignmentOp())
return false;
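The net effect of the comma and logical-operator changes on unused-value warnings, as illustrative C++; the exact diagnostics depend on Sema's -Wunused-value handling, and the LHS of a comma is assumed to be diagnosed separately by Sema::CheckCommaOperands, per the comment above:

    int f() { return 1; }

    void demo(int x, int y) {
      (x, f());  // no longer warns here: only the RHS is checked, f() has effects
      (f(), x);  // warns on 'x': the comma result is the unused RHS
      x && f();  // no longer warns: one operand (the call) has side effects
      x && y;    // still warns: neither operand has side effects
    }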
@@ -1140,378 +1169,6 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
}
}
-/// DeclCanBeLvalue - Determine whether the given declaration can be
-/// an lvalue. This is a helper routine for isLvalue.
-static bool DeclCanBeLvalue(const NamedDecl *Decl, ASTContext &Ctx) {
- // C++ [temp.param]p6:
- // A non-type non-reference template-parameter is not an lvalue.
- if (const NonTypeTemplateParmDecl *NTTParm
- = dyn_cast<NonTypeTemplateParmDecl>(Decl))
- return NTTParm->getType()->isReferenceType();
-
- return isa<VarDecl>(Decl) || isa<FieldDecl>(Decl) ||
- // C++ 3.10p2: An lvalue refers to an object or function.
- (Ctx.getLangOptions().CPlusPlus &&
- (isa<FunctionDecl>(Decl) || isa<FunctionTemplateDecl>(Decl)));
-}
-
-/// isLvalue - C99 6.3.2.1: an lvalue is an expression with an object type or an
-/// incomplete type other than void. Nonarray expressions that can be lvalues:
-/// - name, where name must be a variable
-/// - e[i]
-/// - (e), where e must be an lvalue
-/// - e.name, where e must be an lvalue
-/// - e->name
-/// - *e, the type of e cannot be a function type
-/// - string-constant
-/// - (__real__ e) and (__imag__ e) where e is an lvalue [GNU extension]
-/// - reference type [C++ [expr]]
-///
-Expr::isLvalueResult Expr::isLvalue(ASTContext &Ctx) const {
- assert(!TR->isReferenceType() && "Expressions can't have reference type.");
-
- isLvalueResult Res = isLvalueInternal(Ctx);
- if (Res != LV_Valid || Ctx.getLangOptions().CPlusPlus)
- return Res;
-
- // first, check the type (C99 6.3.2.1). Expressions with function
- // type in C are not lvalues, but they can be lvalues in C++.
- if (TR->isFunctionType() || TR == Ctx.OverloadTy)
- return LV_NotObjectType;
-
- // Allow qualified void which is an incomplete type other than void (yuck).
- if (TR->isVoidType() && !Ctx.getCanonicalType(TR).hasQualifiers())
- return LV_IncompleteVoidType;
-
- return LV_Valid;
-}
-
-// Check whether the expression can be sanely treated like an l-value
-Expr::isLvalueResult Expr::isLvalueInternal(ASTContext &Ctx) const {
- switch (getStmtClass()) {
- case ObjCIsaExprClass:
- case StringLiteralClass: // C99 6.5.1p4
- case ObjCEncodeExprClass: // @encode behaves like its string in every way.
- return LV_Valid;
- case ArraySubscriptExprClass: // C99 6.5.3p4 (e1[e2] == (*((e1)+(e2))))
- // For vectors, make sure base is an lvalue (i.e. not a function call).
- if (cast<ArraySubscriptExpr>(this)->getBase()->getType()->isVectorType())
- return cast<ArraySubscriptExpr>(this)->getBase()->isLvalue(Ctx);
- return LV_Valid;
- case DeclRefExprClass: { // C99 6.5.1p2
- const NamedDecl *RefdDecl = cast<DeclRefExpr>(this)->getDecl();
- if (DeclCanBeLvalue(RefdDecl, Ctx))
- return LV_Valid;
- break;
- }
- case BlockDeclRefExprClass: {
- const BlockDeclRefExpr *BDR = cast<BlockDeclRefExpr>(this);
- if (isa<VarDecl>(BDR->getDecl()))
- return LV_Valid;
- break;
- }
- case MemberExprClass: {
- const MemberExpr *m = cast<MemberExpr>(this);
- if (Ctx.getLangOptions().CPlusPlus) { // C++ [expr.ref]p4:
- NamedDecl *Member = m->getMemberDecl();
- // C++ [expr.ref]p4:
- // If E2 is declared to have type "reference to T", then E1.E2
- // is an lvalue.
- if (ValueDecl *Value = dyn_cast<ValueDecl>(Member))
- if (Value->getType()->isReferenceType())
- return LV_Valid;
-
- // -- If E2 is a static data member [...] then E1.E2 is an lvalue.
- if (isa<VarDecl>(Member) && Member->getDeclContext()->isRecord())
- return LV_Valid;
-
- // -- If E2 is a non-static data member [...]. If E1 is an
- // lvalue, then E1.E2 is an lvalue.
- if (isa<FieldDecl>(Member)) {
- if (m->isArrow())
- return LV_Valid;
- return m->getBase()->isLvalue(Ctx);
- }
-
- // -- If it refers to a static member function [...], then
- // E1.E2 is an lvalue.
- // -- Otherwise, if E1.E2 refers to a non-static member
- // function [...], then E1.E2 is not an lvalue.
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member))
- return Method->isStatic()? LV_Valid : LV_MemberFunction;
-
- // -- If E2 is a member enumerator [...], the expression E1.E2
- // is not an lvalue.
- if (isa<EnumConstantDecl>(Member))
- return LV_InvalidExpression;
-
- // Not an lvalue.
- return LV_InvalidExpression;
- }
-
- // C99 6.5.2.3p4
- if (m->isArrow())
- return LV_Valid;
- Expr *BaseExp = m->getBase();
- if (BaseExp->getStmtClass() == ObjCPropertyRefExprClass ||
- BaseExp->getStmtClass() == ObjCImplicitSetterGetterRefExprClass)
- return LV_SubObjCPropertySetting;
- return
- BaseExp->isLvalue(Ctx);
- }
- case UnaryOperatorClass:
- if (cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Deref)
- return LV_Valid; // C99 6.5.3p4
-
- if (cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Real ||
- cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Imag ||
- cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Extension)
- return cast<UnaryOperator>(this)->getSubExpr()->isLvalue(Ctx); // GNU.
-
- if (Ctx.getLangOptions().CPlusPlus && // C++ [expr.pre.incr]p1
- (cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::PreInc ||
- cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::PreDec))
- return LV_Valid;
- break;
- case ImplicitCastExprClass:
- if (cast<ImplicitCastExpr>(this)->isLvalueCast())
- return LV_Valid;
-
- // If this is a conversion to a class temporary, make a note of
- // that.
- if (Ctx.getLangOptions().CPlusPlus && getType()->isRecordType())
- return LV_ClassTemporary;
-
- break;
- case ParenExprClass: // C99 6.5.1p5
- return cast<ParenExpr>(this)->getSubExpr()->isLvalue(Ctx);
- case BinaryOperatorClass:
- case CompoundAssignOperatorClass: {
- const BinaryOperator *BinOp = cast<BinaryOperator>(this);
-
- if (Ctx.getLangOptions().CPlusPlus && // C++ [expr.comma]p1
- BinOp->getOpcode() == BinaryOperator::Comma)
- return BinOp->getRHS()->isLvalue(Ctx);
-
- // C++ [expr.mptr.oper]p6
- // The result of a .* expression is an lvalue only if its first operand is
- // an lvalue and its second operand is a pointer to data member.
- if (BinOp->getOpcode() == BinaryOperator::PtrMemD &&
- !BinOp->getType()->isFunctionType())
- return BinOp->getLHS()->isLvalue(Ctx);
-
- // The result of an ->* expression is an lvalue only if its second operand
- // is a pointer to data member.
- if (BinOp->getOpcode() == BinaryOperator::PtrMemI &&
- !BinOp->getType()->isFunctionType()) {
- QualType Ty = BinOp->getRHS()->getType();
- if (Ty->isMemberPointerType() && !Ty->isMemberFunctionPointerType())
- return LV_Valid;
- }
-
- if (!BinOp->isAssignmentOp())
- return LV_InvalidExpression;
-
- if (Ctx.getLangOptions().CPlusPlus)
- // C++ [expr.ass]p1:
- // The result of an assignment operation [...] is an lvalue.
- return LV_Valid;
-
-
- // C99 6.5.16:
- // An assignment expression [...] is not an lvalue.
- return LV_InvalidExpression;
- }
- case CallExprClass:
- case CXXOperatorCallExprClass:
- case CXXMemberCallExprClass: {
- // C++0x [expr.call]p10
- // A function call is an lvalue if and only if the result type
- // is an lvalue reference.
- QualType ReturnType = cast<CallExpr>(this)->getCallReturnType();
- if (ReturnType->isLValueReferenceType())
- return LV_Valid;
-
- // If the function is returning a class temporary, make a note of
- // that.
- if (Ctx.getLangOptions().CPlusPlus && ReturnType->isRecordType())
- return LV_ClassTemporary;
-
- break;
- }
- case CompoundLiteralExprClass: // C99 6.5.2.5p5
- // FIXME: Is this what we want in C++?
- return LV_Valid;
- case ChooseExprClass:
- // __builtin_choose_expr is an lvalue if the selected operand is.
- return cast<ChooseExpr>(this)->getChosenSubExpr(Ctx)->isLvalue(Ctx);
- case ExtVectorElementExprClass:
- if (cast<ExtVectorElementExpr>(this)->containsDuplicateElements())
- return LV_DuplicateVectorComponents;
- return LV_Valid;
- case ObjCIvarRefExprClass: // ObjC instance variables are lvalues.
- return LV_Valid;
- case ObjCPropertyRefExprClass: // FIXME: check if read-only property.
- return LV_Valid;
- case ObjCImplicitSetterGetterRefExprClass:
- // FIXME: check if read-only property.
- return LV_Valid;
- case PredefinedExprClass:
- return LV_Valid;
- case UnresolvedLookupExprClass:
- case UnresolvedMemberExprClass:
- return LV_Valid;
- case CXXDefaultArgExprClass:
- return cast<CXXDefaultArgExpr>(this)->getExpr()->isLvalue(Ctx);
- case CStyleCastExprClass:
- case CXXFunctionalCastExprClass:
- case CXXStaticCastExprClass:
- case CXXDynamicCastExprClass:
- case CXXReinterpretCastExprClass:
- case CXXConstCastExprClass:
- // The result of an explicit cast is an lvalue if the type we are
- // casting to is an lvalue reference type. See C++ [expr.cast]p1,
- // C++ [expr.static.cast]p2, C++ [expr.dynamic.cast]p2,
- // C++ [expr.reinterpret.cast]p1, C++ [expr.const.cast]p1.
- if (cast<ExplicitCastExpr>(this)->getTypeAsWritten()->
- isLValueReferenceType())
- return LV_Valid;
-
- // If this is a conversion to a class temporary, make a note of
- // that.
- if (Ctx.getLangOptions().CPlusPlus &&
- cast<ExplicitCastExpr>(this)->getTypeAsWritten()->isRecordType())
- return LV_ClassTemporary;
-
- break;
- case CXXTypeidExprClass:
- // C++ 5.2.8p1: The result of a typeid expression is an lvalue of ...
- return LV_Valid;
- case CXXBindTemporaryExprClass:
- return cast<CXXBindTemporaryExpr>(this)->getSubExpr()->
- isLvalueInternal(Ctx);
- case CXXBindReferenceExprClass:
- // Something that's bound to a reference is always an lvalue.
- return LV_Valid;
- case ConditionalOperatorClass: {
- // Complicated handling is only for C++.
- if (!Ctx.getLangOptions().CPlusPlus)
- return LV_InvalidExpression;
-
- // Sema should have taken care to ensure that a CXXTemporaryObjectExpr is
- // everywhere there's an object converted to an rvalue. Also, any other
- // casts should be wrapped by ImplicitCastExprs. There's just the special
- // case involving throws to work out.
- const ConditionalOperator *Cond = cast<ConditionalOperator>(this);
- Expr *True = Cond->getTrueExpr();
- Expr *False = Cond->getFalseExpr();
- // C++0x 5.16p2
- // If either the second or the third operand has type (cv) void, [...]
- // the result [...] is an rvalue.
- if (True->getType()->isVoidType() || False->getType()->isVoidType())
- return LV_InvalidExpression;
-
- // Both sides must be lvalues for the result to be an lvalue.
- if (True->isLvalue(Ctx) != LV_Valid || False->isLvalue(Ctx) != LV_Valid)
- return LV_InvalidExpression;
-
- // That's it.
- return LV_Valid;
- }
-
- case Expr::CXXExprWithTemporariesClass:
- return cast<CXXExprWithTemporaries>(this)->getSubExpr()->isLvalue(Ctx);
-
- case Expr::ObjCMessageExprClass:
- if (const ObjCMethodDecl *Method
- = cast<ObjCMessageExpr>(this)->getMethodDecl())
- if (Method->getResultType()->isLValueReferenceType())
- return LV_Valid;
- break;
-
- case Expr::CXXConstructExprClass:
- case Expr::CXXTemporaryObjectExprClass:
- case Expr::CXXZeroInitValueExprClass:
- return LV_ClassTemporary;
-
- default:
- break;
- }
- return LV_InvalidExpression;
-}
-
-/// isModifiableLvalue - C99 6.3.2.1: an lvalue that does not have array type,
-/// does not have an incomplete type, does not have a const-qualified type, and
-/// if it is a structure or union, does not have any member (including,
-/// recursively, any member or element of all contained aggregates or unions)
-/// with a const-qualified type.
-Expr::isModifiableLvalueResult
-Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
- isLvalueResult lvalResult = isLvalue(Ctx);
-
- switch (lvalResult) {
- case LV_Valid:
- // C++ 3.10p11: Functions cannot be modified, but pointers to
- // functions can be modifiable.
- if (Ctx.getLangOptions().CPlusPlus && TR->isFunctionType())
- return MLV_NotObjectType;
- break;
-
- case LV_NotObjectType: return MLV_NotObjectType;
- case LV_IncompleteVoidType: return MLV_IncompleteVoidType;
- case LV_DuplicateVectorComponents: return MLV_DuplicateVectorComponents;
- case LV_InvalidExpression:
- // If the top level is a C-style cast, and the subexpression is a valid
- // lvalue, then this is probably a use of the old-school "cast as lvalue"
- // GCC extension. We don't support it, but we want to produce good
- // diagnostics when it happens so that the user knows why.
- if (const CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(IgnoreParens())) {
- if (CE->getSubExpr()->isLvalue(Ctx) == LV_Valid) {
- if (Loc)
- *Loc = CE->getLParenLoc();
- return MLV_LValueCast;
- }
- }
- return MLV_InvalidExpression;
- case LV_MemberFunction: return MLV_MemberFunction;
- case LV_SubObjCPropertySetting: return MLV_SubObjCPropertySetting;
- case LV_ClassTemporary:
- return MLV_ClassTemporary;
- }
-
- // The following is illegal:
- // void takeclosure(void (^C)(void));
- // void func() { int x = 1; takeclosure(^{ x = 7; }); }
- //
- if (const BlockDeclRefExpr *BDR = dyn_cast<BlockDeclRefExpr>(this)) {
- if (!BDR->isByRef() && isa<VarDecl>(BDR->getDecl()))
- return MLV_NotBlockQualified;
- }
-
- // Assigning to an 'implicit' property?
- if (const ObjCImplicitSetterGetterRefExpr* Expr =
- dyn_cast<ObjCImplicitSetterGetterRefExpr>(this)) {
- if (Expr->getSetterMethod() == 0)
- return MLV_NoSetterProperty;
- }
-
- QualType CT = Ctx.getCanonicalType(getType());
-
- if (CT.isConstQualified())
- return MLV_ConstQualified;
- if (CT->isArrayType())
- return MLV_ArrayType;
- if (CT->isIncompleteType())
- return MLV_IncompleteType;
-
- if (const RecordType *r = CT->getAs<RecordType>()) {
- if (r->hasConstFields())
- return MLV_ConstQualified;
- }
-
- return MLV_Valid;
-}
-
/// isOBJCGCCandidate - Check if an expression is objc gc'able.
/// returns true, if it is; false otherwise.
bool Expr::isOBJCGCCandidate(ASTContext &Ctx) const {
@@ -1596,7 +1253,7 @@ Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
if (CastExpr *P = dyn_cast<CastExpr>(E)) {
// We ignore integer <-> casts that are of the same width, ptr<->ptr and
- // ptr<->int casts of the same width. We also ignore all identify casts.
+ // ptr<->int casts of the same width. We also ignore all identity casts.
Expr *SE = P->getSubExpr();
if (Ctx.hasSameUnqualifiedType(E->getType(), SE->getType())) {
@@ -1604,8 +1261,10 @@ Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
continue;
}
- if ((E->getType()->isPointerType() || E->getType()->isIntegralType()) &&
- (SE->getType()->isPointerType() || SE->getType()->isIntegralType()) &&
+ if ((E->getType()->isPointerType() ||
+ E->getType()->isIntegralType(Ctx)) &&
+ (SE->getType()->isPointerType() ||
+ SE->getType()->isIntegralType(Ctx)) &&
Ctx.getTypeSize(E->getType()) == Ctx.getTypeSize(SE->getType())) {
E = SE;
continue;
@@ -1795,7 +1454,7 @@ bool Expr::isNullPointerConstant(ASTContext &Ctx,
// If the unthinkable happens, fall through to the safest alternative.
case NPC_ValueDependentIsNull:
- return isTypeDependent() || getType()->isIntegralType();
+ return isTypeDependent() || getType()->isIntegralType(Ctx);
case NPC_ValueDependentIsNotNull:
return false;
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
index d1a2b26..c2548ec 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
@@ -74,27 +74,27 @@ Stmt::child_iterator CXXDefaultArgExpr::child_end() {
return child_iterator();
}
-// CXXZeroInitValueExpr
-Stmt::child_iterator CXXZeroInitValueExpr::child_begin() {
+// CXXScalarValueInitExpr
+Stmt::child_iterator CXXScalarValueInitExpr::child_begin() {
return child_iterator();
}
-Stmt::child_iterator CXXZeroInitValueExpr::child_end() {
+Stmt::child_iterator CXXScalarValueInitExpr::child_end() {
return child_iterator();
}
// CXXNewExpr
CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
Expr **placementArgs, unsigned numPlaceArgs,
- bool parenTypeId, Expr *arraySize,
+ SourceRange TypeIdParens, Expr *arraySize,
CXXConstructorDecl *constructor, bool initializer,
Expr **constructorArgs, unsigned numConsArgs,
FunctionDecl *operatorDelete, QualType ty,
SourceLocation startLoc, SourceLocation endLoc)
: Expr(CXXNewExprClass, ty, ty->isDependentType(), ty->isDependentType()),
- GlobalNew(globalNew), ParenTypeId(parenTypeId),
+ GlobalNew(globalNew),
Initializer(initializer), SubExprs(0), OperatorNew(operatorNew),
OperatorDelete(operatorDelete), Constructor(constructor),
- StartLoc(startLoc), EndLoc(endLoc) {
+ TypeIdParens(TypeIdParens), StartLoc(startLoc), EndLoc(endLoc) {
AllocateArgsArray(C, arraySize != 0, numPlaceArgs, numConsArgs);
unsigned i = 0;
@@ -190,6 +190,18 @@ UnresolvedLookupExpr::Create(ASTContext &C, bool Dependent,
return ULE;
}
+UnresolvedLookupExpr *
+UnresolvedLookupExpr::CreateEmpty(ASTContext &C, unsigned NumTemplateArgs) {
+ std::size_t size = sizeof(UnresolvedLookupExpr);
+ if (NumTemplateArgs != 0)
+ size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
+
+ void *Mem = C.Allocate(size, llvm::alignof<UnresolvedLookupExpr>());
+ UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
+ E->HasExplicitTemplateArgs = NumTemplateArgs != 0;
+ return E;
+}
+
OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C, QualType T,
bool Dependent, NestedNameSpecifier *Qualifier,
SourceRange QRange, DeclarationName Name,
@@ -197,19 +209,28 @@ OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C, QualType T,
UnresolvedSetIterator Begin,
UnresolvedSetIterator End)
: Expr(K, T, Dependent, Dependent),
- Results(0), NumResults(End - Begin), Name(Name), Qualifier(Qualifier),
+ Results(0), NumResults(0), Name(Name), Qualifier(Qualifier),
QualifierRange(QRange), NameLoc(NameLoc),
HasExplicitTemplateArgs(HasTemplateArgs)
{
+ initializeResults(C, Begin, End);
+}
+
+void OverloadExpr::initializeResults(ASTContext &C,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
+ assert(Results == 0 && "Results already initialized!");
+ NumResults = End - Begin;
if (NumResults) {
Results = static_cast<DeclAccessPair *>(
C.Allocate(sizeof(DeclAccessPair) * NumResults,
llvm::alignof<DeclAccessPair>()));
memcpy(Results, &*Begin.getIterator(),
- (End - Begin) * sizeof(DeclAccessPair));
+ NumResults * sizeof(DeclAccessPair));
}
}
+
bool OverloadExpr::ComputeDependence(UnresolvedSetIterator Begin,
UnresolvedSetIterator End,
const TemplateArgumentListInfo *Args) {
@@ -269,6 +290,19 @@ DependentScopeDeclRefExpr::Create(ASTContext &C,
return DRE;
}
+DependentScopeDeclRefExpr *
+DependentScopeDeclRefExpr::CreateEmpty(ASTContext &C,
+ unsigned NumTemplateArgs) {
+ std::size_t size = sizeof(DependentScopeDeclRefExpr);
+ if (NumTemplateArgs)
+ size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
+ void *Mem = C.Allocate(size);
+
+ return new (Mem) DependentScopeDeclRefExpr(QualType(), 0, SourceRange(),
+ DeclarationName(), SourceLocation(),
+ NumTemplateArgs != 0);
+}
+
StmtIterator DependentScopeDeclRefExpr::child_begin() {
return child_iterator();
}
@@ -535,14 +569,6 @@ CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
}
}
-CXXConstructExpr::CXXConstructExpr(EmptyShell Empty, ASTContext &C,
- unsigned numargs)
- : Expr(CXXConstructExprClass, Empty), Args(0), NumArgs(numargs)
-{
- if (NumArgs)
- Args = new (C) Stmt*[NumArgs];
-}
-
void CXXConstructExpr::DoDestroy(ASTContext &C) {
DestroyChildren(C);
if (Args)
@@ -656,6 +682,14 @@ CXXUnresolvedConstructExpr::Create(ASTContext &C,
Args, NumArgs, RParenLoc);
}
+CXXUnresolvedConstructExpr *
+CXXUnresolvedConstructExpr::CreateEmpty(ASTContext &C, unsigned NumArgs) {
+ Stmt::EmptyShell Empty;
+ void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
+ sizeof(Expr *) * NumArgs);
+ return new (Mem) CXXUnresolvedConstructExpr(Empty, NumArgs);
+}
+
Stmt::child_iterator CXXUnresolvedConstructExpr::child_begin() {
return child_iterator(reinterpret_cast<Stmt **>(this + 1));
}
@@ -714,6 +748,29 @@ CXXDependentScopeMemberExpr::Create(ASTContext &C,
Member, MemberLoc, TemplateArgs);
}
+CXXDependentScopeMemberExpr *
+CXXDependentScopeMemberExpr::CreateEmpty(ASTContext &C,
+ unsigned NumTemplateArgs) {
+ if (NumTemplateArgs == 0)
+ return new (C) CXXDependentScopeMemberExpr(C, 0, QualType(),
+ 0, SourceLocation(), 0,
+ SourceRange(), 0,
+ DeclarationName(),
+ SourceLocation());
+
+ std::size_t size = sizeof(CXXDependentScopeMemberExpr) +
+ ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
+ void *Mem = C.Allocate(size, llvm::alignof<CXXDependentScopeMemberExpr>());
+ CXXDependentScopeMemberExpr *E
+ = new (Mem) CXXDependentScopeMemberExpr(C, 0, QualType(),
+ 0, SourceLocation(), 0,
+ SourceRange(), 0,
+ DeclarationName(),
+ SourceLocation(), 0);
+ E->HasExplicitTemplateArgs = true;
+ return E;
+}
+
Stmt::child_iterator CXXDependentScopeMemberExpr::child_begin() {
return child_iterator(&Base);
}
@@ -770,6 +827,18 @@ UnresolvedMemberExpr::Create(ASTContext &C, bool Dependent,
Member, MemberLoc, TemplateArgs, Begin, End);
}
+UnresolvedMemberExpr *
+UnresolvedMemberExpr::CreateEmpty(ASTContext &C, unsigned NumTemplateArgs) {
+ std::size_t size = sizeof(UnresolvedMemberExpr);
+ if (NumTemplateArgs != 0)
+ size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
+
+ void *Mem = C.Allocate(size, llvm::alignof<UnresolvedMemberExpr>());
+ UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
+ E->HasExplicitTemplateArgs = NumTemplateArgs != 0;
+ return E;
+}
+
CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
// Unlike for UnresolvedLookupExpr, it is very easy to re-derive this.
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
new file mode 100644
index 0000000..60ac347
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
@@ -0,0 +1,471 @@
+//===--- ExprClassification.cpp - Expression AST Node Implementation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Expr::classify.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ErrorHandling.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+using namespace clang;
+
+typedef Expr::Classification Cl;
+
+static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E);
+static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D);
+static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T);
+static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E);
+static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E);
+static Cl::Kinds ClassifyConditional(ASTContext &Ctx,
+ const ConditionalOperator *E);
+static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
+ Cl::Kinds Kind, SourceLocation &Loc);
+
+Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
+ assert(!TR->isReferenceType() && "Expressions can't have reference type.");
+
+ Cl::Kinds kind = ClassifyInternal(Ctx, this);
+ // C99 6.3.2.1: An lvalue is an expression with an object type or an
+ // incomplete type other than void.
+ if (!Ctx.getLangOptions().CPlusPlus) {
+ // Thus, no functions.
+ if (TR->isFunctionType() || TR == Ctx.OverloadTy)
+ kind = Cl::CL_Function;
+ // No void either, but qualified void is OK because it is "other than void".
+ else if (TR->isVoidType() && !Ctx.getCanonicalType(TR).hasQualifiers())
+ kind = Cl::CL_Void;
+ }
+
+ Cl::ModifiableType modifiable = Cl::CM_Untested;
+ if (Loc)
+ modifiable = IsModifiable(Ctx, this, kind, *Loc);
+ return Classification(kind, modifiable);
+}
+
+static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
+ // This function takes the first stab at classifying expressions.
+ const LangOptions &Lang = Ctx.getLangOptions();
+
+ switch (E->getStmtClass()) {
+ // First come the expressions that are always lvalues, unconditionally.
+
+ case Expr::ObjCIsaExprClass:
+ // C++ [expr.prim.general]p1: A string literal is an lvalue.
+ case Expr::StringLiteralClass:
+ // @encode is equivalent to its string
+ case Expr::ObjCEncodeExprClass:
+ // __func__ and friends are too.
+ case Expr::PredefinedExprClass:
+ // Property references are lvalues
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCImplicitSetterGetterRefExprClass:
+ // C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of...
+ case Expr::CXXTypeidExprClass:
+ // Unresolved lookups get classified as lvalues.
+ // FIXME: Is this wise? Should they get their own kind?
+ case Expr::UnresolvedLookupExprClass:
+ case Expr::UnresolvedMemberExprClass:
+ // ObjC instance variables are lvalues
+ // FIXME: ObjC++0x might have different rules
+ case Expr::ObjCIvarRefExprClass:
+ // C99 6.5.2.5p5 says that compound literals are lvalues.
+ // FIXME: C++ might have a different opinion.
+ case Expr::CompoundLiteralExprClass:
+ return Cl::CL_LValue;
+
+ // Next come the complicated cases.
+
+ // C++ [expr.sub]p1: The result is an lvalue of type "T".
+ // However, subscripting vector types is more like member access.
+ case Expr::ArraySubscriptExprClass:
+ if (cast<ArraySubscriptExpr>(E)->getBase()->getType()->isVectorType())
+ return ClassifyInternal(Ctx, cast<ArraySubscriptExpr>(E)->getBase());
+ return Cl::CL_LValue;
+
+ // C++ [expr.prim.general]p3: The result is an lvalue if the entity is a
+ // function or variable and a prvalue otherwise.
+ case Expr::DeclRefExprClass:
+ return ClassifyDecl(Ctx, cast<DeclRefExpr>(E)->getDecl());
+ // We deal with names referenced from blocks the same way.
+ case Expr::BlockDeclRefExprClass:
+ return ClassifyDecl(Ctx, cast<BlockDeclRefExpr>(E)->getDecl());
+
+ // Member access is complex.
+ case Expr::MemberExprClass:
+ return ClassifyMemberExpr(Ctx, cast<MemberExpr>(E));
+
+ case Expr::UnaryOperatorClass:
+ switch (cast<UnaryOperator>(E)->getOpcode()) {
+ // C++ [expr.unary.op]p1: The unary * operator performs indirection:
+ // [...] the result is an lvalue referring to the object or function
+ // to which the expression points.
+ case UnaryOperator::Deref:
+ return Cl::CL_LValue;
+
+ // GNU extensions, simply look through them.
+ case UnaryOperator::Real:
+ case UnaryOperator::Imag:
+ case UnaryOperator::Extension:
+ return ClassifyInternal(Ctx, cast<UnaryOperator>(E)->getSubExpr());
+
+ // C++ [expr.pre.incr]p1: The result is the updated operand; it is an
+ // lvalue, [...]
+ // Not so in C.
+ case UnaryOperator::PreInc:
+ case UnaryOperator::PreDec:
+ return Lang.CPlusPlus ? Cl::CL_LValue : Cl::CL_PRValue;
+
+ default:
+ return Cl::CL_PRValue;
+ }
+
+ // Implicit casts are lvalues if they're lvalue casts. Other than that, we
+ // only specifically record class temporaries.
+ case Expr::ImplicitCastExprClass:
+ if (cast<ImplicitCastExpr>(E)->isLvalueCast())
+ return Cl::CL_LValue;
+ return Lang.CPlusPlus && E->getType()->isRecordType() ?
+ Cl::CL_ClassTemporary : Cl::CL_PRValue;
+
+ // C++ [expr.prim.general]p4: The presence of parentheses does not affect
+ // whether the expression is an lvalue.
+ case Expr::ParenExprClass:
+ return ClassifyInternal(Ctx, cast<ParenExpr>(E)->getSubExpr());
+
+ case Expr::BinaryOperatorClass:
+ case Expr::CompoundAssignOperatorClass:
+ // C doesn't have any binary expressions that are lvalues.
+ if (Lang.CPlusPlus)
+ return ClassifyBinaryOp(Ctx, cast<BinaryOperator>(E));
+ return Cl::CL_PRValue;
+
+ case Expr::CallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ case Expr::CXXMemberCallExprClass:
+ return ClassifyUnnamed(Ctx, cast<CallExpr>(E)->getCallReturnType());
+
+ // __builtin_choose_expr is equivalent to the chosen expression.
+ case Expr::ChooseExprClass:
+ return ClassifyInternal(Ctx, cast<ChooseExpr>(E)->getChosenSubExpr(Ctx));
+
+ // Extended vector element access is an lvalue unless there are duplicates
+ // in the shuffle expression.
+ case Expr::ExtVectorElementExprClass:
+ return cast<ExtVectorElementExpr>(E)->containsDuplicateElements() ?
+ Cl::CL_DuplicateVectorComponents : Cl::CL_LValue;
+
+ // Simply look at the actual default argument.
+ case Expr::CXXDefaultArgExprClass:
+ return ClassifyInternal(Ctx, cast<CXXDefaultArgExpr>(E)->getExpr());
+
+ // Same idea for temporary binding.
+ case Expr::CXXBindTemporaryExprClass:
+ return ClassifyInternal(Ctx, cast<CXXBindTemporaryExpr>(E)->getSubExpr());
+
+ // And the temporary lifetime guard.
+ case Expr::CXXExprWithTemporariesClass:
+ return ClassifyInternal(Ctx, cast<CXXExprWithTemporaries>(E)->getSubExpr());
+
+ // Casts depend completely on the target type. All casts work the same.
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ // Only in C++ can casts be interesting at all.
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten());
+
+ case Expr::ConditionalOperatorClass:
+ // Once again, only C++ is interesting.
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ return ClassifyConditional(Ctx, cast<ConditionalOperator>(E));
+
+ // ObjC message sends are effectively function calls, if the target function
+ // is known.
+ case Expr::ObjCMessageExprClass:
+ if (const ObjCMethodDecl *Method =
+ cast<ObjCMessageExpr>(E)->getMethodDecl()) {
+ return ClassifyUnnamed(Ctx, Method->getResultType());
+ }
+
+ // Some C++ expressions are always class temporaries.
+ case Expr::CXXConstructExprClass:
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXScalarValueInitExprClass:
+ return Cl::CL_ClassTemporary;
+
+ // Everything we haven't handled is a prvalue.
+ default:
+ return Cl::CL_PRValue;
+ }
+}
+
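Concretely, with hypothetical declarations, the switch above yields classifications like these in C++ mode:

    int  f();
    int &g();
    int  a[4], i = 0;
    // "abc"    -> CL_LValue   (string literals are lvalues)
    // a[i]     -> CL_LValue   ([expr.sub], non-vector base)
    // f()      -> CL_PRValue  (via ClassifyUnnamed)
    // g()      -> CL_LValue   (via ClassifyUnnamed)
    // ++i      -> CL_LValue in C++, CL_PRValue in C
    // (0, a)   -> classified like the right operand, so CL_LValue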
+/// ClassifyDecl - Return the classification of an expression referencing the
+/// given declaration.
+static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
+ // C++ [expr.prim.general]p6: The result is an lvalue if the entity is a
+ // function, variable, or data member and a prvalue otherwise.
+ // In C, functions are not lvalues.
+ // In addition, NonTypeTemplateParmDecl derives from VarDecl but isn't an
+ // lvalue unless it's a reference type (C++ [temp.param]p6), so we need to
+ // special-case this.
+ bool islvalue;
+ if (const NonTypeTemplateParmDecl *NTTParm =
+ dyn_cast<NonTypeTemplateParmDecl>(D))
+ islvalue = NTTParm->getType()->isReferenceType();
+ else
+ islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
+ (Ctx.getLangOptions().CPlusPlus &&
+ (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)));
+
+ return islvalue ? Cl::CL_LValue : Cl::CL_PRValue;
+}
+
+/// ClassifyUnnamed - Return the classification of an expression yielding an
+/// unnamed value of the given type. This applies in particular to function
+/// calls and casts.
+static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T) {
+ // In C, function calls are always rvalues.
+ if (!Ctx.getLangOptions().CPlusPlus) return Cl::CL_PRValue;
+
+ // C++ [expr.call]p10: A function call is an lvalue if the result type is an
+ // lvalue reference type or an rvalue reference to function type, an xvalue
+ // if the result type is an rvalue reference to object type, and a prvalue
+ // otherwise.
+ if (T->isLValueReferenceType())
+ return Cl::CL_LValue;
+ const RValueReferenceType *RV = T->getAs<RValueReferenceType>();
+ if (!RV) // Could still be a class temporary, though.
+ return T->isRecordType() ? Cl::CL_ClassTemporary : Cl::CL_PRValue;
+
+ return RV->getPointeeType()->isFunctionType() ? Cl::CL_LValue : Cl::CL_XValue;
+}
+
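The [expr.call]p10 cases, spelled out with hypothetical declarations:

    int   f();   // f() -> CL_PRValue
    int  &g();   // g() -> CL_LValue         (lvalue reference result)
    int &&h();   // h() -> CL_XValue         (rvalue reference to object)
    struct S { };
    S     t();   // t() -> CL_ClassTemporary (record type, no reference)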
+static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
+ // Handle C first, it's easier.
+ if (!Ctx.getLangOptions().CPlusPlus) {
+ // C99 6.5.2.3p3
+ // For dot access, the expression is an lvalue if the first part is. For
+ // arrow access, it always is an lvalue.
+ if (E->isArrow())
+ return Cl::CL_LValue;
+ // ObjC property accesses are not lvalues, but get special treatment.
+ Expr *Base = E->getBase();
+ if (isa<ObjCPropertyRefExpr>(Base) ||
+ isa<ObjCImplicitSetterGetterRefExpr>(Base))
+ return Cl::CL_SubObjCPropertySetting;
+ return ClassifyInternal(Ctx, Base);
+ }
+
+ NamedDecl *Member = E->getMemberDecl();
+ // C++ [expr.ref]p3: E1->E2 is converted to the equivalent form (*(E1)).E2.
+ // C++ [expr.ref]p4: If E2 is declared to have type "reference to T", then
+ // E1.E2 is an lvalue.
+ if (ValueDecl *Value = dyn_cast<ValueDecl>(Member))
+ if (Value->getType()->isReferenceType())
+ return Cl::CL_LValue;
+
+ // Otherwise, one of the following rules applies.
+ // -- If E2 is a static member [...] then E1.E2 is an lvalue.
+ if (isa<VarDecl>(Member) && Member->getDeclContext()->isRecord())
+ return Cl::CL_LValue;
+
+ // -- If E2 is a non-static data member [...]. If E1 is an lvalue, then
+ // E1.E2 is an lvalue; if E1 is an xvalue, then E1.E2 is an xvalue;
+ // otherwise, it is a prvalue.
+ if (isa<FieldDecl>(Member)) {
+ // *E1 is an lvalue
+ if (E->isArrow())
+ return Cl::CL_LValue;
+ return ClassifyInternal(Ctx, E->getBase());
+ }
+
+ // -- If E2 is a [...] member function, [...]
+ // -- If it refers to a static member function [...], then E1.E2 is an
+ // lvalue; [...]
+ // -- Otherwise [...] E1.E2 is a prvalue.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member))
+ return Method->isStatic() ? Cl::CL_LValue : Cl::CL_MemberFunction;
+
+ // -- If E2 is a member enumerator [...], the expression E1.E2 is a prvalue.
+ // So is everything else we haven't handled yet.
+ return Cl::CL_PRValue;
+}
+
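A few member accesses and where the rules above send them, again with hypothetical declarations:

    struct T {
      static int sm;
      int m;
      void mf();
    };
    // Given T t and T *p:
    //   p->m   -> CL_LValue          (arrow access: *p is an lvalue)
    //   t.m    -> classified like t, here CL_LValue
    //   t.sm   -> CL_LValue          (static data member)
    //   t.mf   -> CL_MemberFunction  (only meaningful as a callee)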
+static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) {
+ assert(Ctx.getLangOptions().CPlusPlus &&
+ "This is only relevant for C++.");
+ // C++ [expr.ass]p1: All [...] return an lvalue referring to the left operand.
+ if (E->isAssignmentOp())
+ return Cl::CL_LValue;
+
+ // C++ [expr.comma]p1: the result is of the same value category as its right
+ // operand, [...].
+ if (E->getOpcode() == BinaryOperator::Comma)
+ return ClassifyInternal(Ctx, E->getRHS());
+
+ // C++ [expr.mptr.oper]p6: The result of a .* expression whose second operand
+ // is a pointer to a data member is of the same value category as its first
+ // operand.
+ if (E->getOpcode() == BinaryOperator::PtrMemD)
+ return E->getType()->isFunctionType() ? Cl::CL_MemberFunction :
+ ClassifyInternal(Ctx, E->getLHS());
+
+ // C++ [expr.mptr.oper]p6: The result of an ->* expression is an lvalue if its
+ // second operand is a pointer to data member and a prvalue otherwise.
+ if (E->getOpcode() == BinaryOperator::PtrMemI)
+ return E->getType()->isFunctionType() ?
+ Cl::CL_MemberFunction : Cl::CL_LValue;
+
+ // All other binary operations are prvalues.
+ return Cl::CL_PRValue;
+}
+
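In C++, the interesting binary operators classify as follows (hypothetical operands):

    struct U { int d; };
    // Given U u, U *p, int U::*pd = &U::d, and int x:
    //   x = 0    -> CL_LValue  (assignment returns the left operand)
    //   (0, u)   -> classified like the right operand: CL_LValue
    //   u.*pd    -> same category as u, here CL_LValue
    //   p->*pd   -> CL_LValue  (data member through ->*)
    // With a pointer to member *function*, both .* and ->* classify as
    // CL_MemberFunction, since the result has function type.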
+static Cl::Kinds ClassifyConditional(ASTContext &Ctx,
+ const ConditionalOperator *E) {
+ assert(Ctx.getLangOptions().CPlusPlus &&
+ "This is only relevant for C++.");
+
+ Expr *True = E->getTrueExpr();
+ Expr *False = E->getFalseExpr();
+ // C++ [expr.cond]p2
+ // If either the second or the third operand has type (cv) void, [...]
+ // the result [...] is a prvalue.
+ if (True->getType()->isVoidType() || False->getType()->isVoidType())
+ return Cl::CL_PRValue;
+
+ // Note that at this point, we have already performed all conversions
+ // according to [expr.cond]p3.
+ // C++ [expr.cond]p4: If the second and third operands are glvalues of the
+ // same value category [...], the result is of that [...] value category.
+ // C++ [expr.cond]p5: Otherwise, the result is a prvalue.
+ Cl::Kinds LCl = ClassifyInternal(Ctx, True),
+ RCl = ClassifyInternal(Ctx, False);
+ return LCl == RCl ? LCl : Cl::CL_PRValue;
+}
+
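With hypothetical operands, and after the [expr.cond]p3 conversions have already run:

    bool b;
    int x, y;
    //   b ? x : y              -> both arms are lvalues: CL_LValue
    //   b ? x : 0              -> mixed categories:      CL_PRValue
    //   b ? (void)0 : (void)1  -> a void operand forces  CL_PRValue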
+static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
+ Cl::Kinds Kind, SourceLocation &Loc) {
+ // As a general rule, we only care about lvalues. But there are some rvalues
+ // for which we want to generate special results.
+ if (Kind == Cl::CL_PRValue) {
+ // For the sake of better diagnostics, we want to specifically recognize
+ // use of the GCC cast-as-lvalue extension.
+ if (const CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(E->IgnoreParens())){
+ if (CE->getSubExpr()->Classify(Ctx).isLValue()) {
+ Loc = CE->getLParenLoc();
+ return Cl::CM_LValueCast;
+ }
+ }
+ }
+ if (Kind != Cl::CL_LValue)
+ return Cl::CM_RValue;
+
+ // This is the lvalue case.
+ // Functions are lvalues in C++, but not modifiable. (C++ [basic.lval]p6)
+ if (Ctx.getLangOptions().CPlusPlus && E->getType()->isFunctionType())
+ return Cl::CM_Function;
+
+ // You cannot assign to a variable outside a block from within the block if
+ // it is not marked __block, e.g.
+ // void takeclosure(void (^C)(void));
+ // void func() { int x = 1; takeclosure(^{ x = 7; }); }
+ if (const BlockDeclRefExpr *BDR = dyn_cast<BlockDeclRefExpr>(E)) {
+ if (!BDR->isByRef() && isa<VarDecl>(BDR->getDecl()))
+ return Cl::CM_NotBlockQualified;
+ }
+
+ // Assignment to a property in ObjC is an implicit setter access. But a
+ // setter might not exist.
+ if (const ObjCImplicitSetterGetterRefExpr *Expr =
+ dyn_cast<ObjCImplicitSetterGetterRefExpr>(E)) {
+ if (Expr->getSetterMethod() == 0)
+ return Cl::CM_NoSetterProperty;
+ }
+
+ CanQualType CT = Ctx.getCanonicalType(E->getType());
+ // Const stuff is obviously not modifiable.
+ if (CT.isConstQualified())
+ return Cl::CM_ConstQualified;
+ // Arrays are not modifiable, only their elements are.
+ if (CT->isArrayType())
+ return Cl::CM_ArrayType;
+ // Incomplete types are not modifiable.
+ if (CT->isIncompleteType())
+ return Cl::CM_IncompleteType;
+
+ // Records with any const fields (recursively) are not modifiable.
+ if (const RecordType *R = CT->getAs<RecordType>()) {
+ assert(!Ctx.getLangOptions().CPlusPlus &&
+ "C++ struct assignment should be resolved by the "
+ "copy assignment operator.");
+ if (R->hasConstFields())
+ return Cl::CM_ConstQualified;
+ }
+
+ return Cl::CM_Modifiable;
+}
+
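Some lvalues the tail of this function rejects, as hypothetical file-scope declarations:

    struct Fwd;                // incomplete type
    const int ci = 0;          // lvalue, but CM_ConstQualified
    int arr[4];                // lvalue, but CM_ArrayType
    extern Fwd fw;             // lvalue, but CM_IncompleteType
    void fn();                 // lvalue in C++, but CM_Function
    int ok;                    // lvalue, CM_Modifiable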
+Expr::isLvalueResult Expr::isLvalue(ASTContext &Ctx) const {
+ Classification VC = Classify(Ctx);
+ switch (VC.getKind()) {
+ case Cl::CL_LValue: return LV_Valid;
+ case Cl::CL_XValue: return LV_InvalidExpression;
+ case Cl::CL_Function: return LV_NotObjectType;
+ case Cl::CL_Void: return LV_IncompleteVoidType;
+ case Cl::CL_DuplicateVectorComponents: return LV_DuplicateVectorComponents;
+ case Cl::CL_MemberFunction: return LV_MemberFunction;
+ case Cl::CL_SubObjCPropertySetting: return LV_SubObjCPropertySetting;
+ case Cl::CL_ClassTemporary: return LV_ClassTemporary;
+ case Cl::CL_PRValue: return LV_InvalidExpression;
+ }
+ llvm_unreachable("Unhandled kind");
+}
+
+Expr::isModifiableLvalueResult
+Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
+ SourceLocation dummy;
+ Classification VC = ClassifyModifiable(Ctx, Loc ? *Loc : dummy);
+ switch (VC.getKind()) {
+ case Cl::CL_LValue: break;
+ case Cl::CL_XValue: return MLV_InvalidExpression;
+ case Cl::CL_Function: return MLV_NotObjectType;
+ case Cl::CL_Void: return MLV_IncompleteVoidType;
+ case Cl::CL_DuplicateVectorComponents: return MLV_DuplicateVectorComponents;
+ case Cl::CL_MemberFunction: return MLV_MemberFunction;
+ case Cl::CL_SubObjCPropertySetting: return MLV_SubObjCPropertySetting;
+ case Cl::CL_ClassTemporary: return MLV_ClassTemporary;
+ case Cl::CL_PRValue:
+ return VC.getModifiable() == Cl::CM_LValueCast ?
+ MLV_LValueCast : MLV_InvalidExpression;
+ }
+ assert(VC.getKind() == Cl::CL_LValue && "Unhandled kind");
+ switch (VC.getModifiable()) {
+ case Cl::CM_Untested: llvm_unreachable("Did not test modifiability");
+ case Cl::CM_Modifiable: return MLV_Valid;
+ case Cl::CM_RValue: llvm_unreachable("CM_RValue and CL_LValue don't match");
+ case Cl::CM_Function: return MLV_NotObjectType;
+ case Cl::CM_LValueCast:
+ llvm_unreachable("CM_LValueCast and CL_LValue don't match");
+ case Cl::CM_NotBlockQualified: return MLV_NotBlockQualified;
+ case Cl::CM_NoSetterProperty: return MLV_NoSetterProperty;
+ case Cl::CM_ConstQualified: return MLV_ConstQualified;
+ case Cl::CM_ArrayType: return MLV_ArrayType;
+ case Cl::CM_IncompleteType: return MLV_IncompleteType;
+ }
+ llvm_unreachable("Unhandled modifiable type");
+}
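A sketch of how a caller might drive this entry point; the identifiers E and Ctx are assumed to be an Expr * and an ASTContext & already in scope, and real Sema call sites differ:

    SourceLocation Loc;
    switch (E->isModifiableLvalue(Ctx, &Loc)) {
    case Expr::MLV_Valid:
      break;                        // assignment is fine
    case Expr::MLV_ConstQualified:
      // diagnose a read-only target, pointing at Loc
      break;
    default:
      // map the remaining results onto their diagnostics
      break;
    }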
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
index dc61401..3c97420 100644
--- a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
@@ -157,7 +157,7 @@ static bool EvalPointerValueAsBool(LValue& Value, bool& Result) {
static bool HandleConversionToBool(const Expr* E, bool& Result,
EvalInfo &Info) {
- if (E->getType()->isIntegralType()) {
+ if (E->getType()->isIntegralOrEnumerationType()) {
APSInt IntResult;
if (!EvaluateInteger(E, IntResult, Info))
return false;
@@ -542,7 +542,7 @@ bool PointerExprEvaluator::VisitCastExpr(CastExpr* E) {
SubExpr->getType()->isBlockPointerType())
return Visit(SubExpr);
- if (SubExpr->getType()->isIntegralType()) {
+ if (SubExpr->getType()->isIntegralOrEnumerationType()) {
APValue Value;
if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
break;
@@ -563,6 +563,7 @@ bool PointerExprEvaluator::VisitCastExpr(CastExpr* E) {
case CastExpr::CK_NoOp:
case CastExpr::CK_BitCast:
+ case CastExpr::CK_LValueBitCast:
case CastExpr::CK_AnyPointerToObjCPointerCast:
case CastExpr::CK_AnyPointerToBlockPointerCast:
return Visit(SubExpr);
@@ -746,25 +747,46 @@ VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
QualType EltTy = VT->getElementType();
llvm::SmallVector<APValue, 4> Elements;
- for (unsigned i = 0; i < NumElements; i++) {
+ // If a vector is initialized with a single element, that value
+ // becomes every element of the vector, not just the first.
+ // This is the behavior described in the IBM AltiVec documentation.
+ if (NumInits == 1) {
+ APValue InitValue;
if (EltTy->isIntegerType()) {
llvm::APSInt sInt(32);
- if (i < NumInits) {
- if (!EvaluateInteger(E->getInit(i), sInt, Info))
- return APValue();
- } else {
- sInt = Info.Ctx.MakeIntValue(0, EltTy);
- }
- Elements.push_back(APValue(sInt));
+ if (!EvaluateInteger(E->getInit(0), sInt, Info))
+ return APValue();
+ InitValue = APValue(sInt);
} else {
llvm::APFloat f(0.0);
- if (i < NumInits) {
- if (!EvaluateFloat(E->getInit(i), f, Info))
- return APValue();
+ if (!EvaluateFloat(E->getInit(0), f, Info))
+ return APValue();
+ InitValue = APValue(f);
+ }
+ for (unsigned i = 0; i < NumElements; i++) {
+ Elements.push_back(InitValue);
+ }
+ } else {
+ for (unsigned i = 0; i < NumElements; i++) {
+ if (EltTy->isIntegerType()) {
+ llvm::APSInt sInt(32);
+ if (i < NumInits) {
+ if (!EvaluateInteger(E->getInit(i), sInt, Info))
+ return APValue();
+ } else {
+ sInt = Info.Ctx.MakeIntValue(0, EltTy);
+ }
+ Elements.push_back(APValue(sInt));
} else {
- f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy));
+ llvm::APFloat f(0.0);
+ if (i < NumInits) {
+ if (!EvaluateFloat(E->getInit(i), f, Info))
+ return APValue();
+ } else {
+ f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy));
+ }
+ Elements.push_back(APValue(f));
}
- Elements.push_back(APValue(f));
}
}
return APValue(&Elements[0], Elements.size());
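The user-visible effect of the new single-initializer branch, assuming AltiVec extensions are enabled:

    vector int v = (vector int)(1);           // splat: every lane holds 1
    vector int w = (vector int)(1, 2, 3, 4);  // one initializer per lane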
@@ -818,7 +840,8 @@ public:
: Info(info), Result(result) {}
bool Success(const llvm::APSInt &SI, const Expr *E) {
- assert(E->getType()->isIntegralType() && "Invalid evaluation result.");
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
assert(SI.isSigned() == E->getType()->isSignedIntegerType() &&
"Invalid evaluation result.");
assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
@@ -828,7 +851,8 @@ public:
}
bool Success(const llvm::APInt &I, const Expr *E) {
- assert(E->getType()->isIntegralType() && "Invalid evaluation result.");
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
"Invalid evaluation result.");
Result = APValue(APSInt(I));
@@ -837,7 +861,8 @@ public:
}
bool Success(uint64_t Value, const Expr *E) {
- assert(E->getType()->isIntegralType() && "Invalid evaluation result.");
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
return true;
}
@@ -914,7 +939,7 @@ public:
return Success(0, E);
}
- bool VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
+ bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
return Success(0, E);
}
@@ -943,12 +968,12 @@ private:
} // end anonymous namespace
static bool EvaluateIntegerOrLValue(const Expr* E, APValue &Result, EvalInfo &Info) {
- assert(E->getType()->isIntegralType());
+ assert(E->getType()->isIntegralOrEnumerationType());
return IntExprEvaluator(Info, Result).Visit(const_cast<Expr*>(E));
}
static bool EvaluateInteger(const Expr* E, APSInt &Result, EvalInfo &Info) {
- assert(E->getType()->isIntegralType());
+ assert(E->getType()->isIntegralOrEnumerationType());
APValue Val;
if (!EvaluateIntegerOrLValue(E, Val, Info) || !Val.isInt())
@@ -1314,8 +1339,8 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
return Success(Result, E);
}
}
- if (!LHSTy->isIntegralType() ||
- !RHSTy->isIntegralType()) {
+ if (!LHSTy->isIntegralOrEnumerationType() ||
+ !RHSTy->isIntegralOrEnumerationType()) {
// We can't continue from here for non-integral types, and they
// could potentially confuse the following operations.
return false;
@@ -1570,7 +1595,7 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
}
// Only handle integral operations...
- if (!E->getSubExpr()->getType()->isIntegralType())
+ if (!E->getSubExpr()->getType()->isIntegralOrEnumerationType())
return false;
// Get the operand value into 'Result'.
@@ -1613,7 +1638,7 @@ bool IntExprEvaluator::VisitCastExpr(CastExpr *E) {
}
// Handle simple integer->integer casts.
- if (SrcType->isIntegralType()) {
+ if (SrcType->isIntegralOrEnumerationType()) {
if (!Visit(SubExpr))
return false;
@@ -1732,7 +1757,7 @@ public:
bool VisitBinaryOperator(const BinaryOperator *E);
bool VisitFloatingLiteral(const FloatingLiteral *E);
bool VisitCastExpr(CastExpr *E);
- bool VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E);
+ bool VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
bool VisitConditionalOperator(ConditionalOperator *E);
bool VisitChooseExpr(const ChooseExpr *E)
@@ -1908,7 +1933,7 @@ bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) {
bool FloatExprEvaluator::VisitCastExpr(CastExpr *E) {
Expr* SubExpr = E->getSubExpr();
- if (SubExpr->getType()->isIntegralType()) {
+ if (SubExpr->getType()->isIntegralOrEnumerationType()) {
APSInt IntResult;
if (!EvaluateInteger(SubExpr, IntResult, Info))
return false;
@@ -1928,7 +1953,7 @@ bool FloatExprEvaluator::VisitCastExpr(CastExpr *E) {
return false;
}
-bool FloatExprEvaluator::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+bool FloatExprEvaluator::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
Result = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(E->getType()));
return true;
}
@@ -2186,6 +2211,8 @@ bool Expr::Evaluate(EvalResult &Result, ASTContext &Ctx) const {
} else if (E->getType()->isIntegerType()) {
if (!IntExprEvaluator(Info, Info.EvalResult.Val).Visit(const_cast<Expr*>(E)))
return false;
+ if (Result.Val.isLValue() && !IsGlobalLValue(Result.Val.getLValueBase()))
+ return false;
} else if (E->getType()->hasPointerRepresentation()) {
LValue LV;
if (!EvaluatePointer(E, LV, Info))
@@ -2316,7 +2343,7 @@ static ICEDiag CheckEvalInICE(const Expr* E, ASTContext &Ctx) {
static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
assert(!E->isValueDependent() && "Should not see value dependent exprs!");
- if (!E->getType()->isIntegralType()) {
+ if (!E->getType()->isIntegralOrEnumerationType()) {
return ICEDiag(2, E->getLocStart());
}
@@ -2384,7 +2411,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::IntegerLiteralClass:
case Expr::CharacterLiteralClass:
case Expr::CXXBoolLiteralExprClass:
- case Expr::CXXZeroInitValueExprClass:
+ case Expr::CXXScalarValueInitExprClass:
case Expr::TypesCompatibleExprClass:
case Expr::UnaryTypeTraitExprClass:
return NoDiag();
@@ -2579,7 +2606,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass: {
const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
- if (SubExpr->getType()->isIntegralType())
+ if (SubExpr->getType()->isIntegralOrEnumerationType())
return CheckICE(SubExpr, Ctx);
if (isa<FloatingLiteral>(SubExpr->IgnoreParens()))
return NoDiag();
diff --git a/contrib/llvm/tools/clang/lib/AST/Makefile b/contrib/llvm/tools/clang/lib/AST/Makefile
index ede2577..7a1672b 100644
--- a/contrib/llvm/tools/clang/lib/AST/Makefile
+++ b/contrib/llvm/tools/clang/lib/AST/Makefile
@@ -11,11 +11,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangAST
BUILD_ARCHIVE = 1
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
index 983a287..88d71ce 100644
--- a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -23,6 +23,35 @@ using namespace clang;
namespace {
+/// BaseSubobjectInfo - Represents a single base subobject in a complete class.
+/// For a class hierarchy like
+///
+/// class A { };
+/// class B : A { };
+/// class C : A, B { };
+///
+/// The BaseSubobjectInfo graph for C will have three BaseSubobjectInfo
+/// instances, one for B and two for A.
+///
+/// If a base is virtual, it will only have one BaseSubobjectInfo allocated.
+struct BaseSubobjectInfo {
+ /// Class - The class for this base info.
+ const CXXRecordDecl *Class;
+
+ /// IsVirtual - Whether the BaseInfo represents a virtual base or not.
+ bool IsVirtual;
+
+ /// Bases - Information about the base subobjects.
+ llvm::SmallVector<BaseSubobjectInfo*, 4> Bases;
+
+ /// PrimaryVirtualBaseInfo - Holds the base info for the primary virtual base
+ /// of this base info (if one exists).
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo;
+
+ // FIXME: Document.
+ const BaseSubobjectInfo *Derived;
+};
+
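For virtual bases, the "only one allocated" rule means sharing; with a hypothetical diamond:

    class A { };
    class B : virtual A { };
    class C : virtual A { };
    class D : B, C { };
    // The graph for D contains a single BaseSubobjectInfo for A, pointed
    // to by both the B and C infos.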
/// EmptySubobjectMap - Keeps track of which empty subobjects exist at different
/// offsets while laying out a C++ class.
class EmptySubobjectMap {
@@ -36,30 +65,41 @@ class EmptySubobjectMap {
typedef llvm::DenseMap<uint64_t, ClassVectorTy> EmptyClassOffsetsMapTy;
EmptyClassOffsetsMapTy EmptyClassOffsets;
+ /// MaxEmptyClassOffset - The highest offset known to contain an empty
+ /// base subobject.
+ uint64_t MaxEmptyClassOffset;
+
/// ComputeEmptySubobjectSizes - Compute the size of the largest base or
/// member subobject that is empty.
void ComputeEmptySubobjectSizes();
+
+ bool CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
+ uint64_t Offset) const;
- struct BaseInfo {
- const CXXRecordDecl *Class;
- bool IsVirtual;
-
- const CXXRecordDecl *PrimaryVirtualBase;
-
- llvm::SmallVector<BaseInfo*, 4> Bases;
- const BaseInfo *Derived;
- };
+ void AddSubobjectAtOffset(const CXXRecordDecl *RD, uint64_t Offset);
- llvm::DenseMap<const CXXRecordDecl *, BaseInfo *> VirtualBaseInfo;
- llvm::DenseMap<const CXXRecordDecl *, BaseInfo *> NonVirtualBaseInfo;
+ bool CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
+ uint64_t Offset);
+ void UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
+ uint64_t Offset, bool PlacingEmptyBase);
- BaseInfo *ComputeBaseInfo(const CXXRecordDecl *RD, bool IsVirtual,
- const BaseInfo *Derived);
- void ComputeBaseInfo();
+ bool CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ uint64_t Offset) const;
+ bool CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
+ uint64_t Offset) const;
- bool CanPlaceBaseSubobjectAtOffset(const BaseInfo *Info, uint64_t Offset);
- void UpdateEmptyBaseSubobjects(const BaseInfo *Info, uint64_t Offset);
+ void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ uint64_t Offset);
+ void UpdateEmptyFieldSubobjects(const FieldDecl *FD, uint64_t Offset);
+ /// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty
+ /// subobjects at or beyond the given offset.
+ bool AnyEmptySubobjectsBeyondOffset(uint64_t Offset) const {
+ return Offset <= MaxEmptyClassOffset;
+ }
+
public:
/// This holds the size of the largest empty subobject (either a base
/// or a member). Will be zero if the record being built doesn't contain
@@ -67,18 +107,21 @@ public:
uint64_t SizeOfLargestEmptySubobject;
EmptySubobjectMap(ASTContext &Context, const CXXRecordDecl *Class)
- : Context(Context), Class(Class), SizeOfLargestEmptySubobject(0) {
+ : Context(Context), Class(Class), MaxEmptyClassOffset(0),
+ SizeOfLargestEmptySubobject(0) {
ComputeEmptySubobjectSizes();
-
- ComputeBaseInfo();
}
/// CanPlaceBaseAtOffset - Return whether the given base class can be placed
/// at the given offset.
/// Returns false if placing the record will result in two components
/// (direct or indirect) of the same type having the same offset.
- bool CanPlaceBaseAtOffset(const CXXRecordDecl *RD, bool BaseIsVirtual,
+ bool CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
uint64_t Offset);
+
+ /// CanPlaceFieldAtOffset - Return whether a field can be placed at the given
+ /// offset.
+ bool CanPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset);
};
void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
@@ -130,93 +173,67 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
}
}
-EmptySubobjectMap::BaseInfo *
-EmptySubobjectMap::ComputeBaseInfo(const CXXRecordDecl *RD, bool IsVirtual,
- const BaseInfo *Derived) {
- BaseInfo *Info;
-
- if (IsVirtual) {
- BaseInfo *&InfoSlot = VirtualBaseInfo[RD];
- if (InfoSlot) {
- assert(InfoSlot->Class == RD && "Wrong class for virtual base info!");
- return InfoSlot;
- }
+bool
+EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
+ uint64_t Offset) const {
+ // We only need to check empty bases.
+ if (!RD->isEmpty())
+ return true;
- InfoSlot = new (Context) BaseInfo;
- Info = InfoSlot;
- } else {
- Info = new (Context) BaseInfo;
- }
-
- Info->Class = RD;
- Info->IsVirtual = IsVirtual;
- Info->Derived = Derived;
- Info->PrimaryVirtualBase = 0;
+ EmptyClassOffsetsMapTy::const_iterator I = EmptyClassOffsets.find(Offset);
+ if (I == EmptyClassOffsets.end())
+ return true;
- if (RD->getNumVBases()) {
- // Check if this class has a primary virtual base.
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- if (Layout.getPrimaryBaseWasVirtual()) {
- Info->PrimaryVirtualBase = Layout.getPrimaryBase();
- assert(Info->PrimaryVirtualBase &&
- "Didn't have a primary virtual base!");
- }
- }
+ const ClassVectorTy& Classes = I->second;
+ if (std::find(Classes.begin(), Classes.end(), RD) == Classes.end())
+ return true;
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- bool IsVirtual = I->isVirtual();
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- Info->Bases.push_back(ComputeBaseInfo(BaseDecl, IsVirtual, Info));
- }
-
- return Info;
+ // There is already an empty class of the same type at this offset.
+ return false;
}
+
+void EmptySubobjectMap::AddSubobjectAtOffset(const CXXRecordDecl *RD,
+ uint64_t Offset) {
+ // We only care about empty bases.
+ if (!RD->isEmpty())
+ return;
-void EmptySubobjectMap::ComputeBaseInfo() {
- for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
- E = Class->bases_end(); I != E; ++I) {
- bool IsVirtual = I->isVirtual();
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- BaseInfo *Info = ComputeBaseInfo(BaseDecl, IsVirtual, /*Derived=*/0);
- if (IsVirtual) {
- // ComputeBaseInfo has already added this base for us.
- continue;
- }
+ ClassVectorTy& Classes = EmptyClassOffsets[Offset];
+ assert(std::find(Classes.begin(), Classes.end(), RD) == Classes.end() &&
+ "Duplicate empty class detected!");
- // Add the base info to the map of non-virtual bases.
- assert(!NonVirtualBaseInfo.count(BaseDecl) &&
- "Non-virtual base already exists!");
- NonVirtualBaseInfo.insert(std::make_pair(BaseDecl, Info));
- }
+ Classes.push_back(RD);
+
+ // Update the empty class offset.
+ MaxEmptyClassOffset = std::max(MaxEmptyClassOffset, Offset);
}
bool
-EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseInfo *Info,
+EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
uint64_t Offset) {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ if (!CanPlaceSubobjectAtOffset(Info->Class, Offset))
+ return false;
+
// Traverse all non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
- BaseInfo* Base = Info->Bases[I];
+ BaseSubobjectInfo* Base = Info->Bases[I];
if (Base->IsVirtual)
continue;
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
if (!CanPlaceBaseSubobjectAtOffset(Base, BaseOffset))
return false;
}
- if (Info->PrimaryVirtualBase) {
- BaseInfo *PrimaryVirtualBaseInfo =
- VirtualBaseInfo.lookup(Info->PrimaryVirtualBase);
- assert(PrimaryVirtualBaseInfo && "Didn't find base info!");
+ if (Info->PrimaryVirtualBaseInfo) {
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;
if (Info == PrimaryVirtualBaseInfo->Derived) {
if (!CanPlaceBaseSubobjectAtOffset(PrimaryVirtualBaseInfo, Offset))
@@ -224,62 +241,277 @@ EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseInfo *Info,
}
}
- // FIXME: Member variables.
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
+ E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+
+ uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo);
+ if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset))
+ return false;
+ }
+
return true;
}
-void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseInfo *Info,
- uint64_t Offset) {
- if (Info->Class->isEmpty()) {
- // FIXME: Record that there is an empty class at this offset.
+void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
+ uint64_t Offset,
+ bool PlacingEmptyBase) {
+ if (!PlacingEmptyBase && Offset >= SizeOfLargestEmptySubobject) {
+ // We know that the only empty subobjects that can conflict with empty
+ // subobjects of non-empty bases are empty bases that can be placed at
+ // offset zero. Because of this, we only need to keep track of empty base
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ return;
}
-
+
+ AddSubobjectAtOffset(Info->Class, Offset);
+
// Traverse all non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
- BaseInfo* Base = Info->Bases[I];
+ BaseSubobjectInfo* Base = Info->Bases[I];
if (Base->IsVirtual)
continue;
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+
uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
-
- UpdateEmptyBaseSubobjects(Base, BaseOffset);
+ UpdateEmptyBaseSubobjects(Base, BaseOffset, PlacingEmptyBase);
}
- if (Info->PrimaryVirtualBase) {
- BaseInfo *PrimaryVirtualBaseInfo =
- VirtualBaseInfo.lookup(Info->PrimaryVirtualBase);
- assert(PrimaryVirtualBaseInfo && "Didn't find base info!");
+ if (Info->PrimaryVirtualBaseInfo) {
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;
if (Info == PrimaryVirtualBaseInfo->Derived)
- UpdateEmptyBaseSubobjects(PrimaryVirtualBaseInfo, Offset);
+ UpdateEmptyBaseSubobjects(PrimaryVirtualBaseInfo, Offset,
+ PlacingEmptyBase);
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
+ E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+
+ uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo);
+ UpdateEmptyFieldSubobjects(FD, FieldOffset);
}
-
- // FIXME: Member variables.
}
-bool EmptySubobjectMap::CanPlaceBaseAtOffset(const CXXRecordDecl *RD,
- bool BaseIsVirtual,
+bool EmptySubobjectMap::CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
uint64_t Offset) {
// If we know this class doesn't have any empty subobjects we don't need to
// bother checking.
if (!SizeOfLargestEmptySubobject)
return true;
- BaseInfo *Info;
+ if (!CanPlaceBaseSubobjectAtOffset(Info, Offset))
+ return false;
+
+ // We are able to place the base at this offset. Make sure to update the
+ // empty base subobject map.
+ UpdateEmptyBaseSubobjects(Info, Offset, Info->Class->isEmpty());
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ uint64_t Offset) const {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ if (!CanPlaceSubobjectAtOffset(RD, Offset))
+ return false;
- if (BaseIsVirtual)
- Info = VirtualBaseInfo.lookup(RD);
- else
- Info = NonVirtualBaseInfo.lookup(RD);
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Traverse all non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
+ if (!CanPlaceFieldSubobjectAtOffset(BaseDecl, Class, BaseOffset))
+ return false;
+ }
+
+ if (RD == Class) {
+ // This is the most derived class, traverse virtual bases as well.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *VBaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ uint64_t VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
+ if (!CanPlaceFieldSubobjectAtOffset(VBaseDecl, Class, VBaseOffset))
+ return false;
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+
+ uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo);
+
+ if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset))
+ return false;
+ }
+
+ return true;
+}
+
+bool EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
+ uint64_t Offset) const {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
- if (!CanPlaceBaseSubobjectAtOffset(Info, Offset))
+ QualType T = FD->getType();
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return CanPlaceFieldSubobjectAtOffset(RD, RD, Offset);
+ }
+
+ // If we have an array type we need to look at every element.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
+ QualType ElemTy = Context.getBaseElementType(AT);
+ const RecordType *RT = ElemTy->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ uint64_t NumElements = Context.getConstantArrayElementCount(AT);
+ uint64_t ElementOffset = Offset;
+ for (uint64_t I = 0; I != NumElements; ++I) {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(ElementOffset))
+ return true;
+
+ if (!CanPlaceFieldSubobjectAtOffset(RD, RD, ElementOffset))
+ return false;
+
+ ElementOffset += Layout.getSize();
+ }
+ }
+
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset) {
+ if (!CanPlaceFieldSubobjectAtOffset(FD, Offset))
return false;
- UpdateEmptyBaseSubobjects(Info, Offset);
+ // We are able to place the member variable at this offset.
+ // Make sure to update the empty base subobject map.
+ UpdateEmptyFieldSubobjects(FD, Offset);
return true;
}
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ uint64_t Offset) {
+ // We know that the only empty subobjects that can conflict with empty
+ // field subobjects are subobjects of empty bases that can be placed at offset
+ // zero. Because of this, we only need to keep track of empty field
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ if (Offset >= SizeOfLargestEmptySubobject)
+ return;
+
+ AddSubobjectAtOffset(RD, Offset);
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Traverse all non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
+ UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset);
+ }
+
+ if (RD == Class) {
+ // This is the most derived class, traverse virtual bases as well.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *VBaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ uint64_t VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
+ UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset);
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+
+ uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo);
+
+ UpdateEmptyFieldSubobjects(FD, FieldOffset);
+ }
+}
+
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
+ uint64_t Offset) {
+ QualType T = FD->getType();
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ UpdateEmptyFieldSubobjects(RD, RD, Offset);
+ return;
+ }
+
+ // If we have an array type we need to update every element.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
+ QualType ElemTy = Context.getBaseElementType(AT);
+ const RecordType *RT = ElemTy->getAs<RecordType>();
+ if (!RT)
+ return;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ uint64_t NumElements = Context.getConstantArrayElementCount(AT);
+ uint64_t ElementOffset = Offset;
+
+ for (uint64_t I = 0; I != NumElements; ++I) {
+ // We know that the only empty subobjects that can conflict with empty
+ // field subobjects are subobjects of empty bases that can be placed at
+ // offset zero. Because of this, we only need to keep track of empty field
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ if (ElementOffset >= SizeOfLargestEmptySubobject)
+ return;
+
+ UpdateEmptyFieldSubobjects(RD, RD, ElementOffset);
+ ElementOffset += Layout.getSize();
+ }
+ }
+}
+
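A minimal sketch of the conflict all this field bookkeeping exists to prevent:

    struct Empty { };
    struct A : Empty { };
    struct B : Empty { A a; };
    // B's own Empty base sits at offset 0. Placing the field 'a' there too
    // would put A's Empty base at offset 0 as well, so CanPlaceFieldAtOffset
    // fails and the builder moves 'a' to a nonzero offset, keeping the two
    // Empty subobjects of the same type at distinct addresses.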
class RecordLayoutBuilder {
// FIXME: Remove this and make the appropriate fields public.
friend class clang::ASTContext;
@@ -346,10 +578,6 @@ class RecordLayoutBuilder {
/// avoid visiting virtual bases more than once.
llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
- /// EmptyClassOffsets - A map from offsets to empty record decls.
- typedef std::multimap<uint64_t, const CXXRecordDecl *> EmptyClassOffsetsTy;
- EmptyClassOffsetsTy EmptyClassOffsets;
-
RecordLayoutBuilder(ASTContext &Context, EmptySubobjectMap *EmptySubobjects)
: Context(Context), EmptySubobjects(EmptySubobjects), Size(0), Alignment(8),
Packed(false), IsUnion(false), IsMac68kAlign(false),
@@ -366,9 +594,29 @@ class RecordLayoutBuilder {
void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize);
void LayoutBitField(const FieldDecl *D);
- /// ComputeEmptySubobjectSizes - Compute the size of the largest base or
- /// member subobject that is empty.
- void ComputeEmptySubobjectSizes(const CXXRecordDecl *RD);
+ /// BaseSubobjectInfoAllocator - Allocator for BaseSubobjectInfo objects.
+ llvm::SpecificBumpPtrAllocator<BaseSubobjectInfo> BaseSubobjectInfoAllocator;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, BaseSubobjectInfo *>
+ BaseSubobjectInfoMapTy;
+
+ /// VirtualBaseInfo - Map from all the (direct or indirect) virtual bases
+ /// of the class we're laying out to their base subobject info.
+ BaseSubobjectInfoMapTy VirtualBaseInfo;
+
+ /// NonVirtualBaseInfo - Map from all the direct non-virtual bases of the
+ /// class we're laying out to their base subobject info.
+ BaseSubobjectInfoMapTy NonVirtualBaseInfo;
+
+ /// ComputeBaseSubobjectInfo - Compute the base subobject information for the
+ /// bases of the given class.
+ void ComputeBaseSubobjectInfo(const CXXRecordDecl *RD);
+
+ /// ComputeBaseSubobjectInfo - Compute the base subobject information for a
+ /// single class and all of its base classes.
+ BaseSubobjectInfo *ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
+ bool IsVirtual,
+ BaseSubobjectInfo *Derived);
/// DeterminePrimaryBase - Determine the primary base of the given class.
void DeterminePrimaryBase(const CXXRecordDecl *RD);
@@ -387,43 +635,21 @@ class RecordLayoutBuilder {
void LayoutNonVirtualBases(const CXXRecordDecl *RD);
/// LayoutNonVirtualBase - Lays out a single non-virtual base.
- void LayoutNonVirtualBase(const CXXRecordDecl *Base);
+ void LayoutNonVirtualBase(const BaseSubobjectInfo *Base);
- void AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD, uint64_t Offset,
- const CXXRecordDecl *MostDerivedClass);
+ void AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
+ uint64_t Offset);
/// LayoutVirtualBases - Lays out all the virtual bases.
void LayoutVirtualBases(const CXXRecordDecl *RD,
const CXXRecordDecl *MostDerivedClass);
/// LayoutVirtualBase - Lays out a single virtual base.
- void LayoutVirtualBase(const CXXRecordDecl *Base);
+ void LayoutVirtualBase(const BaseSubobjectInfo *Base);
/// LayoutBase - Will lay out a base and return the offset where it was
/// placed, in bits.
- uint64_t LayoutBase(const CXXRecordDecl *Base, bool BaseIsVirtual);
-
- /// canPlaceRecordAtOffset - Return whether a record (either a base class
- /// or a field) can be placed at the given offset.
- /// Returns false if placing the record will result in two components
- /// (direct or indirect) of the same type having the same offset.
- bool canPlaceRecordAtOffset(const CXXRecordDecl *RD, uint64_t Offset,
- bool CheckVBases) const;
-
- /// canPlaceFieldAtOffset - Return whether a field can be placed at the given
- /// offset.
- bool canPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset) const;
-
- /// UpdateEmptyClassOffsets - Called after a record (either a base class
- /// or a field) has been placed at the given offset. Will update the
- /// EmptyClassOffsets map if the class is empty or has any empty bases or
- /// fields.
- void UpdateEmptyClassOffsets(const CXXRecordDecl *RD, uint64_t Offset,
- bool UpdateVBases);
-
- /// UpdateEmptyClassOffsets - Called after a field has been placed at the
- /// given offset.
- void UpdateEmptyClassOffsets(const FieldDecl *FD, uint64_t Offset);
+ uint64_t LayoutBase(const BaseSubobjectInfo *Base);
/// InitializeLayout - Initialize record layout for the given record decl.
void InitializeLayout(const Decl *D);
@@ -575,14 +801,127 @@ void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
UpdateAlignment(Context.Target.getPointerAlign(0));
}
+BaseSubobjectInfo *
+RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
+ bool IsVirtual,
+ BaseSubobjectInfo *Derived) {
+ BaseSubobjectInfo *Info;
+
+ if (IsVirtual) {
+ // Check if we already have info about this virtual base.
+ BaseSubobjectInfo *&InfoSlot = VirtualBaseInfo[RD];
+ if (InfoSlot) {
+ assert(InfoSlot->Class == RD && "Wrong class for virtual base info!");
+ return InfoSlot;
+ }
+
+    // We don't, so create it.
+ InfoSlot = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
+ Info = InfoSlot;
+ } else {
+ Info = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
+ }
+
+ Info->Class = RD;
+ Info->IsVirtual = IsVirtual;
+ Info->Derived = 0;
+ Info->PrimaryVirtualBaseInfo = 0;
+
+ const CXXRecordDecl *PrimaryVirtualBase = 0;
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = 0;
+
+ // Check if this base has a primary virtual base.
+ if (RD->getNumVBases()) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ // This base does have a primary virtual base.
+ PrimaryVirtualBase = Layout.getPrimaryBase();
+ assert(PrimaryVirtualBase && "Didn't have a primary virtual base!");
+
+ // Now check if we have base subobject info about this primary base.
+ PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);
+
+ if (PrimaryVirtualBaseInfo) {
+ if (PrimaryVirtualBaseInfo->Derived) {
+ // We did have info about this primary base, and it turns out that it
+ // has already been claimed as a primary virtual base for another
+ // base.
+ PrimaryVirtualBase = 0;
+ } else {
+ // We can claim this base as our primary base.
+ Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
+ PrimaryVirtualBaseInfo->Derived = Info;
+ }
+ }
+ }
+ }
+
+ // Now go through all direct bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ bool IsVirtual = I->isVirtual();
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ Info->Bases.push_back(ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, Info));
+ }
+
+ if (PrimaryVirtualBase && !PrimaryVirtualBaseInfo) {
+ // Traversing the bases must have created the base info for our primary
+ // virtual base.
+ PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);
+ assert(PrimaryVirtualBaseInfo &&
+ "Did not create a primary virtual base!");
+
+ // Claim the primary virtual base as our primary virtual base.
+ Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
+ PrimaryVirtualBaseInfo->Derived = Info;
+ }
+
+ return Info;
+}
+
+void RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ bool IsVirtual = I->isVirtual();
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Compute the base subobject info for this base.
+ BaseSubobjectInfo *Info = ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, 0);
+
+ if (IsVirtual) {
+ // ComputeBaseInfo has already added this base for us.
+ assert(VirtualBaseInfo.count(BaseDecl) &&
+ "Did not add virtual base!");
+ } else {
+ // Add the base info to the map of non-virtual bases.
+ assert(!NonVirtualBaseInfo.count(BaseDecl) &&
+ "Non-virtual base already exists!");
+ NonVirtualBaseInfo.insert(std::make_pair(BaseDecl, Info));
+ }
+ }
+}
+
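Claiming in practice, for a hypothetical diamond whose shared base is dynamic:

    class A { public: virtual void f(); };
    class B : public virtual A { };
    class C : public virtual A { };
    class D : public B, public C { };
    // A is the primary virtual base of both B and C. D's direct bases are
    // visited in order, so B's info claims A first and sets A's Derived
    // link; when C's info is built it finds Derived already set, resets
    // PrimaryVirtualBase to null, and leaves A to be laid out as an
    // ordinary virtual base.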
void
RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
- // First, determine the primary base class.
+ // Then, determine the primary base class.
DeterminePrimaryBase(RD);
+ // Compute base subobject info.
+ ComputeBaseSubobjectInfo(RD);
+
// If we have a primary base class, lay it out.
if (PrimaryBase) {
if (PrimaryBaseIsVirtual) {
+ // If the primary virtual base was a primary virtual base of some other
+ // base class we'll have to steal it.
+ BaseSubobjectInfo *PrimaryBaseInfo = VirtualBaseInfo.lookup(PrimaryBase);
+ PrimaryBaseInfo->Derived = 0;
+
// We have a virtual primary base, insert it as an indirect primary base.
IndirectPrimaryBases.insert(PrimaryBase);
@@ -590,9 +929,15 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
"vbase already visited!");
VisitedVirtualBases.insert(PrimaryBase);
- LayoutVirtualBase(PrimaryBase);
- } else
- LayoutNonVirtualBase(PrimaryBase);
+ LayoutVirtualBase(PrimaryBaseInfo);
+ } else {
+ BaseSubobjectInfo *PrimaryBaseInfo =
+ NonVirtualBaseInfo.lookup(PrimaryBase);
+ assert(PrimaryBaseInfo &&
+ "Did not find base info for non-virtual primary base!");
+
+ LayoutNonVirtualBase(PrimaryBaseInfo);
+ }
}
// Now lay out the non-virtual bases.
@@ -603,81 +948,64 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
if (I->isVirtual())
continue;
- const CXXRecordDecl *Base =
+ const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
// Skip the primary base.
- if (Base == PrimaryBase && !PrimaryBaseIsVirtual)
+ if (BaseDecl == PrimaryBase && !PrimaryBaseIsVirtual)
continue;
// Lay out the base.
- LayoutNonVirtualBase(Base);
+ BaseSubobjectInfo *BaseInfo = NonVirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find base info for non-virtual base!");
+
+ LayoutNonVirtualBase(BaseInfo);
}
}
-void RecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *Base) {
+void RecordLayoutBuilder::LayoutNonVirtualBase(const BaseSubobjectInfo *Base) {
// Layout the base.
- uint64_t Offset = LayoutBase(Base, /*BaseIsVirtual=*/false);
+ uint64_t Offset = LayoutBase(Base);
// Add its base class offset.
- if (!Bases.insert(std::make_pair(Base, Offset)).second)
- assert(false && "Added same base offset more than once!");
+ assert(!Bases.count(Base->Class) && "base offset already exists!");
+ Bases.insert(std::make_pair(Base->Class, Offset));
+
+ AddPrimaryVirtualBaseOffsets(Base, Offset);
}
void
-RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD,
- uint64_t Offset,
- const CXXRecordDecl *MostDerivedClass) {
- // We already have the offset for the primary base of the most derived class.
- if (RD != MostDerivedClass) {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
-
- // If this is a primary virtual base and we haven't seen it before, add it.
- if (PrimaryBase && Layout.getPrimaryBaseWasVirtual() &&
- !VBases.count(PrimaryBase))
- VBases.insert(std::make_pair(PrimaryBase, Offset));
+RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
+ uint64_t Offset) {
+ // This base isn't interesting; it has no virtual bases.
+ if (!Info->Class->getNumVBases())
+ return;
+
+ // First, check if we have a virtual primary base to add offsets for.
+ if (Info->PrimaryVirtualBaseInfo) {
+ assert(Info->PrimaryVirtualBaseInfo->IsVirtual &&
+ "Primary virtual base is not virtual!");
+ if (Info->PrimaryVirtualBaseInfo->Derived == Info) {
+ // Add the offset.
+ assert(!VBases.count(Info->PrimaryVirtualBaseInfo->Class) &&
+ "primary vbase offset already exists!");
+ VBases.insert(std::make_pair(Info->PrimaryVirtualBaseInfo->Class,
+ Offset));
+
+ // Traverse the primary virtual base.
+ AddPrimaryVirtualBaseOffsets(Info->PrimaryVirtualBaseInfo, Offset);
+ }
}
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- assert(!I->getType()->isDependentType() &&
- "Cannot layout class with dependent bases.");
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- if (!BaseDecl->getNumVBases()) {
- // This base isn't interesting since it doesn't have any virtual bases.
+ // Now go through all direct non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
+ const BaseSubobjectInfo *Base = Info->Bases[I];
+ if (Base->IsVirtual)
continue;
- }
-
- // Compute the offset of this base.
- uint64_t BaseOffset;
-
- if (I->isVirtual()) {
- // If we don't know this vbase yet, don't visit it. It will be visited
- // later.
- if (!VBases.count(BaseDecl)) {
- continue;
- }
-
- // Check if we've already visited this base.
- if (!VisitedVirtualBases.insert(BaseDecl))
- continue;
- // We want the vbase offset from the class we're currently laying out.
- BaseOffset = VBases[BaseDecl];
- } else if (RD == MostDerivedClass) {
- // We want the base offset from the class we're currently laying out.
- assert(Bases.count(BaseDecl) && "Did not find base!");
- BaseOffset = Bases[BaseDecl];
- } else {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
- }
-
- AddPrimaryVirtualBaseOffsets(BaseDecl, BaseOffset, MostDerivedClass);
+ uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+ AddPrimaryVirtualBaseOffsets(Base, BaseOffset);
}
}
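[editor's note] The rewritten AddPrimaryVirtualBaseOffsets walks the precomputed subobject tree, accumulating each base's layout-relative offset into an absolute one before recursing. A toy model of that accumulation, with an invented node type and made-up offsets:

#include <cstdint>
#include <iostream>
#include <vector>

struct Node {
  const char *Name;
  uint64_t RelOffset;          // offset within the immediate parent
  std::vector<Node*> Bases;
};

// Absolute offset of every subobject = running sum of relative offsets.
void visit(const Node *N, uint64_t Offset) {
  std::cout << N->Name << " @ " << Offset << "\n";
  for (const Node *B : N->Bases)
    visit(B, Offset + B->RelOffset);
}

int main() {
  Node C{"C", 0, {}};
  Node B{"B", 8, {&C}};
  Node A{"A", 0, {&B}};
  visit(&A, 0);                // prints: A @ 0, B @ 8, C @ 8
}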
@@ -701,53 +1029,54 @@ RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
assert(!I->getType()->isDependentType() &&
"Cannot layout class with dependent bases.");
- const CXXRecordDecl *Base =
+ const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
if (I->isVirtual()) {
- if (PrimaryBase != Base || !PrimaryBaseIsVirtual) {
- bool IndirectPrimaryBase = IndirectPrimaryBases.count(Base);
+ if (PrimaryBase != BaseDecl || !PrimaryBaseIsVirtual) {
+ bool IndirectPrimaryBase = IndirectPrimaryBases.count(BaseDecl);
// Only lay out the virtual base if it's not an indirect primary base.
if (!IndirectPrimaryBase) {
// Only visit virtual bases once.
- if (!VisitedVirtualBases.insert(Base))
+ if (!VisitedVirtualBases.insert(BaseDecl))
continue;
- LayoutVirtualBase(Base);
+ const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find virtual base info!");
+ LayoutVirtualBase(BaseInfo);
}
}
}
- if (!Base->getNumVBases()) {
+ if (!BaseDecl->getNumVBases()) {
// This base isn't interesting since it doesn't have any virtual bases.
continue;
}
- LayoutVirtualBases(Base, MostDerivedClass);
+ LayoutVirtualBases(BaseDecl, MostDerivedClass);
}
}
-void RecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *Base) {
+void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
+ assert(!Base->Derived && "Trying to lay out a primary virtual base!");
+
// Layout the base.
- uint64_t Offset = LayoutBase(Base, /*BaseIsVirtual=*/true);
+ uint64_t Offset = LayoutBase(Base);
// Add its base class offset.
- if (!VBases.insert(std::make_pair(Base, Offset)).second)
- assert(false && "Added same vbase offset more than once!");
+ assert(!VBases.count(Base->Class) && "vbase offset already exists!");
+ VBases.insert(std::make_pair(Base->Class, Offset));
+
+ AddPrimaryVirtualBaseOffsets(Base, Offset);
}
-uint64_t RecordLayoutBuilder::LayoutBase(const CXXRecordDecl *Base,
- bool BaseIsVirtual) {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base);
+uint64_t RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class);
// If we have an empty base class, try to place it at offset 0.
- if (Base->isEmpty() &&
- EmptySubobjects->CanPlaceBaseAtOffset(Base, BaseIsVirtual, 0) &&
- canPlaceRecordAtOffset(Base, 0, /*CheckVBases=*/false)) {
- // We were able to place the class at offset 0.
- UpdateEmptyClassOffsets(Base, 0, /*UpdateVBases=*/false);
-
+ if (Base->Class->isEmpty() &&
+ EmptySubobjects->CanPlaceBaseAtOffset(Base, 0)) {
Size = std::max(Size, Layout.getSize());
return 0;
@@ -759,15 +1088,10 @@ uint64_t RecordLayoutBuilder::LayoutBase(const CXXRecordDecl *Base,
uint64_t Offset = llvm::RoundUpToAlignment(DataSize, BaseAlign);
// Try to place the base.
- while (true) {
- if (EmptySubobjects->CanPlaceBaseAtOffset(Base, BaseIsVirtual, Offset) &&
- canPlaceRecordAtOffset(Base, Offset, /*CheckVBases=*/false))
- break;
-
+ while (!EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset))
Offset += BaseAlign;
- }
- if (!Base->isEmpty()) {
+ if (!Base->Class->isEmpty()) {
// Update the data size.
DataSize = Offset + Layout.getNonVirtualSize();
@@ -778,173 +1102,9 @@ uint64_t RecordLayoutBuilder::LayoutBase(const CXXRecordDecl *Base,
// Remember max struct/class alignment.
UpdateAlignment(BaseAlign);
- UpdateEmptyClassOffsets(Base, Offset, /*UpdateVBases=*/false);
return Offset;
}
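[editor's note] LayoutBase now leaves all conflict detection to EmptySubobjects: round the current data size up to the base's alignment, then keep bumping by one alignment unit until the check accepts the offset. A self-contained model of that probe loop, with a plain std::set standing in for EmptySubobjectMap; the mask formula is the same arithmetic the old bitfield code spelled out by hand and llvm::RoundUpToAlignment performs:

#include <cstdint>
#include <iostream>
#include <set>

uint64_t roundUpToAlignment(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) & ~(Align - 1);   // Align must be a power of 2
}

int main() {
  std::set<uint64_t> Taken = {8, 16};          // offsets already occupied
  uint64_t DataSize = 5, Align = 8;

  // Start at the aligned end of the current data, then skip occupied slots.
  uint64_t Offset = roundUpToAlignment(DataSize, Align);
  while (Taken.count(Offset))
    Offset += Align;

  std::cout << Offset << "\n";                 // 24: 8 and 16 were taken
}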
-bool
-RecordLayoutBuilder::canPlaceRecordAtOffset(const CXXRecordDecl *RD,
- uint64_t Offset,
- bool CheckVBases) const {
- // Look for an empty class with the same type at the same offset.
- for (EmptyClassOffsetsTy::const_iterator I =
- EmptyClassOffsets.lower_bound(Offset),
- E = EmptyClassOffsets.upper_bound(Offset); I != E; ++I) {
-
- if (I->second == RD)
- return false;
- }
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- // Check bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- assert(!I->getType()->isDependentType() &&
- "Cannot layout class with dependent bases.");
- if (I->isVirtual())
- continue;
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- uint64_t BaseOffset = Layout.getBaseClassOffset(BaseDecl);
-
- if (!canPlaceRecordAtOffset(BaseDecl, Offset + BaseOffset,
- /*CheckVBases=*/false))
- return false;
- }
-
- // Check fields.
- unsigned FieldNo = 0;
- for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
- I != E; ++I, ++FieldNo) {
- const FieldDecl *FD = *I;
-
- uint64_t FieldOffset = Layout.getFieldOffset(FieldNo);
-
- if (!canPlaceFieldAtOffset(FD, Offset + FieldOffset))
- return false;
- }
-
- if (CheckVBases) {
- // FIXME: virtual bases.
- }
-
- return true;
-}
-
-bool RecordLayoutBuilder::canPlaceFieldAtOffset(const FieldDecl *FD,
- uint64_t Offset) const {
- QualType T = FD->getType();
- if (const RecordType *RT = T->getAs<RecordType>()) {
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- return canPlaceRecordAtOffset(RD, Offset, /*CheckVBases=*/true);
- }
-
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
- QualType ElemTy = Context.getBaseElementType(AT);
- const RecordType *RT = ElemTy->getAs<RecordType>();
- if (!RT)
- return true;
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD)
- return true;
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- uint64_t NumElements = Context.getConstantArrayElementCount(AT);
- uint64_t ElementOffset = Offset;
- for (uint64_t I = 0; I != NumElements; ++I) {
- if (!canPlaceRecordAtOffset(RD, ElementOffset, /*CheckVBases=*/true))
- return false;
-
- ElementOffset += Layout.getSize();
- }
- }
-
- return true;
-}
-
-void RecordLayoutBuilder::UpdateEmptyClassOffsets(const CXXRecordDecl *RD,
- uint64_t Offset,
- bool UpdateVBases) {
- if (RD->isEmpty())
- EmptyClassOffsets.insert(std::make_pair(Offset, RD));
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- // Update bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- assert(!I->getType()->isDependentType() &&
- "Cannot layout class with dependent bases.");
- if (I->isVirtual())
- continue;
-
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- uint64_t BaseClassOffset = Layout.getBaseClassOffset(Base);
- UpdateEmptyClassOffsets(Base, Offset + BaseClassOffset,
- /*UpdateVBases=*/false);
- }
-
- // Update fields.
- unsigned FieldNo = 0;
- for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
- I != E; ++I, ++FieldNo) {
- const FieldDecl *FD = *I;
-
- uint64_t FieldOffset = Layout.getFieldOffset(FieldNo);
- UpdateEmptyClassOffsets(FD, Offset + FieldOffset);
- }
-
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
-
- if (UpdateVBases) {
- // FIXME: Update virtual bases.
- } else if (PrimaryBase && Layout.getPrimaryBaseWasVirtual()) {
- // We always want to update the offsets of a primary virtual base.
- assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
- "primary base class offset must always be 0!");
- UpdateEmptyClassOffsets(PrimaryBase, Offset, /*UpdateVBases=*/false);
- }
-}
-
-void
-RecordLayoutBuilder::UpdateEmptyClassOffsets(const FieldDecl *FD,
- uint64_t Offset) {
- QualType T = FD->getType();
-
- if (const RecordType *RT = T->getAs<RecordType>()) {
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- UpdateEmptyClassOffsets(RD, Offset, /*UpdateVBases=*/true);
- return;
- }
- }
-
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
- QualType ElemTy = Context.getBaseElementType(AT);
- const RecordType *RT = ElemTy->getAs<RecordType>();
- if (!RT)
- return;
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD)
- return;
-
- const ASTRecordLayout &Info = Context.getASTRecordLayout(RD);
-
- uint64_t NumElements = Context.getConstantArrayElementCount(AT);
- uint64_t ElementOffset = Offset;
-
- for (uint64_t I = 0; I != NumElements; ++I) {
- UpdateEmptyClassOffsets(RD, ElementOffset, /*UpdateVBases=*/true);
- ElementOffset += Info.getSize();
- }
- }
-}
-
void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
IsUnion = RD->isUnion();
@@ -992,7 +1152,6 @@ void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
LayoutVirtualBases(RD, RD);
VisitedVirtualBases.clear();
- AddPrimaryVirtualBaseOffsets(RD, 0, RD);
// Finally, round the size of the total struct up to the alignment of the
// struct itself.
@@ -1137,7 +1296,7 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
// Check if we need to add padding to give the field the correct alignment.
if (FieldSize == 0 || (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize)
- FieldOffset = (FieldOffset + (FieldAlign-1)) & ~(FieldAlign-1);
+ FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
// Padding members don't affect overall alignment.
if (!D->getIdentifier())
@@ -1208,17 +1367,12 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
// Round up the current record size to the field's alignment boundary.
FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
- if (!IsUnion) {
- while (true) {
- // Check if we can place the field at this offset.
- if (canPlaceFieldAtOffset(D, FieldOffset))
- break;
-
+ if (!IsUnion && EmptySubobjects) {
+ // Check if we can place the field at this offset.
+ while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) {
// We couldn't place the field at the offset. Try again at a new offset.
FieldOffset += FieldAlign;
}
-
- UpdateEmptyClassOffsets(D, FieldOffset);
}
// Place this field at the current location.
@@ -1261,8 +1415,6 @@ void RecordLayoutBuilder::UpdateAlignment(unsigned NewAlignment) {
const CXXMethodDecl *
RecordLayoutBuilder::ComputeKeyFunction(const CXXRecordDecl *RD) {
- assert(RD->isDynamicClass() && "Class does not have any virtual methods!");
-
// If a class isn't polymorphic it doesn't have a key function.
if (!RD->isPolymorphic())
return 0;
diff --git a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
index 80f5695..6dbe8f4 100644
--- a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
@@ -499,14 +499,101 @@ void DeclStmt::DoDestroy(ASTContext &C) {
DG.getDeclGroup().Destroy(C);
}
+IfStmt::IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
+ Stmt *then, SourceLocation EL, Stmt *elsev)
+ : Stmt(IfStmtClass), IfLoc(IL), ElseLoc(EL)
+{
+ setConditionVariable(C, var);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[THEN] = then;
+ SubExprs[ELSE] = elsev;
+}
+
+VarDecl *IfStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void IfStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = 0;
+ return;
+ }
+
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V),
+ V->getSourceRange().getBegin(),
+ V->getSourceRange().getEnd());
+}
+
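[editor's note] From here on, each condition variable is stored as a DeclStmt child rather than a bare VarDecl, which is what lets the child_begin/child_end hunks further down drop their special-case iterators. A rough standalone model of the wrapping (node types invented; real clang allocates the DeclStmt in the ASTContext, while this sketch simply leaks it):

#include <cassert>

struct Stmt { virtual ~Stmt() = default; };
struct VarDecl { const char *Name; };

// Wrapper node: lets a declaration travel through Stmt* child arrays.
struct DeclStmt : Stmt {
  VarDecl *Var;
  explicit DeclStmt(VarDecl *V) : Var(V) {}
};

struct IfStmt : Stmt {
  enum { VAR, COND, THEN, ELSE, END_EXPR };
  Stmt *SubExprs[END_EXPR] = {};

  void setConditionVariable(VarDecl *V) {
    SubExprs[VAR] = V ? new DeclStmt(V) : nullptr;   // leaked in this sketch
  }
  VarDecl *getConditionVariable() const {
    auto *DS = static_cast<DeclStmt*>(SubExprs[VAR]);
    return DS ? DS->Var : nullptr;
  }
  // Uniform child range: the condition variable needs no special iterator.
  Stmt **child_begin() { return &SubExprs[0]; }
  Stmt **child_end()   { return &SubExprs[0] + END_EXPR; }
};

int main() {
  VarDecl V{"x"};
  IfStmt If;
  If.setConditionVariable(&V);
  assert(If.getConditionVariable() == &V);
  assert(If.child_end() - If.child_begin() == IfStmt::END_EXPR);
}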
void IfStmt::DoDestroy(ASTContext &C) {
BranchDestroy(C, this, SubExprs, END_EXPR);
}
+ForStmt::ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
+ Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
+ SourceLocation RP)
+ : Stmt(ForStmtClass), ForLoc(FL), LParenLoc(LP), RParenLoc(RP)
+{
+ SubExprs[INIT] = Init;
+ setConditionVariable(C, condVar);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(Cond);
+ SubExprs[INC] = reinterpret_cast<Stmt*>(Inc);
+ SubExprs[BODY] = Body;
+}
+
+VarDecl *ForStmt::getConditionVariable() const {
+ if (!SubExprs[CONDVAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[CONDVAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void ForStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[CONDVAR] = 0;
+ return;
+ }
+
+ SubExprs[CONDVAR] = new (C) DeclStmt(DeclGroupRef(V),
+ V->getSourceRange().getBegin(),
+ V->getSourceRange().getEnd());
+}
+
void ForStmt::DoDestroy(ASTContext &C) {
BranchDestroy(C, this, SubExprs, END_EXPR);
}
+SwitchStmt::SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond)
+ : Stmt(SwitchStmtClass), FirstCase(0)
+{
+ setConditionVariable(C, Var);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[BODY] = NULL;
+}
+
+VarDecl *SwitchStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void SwitchStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = 0;
+ return;
+ }
+
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V),
+ V->getSourceRange().getBegin(),
+ V->getSourceRange().getEnd());
+}
+
void SwitchStmt::DoDestroy(ASTContext &C) {
// Destroy the SwitchCase statements in this switch. In the normal
// case, this loop will merely decrement the reference counts from
@@ -521,6 +608,35 @@ void SwitchStmt::DoDestroy(ASTContext &C) {
BranchDestroy(C, this, SubExprs, END_EXPR);
}
+WhileStmt::WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
+ SourceLocation WL)
+: Stmt(WhileStmtClass)
+{
+ setConditionVariable(C, Var);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[BODY] = body;
+ WhileLoc = WL;
+}
+
+VarDecl *WhileStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void WhileStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = 0;
+ return;
+ }
+
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V),
+ V->getSourceRange().getBegin(),
+ V->getSourceRange().getEnd());
+}
+
void WhileStmt::DoDestroy(ASTContext &C) {
BranchDestroy(C, this, SubExprs, END_EXPR);
}
@@ -572,26 +688,26 @@ Stmt::child_iterator LabelStmt::child_end() { return &SubStmt+1; }
// IfStmt
Stmt::child_iterator IfStmt::child_begin() {
- return child_iterator(Var, &SubExprs[0]);
+ return &SubExprs[0];
}
Stmt::child_iterator IfStmt::child_end() {
- return child_iterator(0, &SubExprs[0]+END_EXPR);
+ return &SubExprs[0]+END_EXPR;
}
// SwitchStmt
Stmt::child_iterator SwitchStmt::child_begin() {
- return child_iterator(Var, &SubExprs[0]);
+ return &SubExprs[0];
}
Stmt::child_iterator SwitchStmt::child_end() {
- return child_iterator(0, &SubExprs[0]+END_EXPR);
+ return &SubExprs[0]+END_EXPR;
}
// WhileStmt
Stmt::child_iterator WhileStmt::child_begin() {
- return child_iterator(Var, &SubExprs[0]);
+ return &SubExprs[0];
}
Stmt::child_iterator WhileStmt::child_end() {
- return child_iterator(0, &SubExprs[0]+END_EXPR);
+ return &SubExprs[0]+END_EXPR;
}
// DoStmt
@@ -600,10 +716,10 @@ Stmt::child_iterator DoStmt::child_end() { return &SubExprs[0]+END_EXPR; }
// ForStmt
Stmt::child_iterator ForStmt::child_begin() {
- return child_iterator(CondVar, &SubExprs[0]);
+ return &SubExprs[0];
}
Stmt::child_iterator ForStmt::child_end() {
- return child_iterator(0, &SubExprs[0]+END_EXPR);
+ return &SubExprs[0]+END_EXPR;
}
// ObjCForCollectionStmt
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
index 9bef49c..7043c35 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
@@ -682,7 +682,7 @@ void StmtPrinter::VisitUnaryOperator(UnaryOperator *Node) {
bool StmtPrinter::PrintOffsetOfDesignator(Expr *E) {
if (isa<UnaryOperator>(E)) {
// Base case, print the type and comma.
- OS << E->getType().getAsString() << ", ";
+ OS << E->getType().getAsString(Policy) << ", ";
return true;
} else if (ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(E)) {
PrintOffsetOfDesignator(ASE->getLHS());
@@ -706,7 +706,7 @@ void StmtPrinter::VisitUnaryOffsetOf(UnaryOperator *Node) {
void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
OS << "__builtin_offsetof(";
- OS << Node->getTypeSourceInfo()->getType().getAsString() << ", ";
+ OS << Node->getTypeSourceInfo()->getType().getAsString(Policy) << ", ";
bool PrintedSomething = false;
for (unsigned i = 0, n = Node->getNumComponents(); i < n; ++i) {
OffsetOfExpr::OffsetOfNode ON = Node->getComponent(i);
@@ -740,7 +740,7 @@ void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
void StmtPrinter::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *Node) {
OS << (Node->isSizeOf() ? "sizeof" : "__alignof");
if (Node->isArgumentType())
- OS << "(" << Node->getArgumentType().getAsString() << ")";
+ OS << "(" << Node->getArgumentType().getAsString(Policy) << ")";
else {
OS << " ";
PrintExpr(Node->getArgumentExpr());
@@ -802,11 +802,11 @@ void StmtPrinter::VisitExplicitCastExpr(ExplicitCastExpr *) {
assert(0 && "ExplicitCastExpr is an abstract class");
}
void StmtPrinter::VisitCStyleCastExpr(CStyleCastExpr *Node) {
- OS << "(" << Node->getType().getAsString() << ")";
+ OS << "(" << Node->getType().getAsString(Policy) << ")";
PrintExpr(Node->getSubExpr());
}
void StmtPrinter::VisitCompoundLiteralExpr(CompoundLiteralExpr *Node) {
- OS << "(" << Node->getType().getAsString() << ")";
+ OS << "(" << Node->getType().getAsString(Policy) << ")";
PrintExpr(Node->getInitializer());
}
void StmtPrinter::VisitImplicitCastExpr(ImplicitCastExpr *Node) {
@@ -852,8 +852,8 @@ void StmtPrinter::VisitStmtExpr(StmtExpr *E) {
void StmtPrinter::VisitTypesCompatibleExpr(TypesCompatibleExpr *Node) {
OS << "__builtin_types_compatible_p(";
- OS << Node->getArgType1().getAsString() << ",";
- OS << Node->getArgType2().getAsString() << ")";
+ OS << Node->getArgType1().getAsString(Policy) << ",";
+ OS << Node->getArgType2().getAsString(Policy) << ")";
}
void StmtPrinter::VisitChooseExpr(ChooseExpr *Node) {
@@ -947,7 +947,7 @@ void StmtPrinter::VisitVAArgExpr(VAArgExpr *Node) {
OS << "__builtin_va_arg(";
PrintExpr(Node->getSubExpr());
OS << ", ";
- OS << Node->getType().getAsString();
+ OS << Node->getType().getAsString(Policy);
OS << ")";
}
@@ -1002,7 +1002,7 @@ void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) {
void StmtPrinter::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) {
OS << Node->getCastName() << '<';
- OS << Node->getTypeAsWritten().getAsString() << ">(";
+ OS << Node->getTypeAsWritten().getAsString(Policy) << ">(";
PrintExpr(Node->getSubExpr());
OS << ")";
}
@@ -1026,7 +1026,7 @@ void StmtPrinter::VisitCXXConstCastExpr(CXXConstCastExpr *Node) {
void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
OS << "typeid(";
if (Node->isTypeOperand()) {
- OS << Node->getTypeOperand().getAsString();
+ OS << Node->getTypeOperand().getAsString(Policy);
} else {
PrintExpr(Node->getExprOperand());
}
@@ -1059,7 +1059,7 @@ void StmtPrinter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Node) {
}
void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
- OS << Node->getType().getAsString();
+ OS << Node->getType().getAsString(Policy);
OS << "(";
PrintExpr(Node->getSubExpr());
OS << ")";
@@ -1074,7 +1074,7 @@ void StmtPrinter::VisitCXXBindReferenceExpr(CXXBindReferenceExpr *Node) {
}
void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) {
- OS << Node->getType().getAsString();
+ OS << Node->getType().getAsString(Policy);
OS << "(";
for (CXXTemporaryObjectExpr::arg_iterator Arg = Node->arg_begin(),
ArgEnd = Node->arg_end();
@@ -1086,8 +1086,8 @@ void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) {
OS << ")";
}
-void StmtPrinter::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *Node) {
- OS << Node->getType().getAsString() << "()";
+void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) {
+ OS << Node->getType().getAsString(Policy) << "()";
}
void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
@@ -1177,7 +1177,7 @@ void StmtPrinter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
void
StmtPrinter::VisitCXXUnresolvedConstructExpr(
CXXUnresolvedConstructExpr *Node) {
- OS << Node->getTypeAsWritten().getAsString();
+ OS << Node->getTypeAsWritten().getAsString(Policy);
OS << "(";
for (CXXUnresolvedConstructExpr::arg_iterator Arg = Node->arg_begin(),
ArgEnd = Node->arg_end();
@@ -1254,7 +1254,7 @@ static const char *getTypeTraitName(UnaryTypeTrait UTT) {
void StmtPrinter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
OS << getTypeTraitName(E->getTrait()) << "("
- << E->getQueriedType().getAsString() << ")";
+ << E->getQueriedType().getAsString(Policy) << ")";
}
// Obj-C
@@ -1265,7 +1265,7 @@ void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
}
void StmtPrinter::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) {
- OS << "@encode(" << Node->getEncodedType().getAsString() << ')';
+ OS << "@encode(" << Node->getEncodedType().getAsString(Policy) << ')';
}
void StmtPrinter::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
index ac3a9ee..cff86a4 100644
--- a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
@@ -211,9 +211,11 @@ void StmtProfiler::VisitExpr(Expr *S) {
void StmtProfiler::VisitDeclRefExpr(DeclRefExpr *S) {
VisitExpr(S);
- VisitNestedNameSpecifier(S->getQualifier());
+ if (!Canonical)
+ VisitNestedNameSpecifier(S->getQualifier());
VisitDecl(S->getDecl());
- VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+ if (!Canonical)
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
void StmtProfiler::VisitPredefinedExpr(PredefinedExpr *S) {
@@ -307,7 +309,8 @@ void StmtProfiler::VisitCallExpr(CallExpr *S) {
void StmtProfiler::VisitMemberExpr(MemberExpr *S) {
VisitExpr(S);
VisitDecl(S->getMemberDecl());
- VisitNestedNameSpecifier(S->getQualifier());
+ if (!Canonical)
+ VisitNestedNameSpecifier(S->getQualifier());
ID.AddBoolean(S->isArrow());
}
@@ -428,6 +431,8 @@ void StmtProfiler::VisitBlockDeclRefExpr(BlockDeclRefExpr *S) {
VisitDecl(S->getDecl());
ID.AddBoolean(S->isByRef());
ID.AddBoolean(S->isConstQualAdded());
+ if (S->getCopyConstructorExpr())
+ Visit(S->getCopyConstructorExpr());
}
static Stmt::StmtClass DecodeOperatorCall(CXXOperatorCallExpr *S,
@@ -719,7 +724,7 @@ void StmtProfiler::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *S) {
VisitCXXConstructExpr(S);
}
-void StmtProfiler::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *S) {
+void StmtProfiler::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *S) {
VisitExpr(S);
}
diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
index 1c775ef..02e6488 100644
--- a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
@@ -90,6 +90,33 @@ void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
}
}
+bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
+ if (getKind() != Other.getKind()) return false;
+
+ switch (getKind()) {
+ case Null:
+ case Type:
+ case Declaration:
+ case Template:
+ case Expression:
+ return TypeOrValue == Other.TypeOrValue;
+
+ case Integral:
+ return getIntegralType() == Other.getIntegralType() &&
+ *getAsIntegral() == *Other.getAsIntegral();
+
+ case Pack:
+ if (Args.NumArgs != Other.Args.NumArgs) return false;
+ for (unsigned I = 0, E = Args.NumArgs; I != E; ++I)
+ if (!Args.Args[I].structurallyEquals(Other.Args.Args[I]))
+ return false;
+ return true;
+ }
+
+ // Suppress warnings.
+ return false;
+}
+
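[editor's note] structurallyEquals compares kinds first, then dispatches per kind and recurses element-wise through packs. A compact analogue of that shape using std::variant, with invented stand-ins for the argument kinds:

#include <cassert>
#include <cstddef>
#include <variant>
#include <vector>

struct Arg;
using Pack = std::vector<Arg>;
struct Arg { std::variant<int, Pack> V; };   // "Integral" or "Pack"

bool structurallyEquals(const Arg &A, const Arg &B) {
  if (A.V.index() != B.V.index())            // kinds must match first
    return false;
  if (auto *I = std::get_if<int>(&A.V))
    return *I == std::get<int>(B.V);
  const Pack &PA = std::get<Pack>(A.V), &PB = std::get<Pack>(B.V);
  if (PA.size() != PB.size())
    return false;
  for (std::size_t I = 0; I != PA.size(); ++I)   // recurse element-wise
    if (!structurallyEquals(PA[I], PB[I]))
      return false;
  return true;
}

int main() {
  Arg X{Pack{Arg{1}, Arg{2}}}, Y{Pack{Arg{1}, Arg{2}}}, Z{Arg{3}};
  assert(structurallyEquals(X, Y) && !structurallyEquals(X, Z));
}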
//===----------------------------------------------------------------------===//
// TemplateArgumentLoc Implementation
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
index 14722f7..ef7b315 100644
--- a/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
@@ -21,6 +21,17 @@
using namespace clang;
using namespace llvm;
+TemplateName::NameKind TemplateName::getKind() const {
+ if (Storage.is<TemplateDecl *>())
+ return Template;
+ if (Storage.is<OverloadedTemplateStorage *>())
+ return OverloadedTemplate;
+ if (Storage.is<QualifiedTemplateName *>())
+ return QualifiedTemplate;
+ assert(Storage.is<DependentTemplateName *>() && "There's a case unhandled!");
+ return DependentTemplate;
+}
+
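[editor's note] getKind recovers the kind from whichever pointer type the PointerUnion currently holds, asserting on the last alternative instead of testing it. A standalone analogue built on std::variant (clang's PointerUnion packs the discriminator into pointer low bits instead; the types here are empty stand-ins):

#include <cassert>
#include <variant>

struct TemplateDecl {};
struct OverloadedStorage {};
struct QualifiedName {};
struct DependentName {};

enum NameKind { Template, Overloaded, Qualified, Dependent };

struct TemplateName {
  std::variant<TemplateDecl*, OverloadedStorage*,
               QualifiedName*, DependentName*> Storage;

  NameKind getKind() const {
    if (std::holds_alternative<TemplateDecl*>(Storage))
      return Template;
    if (std::holds_alternative<OverloadedStorage*>(Storage))
      return Overloaded;
    if (std::holds_alternative<QualifiedName*>(Storage))
      return Qualified;
    // Only one alternative left; assert rather than test it.
    assert(std::holds_alternative<DependentName*>(Storage));
    return Dependent;
  }
};

int main() {
  QualifiedName QN;
  TemplateName N{&QN};
  assert(N.getKind() == Qualified);
}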
TemplateDecl *TemplateName::getAsTemplateDecl() const {
if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
return Template;
diff --git a/contrib/llvm/tools/clang/lib/AST/Type.cpp b/contrib/llvm/tools/clang/lib/AST/Type.cpp
index 1aab65e..d7929304 100644
--- a/contrib/llvm/tools/clang/lib/AST/Type.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Type.cpp
@@ -439,17 +439,49 @@ bool Type::isIntegerType() const {
return false;
}
-bool Type::isIntegralType() const {
+/// \brief Determine whether this type is an integral type.
+///
+/// This routine determines whether the given type is an integral type per
+/// C++ [basic.fundamental]p7. Although the C standard does not define the
+/// term "integral type", it has a similar term "integer type", and in C++
+/// the two terms are equivalent. However, C's "integer type" includes
+/// enumeration types, while C++'s "integer type" does not. The \c ASTContext
+/// parameter is used to determine whether we should be following the C or
+/// C++ rules when determining whether this type is an integral/integer type.
+///
+/// For cases where C permits "an integer type" and C++ permits "an integral
+/// type", use this routine.
+///
+/// For cases where C permits "an integer type" and C++ permits "an integral
+/// or enumeration type", use \c isIntegralOrEnumerationType() instead.
+///
+/// \param Ctx The context in which this type occurs.
+///
+/// \returns true if the type is considered an integral type, false otherwise.
+bool Type::isIntegralType(ASTContext &Ctx) const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Int128;
- if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
- if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition())
- return true; // Complete enum types are integral.
- // FIXME: In C++, enum types are never integral.
+
+ if (!Ctx.getLangOptions().CPlusPlus)
+ if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
+ if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition())
+ return true; // Complete enum types are integral in C.
+
return false;
}
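[editor's note] The rewrite makes enum integrality language-dependent: a complete enum counts as an integer type in C but is never an integral type in C++ ([basic.fundamental]p7), hence the getLangOptions().CPlusPlus guard. The same C++-side distinction is observable with standard type traits:

#include <type_traits>

enum Color { Red, Green };

// In C++, an enumeration is its own category: not integral, though it
// converts to an integer type. C, by contrast, folds complete enums into
// "integer types" -- the distinction isIntegralType(ASTContext&) models.
static_assert(!std::is_integral<Color>::value, "enums are not integral in C++");
static_assert(std::is_enum<Color>::value, "but they are enumerations");
static_assert(std::is_integral<std::underlying_type<Color>::type>::value,
              "the underlying type is integral");

int main() {}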
+bool Type::isIntegralOrEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+
+ if (isa<EnumType>(CanonicalType))
+ return true;
+
+ return false;
+}
+
bool Type::isEnumeralType() const {
if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
return TT->getDecl()->isEnum();
@@ -531,16 +563,19 @@ bool Type::isFloatingType() const {
BT->getKind() <= BuiltinType::LongDouble;
if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
return CT->getElementType()->isFloatingType();
+ return false;
+}
+
+bool Type::hasFloatingRepresentation() const {
if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isFloatingType();
- return false;
+ else
+ return isFloatingType();
}
bool Type::isRealFloatingType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->isFloatingPoint();
- if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
- return VT->getElementType()->isRealFloatingType();
return false;
}
@@ -550,8 +585,6 @@ bool Type::isRealType() const {
BT->getKind() <= BuiltinType::LongDouble;
if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
return TT->getDecl()->isEnum() && TT->getDecl()->isDefinition();
- if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
- return VT->getElementType()->isRealType();
return false;
}
@@ -563,7 +596,7 @@ bool Type::isArithmeticType() const {
// GCC allows forward declaration of enum types (forbidden by C99 6.7.2.3p2).
// If a body isn't seen by the time we get here, return false.
return ET->getDecl()->isDefinition();
- return isa<ComplexType>(CanonicalType) || isa<VectorType>(CanonicalType);
+ return isa<ComplexType>(CanonicalType);
}
bool Type::isScalarType() const {
@@ -768,6 +801,7 @@ bool Type::isSpecifierType() const {
case TemplateSpecialization:
case Elaborated:
case DependentName:
+ case DependentTemplateSpecialization:
case ObjCInterface:
case ObjCObject:
case ObjCObjectPointer: // FIXME: object pointers aren't really specifiers
@@ -856,12 +890,56 @@ TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
}
}
+ElaboratedType::~ElaboratedType() {}
+DependentNameType::~DependentNameType() {}
+DependentTemplateSpecializationType::~DependentTemplateSpecializationType() {}
+
+void DependentTemplateSpecializationType::Destroy(ASTContext &C) {
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ // FIXME: Not all expressions get cloned, so we can't yet perform
+ // this destruction.
+ // if (Expr *E = getArg(Arg).getAsExpr())
+ // E->Destroy(C);
+ }
+}
+
+DependentTemplateSpecializationType::DependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS, const IdentifierInfo *Name,
+ unsigned NumArgs, const TemplateArgument *Args,
+ QualType Canon)
+ : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true),
+ NNS(NNS), Name(Name), NumArgs(NumArgs) {
+ assert(NNS && NNS->isDependent() &&
+ "DependentTemplateSpecializatonType requires dependent qualifier");
+ for (unsigned I = 0; I != NumArgs; ++I)
+ new (&getArgBuffer()[I]) TemplateArgument(Args[I]);
+}
+
+void
+DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
+ ASTContext &Context,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *Qualifier,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args) {
+ ID.AddInteger(Keyword);
+ ID.AddPointer(Qualifier);
+ ID.AddPointer(Name);
+ for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
+ Args[Idx].Profile(ID, Context);
+}
+
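[editor's note] Profile feeds every field that distinguishes one dependent specialization from another into the FoldingSetNodeID, so structurally identical types unique to a single node. A standalone analogue with a hand-rolled hash combiner in place of FoldingSetNodeID (all names invented):

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Minimal stand-in for FoldingSetNodeID: accumulate fields into one hash.
struct NodeID {
  std::size_t H = 0;
  template <class T> void add(const T &V) {
    H ^= std::hash<T>()(V) + 0x9e3779b9 + (H << 6) + (H >> 2);
  }
};

struct DepTemplateSpec {
  int Keyword;
  std::string Name;
  std::vector<int> Args;

  void profile(NodeID &ID) const {
    ID.add(Keyword);
    ID.add(Name);
    for (int A : Args)      // every argument participates in identity
      ID.add(A);
  }
};

int main() {
  NodeID A, B;
  DepTemplateSpec X{1, "T::template apply", {1, 2}};
  DepTemplateSpec Y{1, "T::template apply", {1, 2}};
  X.profile(A);
  Y.profile(B);
  std::cout << (A.H == B.H) << "\n";   // 1: same fields, same identity hash
}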
bool Type::isElaboratedTypeSpecifier() const {
ElaboratedTypeKeyword Keyword;
if (const ElaboratedType *Elab = dyn_cast<ElaboratedType>(this))
Keyword = Elab->getKeyword();
else if (const DependentNameType *DepName = dyn_cast<DependentNameType>(this))
Keyword = DepName->getKeyword();
+ else if (const DependentTemplateSpecializationType *DepTST =
+ dyn_cast<DependentTemplateSpecializationType>(this))
+ Keyword = DepTST->getKeyword();
else
return false;
@@ -914,6 +992,22 @@ const char *BuiltinType::getName(const LangOptions &LO) const {
void FunctionType::ANCHOR() {} // Key function for FunctionType.
+QualType QualType::getNonLValueExprType(ASTContext &Context) const {
+ if (const ReferenceType *RefType = getTypePtr()->getAs<ReferenceType>())
+ return RefType->getPointeeType();
+
+ // C++0x [basic.lval]:
+ // Class prvalues can have cv-qualified types; non-class prvalues always
+ // have cv-unqualified types.
+ //
+ // See also C99 6.3.2.1p2.
+ if (!Context.getLangOptions().CPlusPlus ||
+ (!getTypePtr()->isDependentType() && !getTypePtr()->isRecordType()))
+ return getUnqualifiedType();
+
+ return *this;
+}
+
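[editor's note] getNonLValueExprType encodes the C++0x [basic.lval] rule cited in the comment: used as an rvalue, a class type keeps its cv-qualifiers while a scalar type drops them. The scalar half of the rule can be checked directly with decltype (makeCS is a hypothetical declaration, used only in an unevaluated context):

#include <type_traits>

struct S { int i; };

const int ci = 0;

// A scalar prvalue is cv-unqualified: +ci has type int, not const int.
static_assert(std::is_same<decltype(+ci), int>::value, "scalar loses const");

// A class prvalue keeps its qualification: a const S stays const S.
const S makeCS();
static_assert(std::is_same<decltype(makeCS()), const S>::value,
              "class prvalue keeps const");

int main() {}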
llvm::StringRef FunctionType::getNameForCallConv(CallingConv CC) {
switch (CC) {
case CC_Default: llvm_unreachable("no name for default cc");
@@ -1085,14 +1179,12 @@ anyDependentTemplateArguments(const TemplateArgument *Args, unsigned N) {
}
TemplateSpecializationType::
-TemplateSpecializationType(ASTContext &Context, TemplateName T,
- bool IsCurrentInstantiation,
+TemplateSpecializationType(TemplateName T,
const TemplateArgument *Args,
unsigned NumArgs, QualType Canon)
: Type(TemplateSpecialization,
Canon.isNull()? QualType(this, 0) : Canon,
T.isDependent() || anyDependentTemplateArguments(Args, NumArgs)),
- ContextAndCurrentInstantiation(&Context, IsCurrentInstantiation),
Template(T), NumArgs(NumArgs) {
assert((!Canon.isNull() ||
T.isDependent() || anyDependentTemplateArguments(Args, NumArgs)) &&
@@ -1113,25 +1205,12 @@ void TemplateSpecializationType::Destroy(ASTContext& C) {
}
}
-TemplateSpecializationType::iterator
-TemplateSpecializationType::end() const {
- return begin() + getNumArgs();
-}
-
-const TemplateArgument &
-TemplateSpecializationType::getArg(unsigned Idx) const {
- assert(Idx < getNumArgs() && "Template argument out of range");
- return getArgs()[Idx];
-}
-
void
TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
TemplateName T,
- bool IsCurrentInstantiation,
const TemplateArgument *Args,
unsigned NumArgs,
ASTContext &Context) {
- ID.AddBoolean(IsCurrentInstantiation);
T.Profile(ID);
for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
Args[Idx].Profile(ID, Context);
diff --git a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
index 35a7e09..a08ee1a 100644
--- a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
@@ -227,12 +227,13 @@ void TypePrinter::PrintDependentSizedExtVector(
}
void TypePrinter::PrintVector(const VectorType *T, std::string &S) {
- if (T->isAltiVec()) {
- if (T->isPixel())
+ if (T->getAltiVecSpecific() != VectorType::NotAltiVec) {
+ if (T->getAltiVecSpecific() == VectorType::Pixel)
S = "__vector __pixel " + S;
else {
Print(T->getElementType(), S);
- S = "__vector " + S;
+ S = ((T->getAltiVecSpecific() == VectorType::Bool)
+ ? "__vector __bool " : "__vector ") + S;
}
} else {
// FIXME: We prefer to print the size directly here, but have no way
@@ -452,11 +453,13 @@ void TypePrinter::PrintTag(TagDecl *D, std::string &InnerString) {
if (!HasKindDecoration)
OS << " " << D->getKindName();
- PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
- D->getLocation());
- OS << " at " << PLoc.getFilename()
- << ':' << PLoc.getLine()
- << ':' << PLoc.getColumn();
+ if (D->getLocation().isValid()) {
+ PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
+ D->getLocation());
+ OS << " at " << PLoc.getFilename()
+ << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ }
}
OS << '>';
@@ -578,15 +581,31 @@ void TypePrinter::PrintDependentName(const DependentNameType *T, std::string &S)
T->getQualifier()->print(OS, Policy);
- if (const IdentifierInfo *Ident = T->getIdentifier())
- OS << Ident->getName();
- else if (const TemplateSpecializationType *Spec = T->getTemplateId()) {
- Spec->getTemplateName().print(OS, Policy, true);
- OS << TemplateSpecializationType::PrintTemplateArgumentList(
- Spec->getArgs(),
- Spec->getNumArgs(),
+ OS << T->getIdentifier()->getName();
+ }
+
+ if (S.empty())
+ S.swap(MyString);
+ else
+ S = MyString + ' ' + S;
+}
+
+void TypePrinter::PrintDependentTemplateSpecialization(
+ const DependentTemplateSpecializationType *T, std::string &S) {
+ std::string MyString;
+ {
+ llvm::raw_string_ostream OS(MyString);
+
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+
+ T->getQualifier()->print(OS, Policy);
+ OS << T->getIdentifier()->getName();
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ T->getArgs(),
+ T->getNumArgs(),
Policy);
- }
}
if (S.empty())
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
index 6f2cb41..08543aa 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
@@ -171,8 +171,8 @@ private:
void autoCreateBlock() { if (!Block) Block = createBlock(); }
CFGBlock *createBlock(bool add_successor = true);
bool FinishBlock(CFGBlock* B);
- CFGBlock *addStmt(Stmt *S, AddStmtChoice asc = AddStmtChoice::AlwaysAdd) {
- return Visit(S, asc);
+ CFGBlock *addStmt(Stmt *S) {
+ return Visit(S, AddStmtChoice::AlwaysAdd);
}
void AppendStmt(CFGBlock *B, Stmt *S,
@@ -538,6 +538,15 @@ CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
addStmt(B->getRHS());
return addStmt(B->getLHS());
}
+ else if (B->isAssignmentOp()) {
+ if (asc.alwaysAdd()) {
+ autoCreateBlock();
+ AppendStmt(Block, B, asc);
+ }
+
+ Visit(B->getRHS());
+ return Visit(B->getLHS(), AddStmtChoice::AsLValueNotAlwaysAdd);
+ }
return VisitStmt(B, asc);
}
@@ -612,8 +621,12 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
if (!CanThrow(C->getCallee()))
AddEHEdge = false;
- if (!NoReturn && !AddEHEdge)
- return VisitStmt(C, AddStmtChoice::AlwaysAdd);
+ if (!NoReturn && !AddEHEdge) {
+ if (asc.asLValue())
+ return VisitStmt(C, AddStmtChoice::AlwaysAddAsLValue);
+ else
+ return VisitStmt(C, AddStmtChoice::AlwaysAdd);
+ }
if (Block) {
Succ = Block;
@@ -651,13 +664,13 @@ CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C,
Succ = ConfluenceBlock;
Block = NULL;
- CFGBlock* LHSBlock = addStmt(C->getLHS(), asc);
+ CFGBlock* LHSBlock = Visit(C->getLHS(), asc);
if (!FinishBlock(LHSBlock))
return 0;
Succ = ConfluenceBlock;
Block = NULL;
- CFGBlock* RHSBlock = addStmt(C->getRHS(), asc);
+ CFGBlock* RHSBlock = Visit(C->getRHS(), asc);
if (!FinishBlock(RHSBlock))
return 0;
@@ -709,7 +722,7 @@ CFGBlock *CFGBuilder::VisitConditionalOperator(ConditionalOperator *C,
Block = NULL;
CFGBlock* LHSBlock = NULL;
if (C->getLHS()) {
- LHSBlock = addStmt(C->getLHS(), asc);
+ LHSBlock = Visit(C->getLHS(), asc);
if (!FinishBlock(LHSBlock))
return 0;
Block = NULL;
@@ -717,7 +730,7 @@ CFGBlock *CFGBuilder::VisitConditionalOperator(ConditionalOperator *C,
// Create the block for the RHS expression.
Succ = ConfluenceBlock;
- CFGBlock* RHSBlock = addStmt(C->getRHS(), asc);
+ CFGBlock* RHSBlock = Visit(C->getRHS(), asc);
if (!FinishBlock(RHSBlock))
return 0;
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Analysis/CMakeLists.txt
index a8e3708..f2916c2 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/lib/Analysis/CMakeLists.txt
@@ -9,4 +9,5 @@ add_clang_library(clangAnalysis
UninitializedValues.cpp
)
-add_dependencies(clangAnalysis ClangDiagnosticAnalysis ClangStmtNodes)
+add_dependencies(clangAnalysis ClangAttrClasses ClangAttrList
+ ClangDiagnosticAnalysis ClangDeclNodes ClangStmtNodes)
diff --git a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
index 01a36a1..4efe25e 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
@@ -256,17 +256,21 @@ void TransferFuncs::VisitAssign(BinaryOperator* B) {
// Assigning to a variable?
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(LHS->IgnoreParens())) {
+ // Assignments to references don't kill the ref's address.
+ if (DR->getDecl()->getType()->isReferenceType()) {
+ VisitDeclRefExpr(DR);
+ } else {
+ // Update liveness information.
+ unsigned bit = AD.getIdx(DR->getDecl());
+ LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit);
- // Update liveness inforamtion.
- unsigned bit = AD.getIdx(DR->getDecl());
- LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit);
-
- if (AD.Observer) { AD.Observer->ObserverKill(DR); }
+ if (AD.Observer) { AD.Observer->ObserverKill(DR); }
- // Handle things like +=, etc., which also generate "uses"
- // of a variable. Do this just by visiting the subexpression.
- if (B->getOpcode() != BinaryOperator::Assign)
- VisitDeclRefExpr(DR);
+ // Handle things like +=, etc., which also generate "uses"
+ // of a variable. Do this just by visiting the subexpression.
+ if (B->getOpcode() != BinaryOperator::Assign)
+ VisitDeclRefExpr(DR);
+ }
}
else // Not assigning to a variable. Process LHS as usual.
Visit(LHS);
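[editor's note] The net effect of this hunk: plain assignment kills the variable's liveness, compound assignment both kills and re-uses it, and assignment through a reference kills nothing because the write goes through the referent. A toy backward-liveness transfer showing the first two cases (the bitset and indices are illustrative, not clang's AnalysisDataTy):

#include <bitset>
#include <cassert>

// One bit per tracked variable; backward dataflow over statements.
using LiveSet = std::bitset<8>;

// x = e: x is overwritten, so it is dead *before* this statement.
void transferAssign(LiveSet &Live, unsigned X) { Live.reset(X); }

// x += e: x is read and then overwritten -- kill, then add the use back.
void transferCompound(LiveSet &Live, unsigned X) {
  Live.reset(X);   // killed by the write...
  Live.set(X);     // ...but the read makes it live again
}

int main() {
  LiveSet Live;
  Live.set(0);                 // x live after the statement
  transferAssign(Live, 0);
  assert(!Live.test(0));       // plain '=' kills x

  transferCompound(Live, 0);
  assert(Live.test(0));        // '+=' keeps x live (it is read)
}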
diff --git a/contrib/llvm/tools/clang/lib/Analysis/Makefile b/contrib/llvm/tools/clang/lib/Analysis/Makefile
index 9b47380..03bf7a6 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/Makefile
+++ b/contrib/llvm/tools/clang/lib/Analysis/Makefile
@@ -11,11 +11,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangAnalysis
BUILD_ARCHIVE = 1
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
index 0b111e9..631fde6 100644
--- a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
+++ b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
@@ -14,12 +14,16 @@
#include "clang/Analysis/Analyses/PrintfFormatString.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "llvm/Support/raw_ostream.h"
using clang::analyze_printf::ArgTypeResult;
using clang::analyze_printf::FormatSpecifier;
using clang::analyze_printf::FormatStringHandler;
using clang::analyze_printf::OptionalAmount;
using clang::analyze_printf::PositionContext;
+using clang::analyze_printf::ConversionSpecifier;
+using clang::analyze_printf::LengthModifier;
using namespace clang;
@@ -35,7 +39,6 @@ public:
const FormatSpecifier &fs)
: FS(fs), Start(start), Stop(false) {}
-
const char *getStart() const { return Start; }
bool shouldStop() const { return Stop; }
bool hasValue() const { return Start != 0; }
@@ -80,7 +83,8 @@ static OptionalAmount ParseAmount(const char *&Beg, const char *E) {
}
if (hasDigits)
- return OptionalAmount(OptionalAmount::Constant, accumulator, Beg);
+ return OptionalAmount(OptionalAmount::Constant, accumulator, Beg, I - Beg,
+ false);
break;
}
@@ -92,7 +96,7 @@ static OptionalAmount ParseNonPositionAmount(const char *&Beg, const char *E,
unsigned &argIndex) {
if (*Beg == '*') {
++Beg;
- return OptionalAmount(OptionalAmount::Arg, argIndex++, Beg);
+ return OptionalAmount(OptionalAmount::Arg, argIndex++, Beg, 0, false);
}
return ParseAmount(Beg, E);
@@ -120,6 +124,8 @@ static OptionalAmount ParsePositionAmount(FormatStringHandler &H,
assert(Amt.getHowSpecified() == OptionalAmount::Constant);
if (*I == '$') {
+ // Handle positional arguments
+
// Special case: '*0$', since this is an easy mistake.
if (Amt.getConstantAmount() == 0) {
H.HandleZeroPosition(Beg, I - Beg + 1);
@@ -130,7 +136,7 @@ static OptionalAmount ParsePositionAmount(FormatStringHandler &H,
Beg = ++I;
return OptionalAmount(OptionalAmount::Arg, Amt.getConstantAmount() - 1,
- Tmp);
+ Tmp, 0, true);
}
H.HandleInvalidPosition(Beg, I - Beg, p);
@@ -173,7 +179,6 @@ static bool ParseFieldWidth(FormatStringHandler &H, FormatSpecifier &FS,
return false;
}
-
static bool ParseArgPosition(FormatStringHandler &H,
FormatSpecifier &FS, const char *Start,
const char *&Beg, const char *E) {
@@ -258,11 +263,11 @@ static FormatSpecifierResult ParseFormatSpecifier(FormatStringHandler &H,
for ( ; I != E; ++I) {
switch (*I) {
default: hasMore = false; break;
- case '-': FS.setIsLeftJustified(); break;
- case '+': FS.setHasPlusPrefix(); break;
- case ' ': FS.setHasSpacePrefix(); break;
- case '#': FS.setHasAlternativeForm(); break;
- case '0': FS.setHasLeadingZeros(); break;
+ case '-': FS.setIsLeftJustified(I); break;
+ case '+': FS.setHasPlusPrefix(I); break;
+ case ' ': FS.setHasSpacePrefix(I); break;
+ case '#': FS.setHasAlternativeForm(I); break;
+ case '0': FS.setHasLeadingZeros(I); break;
}
if (!hasMore)
break;
@@ -305,24 +310,28 @@ static FormatSpecifierResult ParseFormatSpecifier(FormatStringHandler &H,
}
// Look for the length modifier.
- LengthModifier lm = None;
+ LengthModifier::Kind lmKind = LengthModifier::None;
+ const char *lmPosition = I;
switch (*I) {
default:
break;
case 'h':
++I;
- lm = (I != E && *I == 'h') ? ++I, AsChar : AsShort;
+ lmKind = (I != E && *I == 'h') ?
+ ++I, LengthModifier::AsChar : LengthModifier::AsShort;
break;
case 'l':
++I;
- lm = (I != E && *I == 'l') ? ++I, AsLongLong : AsLong;
+ lmKind = (I != E && *I == 'l') ?
+ ++I, LengthModifier::AsLongLong : LengthModifier::AsLong;
break;
- case 'j': lm = AsIntMax; ++I; break;
- case 'z': lm = AsSizeT; ++I; break;
- case 't': lm = AsPtrDiff; ++I; break;
- case 'L': lm = AsLongDouble; ++I; break;
- case 'q': lm = AsLongLong; ++I; break;
+ case 'j': lmKind = LengthModifier::AsIntMax; ++I; break;
+ case 'z': lmKind = LengthModifier::AsSizeT; ++I; break;
+ case 't': lmKind = LengthModifier::AsPtrDiff; ++I; break;
+ case 'L': lmKind = LengthModifier::AsLongDouble; ++I; break;
+ case 'q': lmKind = LengthModifier::AsLongLong; ++I; break;
}
+ LengthModifier lm(lmPosition, lmKind);
FS.setLengthModifier(lm);
if (I == E) {
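[editor's note] The length-modifier parse above peeks one character ahead to fold 'hh' and 'll' into single modifiers, and now also records where the modifier started so later diagnostics can point at it. A self-contained sketch of that one-character lookahead, simplified and free of clang types:

#include <cassert>
#include <string>

enum class Kind { None, AsChar, AsShort, AsLong, AsLongLong };

struct Modifier {
  Kind K;
  size_t Pos;            // where the modifier began, for diagnostics
};

Modifier parseLengthModifier(const std::string &S, size_t &I) {
  size_t Start = I;
  Kind K = Kind::None;
  if (I < S.size()) {
    switch (S[I]) {
    case 'h':            // 'h' or, with lookahead, 'hh'
      ++I;
      K = (I < S.size() && S[I] == 'h') ? (++I, Kind::AsChar) : Kind::AsShort;
      break;
    case 'l':            // 'l' or 'll'
      ++I;
      K = (I < S.size() && S[I] == 'l') ? (++I, Kind::AsLongLong) : Kind::AsLong;
      break;
    }
  }
  return {K, Start};
}

int main() {
  std::string Fmt = "hhl";
  size_t I = 0;
  Modifier M1 = parseLengthModifier(Fmt, I);   // consumes "hh"
  assert(M1.K == Kind::AsChar && M1.Pos == 0 && I == 2);
  Modifier M2 = parseLengthModifier(Fmt, I);   // consumes "l"
  assert(M2.K == Kind::AsLong && M2.Pos == 2 && I == 3);
}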
@@ -423,95 +432,111 @@ FormatStringHandler::~FormatStringHandler() {}
//===----------------------------------------------------------------------===//
bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
- assert(isValid());
-
- if (K == UnknownTy)
- return true;
-
- if (K == SpecificTy) {
- argTy = C.getCanonicalType(argTy).getUnqualifiedType();
-
- if (T == argTy)
+ switch (K) {
+ case InvalidTy:
+ assert(false && "ArgTypeResult must be valid");
return true;
- if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
- switch (BT->getKind()) {
- default:
- break;
- case BuiltinType::Char_S:
- case BuiltinType::SChar:
- return T == C.UnsignedCharTy;
- case BuiltinType::Char_U:
- case BuiltinType::UChar:
- return T == C.SignedCharTy;
- case BuiltinType::Short:
- return T == C.UnsignedShortTy;
- case BuiltinType::UShort:
- return T == C.ShortTy;
- case BuiltinType::Int:
- return T == C.UnsignedIntTy;
- case BuiltinType::UInt:
- return T == C.IntTy;
- case BuiltinType::Long:
- return T == C.UnsignedLongTy;
- case BuiltinType::ULong:
- return T == C.LongTy;
- case BuiltinType::LongLong:
- return T == C.UnsignedLongLongTy;
- case BuiltinType::ULongLong:
- return T == C.LongLongTy;
- }
-
- return false;
- }
+ case UnknownTy:
+ return true;
- if (K == CStrTy) {
- const PointerType *PT = argTy->getAs<PointerType>();
- if (!PT)
+ case SpecificTy: {
+ argTy = C.getCanonicalType(argTy).getUnqualifiedType();
+ if (T == argTy)
+ return true;
+ if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ default:
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return T == C.UnsignedCharTy;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return T == C.SignedCharTy;
+ case BuiltinType::Short:
+ return T == C.UnsignedShortTy;
+ case BuiltinType::UShort:
+ return T == C.ShortTy;
+ case BuiltinType::Int:
+ return T == C.UnsignedIntTy;
+ case BuiltinType::UInt:
+ return T == C.IntTy;
+ case BuiltinType::Long:
+ return T == C.UnsignedLongTy;
+ case BuiltinType::ULong:
+ return T == C.LongTy;
+ case BuiltinType::LongLong:
+ return T == C.UnsignedLongLongTy;
+ case BuiltinType::ULongLong:
+ return T == C.LongLongTy;
+ }
return false;
+ }
- QualType pointeeTy = PT->getPointeeType();
-
- if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Void:
- case BuiltinType::Char_U:
- case BuiltinType::UChar:
- case BuiltinType::Char_S:
- case BuiltinType::SChar:
- return true;
- default:
- break;
- }
-
- return false;
- }
+ case CStrTy: {
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return false;
+ QualType pointeeTy = PT->getPointeeType();
+ if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return true;
+ default:
+ break;
+ }
- if (K == WCStrTy) {
- const PointerType *PT = argTy->getAs<PointerType>();
- if (!PT)
return false;
+ }
+
+ case WCStrTy: {
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return false;
+ QualType pointeeTy =
+ C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType();
+ return pointeeTy == C.getWCharType();
+ }
- QualType pointeeTy =
- C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType();
+ case CPointerTy:
+ return argTy->getAs<PointerType>() != NULL ||
+ argTy->getAs<ObjCObjectPointerType>() != NULL;
- return pointeeTy == C.getWCharType();
+ case ObjCPointerTy:
+ return argTy->getAs<ObjCObjectPointerType>() != NULL;
}
+ // FIXME: Should be unreachable, but Clang is currently emitting
+ // a warning.
return false;
}
QualType ArgTypeResult::getRepresentativeType(ASTContext &C) const {
- assert(isValid());
- if (K == SpecificTy)
- return T;
- if (K == CStrTy)
- return C.getPointerType(C.CharTy);
- if (K == WCStrTy)
- return C.getPointerType(C.getWCharType());
- if (K == ObjCPointerTy)
- return C.ObjCBuiltinIdTy;
+ switch (K) {
+ case InvalidTy:
+ assert(false && "No representative type for Invalid ArgTypeResult");
+ // Fall-through.
+ case UnknownTy:
+ return QualType();
+ case SpecificTy:
+ return T;
+ case CStrTy:
+ return C.getPointerType(C.CharTy);
+ case WCStrTy:
+ return C.getPointerType(C.getWCharType());
+ case ObjCPointerTy:
+ return C.ObjCBuiltinIdTy;
+ case CPointerTy:
+ return C.VoidPtrTy;
+ }
+ // FIXME: Should be unreachable, but Clang is currently emitting
+ // a warning.
return QualType();
}
@@ -524,6 +549,99 @@ ArgTypeResult OptionalAmount::getArgType(ASTContext &Ctx) const {
}
//===----------------------------------------------------------------------===//
+// Methods on ConversionSpecifier.
+//===----------------------------------------------------------------------===//
+const char *ConversionSpecifier::toString() const {
+ switch (kind) {
+ case bArg: return "b";
+ case dArg: return "d";
+ case iArg: return "i";
+ case oArg: return "o";
+ case uArg: return "u";
+ case xArg: return "x";
+ case XArg: return "X";
+ case fArg: return "f";
+ case FArg: return "F";
+ case eArg: return "e";
+ case EArg: return "E";
+ case gArg: return "g";
+ case GArg: return "G";
+ case aArg: return "a";
+ case AArg: return "A";
+ case IntAsCharArg: return "c";
+ case CStrArg: return "s";
+ case VoidPtrArg: return "p";
+ case OutIntPtrArg: return "n";
+ case PercentArg: return "%";
+ case InvalidSpecifier: return NULL;
+
+ // MacOS X unicode extensions.
+ case CArg: return "C";
+ case UnicodeStrArg: return "S";
+
+ // Objective-C specific specifiers.
+ case ObjCObjArg: return "@";
+
+ // GlibC specific specifiers.
+ case PrintErrno: return "m";
+ }
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods on LengthModifier.
+//===----------------------------------------------------------------------===//
+
+const char *LengthModifier::toString() const {
+ switch (kind) {
+ case AsChar:
+ return "hh";
+ case AsShort:
+ return "h";
+ case AsLong: // or AsWideChar
+ return "l";
+ case AsLongLong:
+ return "ll";
+ case AsIntMax:
+ return "j";
+ case AsSizeT:
+ return "z";
+ case AsPtrDiff:
+ return "t";
+ case AsLongDouble:
+ return "L";
+ case None:
+ return "";
+ }
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods on OptionalAmount.
+//===----------------------------------------------------------------------===//
+
+void OptionalAmount::toString(llvm::raw_ostream &os) const {
+ switch (hs) {
+ case Invalid:
+ case NotSpecified:
+ return;
+ case Arg:
+ if (UsesDotPrefix)
+ os << ".";
+ if (usesPositionalArg())
+ os << "*" << getPositionalArgIndex() << "$";
+ else
+ os << "*";
+ break;
+ case Constant:
+ if (UsesDotPrefix)
+ os << ".";
+ os << amt;
+ break;
+ }
+}
+
+//===----------------------------------------------------------------------===//
// Methods on FormatSpecifier.
//===----------------------------------------------------------------------===//
@@ -532,57 +650,60 @@ ArgTypeResult FormatSpecifier::getArgType(ASTContext &Ctx) const {
return ArgTypeResult::Invalid();
if (CS.isIntArg())
- switch (LM) {
- case AsLongDouble:
+ switch (LM.getKind()) {
+ case LengthModifier::AsLongDouble:
return ArgTypeResult::Invalid();
- case None: return Ctx.IntTy;
- case AsChar: return Ctx.SignedCharTy;
- case AsShort: return Ctx.ShortTy;
- case AsLong: return Ctx.LongTy;
- case AsLongLong: return Ctx.LongLongTy;
- case AsIntMax:
+ case LengthModifier::None: return Ctx.IntTy;
+ case LengthModifier::AsChar: return Ctx.SignedCharTy;
+ case LengthModifier::AsShort: return Ctx.ShortTy;
+ case LengthModifier::AsLong: return Ctx.LongTy;
+ case LengthModifier::AsLongLong: return Ctx.LongLongTy;
+ case LengthModifier::AsIntMax:
// FIXME: Return unknown for now.
return ArgTypeResult();
- case AsSizeT: return Ctx.getSizeType();
- case AsPtrDiff: return Ctx.getPointerDiffType();
+ case LengthModifier::AsSizeT: return Ctx.getSizeType();
+ case LengthModifier::AsPtrDiff: return Ctx.getPointerDiffType();
}
if (CS.isUIntArg())
- switch (LM) {
- case AsLongDouble:
+ switch (LM.getKind()) {
+ case LengthModifier::AsLongDouble:
return ArgTypeResult::Invalid();
- case None: return Ctx.UnsignedIntTy;
- case AsChar: return Ctx.UnsignedCharTy;
- case AsShort: return Ctx.UnsignedShortTy;
- case AsLong: return Ctx.UnsignedLongTy;
- case AsLongLong: return Ctx.UnsignedLongLongTy;
- case AsIntMax:
+ case LengthModifier::None: return Ctx.UnsignedIntTy;
+ case LengthModifier::AsChar: return Ctx.UnsignedCharTy;
+ case LengthModifier::AsShort: return Ctx.UnsignedShortTy;
+ case LengthModifier::AsLong: return Ctx.UnsignedLongTy;
+ case LengthModifier::AsLongLong: return Ctx.UnsignedLongLongTy;
+ case LengthModifier::AsIntMax:
// FIXME: Return unknown for now.
return ArgTypeResult();
- case AsSizeT:
+ case LengthModifier::AsSizeT:
// FIXME: How to get the corresponding unsigned
// version of size_t?
return ArgTypeResult();
- case AsPtrDiff:
+ case LengthModifier::AsPtrDiff:
// FIXME: How to get the corresponding unsigned
// version of ptrdiff_t?
return ArgTypeResult();
}
if (CS.isDoubleArg()) {
- if (LM == AsLongDouble)
+ if (LM.getKind() == LengthModifier::AsLongDouble)
return Ctx.LongDoubleTy;
return Ctx.DoubleTy;
}
switch (CS.getKind()) {
case ConversionSpecifier::CStrArg:
- return ArgTypeResult(LM == AsWideChar ? ArgTypeResult::WCStrTy : ArgTypeResult::CStrTy);
+ return ArgTypeResult(LM.getKind() == LengthModifier::AsWideChar ?
+ ArgTypeResult::WCStrTy : ArgTypeResult::CStrTy);
case ConversionSpecifier::UnicodeStrArg:
// FIXME: This appears to be Mac OS X specific.
return ArgTypeResult::WCStrTy;
case ConversionSpecifier::CArg:
return Ctx.WCharTy;
+ case ConversionSpecifier::VoidPtrArg:
+ return ArgTypeResult::CPointerTy;
default:
break;
}
@@ -591,3 +712,329 @@ ArgTypeResult FormatSpecifier::getArgType(ASTContext &Ctx) const {
return ArgTypeResult();
}
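
To summarize the dispatch above, a hedged, non-exhaustive mapping from specifiers to the result of getArgType():

    // "%d"  -> Ctx.IntTy            "%hhd" -> Ctx.SignedCharTy
    // "%lu" -> Ctx.UnsignedLongTy   "%zd"  -> Ctx.getSizeType()
    // "%f"  -> Ctx.DoubleTy         "%Lf"  -> Ctx.LongDoubleTy
    // "%s"  -> CStrTy (char *)      "%ls"  -> WCStrTy (wchar_t *)
    // "%p"  -> CPointerTy           "%jd"  -> unknown for now (see FIXME)
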
+bool FormatSpecifier::fixType(QualType QT) {
+ // Handle strings first (char *, wchar_t *)
+ if (QT->isPointerType() && (QT->getPointeeType()->isAnyCharacterType())) {
+ CS.setKind(ConversionSpecifier::CStrArg);
+
+ // Disable irrelevant flags
+ HasAlternativeForm = 0;
+ HasLeadingZeroes = 0;
+
+ // Set the long length modifier for wide characters
+ if (QT->getPointeeType()->isWideCharType())
+ LM.setKind(LengthModifier::AsWideChar);
+
+ return true;
+ }
+
+ // We can only work with builtin types.
+ if (!QT->isBuiltinType())
+ return false;
+
+ // Everything else should be a base type
+ const BuiltinType *BT = QT->getAs<BuiltinType>();
+
+ // Set length modifier
+ switch (BT->getKind()) {
+ default:
+ // The rest of the conversions are either optional or for non-builtin types
+ LM.setKind(LengthModifier::None);
+ break;
+
+ case BuiltinType::WChar:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ LM.setKind(LengthModifier::AsLong);
+ break;
+
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ LM.setKind(LengthModifier::AsLongLong);
+ break;
+
+ case BuiltinType::LongDouble:
+ LM.setKind(LengthModifier::AsLongDouble);
+ break;
+ }
+
+ // Set conversion specifier and disable any flags which do not apply to it.
+ if (QT->isAnyCharacterType()) {
+ CS.setKind(ConversionSpecifier::IntAsCharArg);
+ Precision.setHowSpecified(OptionalAmount::NotSpecified);
+ HasAlternativeForm = 0;
+ HasLeadingZeroes = 0;
+ HasPlusPrefix = 0;
+ }
+  // Test for floating-point types first, as LongDouble can also pass isUnsignedIntegerType.
+ else if (QT->isRealFloatingType()) {
+ CS.setKind(ConversionSpecifier::fArg);
+ }
+ else if (QT->isPointerType()) {
+ CS.setKind(ConversionSpecifier::VoidPtrArg);
+ Precision.setHowSpecified(OptionalAmount::NotSpecified);
+ HasAlternativeForm = 0;
+ HasLeadingZeroes = 0;
+ HasPlusPrefix = 0;
+ }
+ else if (QT->isSignedIntegerType()) {
+ CS.setKind(ConversionSpecifier::dArg);
+ HasAlternativeForm = 0;
+ }
+ else if (QT->isUnsignedIntegerType()) {
+ CS.setKind(ConversionSpecifier::uArg);
+ HasAlternativeForm = 0;
+ HasPlusPrefix = 0;
+ }
+ else {
+ return false;
+ }
+
+ return true;
+}
+
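A hedged example of the rewriting fixType() performs; the driver that would call it is an assumption, but the transitions follow directly from the switch above:

    // Argument type       before   after fixType(QT)
    // long                "%d"     "%ld"   (LM := AsLong, CS stays dArg)
    // unsigned long long  "%d"     "%llu"  (LM := AsLongLong, CS := uArg)
    // long double         "%f"     "%Lf"   (LM := AsLongDouble)
    // const wchar_t *     "%s"     "%ls"   (CS := CStrArg, LM := AsWideChar)
    // struct S            "%d"     unchanged; fixType() returns false
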
+void FormatSpecifier::toString(llvm::raw_ostream &os) const {
+ // Whilst some features have no defined order, we are using the order
+  // appearing in the C99 standard (ISO/IEC 9899:1999 (E) §7.19.6.1)
+ os << "%";
+
+ // Positional args
+ if (usesPositionalArg()) {
+ os << getPositionalArgIndex() << "$";
+ }
+
+ // Conversion flags
+ if (IsLeftJustified) os << "-";
+ if (HasPlusPrefix) os << "+";
+ if (HasSpacePrefix) os << " ";
+ if (HasAlternativeForm) os << "#";
+ if (HasLeadingZeroes) os << "0";
+
+ // Minimum field width
+ FieldWidth.toString(os);
+ // Precision
+ Precision.toString(os);
+ // Length modifier
+ os << LM.toString();
+ // Conversion specifier
+ os << CS.toString();
+}
+
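A minimal usage sketch, assuming the analyze_printf headers introduced by this patch and a parsed FormatSpecifier FS for "%-+8.3Lf"; it round-trips the specifier through toString() to show the C99 emission order (flags, width, precision, length, conversion):

    std::string Buf;
    llvm::raw_string_ostream OS(Buf);
    FS.toString(OS);                 // re-emit the parsed specifier
    assert(OS.str() == "%-+8.3Lf");  // '-' '+' flags, width 8, precision .3,
                                     // 'L' length modifier, 'f' conversion
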
+bool FormatSpecifier::hasValidPlusPrefix() const {
+ if (!HasPlusPrefix)
+ return true;
+
+ // The plus prefix only makes sense for signed conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool FormatSpecifier::hasValidAlternativeForm() const {
+ if (!HasAlternativeForm)
+ return true;
+
+  // The alternative form flag is only valid with the oxXaAeEfFgG conversions
+  switch (CS.getKind()) {
+  case ConversionSpecifier::oArg:
+  case ConversionSpecifier::xArg:
+  case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool FormatSpecifier::hasValidLeadingZeros() const {
+ if (!HasLeadingZeroes)
+ return true;
+
+ // Leading zeroes flag only valid with the diouxXaAeEfFgG conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool FormatSpecifier::hasValidSpacePrefix() const {
+ if (!HasSpacePrefix)
+ return true;
+
+ // The space prefix only makes sense for signed conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool FormatSpecifier::hasValidLeftJustified() const {
+ if (!IsLeftJustified)
+ return true;
+
+  // The left-justified flag is valid for all conversions except 'n'
+ switch (CS.getKind()) {
+ case ConversionSpecifier::OutIntPtrArg:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
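Illustrative, hedged results for the five flag predicates above, derived from the C99 flag rules they encode:

    // "%+d" -> hasValidPlusPrefix()      true    "%+u" -> false (unsigned)
    // "%#x" -> hasValidAlternativeForm() true    "%#s" -> false
    // "%0d" -> hasValidLeadingZeros()    true    "%0c" -> false
    // "% d" -> hasValidSpacePrefix()     true    "% u" -> false
    // "%-s" -> hasValidLeftJustified()   true    "%-n" -> false
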
+bool FormatSpecifier::hasValidLengthModifier() const {
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return true;
+
+ // Handle most integer flags
+ case LengthModifier::AsChar:
+ case LengthModifier::AsShort:
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsIntMax:
+ case LengthModifier::AsSizeT:
+ case LengthModifier::AsPtrDiff:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::OutIntPtrArg:
+ return true;
+ default:
+ return false;
+ }
+
+ // Handle 'l' flag
+ case LengthModifier::AsLong:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::OutIntPtrArg:
+ case ConversionSpecifier::IntAsCharArg:
+ case ConversionSpecifier::CStrArg:
+ return true;
+ default:
+ return false;
+ }
+
+ case LengthModifier::AsLongDouble:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
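And a hedged sampling of hasValidLengthModifier(), matching the nested switches above:

    // "%hhd" -> true   (AsChar with an integer conversion)
    // "%lf"  -> true   (AsLong is accepted for the floating conversions here)
    // "%lc"  -> true   (AsLong doubles as the wide-character modifier)
    // "%Lf"  -> true   (AsLongDouble with a floating conversion)
    // "%Ld"  -> false  (AsLongDouble never applies to integer conversions)
    // "%zs"  -> false  (AsSizeT is integer-only)
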
+bool FormatSpecifier::hasValidPrecision() const {
+ if (Precision.getHowSpecified() == OptionalAmount::NotSpecified)
+ return true;
+
+ // Precision is only valid with the diouxXaAeEfFgGs conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::CStrArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+bool FormatSpecifier::hasValidFieldWidth() const {
+ if (FieldWidth.getHowSpecified() == OptionalAmount::NotSpecified)
+ return true;
+
+ // The field width is valid for all conversions except n
+ switch (CS.getKind()) {
+ case ConversionSpecifier::OutIntPtrArg:
+ return false;
+
+ default:
+ return true;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Basic/CMakeLists.txt
index 1a89acc..87bf834 100644
--- a/contrib/llvm/tools/clang/lib/Basic/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/lib/Basic/CMakeLists.txt
@@ -25,6 +25,8 @@ if (Subversion_FOUND AND EXISTS "${CLANG_SOURCE_DIR}/.svn")
endif()
add_dependencies(clangBasic
+ ClangARMNeon
+ ClangAttrList
ClangDiagnosticAnalysis
ClangDiagnosticAST
ClangDiagnosticCommon
@@ -34,3 +36,4 @@ add_dependencies(clangBasic
ClangDiagnosticLex
ClangDiagnosticParse
ClangDiagnosticSema)
+
diff --git a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
index 2fd985f..641d87b 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
@@ -250,6 +250,7 @@ Diagnostic::Diagnostic(DiagnosticClient *client) : Client(client) {
ErrorsAsFatal = false;
SuppressSystemWarnings = false;
SuppressAllDiagnostics = false;
+ ShowOverloads = Ovl_All;
ExtBehavior = Ext_Ignore;
ErrorOccurred = false;
@@ -1042,8 +1043,7 @@ StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level,
StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level,
const DiagnosticInfo &Info)
- : Level(Level), Loc(Info.getLocation())
-{
+ : Level(Level), Loc(Info.getLocation()) {
llvm::SmallString<64> Message;
Info.FormatDiagnostic(Message);
this->Message.assign(Message.begin(), Message.end());
@@ -1130,6 +1130,7 @@ void StoredDiagnostic::Serialize(llvm::raw_ostream &OS) const {
WriteSourceLocation(OS, SM, R->getBegin());
WriteSourceLocation(OS, SM, R->getEnd());
+ WriteUnsigned(OS, R->isTokenRange());
}
}
@@ -1158,6 +1159,7 @@ void StoredDiagnostic::Serialize(llvm::raw_ostream &OS) const {
for (fixit_iterator F = fixit_begin(), FEnd = fixit_end(); F != FEnd; ++F) {
WriteSourceLocation(OS, SM, F->RemoveRange.getBegin());
WriteSourceLocation(OS, SM, F->RemoveRange.getEnd());
+ WriteUnsigned(OS, F->RemoveRange.isTokenRange());
WriteSourceLocation(OS, SM, F->InsertionLoc);
WriteString(OS, F->CodeToInsert);
}
@@ -1271,11 +1273,14 @@ StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM,
return Diag;
for (unsigned I = 0; I != NumSourceRanges; ++I) {
SourceLocation Begin, End;
+ unsigned IsTokenRange;
if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, Begin) ||
- ReadSourceLocation(FM, SM, Memory, MemoryEnd, End))
+ ReadSourceLocation(FM, SM, Memory, MemoryEnd, End) ||
+ ReadUnsigned(Memory, MemoryEnd, IsTokenRange))
return Diag;
- Diag.Ranges.push_back(SourceRange(Begin, End));
+ Diag.Ranges.push_back(CharSourceRange(SourceRange(Begin, End),
+ IsTokenRange));
}
// Read the fix-it hints.
@@ -1284,9 +1289,10 @@ StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM,
return Diag;
for (unsigned I = 0; I != NumFixIts; ++I) {
SourceLocation RemoveBegin, RemoveEnd, InsertionLoc;
- unsigned InsertLen = 0;
+ unsigned InsertLen = 0, RemoveIsTokenRange;
if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, RemoveBegin) ||
ReadSourceLocation(FM, SM, Memory, MemoryEnd, RemoveEnd) ||
+ ReadUnsigned(Memory, MemoryEnd, RemoveIsTokenRange) ||
ReadSourceLocation(FM, SM, Memory, MemoryEnd, InsertionLoc) ||
ReadUnsigned(Memory, MemoryEnd, InsertLen) ||
Memory + InsertLen > MemoryEnd) {
@@ -1295,7 +1301,8 @@ StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM,
}
FixItHint Hint;
- Hint.RemoveRange = SourceRange(RemoveBegin, RemoveEnd);
+ Hint.RemoveRange = CharSourceRange(SourceRange(RemoveBegin, RemoveEnd),
+ RemoveIsTokenRange);
Hint.InsertionLoc = InsertionLoc;
Hint.CodeToInsert.assign(Memory, Memory + InsertLen);
Memory += InsertLen;
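
A hedged note on the new bit being round-tripped: a CharSourceRange records whether its end location denotes a token (to be widened to that token's last character) or a plain character position. Without serializing it, token ranges would silently deserialize as character ranges. A minimal construction, assuming the usual clang/Basic/SourceLocation.h API:

    CharSourceRange R = CharSourceRange::getTokenRange(Begin, End);
    assert(R.isTokenRange());  // this is the flag written by WriteUnsigned above
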
diff --git a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
index c4296c3..3c91a0f 100644
--- a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
@@ -331,8 +331,8 @@ const FileEntry *FileManager::getFile(const char *NameStart,
}
const FileEntry *
-FileManager::getVirtualFile(const llvm::StringRef &Filename,
- off_t Size, time_t ModificationTime) {
+FileManager::getVirtualFile(llvm::StringRef Filename, off_t Size,
+ time_t ModificationTime) {
const char *NameStart = Filename.begin(), *NameEnd = Filename.end();
++NumFileLookups;
diff --git a/contrib/llvm/tools/clang/lib/Basic/Makefile b/contrib/llvm/tools/clang/lib/Basic/Makefile
index 58ac7eb..51b8ac1 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Makefile
+++ b/contrib/llvm/tools/clang/lib/Basic/Makefile
@@ -11,16 +11,11 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangBasic
BUILD_ARCHIVE = 1
-CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-ifdef CLANG_VENDOR
-CPPFLAGS += -DCLANG_VENDOR='"$(CLANG_VENDOR) "'
-endif
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
SVN_REVISION := $(shell $(LLVM_SRC_ROOT)/utils/GetSourceVersion $(PROJ_SRC_DIR)/../..)
diff --git a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
index 6692e64..7fcf372 100644
--- a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
@@ -34,6 +34,8 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
DoubleAlign = 64;
LongDoubleWidth = 64;
LongDoubleAlign = 64;
+ LargeArrayMinWidth = 0;
+ LargeArrayAlign = 0;
SizeType = UnsignedLong;
PtrDiffType = SignedLong;
IntMaxType = SignedLongLong;
@@ -53,6 +55,9 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
"i64:64:64-f32:32:32-f64:64:64-n32";
UserLabelPrefix = "_";
HasAlignMac68kSupport = false;
+
+ // Default to no types using fpret.
+ RealTypeUsesObjCFPRet = 0;
}
// Out of line virtual dtor for TargetInfo.
@@ -282,6 +287,8 @@ bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const {
Info.setAllowsRegister();
Info.setAllowsMemory();
break;
+ case ',': // FIXME: Until we handle multiple alternative constraints,
+ return true; // ignore everything after the first comma.
}
Name++;
@@ -375,6 +382,8 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
Info.setAllowsRegister();
Info.setAllowsMemory();
break;
+ case ',': // FIXME: Until we handle multiple alternative constraints,
+ return true; // ignore everything after the first comma.
}
Name++;
diff --git a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
index 92fd417..fdf63e7 100644
--- a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
+++ b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
@@ -150,7 +150,7 @@ protected:
public:
DarwinTargetInfo(const std::string& triple) :
OSTargetInfo<Target>(triple) {
- this->TLSSupported = false;
+ this->TLSSupported = llvm::Triple(triple).getDarwinMajorNumber() > 10;
}
virtual std::string isValidSectionSpecifier(llvm::StringRef SR) const {
@@ -160,6 +160,12 @@ public:
return llvm::MCSectionMachO::ParseSectionSpecifier(SR, Segment, Section,
TAA, StubSize);
}
+
+ virtual const char *getStaticInitSectionSpecifier() const {
+ // FIXME: We should return 0 when building kexts.
+ return "__TEXT,__StaticInit,regular,pure_instructions";
+ }
+
};
@@ -206,6 +212,30 @@ public:
}
};
+// Minix Target
+template<typename Target>
+class MinixTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // Minix defines
+
+ Builder.defineMacro("__minix", "3");
+ Builder.defineMacro("_EM_WSIZE", "4");
+ Builder.defineMacro("_EM_PSIZE", "4");
+ Builder.defineMacro("_EM_SSIZE", "2");
+ Builder.defineMacro("_EM_LSIZE", "4");
+ Builder.defineMacro("_EM_FSIZE", "4");
+ Builder.defineMacro("_EM_DSIZE", "8");
+ DefineStd(Builder, "unix", Opts);
+ }
+public:
+ MinixTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+ }
+};
+
// Linux target
template<typename Target>
class LinuxTargetInfo : public OSTargetInfo<Target> {
@@ -299,13 +329,20 @@ protected:
Builder.defineMacro("__CELLOS_LV2__");
Builder.defineMacro("__ELF__");
Builder.defineMacro("__LP32__");
+ Builder.defineMacro("_ARCH_PPC64");
+ Builder.defineMacro("__powerpc64__");
}
public:
PS3PPUTargetInfo(const std::string& triple)
: OSTargetInfo<Target>(triple) {
this->UserLabelPrefix = "";
this->LongWidth = this->LongAlign = this->PointerWidth = this->PointerAlign = 32;
+ this->IntMaxType = TargetInfo::SignedLongLong;
+ this->UIntMaxType = TargetInfo::UnsignedLongLong;
+ this->Int64Type = TargetInfo::SignedLongLong;
this->SizeType = TargetInfo::UnsignedInt;
+ this->DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32";
}
};
@@ -413,12 +450,98 @@ public:
switch (*Name) {
default: return false;
case 'O': // Zero
- return true;
+ break;
case 'b': // Base register
case 'f': // Floating point register
Info.setAllowsRegister();
- return true;
+ break;
+ // FIXME: The following are added to allow parsing.
+ // I just took a guess at what the actions should be.
+ // Also, is more specific checking needed? I.e. specific registers?
+ case 'd': // Floating point register (containing 64-bit value)
+ case 'v': // Altivec vector register
+ Info.setAllowsRegister();
+ break;
+ case 'w':
+ switch (Name[1]) {
+ case 'd':// VSX vector register to hold vector double data
+ case 'f':// VSX vector register to hold vector float data
+ case 's':// VSX vector register to hold scalar float data
+ case 'a':// Any VSX register
+ break;
+ default:
+ return false;
+ }
+ Info.setAllowsRegister();
+ Name++; // Skip over 'w'.
+ break;
+ case 'h': // `MQ', `CTR', or `LINK' register
+ case 'q': // `MQ' register
+ case 'c': // `CTR' register
+ case 'l': // `LINK' register
+ case 'x': // `CR' register (condition register) number 0
+ case 'y': // `CR' register (condition register)
+ case 'z': // `XER[CA]' carry bit (part of the XER register)
+ Info.setAllowsRegister();
+ break;
+ case 'I': // Signed 16-bit constant
+ case 'J': // Unsigned 16-bit constant shifted left 16 bits
+ // (use `L' instead for SImode constants)
+ case 'K': // Unsigned 16-bit constant
+ case 'L': // Signed 16-bit constant shifted left 16 bits
+ case 'M': // Constant larger than 31
+ case 'N': // Exact power of 2
+ case 'P': // Constant whose negation is a signed 16-bit constant
+ case 'G': // Floating point constant that can be loaded into a
+ // register with one instruction per word
+ case 'H': // Integer/Floating point constant that can be loaded
+ // into a register using three instructions
+ break;
+ case 'm': // Memory operand. Note that on PowerPC targets, m can
+ // include addresses that update the base register. It
+ // is therefore only safe to use `m' in an asm statement
+ // if that asm statement accesses the operand exactly once.
+ // The asm statement must also use `%U<opno>' as a
+ // placeholder for the “update” flag in the corresponding
+ // load or store instruction. For example:
+ // asm ("st%U0 %1,%0" : "=m" (mem) : "r" (val));
+ // is correct but:
+ // asm ("st %1,%0" : "=m" (mem) : "r" (val));
+ // is not. Use es rather than m if you don't want the base
+ // register to be updated.
+ case 'e':
+ if (Name[1] != 's')
+ return false;
+ // es: A “stable” memory operand; that is, one which does not
+ // include any automodification of the base register. Unlike
+ // `m', this constraint can be used in asm statements that
+ // might access the operand several times, or that might not
+ // access it at all.
+ Info.setAllowsMemory();
+ Name++; // Skip over 'e'.
+ break;
+ case 'Q': // Memory operand that is an offset from a register (it is
+ // usually better to use `m' or `es' in asm statements)
+ case 'Z': // Memory operand that is an indexed or indirect from a
+ // register (it is usually better to use `m' or `es' in
+ // asm statements)
+ Info.setAllowsMemory();
+ Info.setAllowsRegister();
+ break;
+ case 'R': // AIX TOC entry
+ case 'a': // Address operand that is an indexed or indirect from a
+ // register (`p' is preferable for asm statements)
+ case 'S': // Constant suitable as a 64-bit mask operand
+ case 'T': // Constant suitable as a 32-bit mask operand
+ case 'U': // System V Release 4 small data area reference
+ case 't': // AND masks that can be performed by two rldic{l, r}
+ // instructions
+ case 'W': // Vector constant that does not require memory
+ case 'j': // Vector constant that is all zeros.
+ break;
+ // End FIXME.
}
+ return true;
}
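
A hedged usage sketch for one of the newly parsed constraints, following the GCC documentation quoted in the comments above; the mnemonic and operand choice are illustrative only:

    // 'es' promises a stable memory operand (no base-register update), so
    // the asm may safely access it more than once:
    asm ("lwz %0,%1\n\tlwz %0,%1" : "=r"(val) : "es"(mem));
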
virtual const char *getClobbers() const {
return "";
@@ -600,6 +723,27 @@ public:
};
} // end anonymous namespace.
+
+namespace {
+class DarwinPPCTargetInfo :
+ public DarwinTargetInfo<PPCTargetInfo> {
+public:
+ DarwinPPCTargetInfo(const std::string& triple)
+ : DarwinTargetInfo<PPCTargetInfo>(triple) {
+ HasAlignMac68kSupport = true;
+ }
+};
+
+class DarwinPPC64TargetInfo :
+ public DarwinTargetInfo<PPC64TargetInfo> {
+public:
+ DarwinPPC64TargetInfo(const std::string& triple)
+ : DarwinTargetInfo<PPC64TargetInfo>(triple) {
+ HasAlignMac68kSupport = true;
+ }
+};
+} // end anonymous namespace.
+
namespace {
// MBlaze abstract base class
class MBlazeTargetInfo : public TargetInfo {
@@ -1101,6 +1245,11 @@ public:
PtrDiffType = SignedInt;
IntPtrType = SignedInt;
RegParmMax = 3;
+
+ // Use fpret for all types.
+ RealTypeUsesObjCFPRet = ((1 << TargetInfo::Float) |
+ (1 << TargetInfo::Double) |
+ (1 << TargetInfo::LongDouble));
}
virtual const char *getVAListDeclaration() const {
return "typedef char* __builtin_va_list;";
@@ -1257,6 +1406,8 @@ public:
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
LongDoubleWidth = 128;
LongDoubleAlign = 128;
+ LargeArrayMinWidth = 128;
+ LargeArrayAlign = 128;
IntMaxType = SignedLong;
UIntMaxType = UnsignedLong;
Int64Type = SignedLong;
@@ -1265,6 +1416,9 @@ public:
DescriptionString = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-"
"a0:0:64-s0:64:64-f80:128:128-n8:16:32:64";
+
+ // Use fpret only for long double.
+ RealTypeUsesObjCFPRet = (1 << TargetInfo::LongDouble);
}
virtual const char *getVAListDeclaration() const {
return "typedef struct __va_list_tag {"
@@ -2294,6 +2448,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
case llvm::Triple::arm:
case llvm::Triple::thumb:
switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<ARMTargetInfo>(T);
case llvm::Triple::Darwin:
return new DarwinARMTargetInfo(T);
case llvm::Triple::FreeBSD:
@@ -2327,14 +2483,14 @@ static TargetInfo *AllocateTarget(const std::string &T) {
case llvm::Triple::ppc:
if (os == llvm::Triple::Darwin)
- return new DarwinTargetInfo<PPCTargetInfo>(T);
+ return new DarwinPPCTargetInfo(T);
else if (os == llvm::Triple::FreeBSD)
return new FreeBSDTargetInfo<PPC32TargetInfo>(T);
return new PPC32TargetInfo(T);
case llvm::Triple::ppc64:
if (os == llvm::Triple::Darwin)
- return new DarwinTargetInfo<PPC64TargetInfo>(T);
+ return new DarwinPPC64TargetInfo(T);
else if (os == llvm::Triple::Lv2)
return new PS3PPUTargetInfo<PPC64TargetInfo>(T);
else if (os == llvm::Triple::FreeBSD)
@@ -2377,6 +2533,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new OpenBSDI386TargetInfo(T);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<X86_32TargetInfo>(T);
+ case llvm::Triple::Minix:
+ return new MinixTargetInfo<X86_32TargetInfo>(T);
case llvm::Triple::Solaris:
return new SolarisTargetInfo<X86_32TargetInfo>(T);
case llvm::Triple::Cygwin:
@@ -2444,6 +2602,12 @@ TargetInfo *TargetInfo::CreateTargetInfo(Diagnostic &Diags,
return 0;
}
+ // Set the target C++ ABI.
+ if (!Target->setCXXABI(Opts.CXXABI)) {
+ Diags.Report(diag::err_target_unknown_cxxabi) << Opts.CXXABI;
+ return 0;
+ }
+
// Compute the default target features, we need the target to handle this
// because features may have dependencies on one another.
llvm::StringMap<bool> Features;
diff --git a/contrib/llvm/tools/clang/lib/Frontend/AnalysisConsumer.cpp b/contrib/llvm/tools/clang/lib/Checker/AnalysisConsumer.cpp
index 6a47279..524f37e 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/AnalysisConsumer.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/AnalysisConsumer.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/AnalysisConsumer.h"
+#include "clang/Checker/AnalysisConsumer.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
@@ -27,9 +27,11 @@
#include "clang/Checker/BugReporter/BugReporter.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
+#include "clang/Checker/PathDiagnosticClients.h"
+#include "GRExprEngineExperimentalChecks.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Frontend/PathDiagnosticClients.h"
+#include "clang/Frontend/AnalyzerOptions.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/System/Path.h"
@@ -79,8 +81,6 @@ public:
const Preprocessor &PP;
const std::string OutDir;
AnalyzerOptions Opts;
- bool declDisplayed;
-
// PD is owned by AnalysisManager.
PathDiagnosticClient *PD;
@@ -94,7 +94,7 @@ public:
const std::string& outdir,
const AnalyzerOptions& opts)
: Ctx(0), PP(pp), OutDir(outdir),
- Opts(opts), declDisplayed(false), PD(0) {
+ Opts(opts), PD(0) {
DigestAnalyzerOptions();
}
@@ -137,10 +137,9 @@ public:
}
void DisplayFunction(const Decl *D) {
- if (!Opts.AnalyzerDisplayProgress || declDisplayed)
+ if (!Opts.AnalyzerDisplayProgress)
return;
- declDisplayed = true;
SourceManager &SM = Mgr->getASTContext().getSourceManager();
PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
llvm::errs() << "ANALYZE: " << Loc.getFilename();
@@ -181,7 +180,7 @@ public:
}
virtual void HandleTranslationUnit(ASTContext &C);
- void HandleCode(Decl *D, Stmt* Body, Actions& actions);
+ void HandleCode(Decl *D, Actions& actions);
};
} // end anonymous namespace
@@ -209,7 +208,8 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
if (!Opts.AnalyzeSpecificFunction.empty() &&
FD->getDeclName().getAsString() != Opts.AnalyzeSpecificFunction)
break;
- HandleCode(FD, FD->getBody(), FunctionActions);
+ DisplayFunction(FD);
+ HandleCode(FD, FunctionActions);
}
break;
}
@@ -221,14 +221,15 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
if (!Opts.AnalyzeSpecificFunction.empty() &&
Opts.AnalyzeSpecificFunction != MD->getSelector().getAsString())
break;
- HandleCode(MD, MD->getBody(), ObjCMethodActions);
+ DisplayFunction(MD);
+ HandleCode(MD, ObjCMethodActions);
}
break;
}
case Decl::ObjCImplementation: {
ObjCImplementationDecl* ID = cast<ObjCImplementationDecl>(*I);
- HandleCode(ID, 0, ObjCImplementationActions);
+ HandleCode(ID, ObjCImplementationActions);
for (ObjCImplementationDecl::method_iterator MI = ID->meth_begin(),
ME = ID->meth_end(); MI != ME; ++MI) {
@@ -236,7 +237,7 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
if (!Opts.AnalyzeSpecificFunction.empty() &&
Opts.AnalyzeSpecificFunction != (*MI)->getSelector().getAsString())
break;
- HandleCode(*MI, (*MI)->getBody(), ObjCMethodActions);
+ HandleCode(*MI, ObjCMethodActions);
}
}
break;
@@ -269,7 +270,7 @@ static void FindBlocks(DeclContext *D, llvm::SmallVectorImpl<Decl*> &WL) {
FindBlocks(DC, WL);
}
-void AnalysisConsumer::HandleCode(Decl *D, Stmt* Body, Actions& actions) {
+void AnalysisConsumer::HandleCode(Decl *D, Actions& actions) {
  // Don't run the actions if an error has occurred while parsing the file.
Diagnostic &Diags = PP.getDiagnostics();
@@ -278,8 +279,9 @@ void AnalysisConsumer::HandleCode(Decl *D, Stmt* Body, Actions& actions) {
// Don't run the actions on declarations in header files unless
// otherwise specified.
- if (!Opts.AnalyzeAll &&
- !Ctx->getSourceManager().isFromMainFile(D->getLocation()))
+ SourceManager &SM = Ctx->getSourceManager();
+ SourceLocation SL = SM.getInstantiationLoc(D->getLocation());
+ if (!Opts.AnalyzeAll && !SM.isFromMainFile(SL))
return;
// Clear the AnalysisManager of old AnalysisContexts.
@@ -289,7 +291,7 @@ void AnalysisConsumer::HandleCode(Decl *D, Stmt* Body, Actions& actions) {
llvm::SmallVector<Decl*, 10> WL;
WL.push_back(D);
- if (Body && Opts.AnalyzeNestedBlocks)
+ if (D->hasBody() && Opts.AnalyzeNestedBlocks)
FindBlocks(cast<DeclContext>(D), WL);
for (Actions::iterator I = actions.begin(), E = actions.end(); I != E; ++I)
@@ -339,6 +341,9 @@ static void ActionGRExprEngine(AnalysisConsumer &C, AnalysisManager& mgr,
if (C.Opts.EnableExperimentalChecks)
RegisterExperimentalChecks(Eng);
+ if (C.Opts.EnableIdempotentOperationChecker)
+ RegisterIdempotentOperationChecker(Eng);
+
// Set the graph auditor.
llvm::OwningPtr<ExplodedNode::Auditor> Auditor;
if (mgr.shouldVisualizeUbigraph()) {
diff --git a/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp
index 309a74c..d0bccb2 100644
--- a/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp
@@ -60,9 +60,10 @@ void AttrNonNullChecker::PreVisitCallExpr(CheckerContext &C,
if (!Att->isNonNull(idx))
continue;
- const SVal &V = state->getSVal(*I);
- const DefinedSVal *DV = dyn_cast<DefinedSVal>(&V);
+ SVal V = state->getSVal(*I);
+ DefinedSVal *DV = dyn_cast<DefinedSVal>(&V);
+ // If the value is unknown or undefined, we can't perform this check.
if (!DV)
continue;
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp b/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp
index e89546e..eee5c59 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp
@@ -54,22 +54,28 @@ public:
ISetFactory(statemgr.getAllocator()) {}
const GRState* AssumeSymNE(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V);
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
const GRState* AssumeSymEQ(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V);
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
const GRState* AssumeSymLT(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V);
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
const GRState* AssumeSymGT(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V);
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
const GRState* AssumeSymGE(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V);
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
const GRState* AssumeSymLE(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V);
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
const GRState* AddEQ(const GRState* state, SymbolRef sym, const llvm::APSInt& V);
@@ -94,46 +100,52 @@ ConstraintManager* clang::CreateBasicConstraintManager(GRStateManager& statemgr,
return new BasicConstraintManager(statemgr, subengine);
}
+
const GRState*
BasicConstraintManager::AssumeSymNE(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) {
- // First, determine if sym == X, where X != V.
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // First, determine if sym == X, where X+Adjustment != V.
+ llvm::APSInt Adjusted = V-Adjustment;
if (const llvm::APSInt* X = getSymVal(state, sym)) {
- bool isFeasible = (*X != V);
+ bool isFeasible = (*X != Adjusted);
return isFeasible ? state : NULL;
}
- // Second, determine if sym != V.
- if (isNotEqual(state, sym, V))
+ // Second, determine if sym+Adjustment != V.
+ if (isNotEqual(state, sym, Adjusted))
return state;
// If we reach here, sym is not a constant and we don't know if it is != V.
// Make that assumption.
- return AddNE(state, sym, V);
+ return AddNE(state, sym, Adjusted);
}
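
A worked example of the new Adjustment parameter (hedged): constraints on an expression like sym + k are folded into constraints on sym itself by shifting the comparison value before recording it.

    // Assuming the branch condition "sym + 3 != 10": the caller passes
    // V = 10 and Adjustment = 3, so
    llvm::APSInt V(llvm::APInt(32, 10), /*isUnsigned=*/false);
    llvm::APSInt Adj(llvm::APInt(32, 3), /*isUnsigned=*/false);
    llvm::APSInt Adjusted = V - Adj;   // 7
    // and the recorded constraint is simply "sym != 7"; a later test of
    // "sym == 7" on the same path is then rejected as infeasible.
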
-const GRState *BasicConstraintManager::AssumeSymEQ(const GRState *state,
- SymbolRef sym,
- const llvm::APSInt &V) {
- // First, determine if sym == X, where X != V.
+const GRState*
+BasicConstraintManager::AssumeSymEQ(const GRState *state, SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // First, determine if sym == X, where X+Adjustment != V.
+ llvm::APSInt Adjusted = V-Adjustment;
if (const llvm::APSInt* X = getSymVal(state, sym)) {
- bool isFeasible = *X == V;
+ bool isFeasible = (*X == Adjusted);
return isFeasible ? state : NULL;
}
- // Second, determine if sym != V.
- if (isNotEqual(state, sym, V))
+ // Second, determine if sym+Adjustment != V.
+ if (isNotEqual(state, sym, Adjusted))
return NULL;
// If we reach here, sym is not a constant and we don't know if it is == V.
// Make that assumption.
- return AddEQ(state, sym, V);
+ return AddEQ(state, sym, Adjusted);
}
-// These logic will be handled in another ConstraintManager.
-const GRState *BasicConstraintManager::AssumeSymLT(const GRState *state,
- SymbolRef sym,
- const llvm::APSInt& V) {
+// The logic for these will be handled in another ConstraintManager.
+const GRState*
+BasicConstraintManager::AssumeSymLT(const GRState *state, SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
// Is 'V' the smallest possible value?
if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
// sym cannot be any value less than 'V'. This path is infeasible.
@@ -141,13 +153,13 @@ const GRState *BasicConstraintManager::AssumeSymLT(const GRState *state,
}
// FIXME: For now have assuming x < y be the same as assuming sym != V;
- return AssumeSymNE(state, sym, V);
+ return AssumeSymNE(state, sym, V, Adjustment);
}
-const GRState *BasicConstraintManager::AssumeSymGT(const GRState *state,
- SymbolRef sym,
- const llvm::APSInt& V) {
-
+const GRState*
+BasicConstraintManager::AssumeSymGT(const GRState *state, SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
// Is 'V' the largest possible value?
if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
// sym cannot be any value greater than 'V'. This path is infeasible.
@@ -155,56 +167,60 @@ const GRState *BasicConstraintManager::AssumeSymGT(const GRState *state,
}
// FIXME: For now have assuming x > y be the same as assuming sym != V;
- return AssumeSymNE(state, sym, V);
+ return AssumeSymNE(state, sym, V, Adjustment);
}
-const GRState *BasicConstraintManager::AssumeSymGE(const GRState *state,
- SymbolRef sym,
- const llvm::APSInt &V) {
-
- // Reject a path if the value of sym is a constant X and !(X >= V).
+const GRState*
+BasicConstraintManager::AssumeSymGE(const GRState *state, SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // Reject a path if the value of sym is a constant X and !(X+Adj >= V).
if (const llvm::APSInt *X = getSymVal(state, sym)) {
- bool isFeasible = *X >= V;
+ bool isFeasible = (*X >= V-Adjustment);
return isFeasible ? state : NULL;
}
// Sym is not a constant, but it is worth looking to see if V is the
// maximum integer value.
if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
- // If we know that sym != V, then this condition is infeasible since
- // there is no other value greater than V.
- bool isFeasible = !isNotEqual(state, sym, V);
+ llvm::APSInt Adjusted = V-Adjustment;
+
+ // If we know that sym != V (after adjustment), then this condition
+ // is infeasible since there is no other value greater than V.
+ bool isFeasible = !isNotEqual(state, sym, Adjusted);
// If the path is still feasible then as a consequence we know that
- // 'sym == V' because we cannot have 'sym > V' (no larger values).
+ // 'sym+Adjustment == V' because there are no larger values.
// Add this constraint.
- return isFeasible ? AddEQ(state, sym, V) : NULL;
+ return isFeasible ? AddEQ(state, sym, Adjusted) : NULL;
}
return state;
}
const GRState*
-BasicConstraintManager::AssumeSymLE(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V) {
-
- // Reject a path if the value of sym is a constant X and !(X <= V).
+BasicConstraintManager::AssumeSymLE(const GRState *state, SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // Reject a path if the value of sym is a constant X and !(X+Adj <= V).
if (const llvm::APSInt* X = getSymVal(state, sym)) {
- bool isFeasible = *X <= V;
+ bool isFeasible = (*X <= V-Adjustment);
return isFeasible ? state : NULL;
}
// Sym is not a constant, but it is worth looking to see if V is the
// minimum integer value.
if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
- // If we know that sym != V, then this condition is infeasible since
- // there is no other value less than V.
- bool isFeasible = !isNotEqual(state, sym, V);
+ llvm::APSInt Adjusted = V-Adjustment;
+
+ // If we know that sym != V (after adjustment), then this condition
+ // is infeasible since there is no other value less than V.
+ bool isFeasible = !isNotEqual(state, sym, Adjusted);
// If the path is still feasible then as a consequence we know that
- // 'sym == V' because we cannot have 'sym < V' (no smaller values).
+ // 'sym+Adjustment == V' because there are no smaller values.
// Add this constraint.
- return isFeasible ? AddEQ(state, sym, V) : NULL;
+ return isFeasible ? AddEQ(state, sym, Adjusted) : NULL;
}
return state;
@@ -213,7 +229,7 @@ BasicConstraintManager::AssumeSymLE(const GRState* state, SymbolRef sym,
const GRState* BasicConstraintManager::AddEQ(const GRState* state, SymbolRef sym,
const llvm::APSInt& V) {
// Create a new state with the old binding replaced.
- return state->set<ConstEq>(sym, &V);
+ return state->set<ConstEq>(sym, &state->getBasicVals().getValue(V));
}
const GRState* BasicConstraintManager::AddNE(const GRState* state, SymbolRef sym,
@@ -224,7 +240,7 @@ const GRState* BasicConstraintManager::AddNE(const GRState* state, SymbolRef sym
GRState::IntSetTy S = T ? *T : ISetFactory.GetEmptySet();
// Now add V to the NE set.
- S = ISetFactory.Add(S, &V);
+ S = ISetFactory.Add(S, &state->getBasicVals().getValue(V));
// Create a new state with the old binding replaced.
return state->set<ConstNotEq>(sym, S);
@@ -243,7 +259,7 @@ bool BasicConstraintManager::isNotEqual(const GRState* state, SymbolRef sym,
const ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym);
// See if V is present in the NE-set.
- return T ? T->contains(&V) : false;
+ return T ? T->contains(&state->getBasicVals().getValue(V)) : false;
}
bool BasicConstraintManager::isEqual(const GRState* state, SymbolRef sym,
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp
index b852e2a..ecb2d1c 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp
@@ -415,59 +415,72 @@ clang::CreateAuditCFNumberCreate(ASTContext& Ctx, BugReporter& BR) {
}
//===----------------------------------------------------------------------===//
-// CFRetain/CFRelease auditing for null arguments.
+// CFRetain/CFRelease checking for null arguments.
//===----------------------------------------------------------------------===//
namespace {
-class AuditCFRetainRelease : public GRSimpleAPICheck {
+class CFRetainReleaseChecker : public CheckerVisitor<CFRetainReleaseChecker> {
APIMisuse *BT;
-
- // FIXME: Either this should be refactored into GRSimpleAPICheck, or
- // it should always be passed with a call to Audit. The latter
- // approach makes this class more stateless.
- ASTContext& Ctx;
IdentifierInfo *Retain, *Release;
- BugReporter& BR;
public:
- AuditCFRetainRelease(ASTContext& ctx, BugReporter& br)
- : BT(0), Ctx(ctx),
- Retain(&Ctx.Idents.get("CFRetain")), Release(&Ctx.Idents.get("CFRelease")),
- BR(br){}
+ CFRetainReleaseChecker(ASTContext& Ctx): BT(NULL),
+ Retain(&Ctx.Idents.get("CFRetain")), Release(&Ctx.Idents.get("CFRelease"))
+ {}
- ~AuditCFRetainRelease() {}
+ static void *getTag() { static int x = 0; return &x; }
- bool Audit(ExplodedNode* N, GRStateManager&);
+ void PreVisitCallExpr(CheckerContext& C, const CallExpr* CE);
};
} // end anonymous namespace
-bool AuditCFRetainRelease::Audit(ExplodedNode* N, GRStateManager&) {
- const CallExpr* CE = cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
-
+void CFRetainReleaseChecker::PreVisitCallExpr(CheckerContext& C,
+ const CallExpr* CE) {
// If the CallExpr doesn't have exactly 1 argument just give up checking.
if (CE->getNumArgs() != 1)
- return false;
+ return;
- // Check if we called CFRetain/CFRelease.
- const GRState* state = N->getState();
+ // Get the function declaration of the callee.
+ const GRState* state = C.getState();
SVal X = state->getSVal(CE->getCallee());
const FunctionDecl* FD = X.getAsFunctionDecl();
if (!FD)
- return false;
+ return;
+ // Check if we called CFRetain/CFRelease.
const IdentifierInfo *FuncII = FD->getIdentifier();
if (!(FuncII == Retain || FuncII == Release))
- return false;
+ return;
+
+ // FIXME: The rest of this just checks that the argument is non-null.
+ // It should probably be refactored and combined with AttrNonNullChecker.
+
+ // Get the argument's value.
+ const Expr *Arg = CE->getArg(0);
+ SVal ArgVal = state->getSVal(Arg);
+ DefinedSVal *DefArgVal = dyn_cast<DefinedSVal>(&ArgVal);
+ if (!DefArgVal)
+ return;
+
+ // Get a NULL value.
+ ValueManager &ValMgr = C.getValueManager();
+ DefinedSVal Zero = cast<DefinedSVal>(ValMgr.makeZeroVal(Arg->getType()));
+
+ // Make an expression asserting that they're equal.
+ SValuator &SVator = ValMgr.getSValuator();
+ DefinedOrUnknownSVal ArgIsNull = SVator.EvalEQ(state, Zero, *DefArgVal);
+
+ // Are they equal?
+ const GRState *stateTrue, *stateFalse;
+ llvm::tie(stateTrue, stateFalse) = state->Assume(ArgIsNull);
+
+ if (stateTrue && !stateFalse) {
+ ExplodedNode *N = C.GenerateSink(stateTrue);
+ if (!N)
+ return;
- // Finally, check if the argument is NULL.
- // FIXME: We should be able to bifurcate the state here, as a successful
- // check will result in the value not being NULL afterwards.
- // FIXME: Need a way to register vistors for the BugReporter. Would like
- // to benefit from the same diagnostics that regular null dereference
- // reporting has.
- if (state->getStateManager().isEqual(state, CE->getArg(0), 0)) {
if (!BT)
BT = new APIMisuse("null passed to CFRetain/CFRelease");
@@ -475,19 +488,16 @@ bool AuditCFRetainRelease::Audit(ExplodedNode* N, GRStateManager&) {
? "Null pointer argument in call to CFRetain"
: "Null pointer argument in call to CFRelease";
- RangedBugReport *report = new RangedBugReport(*BT, description, N);
- report->addRange(CE->getArg(0)->getSourceRange());
- BR.EmitReport(report);
- return true;
- }
-
- return false;
-}
+ EnhancedBugReport *report = new EnhancedBugReport(*BT, description, N);
+ report->addRange(Arg->getSourceRange());
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, Arg);
+ C.EmitReport(report);
+ return;
+ }
-GRSimpleAPICheck*
-clang::CreateAuditCFRetainRelease(ASTContext& Ctx, BugReporter& BR) {
- return new AuditCFRetainRelease(Ctx, BR);
+ // From here on, we know the argument is non-null.
+ C.addTransition(stateFalse);
}
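
A hedged illustration of what the rewritten checker reports and learns; the bifurcation on ArgIsNull is what lets the non-null fact persist on the surviving path:

    // CFTypeRef r = 0;
    // CFRetain(r);          // warning: null passed to CFRetain/CFRelease
    //
    // CFTypeRef s = get();  // unknown value
    // CFRetain(s);          // no warning, but the analyzer transitions to
    // if (s == 0) { ... }   //   the non-null state: this branch is dead
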
//===----------------------------------------------------------------------===//
@@ -569,9 +579,10 @@ void clang::RegisterAppleChecks(GRExprEngine& Eng, const Decl &D) {
Eng.AddCheck(CreateBasicObjCFoundationChecks(Ctx, BR),
Stmt::ObjCMessageExprClass);
Eng.AddCheck(CreateAuditCFNumberCreate(Ctx, BR), Stmt::CallExprClass);
- Eng.AddCheck(CreateAuditCFRetainRelease(Ctx, BR), Stmt::CallExprClass);
RegisterNSErrorChecks(BR, Eng, D);
RegisterNSAutoreleasePoolChecks(Eng);
+
+ Eng.registerCheck(new CFRetainReleaseChecker(Ctx));
Eng.registerCheck(new ClassReleaseChecker(Ctx));
}
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h
index 679c6dc..8fb0570 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h
+++ b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h
@@ -30,9 +30,6 @@ GRSimpleAPICheck *CreateBasicObjCFoundationChecks(ASTContext& Ctx,
GRSimpleAPICheck *CreateAuditCFNumberCreate(ASTContext& Ctx,
BugReporter& BR);
-GRSimpleAPICheck *CreateAuditCFRetainRelease(ASTContext& Ctx,
- BugReporter& BR);
-
void RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng, const Decl &D);
void RegisterNSAutoreleasePoolChecks(GRExprEngine &Eng);
diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp b/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp
index 5be5ca6..62c8d9c 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp
@@ -46,9 +46,14 @@ public:
SVal Retrieve(Store store, Loc loc, QualType T = QualType());
- Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E,
+ Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E,
unsigned Count, InvalidatedSymbols *IS);
+ Store InvalidateRegions(Store store, const MemRegion * const *Begin,
+ const MemRegion * const *End, const Expr *E,
+ unsigned Count, InvalidatedSymbols *IS,
+ bool invalidateGlobals);
+
Store scanForIvars(Stmt *B, const Decl* SelfDecl,
const MemRegion *SelfRegion, Store St);
@@ -72,9 +77,9 @@ public:
/// RemoveDeadBindings - Scans a BasicStore of 'state' for dead values.
  /// It updates the GRState object in place with the values removed.
- const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
- const StackFrameContext *LCtx,
- SymbolReaper& SymReaper,
+ const GRState *RemoveDeadBindings(GRState &state,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
void iterBindings(Store store, BindingsHandler& f);
@@ -144,9 +149,30 @@ SVal BasicStoreManager::LazyRetrieve(Store store, const TypedRegion *R) {
// Globals and parameters start with symbolic values.
// Local variables initially are undefined.
+
+ // Non-static globals may have had their values reset by InvalidateRegions.
+ const MemSpaceRegion *MS = VR->getMemorySpace();
+ if (isa<NonStaticGlobalSpaceRegion>(MS)) {
+ BindingsTy B = GetBindings(store);
+ // FIXME: Copy-and-pasted from RegionStore.cpp.
+ if (BindingsTy::data_type *Val = B.lookup(MS)) {
+ if (SymbolRef parentSym = Val->getAsSymbol())
+ return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (Val->isZeroConstant())
+ return ValMgr.makeZeroVal(T);
+
+ if (Val->isUnknownOrUndef())
+ return *Val;
+
+ assert(0 && "Unknown default value.");
+ }
+ }
+
if (VR->hasGlobalsOrParametersStorage() ||
isa<UnknownSpaceRegion>(VR->getMemorySpace()))
return ValMgr.getRegionValueSymbolVal(R);
+
return UndefinedVal();
}
@@ -194,6 +220,14 @@ Store BasicStoreManager::Bind(Store store, Loc loc, SVal V) {
return store;
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
+
+ // Special case: a default symbol assigned to the NonStaticGlobalsSpaceRegion
+ // that is used to derive other symbols.
+ if (isa<NonStaticGlobalSpaceRegion>(R)) {
+ BindingsTy B = GetBindings(store);
+ return VBFactory.Add(B, R, V).getRoot();
+ }
+
ASTContext &C = StateMgr.getContext();
// Special case: handle store of pointer values (Loc) to pointers via
@@ -251,7 +285,7 @@ Store BasicStoreManager::Remove(Store store, Loc loc) {
}
}
-const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
+const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
@@ -263,14 +297,14 @@ const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
// Iterate over the variable bindings.
for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
if (const VarRegion *VR = dyn_cast<VarRegion>(I.getKey())) {
- if (SymReaper.isLive(Loc, VR))
+ if (SymReaper.isLive(VR))
RegionRoots.push_back(VR);
else
continue;
}
- else if (isa<ObjCIvarRegion>(I.getKey())) {
+ else if (isa<ObjCIvarRegion>(I.getKey()) ||
+ isa<NonStaticGlobalSpaceRegion>(I.getKey()))
RegionRoots.push_back(I.getKey());
- }
else
continue;
@@ -292,7 +326,8 @@ const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
SymReaper.markLive(SymR->getSymbol());
break;
}
- else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR)) {
+ else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR) ||
+ isa<NonStaticGlobalSpaceRegion>(MR)) {
if (Marked.count(MR))
break;
@@ -475,7 +510,8 @@ void BasicStoreManager::iterBindings(Store store, BindingsHandler& f) {
BindingsTy B = GetBindings(store);
for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I)
- f.HandleBinding(*this, store, I.getKey(), I.getData());
+ if (!f.HandleBinding(*this, store, I.getKey(), I.getData()))
+ return;
}
@@ -485,6 +521,49 @@ StoreManager::BindingsHandler::~BindingsHandler() {}
// Binding invalidation.
//===----------------------------------------------------------------------===//
+
+Store BasicStoreManager::InvalidateRegions(Store store,
+ const MemRegion * const *I,
+ const MemRegion * const *End,
+ const Expr *E, unsigned Count,
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals) {
+ if (invalidateGlobals) {
+ BindingsTy B = GetBindings(store);
+ for (BindingsTy::iterator I=B.begin(), End=B.end(); I != End; ++I) {
+ const MemRegion *R = I.getKey();
+ if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace()))
+ store = InvalidateRegion(store, R, E, Count, IS);
+ }
+ }
+
+ for ( ; I != End ; ++I) {
+ const MemRegion *R = *I;
+ // Don't invalidate globals twice.
+ if (invalidateGlobals) {
+ if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace()))
+ continue;
+ }
+ store = InvalidateRegion(store, *I, E, Count, IS);
+ }
+
+ // FIXME: This is copy-and-paste from RegionStore.cpp.
+ if (invalidateGlobals) {
+ // Bind the non-static globals memory space to a new symbol that we will
+ // use to derive the bindings for all non-static globals.
+ const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion();
+ SVal V =
+ ValMgr.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, E,
+ /* symbol type, doesn't matter */ Ctx.IntTy,
+ Count);
+
+ store = Bind(store, loc::MemRegionVal(GS), V);
+ }
+
+ return store;
+}
+
+
Store BasicStoreManager::InvalidateRegion(Store store,
const MemRegion *R,
const Expr *E,
diff --git a/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp b/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp
index 3bcc03f..0422d80 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp
@@ -925,7 +925,7 @@ public:
// statement (if it doesn't already exist).
// FIXME: Should handle CXXTryStmt if analyser starts supporting C++.
if (const CompoundStmt *CS =
- PDB.getCodeDecl().getCompoundBody())
+ dyn_cast_or_null<CompoundStmt>(PDB.getCodeDecl().getBody()))
if (!CS->body_empty()) {
SourceLocation Loc = (*CS->body_begin())->getLocStart();
rawAddEdge(PathDiagnosticLocation(Loc, PDB.getSourceManager()));
@@ -1403,7 +1403,7 @@ MakeReportGraph(const ExplodedGraph* G,
// Create a new (third!) graph with a single path. This is the graph
// that will be returned to the caller.
- ExplodedGraph *GNew = new ExplodedGraph(GTrim->getContext());
+ ExplodedGraph *GNew = new ExplodedGraph();
// Sometimes the trimmed graph can contain a cycle. Perform a reverse BFS
// to the root node, and then construct a new graph that contains only
diff --git a/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp
index 9c8b516..057e474 100644
--- a/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp
@@ -57,15 +57,24 @@ bool BuiltinFunctionChecker::EvalCallExpr(CheckerContext &C,const CallExpr *CE){
case Builtin::BI__builtin_alloca: {
// FIXME: Refactor into StoreManager itself?
MemRegionManager& RM = C.getStoreManager().getRegionManager();
- const MemRegion* R =
+ const AllocaRegion* R =
RM.getAllocaRegion(CE, C.getNodeBuilder().getCurrentBlockCount(),
C.getPredecessor()->getLocationContext());
// Set the extent of the region in bytes. This enables us to use the
// SVal of the argument directly. If we save the extent in bits, we
// cannot represent values like symbol*8.
- SVal Extent = state->getSVal(*(CE->arg_begin()));
- state = C.getStoreManager().setExtent(state, R, Extent);
+ DefinedOrUnknownSVal Size =
+ cast<DefinedOrUnknownSVal>(state->getSVal(*(CE->arg_begin())));
+
+ ValueManager& ValMgr = C.getValueManager();
+ DefinedOrUnknownSVal Extent = R->getExtent(ValMgr);
+
+ SValuator& SVator = ValMgr.getSValuator();
+ DefinedOrUnknownSVal ExtentMatchesSizeArg =
+ SVator.EvalEQ(state, Extent, Size);
+ state = state->Assume(ExtentMatchesSizeArg, true);
+
C.GenerateNode(state->BindExpr(CE, loc::MemRegionVal(R)));
return true;
}
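
A hedged sketch of what the new extent assumption enables for analyzed code:

    // char *p = (char *)__builtin_alloca(n);
    // The AllocaRegion's symbolic extent is now constrained to equal the
    // SVal of 'n', so a bounds checker can later conclude that p[n - 1] is
    // in bounds while p[n] is one past the end, instead of treating the
    // region's size as unknown.
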
diff --git a/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp b/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp
index 42e6f67..3c74cd8 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp
@@ -228,111 +228,111 @@ public:
ErrorOverAutorelease,
ErrorReturnedNotOwned
};
-
+
private:
Kind kind;
RetEffect::ObjKind okind;
unsigned Cnt;
unsigned ACnt;
QualType T;
-
+
RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t)
: kind(k), okind(o), Cnt(cnt), ACnt(acnt), T(t) {}
-
+
RefVal(Kind k, unsigned cnt = 0)
: kind(k), okind(RetEffect::AnyObj), Cnt(cnt), ACnt(0) {}
-
+
public:
Kind getKind() const { return kind; }
-
+
RetEffect::ObjKind getObjKind() const { return okind; }
-
+
unsigned getCount() const { return Cnt; }
unsigned getAutoreleaseCount() const { return ACnt; }
unsigned getCombinedCounts() const { return Cnt + ACnt; }
void clearCounts() { Cnt = 0; ACnt = 0; }
void setCount(unsigned i) { Cnt = i; }
void setAutoreleaseCount(unsigned i) { ACnt = i; }
-
+
QualType getType() const { return T; }
-
+
// Useful predicates.
-
+
static bool isError(Kind k) { return k >= ERROR_START; }
-
+
static bool isLeak(Kind k) { return k >= ERROR_LEAK_START; }
-
+
bool isOwned() const {
return getKind() == Owned;
}
-
+
bool isNotOwned() const {
return getKind() == NotOwned;
}
-
+
bool isReturnedOwned() const {
return getKind() == ReturnedOwned;
}
-
+
bool isReturnedNotOwned() const {
return getKind() == ReturnedNotOwned;
}
-
+
bool isNonLeakError() const {
Kind k = getKind();
return isError(k) && !isLeak(k);
}
-
+
static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
unsigned Count = 1) {
return RefVal(Owned, o, Count, 0, t);
}
-
+
static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
unsigned Count = 0) {
return RefVal(NotOwned, o, Count, 0, t);
}
-
+
// Comparison, profiling, and pretty-printing.
-
+
bool operator==(const RefVal& X) const {
return kind == X.kind && Cnt == X.Cnt && T == X.T && ACnt == X.ACnt;
}
-
+
RefVal operator-(size_t i) const {
return RefVal(getKind(), getObjKind(), getCount() - i,
getAutoreleaseCount(), getType());
}
-
+
RefVal operator+(size_t i) const {
return RefVal(getKind(), getObjKind(), getCount() + i,
getAutoreleaseCount(), getType());
}
-
+
RefVal operator^(Kind k) const {
return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(),
getType());
}
-
+
RefVal autorelease() const {
return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1,
getType());
}
-
+
void Profile(llvm::FoldingSetNodeID& ID) const {
ID.AddInteger((unsigned) kind);
ID.AddInteger(Cnt);
ID.AddInteger(ACnt);
ID.Add(T);
}
-
+
void print(llvm::raw_ostream& Out) const;
};
void RefVal::print(llvm::raw_ostream& Out) const {
if (!T.isNull())
Out << "Tracked Type:" << T.getAsString() << '\n';
-
+
switch (getKind()) {
default: assert(false);
case Owned: {
@@ -341,69 +341,69 @@ void RefVal::print(llvm::raw_ostream& Out) const {
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case NotOwned: {
Out << "NotOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case ReturnedOwned: {
Out << "ReturnedOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case ReturnedNotOwned: {
Out << "ReturnedNotOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case Released:
Out << "Released";
break;
-
+
case ErrorDeallocGC:
Out << "-dealloc (GC)";
break;
-
+
case ErrorDeallocNotOwned:
Out << "-dealloc (not-owned)";
break;
-
+
case ErrorLeak:
Out << "Leaked";
break;
-
+
case ErrorLeakReturned:
Out << "Leaked (Bad naming)";
break;
-
+
case ErrorGCLeakReturned:
Out << "Leaked (GC-ed at return)";
break;
-
+
case ErrorUseAfterRelease:
Out << "Use-After-Release [ERROR]";
break;
-
+
case ErrorReleaseNotOwned:
Out << "Release of Not-Owned [ERROR]";
break;
-
+
case RefVal::ErrorOverAutorelease:
Out << "Over autoreleased";
break;
-
+
case RefVal::ErrorReturnedNotOwned:
Out << "Non-owned object returned instead of owned";
break;
}
-
+
if (ACnt) {
Out << " [ARC +" << ACnt << ']';
}
@@ -897,7 +897,7 @@ public:
RetainSummary *getInstanceMethodSummary(const ObjCMessageExpr *ME,
const GRState *state,
const LocationContext *LC);
-
+
RetainSummary* getInstanceMethodSummary(const ObjCMessageExpr* ME,
const ObjCInterfaceDecl* ID) {
return getInstanceMethodSummary(ME->getSelector(), 0,
@@ -927,7 +927,7 @@ public:
break;
}
- return getClassMethodSummary(ME->getSelector(),
+ return getClassMethodSummary(ME->getSelector(),
Class? Class->getIdentifier() : 0,
Class,
ME->getMethodDecl(), ME->getType());
@@ -1419,16 +1419,16 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME,
if (Receiver) {
receiverV = state->getSValAsScalarOrLoc(Receiver);
-
+
// FIXME: Eventually replace the use of state->get<RefBindings> with
// a generic API for reasoning about the Objective-C types of symbolic
// objects.
if (SymbolRef Sym = receiverV.getAsLocSymbol())
if (const RefVal *T = state->get<RefBindings>(Sym))
- if (const ObjCObjectPointerType* PT =
+ if (const ObjCObjectPointerType* PT =
T->getType()->getAs<ObjCObjectPointerType>())
ID = PT->getInterfaceDecl();
-
+
// FIXME: this is a hack. This may or may not be the actual method
// that is called.
if (!ID) {
@@ -1444,7 +1444,7 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME,
// FIXME: The receiver could be a reference to a class, meaning that
// we should use the class method.
RetainSummary *Summ = getInstanceMethodSummary(ME, ID);
-
+
  // Special-case: are we sending a message to "self"?
// This is a hack. When we have full-IP this should be removed.
if (isa<ObjCMethodDecl>(LC->getDecl()) && Receiver) {
@@ -1461,7 +1461,7 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME,
}
}
}
-
+
return Summ ? Summ : getDefaultSummary();
}
@@ -1849,7 +1849,7 @@ public:
GRExprEngine& Engine,
GRStmtNodeBuilder& Builder,
ExplodedNode* Pred,
- Stmt* S, const GRState* state,
+ const GRState* state,
SymbolReaper& SymReaper);
std::pair<ExplodedNode*, const GRState *>
@@ -2619,7 +2619,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
SymbolRef ErrorSym = 0;
llvm::SmallVector<const MemRegion*, 10> RegionsToInvalidate;
-
+
for (ExprIterator I = arg_beg; I != arg_end; ++I, ++idx) {
SVal V = state->getSValAsScalarOrLoc(*I);
SymbolRef Sym = V.getAsLocSymbol();
@@ -2659,7 +2659,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
  // appropriately delegated to the respective StoreManagers while
// still allowing us to do checker-specific logic (e.g.,
// invalidating reference counts), probably via callbacks.
- if (ER->getElementType()->isIntegralType()) {
+ if (ER->getElementType()->isIntegralOrEnumerationType()) {
const MemRegion *superReg = ER->getSuperRegion();
if (isa<VarRegion>(superReg) || isa<FieldRegion>(superReg) ||
isa<ObjCIvarRegion>(superReg))
@@ -2667,7 +2667,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
}
// FIXME: What about layers of ElementRegions?
}
-
+
// Mark this region for invalidation. We batch invalidate regions
// below for efficiency.
RegionsToInvalidate.push_back(R);
@@ -2687,37 +2687,39 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
goto tryAgain;
}
}
-
+
// Block calls result in all captured values passed-via-reference to be
// invalidated.
if (const BlockDataRegion *BR = dyn_cast_or_null<BlockDataRegion>(Callee)) {
RegionsToInvalidate.push_back(BR);
}
-
+
  // Invalidate the regions we designated for invalidation, using the batch
  // invalidation API.
- if (!RegionsToInvalidate.empty()) {
- // FIXME: We can have collisions on the conjured symbol if the
- // expression *I also creates conjured symbols. We probably want
- // to identify conjured symbols by an expression pair: the enclosing
- // expression (the context) and the expression itself. This should
- // disambiguate conjured symbols.
- unsigned Count = Builder.getCurrentBlockCount();
- StoreManager& StoreMgr = Eng.getStateManager().getStoreManager();
-
-
- StoreManager::InvalidatedSymbols IS;
- Store store = state->getStore();
- store = StoreMgr.InvalidateRegions(store, RegionsToInvalidate.data(),
- RegionsToInvalidate.data() +
- RegionsToInvalidate.size(),
- Ex, Count, &IS);
- state = state->makeWithStore(store);
- for (StoreManager::InvalidatedSymbols::iterator I = IS.begin(),
- E = IS.end(); I!=E; ++I) {
- // Remove any existing reference-count binding.
- state = state->remove<RefBindings>(*I);
- }
+
+ // FIXME: We can have collisions on the conjured symbol if the
+ // expression *I also creates conjured symbols. We probably want
+ // to identify conjured symbols by an expression pair: the enclosing
+ // expression (the context) and the expression itself. This should
+ // disambiguate conjured symbols.
+ unsigned Count = Builder.getCurrentBlockCount();
+ StoreManager& StoreMgr = Eng.getStateManager().getStoreManager();
+ StoreManager::InvalidatedSymbols IS;
+ Store store = state->getStore();
+
+ // NOTE: Even if RegionsToInvalidate is empty, we must still invalidate
+ // global variables.
+ store = StoreMgr.InvalidateRegions(store, RegionsToInvalidate.data(),
+ RegionsToInvalidate.data() +
+ RegionsToInvalidate.size(),
+ Ex, Count, &IS,
+ /* invalidateGlobals = */ true);
+
+ state = state->makeWithStore(store);
+ for (StoreManager::InvalidatedSymbols::iterator I = IS.begin(),
+ E = IS.end(); I!=E; ++I) {
+ // Remove any existing reference-count binding.
+ state = state->remove<RefBindings>(*I);
}
// Evaluate the effect on the message receiver.
@@ -2862,7 +2864,7 @@ void CFRefCount::EvalCall(ExplodedNodeSet& Dst,
ExplodedNode* Pred) {
RetainSummary *Summ = 0;
-
+
// FIXME: Better support for blocks. For now we stop tracking anything
// that is passed to blocks.
// FIXME: Need to handle variables that are "captured" by the block.
@@ -3400,10 +3402,9 @@ void CFRefCount::EvalDeadSymbols(ExplodedNodeSet& Dst,
GRExprEngine& Eng,
GRStmtNodeBuilder& Builder,
ExplodedNode* Pred,
- Stmt* S,
const GRState* state,
SymbolReaper& SymReaper) {
-
+ Stmt *S = Builder.getStmt();
RefBindings B = state->get<RefBindings>();
// Update counts from autorelease pools
@@ -3501,7 +3502,7 @@ class RetainReleaseChecker
public:
RetainReleaseChecker(CFRefCount *tf) : TF(tf) {}
static void* getTag() { static int x = 0; return &x; }
-
+
void PostVisitBlockExpr(CheckerContext &C, const BlockExpr *BE);
};
} // end anonymous namespace
@@ -3509,29 +3510,29 @@ public:
void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C,
const BlockExpr *BE) {
-
+
  // Scan the BlockDeclRefExprs for any object the retain/release checker
- // may be tracking.
+ // may be tracking.
if (!BE->hasBlockDeclRefExprs())
return;
-
+
const GRState *state = C.getState();
const BlockDataRegion *R =
cast<BlockDataRegion>(state->getSVal(BE).getAsRegion());
-
+
BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
E = R->referenced_vars_end();
-
+
if (I == E)
return;
-
+
// FIXME: For now we invalidate the tracking of all symbols passed to blocks
// via captured variables, even though captured variables result in a copy
// and in implicit increment/decrement of a retain count.
llvm::SmallVector<const MemRegion*, 10> Regions;
const LocationContext *LC = C.getPredecessor()->getLocationContext();
MemRegionManager &MemMgr = C.getValueManager().getRegionManager();
-
+
for ( ; I != E; ++I) {
const VarRegion *VR = *I;
if (VR->getSuperRegion() == R) {
@@ -3539,7 +3540,7 @@ void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C,
}
Regions.push_back(VR);
}
-
+
state =
state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
Regions.data() + Regions.size()).getState();
@@ -3552,28 +3553,28 @@ void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C,
void CFRefCount::RegisterChecks(GRExprEngine& Eng) {
BugReporter &BR = Eng.getBugReporter();
-
+
useAfterRelease = new UseAfterRelease(this);
BR.Register(useAfterRelease);
-
+
releaseNotOwned = new BadRelease(this);
BR.Register(releaseNotOwned);
-
+
deallocGC = new DeallocGC(this);
BR.Register(deallocGC);
-
+
deallocNotOwned = new DeallocNotOwned(this);
BR.Register(deallocNotOwned);
-
+
overAutorelease = new OverAutorelease(this);
BR.Register(overAutorelease);
-
+
returnNotOwnedForOwned = new ReturnedNotOwnedForOwned(this);
BR.Register(returnNotOwnedForOwned);
-
+
// First register "return" leaks.
const char* name = 0;
-
+
if (isGCEnabled())
name = "Leak of returned object when using garbage collection";
else if (getLangOptions().getGCMode() == LangOptions::HybridGC)
@@ -3583,12 +3584,12 @@ void CFRefCount::RegisterChecks(GRExprEngine& Eng) {
assert(getLangOptions().getGCMode() == LangOptions::NonGC);
name = "Leak of returned object";
}
-
+
// Leaks should not be reported if they are post-dominated by a sink.
leakAtReturn = new LeakAtReturn(this, name);
leakAtReturn->setSuppressOnSink(true);
BR.Register(leakAtReturn);
-
+
// Second, register leaks within a function/method.
if (isGCEnabled())
name = "Leak of object when using garbage collection";
@@ -3599,15 +3600,15 @@ void CFRefCount::RegisterChecks(GRExprEngine& Eng) {
assert(getLangOptions().getGCMode() == LangOptions::NonGC);
name = "Leak";
}
-
+
// Leaks should not be reported if they are post-dominated by sinks.
leakWithinFunction = new LeakWithinFunction(this, name);
leakWithinFunction->setSuppressOnSink(true);
BR.Register(leakWithinFunction);
-
+
// Save the reference to the BugReporter.
this->BR = &BR;
-
+
// Register the RetainReleaseChecker with the GRExprEngine object.
// Functionality in CFRefCount will be migrated to RetainReleaseChecker
// over time.
diff --git a/contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt
index 9c6adc6..259346a 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt
@@ -3,6 +3,7 @@ set(LLVM_NO_RTTI 1)
add_clang_library(clangChecker
AdjustedReturnValueChecker.cpp
AggExprVisitor.cpp
+ AnalysisConsumer.cpp
ArrayBoundChecker.cpp
AttrNonNullChecker.cpp
BasicConstraintManager.cpp
@@ -12,63 +13,70 @@ add_clang_library(clangChecker
BugReporter.cpp
BugReporterVisitors.cpp
BuiltinFunctionChecker.cpp
+ CFRefCount.cpp
CallAndMessageChecker.cpp
CallInliner.cpp
CastSizeChecker.cpp
CastToStructChecker.cpp
- CFRefCount.cpp
CheckDeadStores.cpp
- Checker.cpp
CheckObjCDealloc.cpp
CheckObjCInstMethSignature.cpp
CheckSecuritySyntaxOnly.cpp
CheckSizeofPointer.cpp
+ Checker.cpp
CocoaConventions.cpp
+ CStringChecker.cpp
DereferenceChecker.cpp
DivZeroChecker.cpp
Environment.cpp
ExplodedGraph.cpp
FixedAddressChecker.cpp
FlatStore.cpp
+ FrontendActions.cpp
GRBlockCounter.cpp
- GRCoreEngine.cpp
GRCXXExprEngine.cpp
+ GRCoreEngine.cpp
GRExprEngine.cpp
GRExprEngineExperimentalChecks.cpp
GRState.cpp
+ HTMLDiagnostics.cpp
+ IdempotentOperationChecker.cpp
LLVMConventionsChecker.cpp
MacOSXAPIChecker.cpp
MallocChecker.cpp
ManagerRegistry.cpp
MemRegion.cpp
- NoReturnFunctionChecker.cpp
NSAutoreleasePoolChecker.cpp
NSErrorChecker.cpp
- ObjCUnusedIVarsChecker.cpp
+ NoReturnFunctionChecker.cpp
OSAtomicChecker.cpp
+ ObjCUnusedIVarsChecker.cpp
PathDiagnostic.cpp
+ PlistDiagnostics.cpp
PointerArithChecker.cpp
PointerSubChecker.cpp
PthreadLockChecker.cpp
RangeConstraintManager.cpp
RegionStore.cpp
ReturnPointerRangeChecker.cpp
- ReturnStackAddressChecker.cpp
ReturnUndefChecker.cpp
+ SVals.cpp
+ SValuator.cpp
SimpleConstraintManager.cpp
SimpleSValuator.cpp
+ StackAddrLeakChecker.cpp
Store.cpp
- SVals.cpp
- SValuator.cpp
+ StreamChecker.cpp
SymbolManager.cpp
UndefBranchChecker.cpp
UndefCapturedBlockVarChecker.cpp
+ UndefResultChecker.cpp
UndefinedArraySubscriptChecker.cpp
UndefinedAssignmentChecker.cpp
- UndefResultChecker.cpp
UnixAPIChecker.cpp
- ValueManager.cpp
VLASizeChecker.cpp
+ ValueManager.cpp
)
-add_dependencies(clangChecker ClangStmtNodes)
+add_dependencies(clangChecker ClangAttrClasses ClangAttrList ClangDeclNodes
+ ClangStmtNodes)
diff --git a/contrib/llvm/tools/clang/lib/Checker/CStringChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/CStringChecker.cpp
new file mode 100644
index 0000000..a92d409
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/CStringChecker.cpp
@@ -0,0 +1,525 @@
+//= CStringChecker.cpp - Checks calls to C string functions --------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines CStringChecker, which is an assortment of checks on calls
+// to functions in <string.h>.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineExperimentalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+
+namespace {
+class CStringChecker : public CheckerVisitor<CStringChecker> {
+ BugType *BT_Null, *BT_Bounds, *BT_Overlap;
+public:
+ CStringChecker()
+ : BT_Null(0), BT_Bounds(0), BT_Overlap(0) {}
+ static void *getTag() { static int tag; return &tag; }
+
+ bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
+
+ typedef void (CStringChecker::*FnCheck)(CheckerContext &, const CallExpr *);
+
+ void EvalMemcpy(CheckerContext &C, const CallExpr *CE);
+ void EvalMemmove(CheckerContext &C, const CallExpr *CE);
+ void EvalBcopy(CheckerContext &C, const CallExpr *CE);
+ void EvalCopyCommon(CheckerContext &C, const GRState *state,
+ const Expr *Size, const Expr *Source, const Expr *Dest,
+ bool Restricted = false);
+
+ void EvalMemcmp(CheckerContext &C, const CallExpr *CE);
+
+ // Utility methods
+ std::pair<const GRState*, const GRState*>
+ AssumeZero(CheckerContext &C, const GRState *state, SVal V, QualType Ty);
+
+ const GRState *CheckNonNull(CheckerContext &C, const GRState *state,
+ const Expr *S, SVal l);
+ const GRState *CheckLocation(CheckerContext &C, const GRState *state,
+ const Expr *S, SVal l);
+ const GRState *CheckBufferAccess(CheckerContext &C, const GRState *state,
+ const Expr *Size,
+ const Expr *FirstBuf,
+ const Expr *SecondBuf = NULL);
+ const GRState *CheckOverlap(CheckerContext &C, const GRState *state,
+ const Expr *Size, const Expr *First,
+ const Expr *Second);
+ void EmitOverlapBug(CheckerContext &C, const GRState *state,
+ const Stmt *First, const Stmt *Second);
+};
+} //end anonymous namespace
+
+void clang::RegisterCStringChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new CStringChecker());
+}
+
+//===----------------------------------------------------------------------===//
+// Individual checks and utility methods.
+//===----------------------------------------------------------------------===//
+
+std::pair<const GRState*, const GRState*>
+CStringChecker::AssumeZero(CheckerContext &C, const GRState *state, SVal V,
+ QualType Ty) {
+ DefinedSVal *Val = dyn_cast<DefinedSVal>(&V);
+ if (!Val)
+ return std::pair<const GRState*, const GRState *>(state, state);
+
+ ValueManager &ValMgr = C.getValueManager();
+ SValuator &SV = ValMgr.getSValuator();
+
+ DefinedOrUnknownSVal Zero = ValMgr.makeZeroVal(Ty);
+ DefinedOrUnknownSVal ValIsZero = SV.EvalEQ(state, *Val, Zero);
+
+ return state->Assume(ValIsZero);
+}
+
+const GRState *CStringChecker::CheckNonNull(CheckerContext &C,
+ const GRState *state,
+ const Expr *S, SVal l) {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ const GRState *stateNull, *stateNonNull;
+ llvm::tie(stateNull, stateNonNull) = AssumeZero(C, state, l, S->getType());
+
+ if (stateNull && !stateNonNull) {
+ ExplodedNode *N = C.GenerateSink(stateNull);
+ if (!N)
+ return NULL;
+
+ if (!BT_Null)
+ BT_Null = new BuiltinBug("API",
+ "Null pointer argument in call to byte string function");
+
+ // Generate a report for this bug.
+ BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Null);
+ EnhancedBugReport *report = new EnhancedBugReport(*BT,
+ BT->getDescription(), N);
+
+ report->addRange(S->getSourceRange());
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, S);
+ C.EmitReport(report);
+ return NULL;
+ }
+
+ // From here on, assume that the value is non-null.
+ assert(stateNonNull);
+ return stateNonNull;
+}
+
+// FIXME: This was originally copied from ArrayBoundChecker.cpp. Refactor?
+const GRState *CStringChecker::CheckLocation(CheckerContext &C,
+ const GRState *state,
+ const Expr *S, SVal l) {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ // Check for out of bound array element access.
+ const MemRegion *R = l.getAsRegion();
+ if (!R)
+ return state;
+
+ const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+ if (!ER)
+ return state;
+
+ assert(ER->getValueType(C.getASTContext()) == C.getASTContext().CharTy &&
+ "CheckLocation should only be called with char* ElementRegions");
+
+ // Get the size of the array.
+ const SubRegion *Super = cast<SubRegion>(ER->getSuperRegion());
+ ValueManager &ValMgr = C.getValueManager();
+ SVal Extent = ValMgr.convertToArrayIndex(Super->getExtent(ValMgr));
+ DefinedOrUnknownSVal Size = cast<DefinedOrUnknownSVal>(Extent);
+
+ // Get the index of the accessed element.
+ DefinedOrUnknownSVal &Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
+
+ const GRState *StInBound = state->AssumeInBound(Idx, Size, true);
+ const GRState *StOutBound = state->AssumeInBound(Idx, Size, false);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.GenerateSink(StOutBound);
+ if (!N)
+ return NULL;
+
+ if (!BT_Bounds)
+ BT_Bounds = new BuiltinBug("Out-of-bound array access",
+ "Byte string function accesses out-of-bound array element "
+ "(buffer overflow)");
+
+ // FIXME: It would be nice to eventually make this diagnostic more clear,
+ // e.g., by referencing the original declaration or by saying *why* this
+ // reference is outside the range.
+
+ // Generate a report for this bug.
+ BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Bounds);
+ RangedBugReport *report = new RangedBugReport(*BT, BT->getDescription(), N);
+
+ report->addRange(S->getSourceRange());
+ C.EmitReport(report);
+ return NULL;
+ }
+
+ // Array bound check succeeded. From this point forward the array bound
+  // check should always succeed.
+ return StInBound;
+}
+
+const GRState *CStringChecker::CheckBufferAccess(CheckerContext &C,
+ const GRState *state,
+ const Expr *Size,
+ const Expr *FirstBuf,
+ const Expr *SecondBuf) {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ ValueManager &VM = C.getValueManager();
+ SValuator &SV = VM.getSValuator();
+ ASTContext &Ctx = C.getASTContext();
+
+ QualType SizeTy = Ctx.getSizeType();
+ QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
+
+ // Check that the first buffer is non-null.
+ SVal BufVal = state->getSVal(FirstBuf);
+ state = CheckNonNull(C, state, FirstBuf, BufVal);
+ if (!state)
+ return NULL;
+
+ // Get the access length and make sure it is known.
+ SVal LengthVal = state->getSVal(Size);
+ NonLoc *Length = dyn_cast<NonLoc>(&LengthVal);
+ if (!Length)
+ return state;
+
+ // Compute the offset of the last element to be accessed: size-1.
+ NonLoc One = cast<NonLoc>(VM.makeIntVal(1, SizeTy));
+ NonLoc LastOffset = cast<NonLoc>(SV.EvalBinOpNN(state, BinaryOperator::Sub,
+ *Length, One, SizeTy));
+
+  // Check that the first buffer is sufficiently long.
+ Loc BufStart = cast<Loc>(SV.EvalCast(BufVal, PtrTy, FirstBuf->getType()));
+ SVal BufEnd
+ = SV.EvalBinOpLN(state, BinaryOperator::Add, BufStart, LastOffset, PtrTy);
+ state = CheckLocation(C, state, FirstBuf, BufEnd);
+
+ // If the buffer isn't large enough, abort.
+ if (!state)
+ return NULL;
+
+ // If there's a second buffer, check it as well.
+ if (SecondBuf) {
+ BufVal = state->getSVal(SecondBuf);
+ state = CheckNonNull(C, state, SecondBuf, BufVal);
+ if (!state)
+ return NULL;
+
+ BufStart = cast<Loc>(SV.EvalCast(BufVal, PtrTy, SecondBuf->getType()));
+ BufEnd
+ = SV.EvalBinOpLN(state, BinaryOperator::Add, BufStart, LastOffset, PtrTy);
+ state = CheckLocation(C, state, SecondBuf, BufEnd);
+ }
+
+ // Large enough or not, return this state!
+ return state;
+}
+
+const GRState *CStringChecker::CheckOverlap(CheckerContext &C,
+ const GRState *state,
+ const Expr *Size,
+ const Expr *First,
+ const Expr *Second) {
+ // Do a simple check for overlap: if the two arguments are from the same
+ // buffer, see if the end of the first is greater than the start of the second
+ // or vice versa.
+
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ ValueManager &VM = state->getStateManager().getValueManager();
+ SValuator &SV = VM.getSValuator();
+ ASTContext &Ctx = VM.getContext();
+ const GRState *stateTrue, *stateFalse;
+
+ // Get the buffer values and make sure they're known locations.
+ SVal FirstVal = state->getSVal(First);
+ SVal SecondVal = state->getSVal(Second);
+
+ Loc *FirstLoc = dyn_cast<Loc>(&FirstVal);
+ if (!FirstLoc)
+ return state;
+
+ Loc *SecondLoc = dyn_cast<Loc>(&SecondVal);
+ if (!SecondLoc)
+ return state;
+
+ // Are the two values the same?
+ DefinedOrUnknownSVal EqualTest = SV.EvalEQ(state, *FirstLoc, *SecondLoc);
+ llvm::tie(stateTrue, stateFalse) = state->Assume(EqualTest);
+
+ if (stateTrue && !stateFalse) {
+ // If the values are known to be equal, that's automatically an overlap.
+ EmitOverlapBug(C, stateTrue, First, Second);
+ return NULL;
+ }
+
+ // Assume the two expressions are not equal.
+ assert(stateFalse);
+ state = stateFalse;
+
+ // Which value comes first?
+ QualType CmpTy = Ctx.IntTy;
+ SVal Reverse = SV.EvalBinOpLL(state, BinaryOperator::GT,
+ *FirstLoc, *SecondLoc, CmpTy);
+ DefinedOrUnknownSVal *ReverseTest = dyn_cast<DefinedOrUnknownSVal>(&Reverse);
+ if (!ReverseTest)
+ return state;
+
+ llvm::tie(stateTrue, stateFalse) = state->Assume(*ReverseTest);
+
+ if (stateTrue) {
+ if (stateFalse) {
+ // If we don't know which one comes first, we can't perform this test.
+ return state;
+ } else {
+ // Switch the values so that FirstVal is before SecondVal.
+ Loc *tmpLoc = FirstLoc;
+ FirstLoc = SecondLoc;
+ SecondLoc = tmpLoc;
+
+ // Switch the Exprs as well, so that they still correspond.
+ const Expr *tmpExpr = First;
+ First = Second;
+ Second = tmpExpr;
+ }
+ }
+
+ // Get the length, and make sure it too is known.
+ SVal LengthVal = state->getSVal(Size);
+ NonLoc *Length = dyn_cast<NonLoc>(&LengthVal);
+ if (!Length)
+ return state;
+
+ // Convert the first buffer's start address to char*.
+ // Bail out if the cast fails.
+ QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
+ SVal FirstStart = SV.EvalCast(*FirstLoc, CharPtrTy, First->getType());
+ Loc *FirstStartLoc = dyn_cast<Loc>(&FirstStart);
+ if (!FirstStartLoc)
+ return state;
+
+ // Compute the end of the first buffer. Bail out if THAT fails.
+ SVal FirstEnd = SV.EvalBinOpLN(state, BinaryOperator::Add,
+ *FirstStartLoc, *Length, CharPtrTy);
+ Loc *FirstEndLoc = dyn_cast<Loc>(&FirstEnd);
+ if (!FirstEndLoc)
+ return state;
+
+ // Is the end of the first buffer past the start of the second buffer?
+ SVal Overlap = SV.EvalBinOpLL(state, BinaryOperator::GT,
+ *FirstEndLoc, *SecondLoc, CmpTy);
+ DefinedOrUnknownSVal *OverlapTest = dyn_cast<DefinedOrUnknownSVal>(&Overlap);
+ if (!OverlapTest)
+ return state;
+
+ llvm::tie(stateTrue, stateFalse) = state->Assume(*OverlapTest);
+
+ if (stateTrue && !stateFalse) {
+ // Overlap!
+ EmitOverlapBug(C, stateTrue, First, Second);
+ return NULL;
+ }
+
+ // Assume the two expressions don't overlap.
+ assert(stateFalse);
+ return stateFalse;
+}
+
+void CStringChecker::EmitOverlapBug(CheckerContext &C, const GRState *state,
+ const Stmt *First, const Stmt *Second) {
+ ExplodedNode *N = C.GenerateSink(state);
+ if (!N)
+ return;
+
+ if (!BT_Overlap)
+ BT_Overlap = new BugType("Unix API", "Improper arguments");
+
+ // Generate a report for this bug.
+ RangedBugReport *report =
+ new RangedBugReport(*BT_Overlap,
+ "Arguments must not be overlapping buffers", N);
+ report->addRange(First->getSourceRange());
+ report->addRange(Second->getSourceRange());
+
+ C.EmitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// Evaluation of individual function calls.
+//===----------------------------------------------------------------------===//
+
+void CStringChecker::EvalCopyCommon(CheckerContext &C, const GRState *state,
+ const Expr *Size, const Expr *Dest,
+ const Expr *Source, bool Restricted) {
+ // See if the size argument is zero.
+ SVal SizeVal = state->getSVal(Size);
+ QualType SizeTy = Size->getType();
+
+ const GRState *StZeroSize, *StNonZeroSize;
+ llvm::tie(StZeroSize, StNonZeroSize) = AssumeZero(C, state, SizeVal, SizeTy);
+
+ // If the size is zero, there won't be any actual memory access.
+ if (StZeroSize)
+ C.addTransition(StZeroSize);
+
+ // If the size can be nonzero, we have to check the other arguments.
+ if (StNonZeroSize) {
+ state = StNonZeroSize;
+ state = CheckBufferAccess(C, state, Size, Dest, Source);
+ if (Restricted)
+ state = CheckOverlap(C, state, Size, Dest, Source);
+ if (state)
+ C.addTransition(state);
+ }
+}
+
+
+void CStringChecker::EvalMemcpy(CheckerContext &C, const CallExpr *CE) {
+ // void *memcpy(void *restrict dst, const void *restrict src, size_t n);
+ // The return value is the address of the destination buffer.
+ const Expr *Dest = CE->getArg(0);
+ const GRState *state = C.getState();
+ state = state->BindExpr(CE, state->getSVal(Dest));
+ EvalCopyCommon(C, state, CE->getArg(2), Dest, CE->getArg(1), true);
+}
+
+void CStringChecker::EvalMemmove(CheckerContext &C, const CallExpr *CE) {
+ // void *memmove(void *dst, const void *src, size_t n);
+ // The return value is the address of the destination buffer.
+ const Expr *Dest = CE->getArg(0);
+ const GRState *state = C.getState();
+ state = state->BindExpr(CE, state->getSVal(Dest));
+ EvalCopyCommon(C, state, CE->getArg(2), Dest, CE->getArg(1));
+}
+
+void CStringChecker::EvalBcopy(CheckerContext &C, const CallExpr *CE) {
+ // void bcopy(const void *src, void *dst, size_t n);
+ EvalCopyCommon(C, C.getState(), CE->getArg(2), CE->getArg(1), CE->getArg(0));
+}
+
+void CStringChecker::EvalMemcmp(CheckerContext &C, const CallExpr *CE) {
+ // int memcmp(const void *s1, const void *s2, size_t n);
+ const Expr *Left = CE->getArg(0);
+ const Expr *Right = CE->getArg(1);
+ const Expr *Size = CE->getArg(2);
+
+ const GRState *state = C.getState();
+ ValueManager &ValMgr = C.getValueManager();
+ SValuator &SV = ValMgr.getSValuator();
+
+ // See if the size argument is zero.
+ SVal SizeVal = state->getSVal(Size);
+ QualType SizeTy = Size->getType();
+
+ const GRState *StZeroSize, *StNonZeroSize;
+ llvm::tie(StZeroSize, StNonZeroSize) = AssumeZero(C, state, SizeVal, SizeTy);
+
+ // If the size can be zero, the result will be 0 in that case, and we don't
+ // have to check either of the buffers.
+ if (StZeroSize) {
+ state = StZeroSize;
+ state = state->BindExpr(CE, ValMgr.makeZeroVal(CE->getType()));
+ C.addTransition(state);
+ }
+
+ // If the size can be nonzero, we have to check the other arguments.
+ if (StNonZeroSize) {
+ state = StNonZeroSize;
+
+ // If we know the two buffers are the same, we know the result is 0.
+ // First, get the two buffers' addresses. Another checker will have already
+ // made sure they're not undefined.
+ DefinedOrUnknownSVal LV = cast<DefinedOrUnknownSVal>(state->getSVal(Left));
+ DefinedOrUnknownSVal RV = cast<DefinedOrUnknownSVal>(state->getSVal(Right));
+
+ // See if they are the same.
+ DefinedOrUnknownSVal SameBuf = SV.EvalEQ(state, LV, RV);
+ const GRState *StSameBuf, *StNotSameBuf;
+ llvm::tie(StSameBuf, StNotSameBuf) = state->Assume(SameBuf);
+
+ // If the two arguments might be the same buffer, we know the result is zero,
+ // and we only need to check one size.
+ if (StSameBuf) {
+ state = StSameBuf;
+ state = CheckBufferAccess(C, state, Size, Left);
+ if (state) {
+ state = StSameBuf->BindExpr(CE, ValMgr.makeZeroVal(CE->getType()));
+ C.addTransition(state);
+ }
+ }
+
+ // If the two arguments might be different buffers, we have to check the
+ // size of both of them.
+ if (StNotSameBuf) {
+ state = StNotSameBuf;
+ state = CheckBufferAccess(C, state, Size, Left, Right);
+ if (state) {
+ // The return value is the comparison result, which we don't know.
+ unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
+ SVal CmpV = ValMgr.getConjuredSymbolVal(NULL, CE, CE->getType(), Count);
+ state = state->BindExpr(CE, CmpV);
+ C.addTransition(state);
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// The driver method.
+//===----------------------------------------------------------------------===//
+
+bool CStringChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
+ // Get the callee. All the functions we care about are C functions
+ // with simple identifiers.
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ const FunctionDecl *FD = state->getSVal(Callee).getAsFunctionDecl();
+
+ if (!FD)
+ return false;
+
+ // Get the name of the callee. If it's a builtin, strip off the prefix.
+ llvm::StringRef Name = FD->getName();
+ if (Name.startswith("__builtin_"))
+ Name = Name.substr(10);
+
+ FnCheck EvalFunction = llvm::StringSwitch<FnCheck>(Name)
+ .Cases("memcpy", "__memcpy_chk", &CStringChecker::EvalMemcpy)
+ .Cases("memcmp", "bcmp", &CStringChecker::EvalMemcmp)
+ .Cases("memmove", "__memmove_chk", &CStringChecker::EvalMemmove)
+ .Case("bcopy", &CStringChecker::EvalBcopy)
+ .Default(NULL);
+
+ // If the callee isn't a string function, let another checker handle it.
+ if (!EvalFunction)
+ return false;
+
+ // Check and evaluate the call.
+ (this->*EvalFunction)(C, CE);
+ return true;
+}
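
EvalCallExpr above routes each recognized function name to a member-function pointer through llvm::StringSwitch. The idiom in self-contained form; Dispatcher and its handlers are hypothetical stand-ins for the checker:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    struct Dispatcher {
      typedef void (Dispatcher::*Handler)();
      void onCopy() {}
      void onCompare() {}

      bool dispatch(llvm::StringRef Name) {
        // Strip the "__builtin_" prefix so both spellings share one entry.
        if (Name.startswith("__builtin_"))
          Name = Name.substr(10);

        Handler H = llvm::StringSwitch<Handler>(Name)
          .Cases("memcpy", "__memcpy_chk", &Dispatcher::onCopy)
          .Cases("memcmp", "bcmp", &Dispatcher::onCompare)
          .Default(0);

        if (!H)
          return false;       // not ours; let another checker handle it
        (this->*H)();
        return true;
      }
    };
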
diff --git a/contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp b/contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp
index 88e1a05..c47e06c 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp
@@ -42,7 +42,7 @@ bool CallInliner::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
if (!FD)
return false;
- if (!FD->getBody(FD))
+ if (!FD->hasBody(FD))
return false;
// Now we have the definition of the callee, create a CallEnter node.
diff --git a/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp
index 754d775..a502c10 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp
@@ -44,7 +44,8 @@ void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) {
QualType ToPointeeTy = ToPTy->getPointeeType();
- const MemRegion *R = C.getState()->getSVal(E).getAsRegion();
+ const GRState *state = C.getState();
+ const MemRegion *R = state->getSVal(E).getAsRegion();
if (R == 0)
return;
@@ -52,17 +53,21 @@ void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) {
if (SR == 0)
return;
- llvm::Optional<SVal> V =
- C.getEngine().getStoreManager().getExtent(C.getState(), SR);
- if (!V)
- return;
+ ValueManager &ValMgr = C.getValueManager();
+ SVal Extent = SR->getExtent(ValMgr);
- const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(V);
- if (!CI)
+ SValuator &SVator = ValMgr.getSValuator();
+ const llvm::APSInt *ExtentInt = SVator.getKnownValue(state, Extent);
+ if (!ExtentInt)
return;
- CharUnits RegionSize = CharUnits::fromQuantity(CI->getValue().getSExtValue());
+ CharUnits RegionSize = CharUnits::fromQuantity(ExtentInt->getSExtValue());
CharUnits TypeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
+
+ // Ignore void, and a few other un-sizeable types.
+ if (TypeSize.isZero())
+ return;
+
if (RegionSize % TypeSize != 0) {
if (ExplodedNode *N = C.GenerateSink()) {
if (!BT)
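
The reworked CastSizeChecker asks the SValuator for a known concrete extent and then checks divisibility, now guarding against zero-sized pointee types such as void. The arithmetic reduces to this toy model (plain integers, not the checker itself):

    #include <stdint.h>

    // regionSize: bytes in the region being cast; typeSize: bytes in the
    // pointee type of the target pointer. Flag casts where the region
    // cannot hold a whole number of elements.
    bool castSizeLooksWrong(int64_t regionSize, int64_t typeSize) {
      if (typeSize == 0)
        return false;   // void and other unsized types: nothing to check
      return regionSize % typeSize != 0;
    }
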
diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp b/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp
index 74e12b1..af85c2f 100644
--- a/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp
@@ -191,8 +191,8 @@ void WalkAST::CheckLoopConditionForFloat(const ForStmt *FS) {
const DeclRefExpr *drRHS = dyn_cast<DeclRefExpr>(B->getRHS()->IgnoreParens());
// Does at least one of the variables have a floating point type?
- drLHS = drLHS && drLHS->getType()->isFloatingType() ? drLHS : NULL;
- drRHS = drRHS && drRHS->getType()->isFloatingType() ? drRHS : NULL;
+ drLHS = drLHS && drLHS->getType()->isRealFloatingType() ? drLHS : NULL;
+ drRHS = drRHS && drRHS->getType()->isRealFloatingType() ? drRHS : NULL;
if (!drLHS && !drRHS)
return;
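
The switch to isRealFloatingType() keeps this check focused on real floating-point loop counters, since isFloatingType() also matches complex types. For reference, the bug pattern the syntax-only check is after:

    // Flagged: a float counter accumulates rounding error, so the loop may
    // overshoot its exit value and never terminate.
    for (float i = 0.0f; i != 1.0f; i += 0.1f) {
      // 0.1f is not exactly representable; i may step over 1.0f.
    }
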
diff --git a/contrib/llvm/tools/clang/lib/Checker/Environment.cpp b/contrib/llvm/tools/clang/lib/Checker/Environment.cpp
index addfc21..48152ce 100644
--- a/contrib/llvm/tools/clang/lib/Checker/Environment.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/Environment.cpp
@@ -125,7 +125,7 @@ static bool isBlockExprInCallers(const Stmt *E, const LocationContext *LC) {
// - Mark the region in DRoots if the binding is a loc::MemRegionVal.
Environment
-EnvironmentManager::RemoveDeadBindings(Environment Env, const Stmt *S,
+EnvironmentManager::RemoveDeadBindings(Environment Env,
SymbolReaper &SymReaper,
const GRState *ST,
llvm::SmallVectorImpl<const MemRegion*> &DRoots) {
@@ -163,7 +163,7 @@ EnvironmentManager::RemoveDeadBindings(Environment Env, const Stmt *S,
if (!C.isBlkExpr(BlkExpr))
continue;
- if (SymReaper.isLive(S, BlkExpr)) {
+ if (SymReaper.isLive(BlkExpr)) {
// Copy the binding to the new map.
NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X);
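
This is one arm of a wider refactor visible throughout the diff: the current statement now travels inside SymbolReaper, so RemoveDeadBindings and isLive drop their Stmt parameters. Side by side, with both lines taken from hunks in this diff:

    // Construction, from GRExprEngine::ProcessStmt below:
    SymbolReaper SymReaper(BasePred->getLocationContext(), CurrentStmt, SymMgr);

    // Query, from EnvironmentManager::RemoveDeadBindings above:
    if (SymReaper.isLive(BlkExpr))   // previously: SymReaper.isLive(S, BlkExpr)
      NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X);
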
diff --git a/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp b/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp
index 7f1c579..64575b3c9 100644
--- a/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp
@@ -44,7 +44,7 @@ public:
}
SVal ArrayToPointer(Loc Array);
- const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
+ const GRState *RemoveDeadBindings(GRState &state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots){
@@ -59,6 +59,11 @@ public:
Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E,
unsigned Count, InvalidatedSymbols *IS);
+
+ Store InvalidateRegions(Store store, const MemRegion * const *I,
+ const MemRegion * const *E, const Expr *Ex,
+ unsigned Count, InvalidatedSymbols *IS,
+ bool invalidateGlobals);
void print(Store store, llvm::raw_ostream& Out, const char* nl,
const char *sep);
@@ -141,9 +146,20 @@ Store FlatStoreManager::BindDeclWithNoInit(Store store, const VarRegion *VR) {
return store;
}
+Store FlatStoreManager::InvalidateRegions(Store store,
+ const MemRegion * const *I,
+ const MemRegion * const *E,
+ const Expr *Ex, unsigned Count,
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals) {
+ assert(false && "Not implemented");
+ return store;
+}
+
Store FlatStoreManager::InvalidateRegion(Store store, const MemRegion *R,
const Expr *E, unsigned Count,
InvalidatedSymbols *IS) {
+ assert(false && "Not implemented");
return store;
}
diff --git a/contrib/llvm/tools/clang/lib/Checker/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Checker/FrontendActions.cpp
new file mode 100644
index 0000000..d9a54a0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/FrontendActions.cpp
@@ -0,0 +1,21 @@
+//===--- FrontendActions.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/FrontendActions.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Checker/AnalysisConsumer.h"
+using namespace clang;
+
+ASTConsumer *AnalysisAction::CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ return CreateAnalysisConsumer(CI.getPreprocessor(),
+ CI.getFrontendOpts().OutputFile,
+ CI.getAnalyzerOpts());
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp b/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp
index 23a87d3..a816186 100644
--- a/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp
@@ -221,6 +221,7 @@ bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps) {
}
}
+ SubEngine.ProcessEndWorklist(WList->hasWork() || BlockAborted);
return WList->hasWork();
}
@@ -257,7 +258,10 @@ void GRCoreEngine::HandleBlockEdge(const BlockEdge& L, ExplodedNode* Pred) {
// FIXME: Should we allow ProcessBlockEntrance to also manipulate state?
if (ProcessBlockEntrance(Blk, Pred, WList->getBlockCounter()))
- GenerateNode(BlockEntrance(Blk, Pred->getLocationContext()), Pred->State, Pred);
+ GenerateNode(BlockEntrance(Blk, Pred->getLocationContext()),
+ Pred->State, Pred);
+ else
+ BlockAborted = true;
}
void GRCoreEngine::HandleBlockEntrance(const BlockEntrance& L,
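
GRCoreEngine now records aborted blocks and notifies the engine when the worklist drains, and GRExprEngine forwards that to every checker via VisitEndAnalysis (see ProcessEndWorklist below). A sketch of a checker consuming the callback, matching the call site in this diff; MyChecker is illustrative:

    class MyChecker : public Checker {
    public:
      void VisitEndAnalysis(ExplodedGraph &G, BugReporter &BR,
                            bool hasWorkRemaining) {
        // hasWorkRemaining is true when analysis stopped early (an aborted
        // block or the step limit), so diagnostics that require having seen
        // every path should be suppressed in that case.
        if (hasWorkRemaining)
          return;
        // ... emit whole-analysis reports here ...
      }
    };
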
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp b/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp
index 2417658..07fee9d 100644
--- a/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp
@@ -172,15 +172,39 @@ public:
void GRExprEngine::CheckerVisit(Stmt *S, ExplodedNodeSet &Dst,
ExplodedNodeSet &Src, bool isPrevisit) {
- if (Checkers.empty()) {
+ // Determine if we already have a cached 'CheckersOrdered' vector
+ // specifically tailored for the provided <Stmt kind, isPrevisit>. This
+ // can reduce the number of checkers actually called.
+ CheckersOrdered *CO = &Checkers;
+ llvm::OwningPtr<CheckersOrdered> NewCO;
+
+ const std::pair<unsigned, unsigned> &K =
+ std::make_pair((unsigned)S->getStmtClass(), isPrevisit ? 1U : 0U);
+
+ CheckersOrdered *& CO_Ref = COCache[K];
+
+ if (!CO_Ref) {
+ // If we have no previously cached CheckersOrdered vector for this
+ // statement kind, then create one.
+ NewCO.reset(new CheckersOrdered);
+ }
+ else {
+ // Use the already cached set.
+ CO = CO_Ref;
+ }
+
+ if (CO->empty()) {
+ // If there are no checkers, return early without doing any
+ // more work.
Dst.insert(Src);
return;
}
ExplodedNodeSet Tmp;
ExplodedNodeSet *PrevSet = &Src;
+ unsigned checkersEvaluated = 0;
- for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end(); I!=E;++I){
+ for (CheckersOrdered::iterator I=CO->begin(), E=CO->end(); I!=E; ++I){
ExplodedNodeSet *CurrSet = 0;
if (I+1 == E)
CurrSet = &Dst;
@@ -190,12 +214,30 @@ void GRExprEngine::CheckerVisit(Stmt *S, ExplodedNodeSet &Dst,
}
void *tag = I->first;
Checker *checker = I->second;
+ bool respondsToCallback = true;
for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
- NI != NE; ++NI)
- checker->GR_Visit(*CurrSet, *Builder, *this, S, *NI, tag, isPrevisit);
+ NI != NE; ++NI) {
+
+ checker->GR_Visit(*CurrSet, *Builder, *this, S, *NI, tag, isPrevisit,
+ respondsToCallback);
+
+ }
+
PrevSet = CurrSet;
+
+ if (NewCO.get()) {
+ ++checkersEvaluated;
+ if (respondsToCallback)
+ NewCO->push_back(*I);
+ }
}
+
+ // If we built NewCO, check if we called all the checkers. This is important
+  // so that we know we accurately determined the entire set of checkers
+  // that respond to this callback.
+ if (NewCO.get() && checkersEvaluated == Checkers.size())
+ CO_Ref = NewCO.take();
// Don't autotransition. The CheckerContext objects should do this
// automatically.
@@ -312,18 +354,20 @@ static void RegisterInternalChecks(GRExprEngine &Eng) {
// automatically. Note that the check itself is owned by the GRExprEngine
// object.
RegisterAdjustedReturnValueChecker(Eng);
- RegisterAttrNonNullChecker(Eng);
+ // CallAndMessageChecker should be registered before AttrNonNullChecker,
+ // where we assume arguments are not undefined.
RegisterCallAndMessageChecker(Eng);
+ RegisterAttrNonNullChecker(Eng);
RegisterDereferenceChecker(Eng);
RegisterVLASizeChecker(Eng);
RegisterDivZeroChecker(Eng);
- RegisterReturnStackAddressChecker(Eng);
RegisterReturnUndefChecker(Eng);
RegisterUndefinedArraySubscriptChecker(Eng);
RegisterUndefinedAssignmentChecker(Eng);
RegisterUndefBranchChecker(Eng);
RegisterUndefCapturedBlockVarChecker(Eng);
RegisterUndefResultChecker(Eng);
+ RegisterStackAddrLeakChecker(Eng);
// This is not a checker yet.
RegisterNoReturnFunctionChecker(Eng);
@@ -335,10 +379,10 @@ static void RegisterInternalChecks(GRExprEngine &Eng) {
GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf)
: AMgr(mgr),
- CoreEngine(mgr.getASTContext(), *this),
+ CoreEngine(*this),
G(CoreEngine.getGraph()),
Builder(NULL),
- StateMgr(G.getContext(), mgr.getStoreManagerCreator(),
+ StateMgr(getContext(), mgr.getStoreManagerCreator(),
mgr.getConstraintManagerCreator(), G.getAllocator(),
*this),
SymMgr(StateMgr.getSymbolManager()),
@@ -346,7 +390,7 @@ GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf)
SVator(ValMgr.getSValuator()),
CurrentStmt(NULL),
NSExceptionII(NULL), NSExceptionInstanceRaiseSelectors(NULL),
- RaiseSel(GetNullarySelector("raise", G.getContext())),
+ RaiseSel(GetNullarySelector("raise", getContext())),
BR(mgr, *this), TF(tf) {
// Register internal checks.
RegisterInternalChecks(*this);
@@ -359,8 +403,14 @@ GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf)
GRExprEngine::~GRExprEngine() {
BR.FlushReports();
delete [] NSExceptionInstanceRaiseSelectors;
+
+ // Delete the set of checkers.
for (CheckersOrdered::iterator I=Checkers.begin(), E=Checkers.end(); I!=E;++I)
delete I->second;
+
+ for (CheckersOrderedCache::iterator I=COCache.begin(), E=COCache.end();
+ I!=E;++I)
+ delete I->second;
}
//===----------------------------------------------------------------------===//
@@ -464,6 +514,13 @@ const GRState *GRExprEngine::ProcessAssume(const GRState *state, SVal cond,
return TF->EvalAssume(state, cond, assumption);
}
+void GRExprEngine::ProcessEndWorklist(bool hasWorkRemaining) {
+ for (CheckersOrdered::iterator I = Checkers.begin(), E = Checkers.end();
+ I != E; ++I) {
+ I->second->VisitEndAnalysis(G, BR, hasWorkRemaining);
+ }
+}
+
void GRExprEngine::ProcessStmt(CFGElement CE, GRStmtNodeBuilder& builder) {
CurrentStmt = CE.getStmt();
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
@@ -480,10 +537,10 @@ void GRExprEngine::ProcessStmt(CFGElement CE, GRStmtNodeBuilder& builder) {
// Create the cleaned state.
const ExplodedNode *BasePred = Builder->getBasePredecessor();
- SymbolReaper SymReaper(BasePred->getLocationContext(), SymMgr);
+ SymbolReaper SymReaper(BasePred->getLocationContext(), CurrentStmt, SymMgr);
CleanedState = AMgr.shouldPurgeDead()
- ? StateMgr.RemoveDeadBindings(EntryNode->getState(), CurrentStmt,
+ ? StateMgr.RemoveDeadBindings(EntryNode->getState(),
BasePred->getLocationContext()->getCurrentStackFrame(),
SymReaper)
: EntryNode->getState();
@@ -502,7 +559,7 @@ void GRExprEngine::ProcessStmt(CFGElement CE, GRStmtNodeBuilder& builder) {
// FIXME: This should soon be removed.
ExplodedNodeSet Tmp2;
- getTF().EvalDeadSymbols(Tmp2, *this, *Builder, EntryNode, CurrentStmt,
+ getTF().EvalDeadSymbols(Tmp2, *this, *Builder, EntryNode,
CleanedState, SymReaper);
if (Checkers.empty())
@@ -598,7 +655,7 @@ void GRExprEngine::Visit(Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst) {
case Stmt::CXXTryStmtClass:
case Stmt::CXXTypeidExprClass:
case Stmt::CXXUnresolvedConstructExprClass:
- case Stmt::CXXZeroInitValueExprClass:
+ case Stmt::CXXScalarValueInitExprClass:
case Stmt::DependentScopeDeclRefExprClass:
case Stmt::UnaryTypeTraitExprClass:
case Stmt::UnresolvedLookupExprClass:
@@ -627,10 +684,14 @@ void GRExprEngine::Visit(Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst) {
llvm_unreachable("Stmt should not be in analyzer evaluation loop");
break;
+ case Stmt::GNUNullExprClass: {
+ MakeNode(Dst, S, Pred, GetState(Pred)->BindExpr(S, ValMgr.makeNull()));
+ break;
+ }
+
// Cases not handled yet; but will handle some day.
case Stmt::DesignatedInitExprClass:
case Stmt::ExtVectorElementExprClass:
- case Stmt::GNUNullExprClass:
case Stmt::ImaginaryLiteralClass:
case Stmt::ImplicitValueInitExprClass:
case Stmt::ObjCAtCatchStmtClass:
@@ -901,7 +962,7 @@ void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred,
// C++ stuff we don't support yet.
case Stmt::CXXExprWithTemporariesClass:
case Stmt::CXXMemberCallExprClass:
- case Stmt::CXXZeroInitValueExprClass: {
+ case Stmt::CXXScalarValueInitExprClass: {
SaveAndRestore<bool> OldSink(Builder->BuildSinks);
Builder->BuildSinks = true;
MakeNode(Dst, Ex, Pred, GetState(Pred));
@@ -998,16 +1059,21 @@ void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred,
CreateCXXTemporaryObject(Ex, Pred, Dst);
return;
- default:
+ default: {
// Arbitrary subexpressions can return aggregate temporaries that
// can be used in a lvalue context. We need to enhance our support
// of such temporaries in both the environment and the store, so right
// now we just do a regular visit.
- assert ((Ex->getType()->isAggregateType()) &&
- "Other kinds of expressions with non-aggregate/union types do"
- " not have lvalues.");
+
+ // NOTE: Do not use 'isAggregateType()' here as CXXRecordDecls that
+      // are non-POD are not aggregates.
+ assert ((isa<RecordType>(Ex->getType().getDesugaredType()) ||
+ isa<ArrayType>(Ex->getType().getDesugaredType())) &&
+ "Other kinds of expressions with non-aggregate/union/class types"
+ " do not have lvalues.");
Visit(Ex, Pred, Dst);
+ }
}
}
@@ -1819,7 +1885,7 @@ bool GRExprEngine::InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE,
if (!FD)
return false;
- if (!FD->getBody(FD))
+ if (!FD->hasBody(FD))
return false;
// Now we have the definition of the callee, create a CallEnter node.
@@ -1940,7 +2006,8 @@ void GRExprEngine::VisitCall(CallExpr* CE, ExplodedNode* Pred,
// Finally, perform the post-condition check of the CallExpr and store
// the created nodes in 'Dst'.
-
+ // If the callee returns a reference and we want an rvalue, skip this check
+ // and do the load.
if (!(!asLValue && CalleeReturnsReference(CE))) {
CheckerVisit(CE, Dst, DstTmp3, false);
return;
@@ -2371,6 +2438,7 @@ void GRExprEngine::VisitCast(CastExpr *CastE, Expr *Ex, ExplodedNode *Pred,
case CastExpr::CK_Unknown:
case CastExpr::CK_ArrayToPointerDecay:
case CastExpr::CK_BitCast:
+ case CastExpr::CK_LValueBitCast:
case CastExpr::CK_IntegralCast:
case CastExpr::CK_IntegralToPointer:
case CastExpr::CK_PointerToIntegral:
@@ -2380,7 +2448,7 @@ void GRExprEngine::VisitCast(CastExpr *CastE, Expr *Ex, ExplodedNode *Pred,
case CastExpr::CK_AnyPointerToObjCPointerCast:
case CastExpr::CK_AnyPointerToBlockPointerCast:
case CastExpr::CK_DerivedToBase:
- case CastExpr::CK_UncheckedDerivedToBase:
+ case CastExpr::CK_UncheckedDerivedToBase: {
// Delegate to SValuator to process.
for (ExplodedNodeSet::iterator I = S2.begin(), E = S2.end(); I != E; ++I) {
ExplodedNode* N = *I;
@@ -2391,10 +2459,24 @@ void GRExprEngine::VisitCast(CastExpr *CastE, Expr *Ex, ExplodedNode *Pred,
MakeNode(Dst, CastE, N, state);
}
return;
-
- default:
- llvm::errs() << "Cast kind " << CastE->getCastKind() << " not handled.\n";
- assert(0);
+ }
+
+ // Various C++ casts that are not handled yet.
+ case CastExpr::CK_Dynamic:
+ case CastExpr::CK_ToUnion:
+ case CastExpr::CK_BaseToDerived:
+ case CastExpr::CK_NullToMemberPointer:
+ case CastExpr::CK_BaseToDerivedMemberPointer:
+ case CastExpr::CK_DerivedToBaseMemberPointer:
+ case CastExpr::CK_UserDefinedConversion:
+ case CastExpr::CK_ConstructorConversion:
+ case CastExpr::CK_VectorSplat:
+ case CastExpr::CK_MemberPointerToBoolean: {
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ Builder->BuildSinks = true;
+ MakeNode(Dst, CastE, Pred, GetState(Pred));
+ return;
+ }
}
}
@@ -2615,9 +2697,38 @@ void GRExprEngine::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr* Ex,
// sizeof(void) == 1 byte.
amt = CharUnits::One();
}
- else if (!T.getTypePtr()->isConstantSizeType()) {
- // FIXME: Add support for VLAs.
- Dst.Add(Pred);
+ else if (!T->isConstantSizeType()) {
+ assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");
+
+ // FIXME: Add support for VLA type arguments, not just VLA expressions.
+ // When that happens, we should probably refactor VLASizeChecker's code.
+ if (Ex->isArgumentType()) {
+ Dst.Add(Pred);
+ return;
+ }
+
+ // Get the size by getting the extent of the sub-expression.
+ // First, visit the sub-expression to find its region.
+ Expr *Arg = Ex->getArgumentExpr();
+ ExplodedNodeSet Tmp;
+ VisitLValue(Arg, Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+ const MemRegion *MR = state->getSVal(Arg).getAsRegion();
+
+ // If the subexpression can't be resolved to a region, we don't know
+ // anything about its size. Just leave the state as is and continue.
+ if (!MR) {
+ Dst.Add(*I);
+ continue;
+ }
+
+ // The result is the extent of the VLA.
+ SVal Extent = cast<SubRegion>(MR)->getExtent(ValMgr);
+ MakeNode(Dst, Ex, *I, state->BindExpr(Ex, Extent));
+ }
+
return;
}
else if (T->getAs<ObjCObjectType>()) {
@@ -2749,7 +2860,7 @@ void GRExprEngine::VisitUnaryOperator(UnaryOperator* U, ExplodedNode* Pred,
return;
}
- case UnaryOperator::Plus: assert (!asLValue); // FALL-THROUGH.
+ case UnaryOperator::Plus: assert(!asLValue); // FALL-THROUGH.
case UnaryOperator::Extension: {
// Unary "+" is a no-op, similar to a parentheses. We still have places
@@ -2759,7 +2870,11 @@ void GRExprEngine::VisitUnaryOperator(UnaryOperator* U, ExplodedNode* Pred,
Expr* Ex = U->getSubExpr()->IgnoreParens();
ExplodedNodeSet Tmp;
- Visit(Ex, Pred, Tmp);
+
+ if (asLValue)
+ VisitLValue(Ex, Pred, Tmp);
+ else
+ Visit(Ex, Pred, Tmp);
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
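
The CheckerVisit changes above memoize, per <statement kind, pre/post-visit> pair, the subset of checkers that actually respond, so later visits skip the rest. A simplified model of the cache logic; types are stubbed, and the real code additionally publishes the list only after confirming every checker was evaluated:

    #include <map>
    #include <utility>
    #include <vector>

    struct Checker { bool visit() { return true; } };  // stub

    typedef std::vector<Checker*> CheckerList;
    typedef std::map<std::pair<unsigned, unsigned>, CheckerList*> CheckerCache;

    void visitAll(CheckerCache &cache, CheckerList &all,
                  unsigned stmtClass, bool isPrevisit) {
      CheckerList *&slot = cache[std::make_pair(stmtClass,
                                                isPrevisit ? 1u : 0u)];
      CheckerList *run = slot ? slot : &all;  // fall back to the full list
      CheckerList *fresh = slot ? 0 : new CheckerList;

      for (size_t i = 0; i != run->size(); ++i) {
        bool responded = (*run)[i]->visit();
        if (fresh && responded)
          fresh->push_back((*run)[i]);        // remember responders only
      }

      if (fresh)
        slot = fresh;                         // reuse on the next visit
    }
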
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp
index 6066a1c..d138e81 100644
--- a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp
@@ -23,6 +23,8 @@ void clang::RegisterExperimentalChecks(GRExprEngine &Eng) {
// within GRExprEngine.
RegisterPthreadLockChecker(Eng);
RegisterMallocChecker(Eng);
+ RegisterStreamChecker(Eng);
+ RegisterCStringChecker(Eng);
}
void clang::RegisterExperimentalInternalChecks(GRExprEngine &Eng) {
@@ -38,4 +40,5 @@ void clang::RegisterExperimentalInternalChecks(GRExprEngine &Eng) {
RegisterCastToStructChecker(Eng);
RegisterCastSizeChecker(Eng);
RegisterArrayBoundChecker(Eng);
+
}
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h
index 9a9da32..7d1eb77 100644
--- a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h
+++ b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h
@@ -19,8 +19,11 @@ namespace clang {
class GRExprEngine;
+void RegisterCStringChecker(GRExprEngine &Eng);
void RegisterPthreadLockChecker(GRExprEngine &Eng);
void RegisterMallocChecker(GRExprEngine &Eng);
+void RegisterStreamChecker(GRExprEngine &Eng);
+void RegisterIdempotentOperationChecker(GRExprEngine &Eng);
} // end clang namespace
#endif
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h
index 335b85e..f91a759 100644
--- a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h
+++ b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h
@@ -34,8 +34,8 @@ void RegisterNoReturnFunctionChecker(GRExprEngine &Eng);
void RegisterPointerArithChecker(GRExprEngine &Eng);
void RegisterPointerSubChecker(GRExprEngine &Eng);
void RegisterReturnPointerRangeChecker(GRExprEngine &Eng);
-void RegisterReturnStackAddressChecker(GRExprEngine &Eng);
void RegisterReturnUndefChecker(GRExprEngine &Eng);
+void RegisterStackAddrLeakChecker(GRExprEngine &Eng);
void RegisterUndefBranchChecker(GRExprEngine &Eng);
void RegisterUndefCapturedBlockVarChecker(GRExprEngine &Eng);
void RegisterUndefResultChecker(GRExprEngine &Eng);
diff --git a/contrib/llvm/tools/clang/lib/Checker/GRState.cpp b/contrib/llvm/tools/clang/lib/Checker/GRState.cpp
index b16e922..9e584b5 100644
--- a/contrib/llvm/tools/clang/lib/Checker/GRState.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/GRState.cpp
@@ -34,7 +34,7 @@ GRStateManager::~GRStateManager() {
}
const GRState*
-GRStateManager::RemoveDeadBindings(const GRState* state, Stmt* Loc,
+GRStateManager::RemoveDeadBindings(const GRState* state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper) {
@@ -47,11 +47,11 @@ GRStateManager::RemoveDeadBindings(const GRState* state, Stmt* Loc,
llvm::SmallVector<const MemRegion*, 10> RegionRoots;
GRState NewState = *state;
- NewState.Env = EnvMgr.RemoveDeadBindings(NewState.Env, Loc, SymReaper,
+ NewState.Env = EnvMgr.RemoveDeadBindings(NewState.Env, SymReaper,
state, RegionRoots);
// Clean up the store.
- const GRState *s = StoreMgr->RemoveDeadBindings(NewState, Loc, LCtx,
+ const GRState *s = StoreMgr->RemoveDeadBindings(NewState, LCtx,
SymReaper, RegionRoots);
return ConstraintMgr->RemoveDeadBindings(s, SymReaper);
@@ -343,28 +343,3 @@ bool GRState::scanReachableSymbols(const MemRegion * const *I,
}
return true;
}
-
-//===----------------------------------------------------------------------===//
-// Queries.
-//===----------------------------------------------------------------------===//
-
-bool GRStateManager::isEqual(const GRState* state, const Expr* Ex,
- const llvm::APSInt& Y) {
-
- SVal V = state->getSVal(Ex);
-
- if (loc::ConcreteInt* X = dyn_cast<loc::ConcreteInt>(&V))
- return X->getValue() == Y;
-
- if (nonloc::ConcreteInt* X = dyn_cast<nonloc::ConcreteInt>(&V))
- return X->getValue() == Y;
-
- if (SymbolRef Sym = V.getAsSymbol())
- return ConstraintMgr->isEqual(state, Sym, Y);
-
- return false;
-}
-
-bool GRStateManager::isEqual(const GRState* state, const Expr* Ex, uint64_t x) {
- return isEqual(state, Ex, getBasicVals().getValue(x, Ex->getType()));
-}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/HTMLDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Checker/HTMLDiagnostics.cpp
index 022a34d..ff9867f 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/HTMLDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/HTMLDiagnostics.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/PathDiagnosticClients.h"
+#include "clang/Checker/PathDiagnosticClients.h"
#include "clang/Checker/BugReporter/PathDiagnostic.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
@@ -294,8 +294,8 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
llvm::raw_fd_ostream os(H.c_str(), ErrorMsg);
if (!ErrorMsg.empty()) {
- (llvm::errs() << "warning: could not create file '" << F.str()
- << "'\n").flush();
+ llvm::errs() << "warning: could not create file '" << F.str()
+ << "'\n";
return;
}
diff --git a/contrib/llvm/tools/clang/lib/Checker/IdempotentOperationChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/IdempotentOperationChecker.cpp
new file mode 100644
index 0000000..6ed1841
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/IdempotentOperationChecker.cpp
@@ -0,0 +1,454 @@
+//==- IdempotentOperationChecker.cpp - Idempotent Operations ----*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of path-sensitive checks for idempotent and/or
+// tautological operations. Each potential operation is checked along all paths
+// to see if every path results in a pointless operation.
+//                 +-------------------------------------------+
+//                 |Table of idempotent/tautological operations|
+//                 +-------------------------------------------+
+//+--------------------------------------------------------------------------+
+//|Operator | x op x | x op 1 | 1 op x | x op 0 | 0 op x | x op ~0 | ~0 op x |
+//+--------------------------------------------------------------------------+
+//  +, +=   |        |        |        |   x    |   x    |         |
+//  -, -=   |        |        |        |   x    |   -x   |         |
+//  *, *=   |        |   x    |   x    |   0    |   0    |         |
+//  /, /=   |   1    |   x    |        |  N/A   |   0    |         |
+//  &, &=   |   x    |        |        |   0    |   0    |    x    |    x
+//  |, |=   |   x    |        |        |   x    |   x    |   ~0    |   ~0
+//  ^, ^=   |   0    |        |        |   x    |   x    |         |
+//  <<, <<= |        |        |        |   x    |   0    |         |
+//  >>, >>= |        |        |        |   x    |   0    |         |
+//  ||      |   1    |   1    |   1    |   x    |   x    |    1    |    1
+//  &&      |   1    |   x    |   x    |   0    |   0    |    x    |    x
+//  =       |   x    |        |        |        |        |         |
+//  ==      |   1    |        |        |        |        |         |
+//  >=      |   1    |        |        |        |        |         |
+//  <=      |   1    |        |        |        |        |         |
+//  >       |   0    |        |        |        |        |         |
+//  <       |   0    |        |        |        |        |         |
+//  !=      |   0    |        |        |        |        |         |
+//===----------------------------------------------------------------------===//
+//
+// Ways to reduce false positives (that need to be implemented):
+// - Don't flag downsizing casts
+// - Improved handling of static/global variables
+// - Per-block marking of incomplete analysis
+// - Handling ~0 values
+// - False positives involving silencing unused variable warnings
+//
+// Other things TODO:
+// - Improved error messages
+// - Handle mixed assumptions (which assumptions can belong together?)
+// - Finer grained false positive control (levels)
+
+#include "GRExprEngineExperimentalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/SVals.h"
+#include "clang/AST/Stmt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+
+namespace {
+class IdempotentOperationChecker
+ : public CheckerVisitor<IdempotentOperationChecker> {
+ public:
+ static void *getTag();
+ void PreVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
+ void VisitEndAnalysis(ExplodedGraph &G, BugReporter &B,
+ bool hasWorkRemaining);
+
+ private:
+ // Our assumption about a particular operation.
+ enum Assumption { Possible, Impossible, Equal, LHSis1, RHSis1, LHSis0,
+ RHSis0 };
+
+ void UpdateAssumption(Assumption &A, const Assumption &New);
+
+ /// contains* - Useful recursive methods to see if a statement contains an
+ /// element somewhere. Used in static analysis to reduce false positives.
+ static bool containsMacro(const Stmt *S);
+ static bool containsEnum(const Stmt *S);
+ static bool containsBuiltinOffsetOf(const Stmt *S);
+ static bool containsZeroConstant(const Stmt *S);
+ static bool containsOneConstant(const Stmt *S);
+ template <class T> static bool containsStmt(const Stmt *S) {
+ if (isa<T>(S))
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsStmt<T>(child))
+ return true;
+
+ return false;
+ }
+
+ // Hash table
+ typedef llvm::DenseMap<const BinaryOperator *, Assumption> AssumptionMap;
+ AssumptionMap hash;
+};
+}
+
+void *IdempotentOperationChecker::getTag() {
+ static int x = 0;
+ return &x;
+}
+
+void clang::RegisterIdempotentOperationChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new IdempotentOperationChecker());
+}
+
+void IdempotentOperationChecker::PreVisitBinaryOperator(
+ CheckerContext &C,
+ const BinaryOperator *B) {
+ // Find or create an entry in the hash for this BinaryOperator instance
+ AssumptionMap::iterator i = hash.find(B);
+ Assumption &A = i == hash.end() ? hash[B] : i->second;
+
+ // If we had to create an entry, initialise the value to Possible
+ if (i == hash.end())
+ A = Possible;
+
+ // If we already have visited this node on a path that does not contain an
+ // idempotent operation, return immediately.
+ if (A == Impossible)
+ return;
+
+ // Skip binary operators containing common false positives
+ if (containsMacro(B) || containsEnum(B) || containsStmt<SizeOfAlignOfExpr>(B)
+ || containsZeroConstant(B) || containsOneConstant(B)
+ || containsBuiltinOffsetOf(B)) {
+ A = Impossible;
+ return;
+ }
+
+ const Expr *LHS = B->getLHS();
+ const Expr *RHS = B->getRHS();
+
+ const GRState *state = C.getState();
+
+ SVal LHSVal = state->getSVal(LHS);
+ SVal RHSVal = state->getSVal(RHS);
+
+ // If either value is unknown, we can't be 100% sure of all paths.
+ if (LHSVal.isUnknownOrUndef() || RHSVal.isUnknownOrUndef()) {
+ A = Impossible;
+ return;
+ }
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ // Dereference the LHS SVal if this is an assign operation
+ switch (Op) {
+ default:
+ break;
+
+ // Fall through intentional
+ case BinaryOperator::AddAssign:
+ case BinaryOperator::SubAssign:
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::AndAssign:
+ case BinaryOperator::OrAssign:
+ case BinaryOperator::XorAssign:
+ case BinaryOperator::ShlAssign:
+ case BinaryOperator::ShrAssign:
+ case BinaryOperator::Assign:
+ // Assign statements have one extra level of indirection
+ if (!isa<Loc>(LHSVal)) {
+ A = Impossible;
+ return;
+ }
+ LHSVal = state->getSVal(cast<Loc>(LHSVal));
+ }
+
+
+ // We now check for various cases which result in an idempotent operation.
+
+ // x op x
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ case BinaryOperator::SubAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::AndAssign:
+ case BinaryOperator::OrAssign:
+ case BinaryOperator::XorAssign:
+ case BinaryOperator::Assign:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Div:
+ case BinaryOperator::And:
+ case BinaryOperator::Or:
+ case BinaryOperator::Xor:
+ case BinaryOperator::LOr:
+ case BinaryOperator::LAnd:
+ if (LHSVal != RHSVal)
+ break;
+ UpdateAssumption(A, Equal);
+ return;
+ }
+
+ // x op 1
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::Mul:
+ case BinaryOperator::Div:
+ case BinaryOperator::LOr:
+ case BinaryOperator::LAnd:
+ if (!RHSVal.isConstant(1))
+ break;
+ UpdateAssumption(A, RHSis1);
+ return;
+ }
+
+ // 1 op x
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::Mul:
+ case BinaryOperator::LOr:
+ case BinaryOperator::LAnd:
+ if (!LHSVal.isConstant(1))
+ break;
+ UpdateAssumption(A, LHSis1);
+ return;
+ }
+
+ // x op 0
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ case BinaryOperator::AddAssign:
+ case BinaryOperator::SubAssign:
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::AndAssign:
+ case BinaryOperator::OrAssign:
+ case BinaryOperator::XorAssign:
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Mul:
+ case BinaryOperator::And:
+ case BinaryOperator::Or:
+ case BinaryOperator::Xor:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ case BinaryOperator::LOr:
+ case BinaryOperator::LAnd:
+ if (!RHSVal.isConstant(0))
+ break;
+ UpdateAssumption(A, RHSis0);
+ return;
+ }
+
+ // 0 op x
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ //case BinaryOperator::AddAssign: // Common false positive
+ case BinaryOperator::SubAssign: // Check only if unsigned
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::AndAssign:
+ //case BinaryOperator::OrAssign: // Common false positive
+ //case BinaryOperator::XorAssign: // Common false positive
+ case BinaryOperator::ShlAssign:
+ case BinaryOperator::ShrAssign:
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Mul:
+ case BinaryOperator::Div:
+ case BinaryOperator::And:
+ case BinaryOperator::Or:
+ case BinaryOperator::Xor:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ case BinaryOperator::LOr:
+ case BinaryOperator::LAnd:
+ if (!LHSVal.isConstant(0))
+ break;
+ UpdateAssumption(A, LHSis0);
+ return;
+ }
+
+ // If we get to this point, there has been a valid use of this operation.
+ A = Impossible;
+}
+
+void IdempotentOperationChecker::VisitEndAnalysis(ExplodedGraph &G,
+ BugReporter &B,
+ bool hasWorkRemaining) {
+ // If there is any work remaining we cannot be 100% sure about our warnings
+ if (hasWorkRemaining)
+ return;
+
+ // Iterate over the hash to see if we have any paths with definite
+ // idempotent operations.
+ for (AssumptionMap::const_iterator i =
+ hash.begin(); i != hash.end(); ++i) {
+ if (i->second != Impossible) {
+ // Select the error message.
+ const char *msg = 0;
+ switch (i->second) {
+ case Equal:
+ msg = "idempotent operation; both operands are always equal in value";
+ break;
+ case LHSis1:
+ msg = "idempotent operation; the left operand is always 1";
+ break;
+ case RHSis1:
+ msg = "idempotent operation; the right operand is always 1";
+ break;
+ case LHSis0:
+ msg = "idempotent operation; the left operand is always 0";
+ break;
+ case RHSis0:
+ msg = "idempotent operation; the right operand is always 0";
+ break;
+ case Possible:
+ llvm_unreachable("Operation was never marked with an assumption");
+ case Impossible:
+ llvm_unreachable(0);
+ }
+
+ // Create the SourceRange Arrays
+ SourceRange S[2] = { i->first->getLHS()->getSourceRange(),
+ i->first->getRHS()->getSourceRange() };
+ B.EmitBasicReport("Idempotent operation", msg, i->first->getOperatorLoc(),
+ S, 2);
+ }
+ }
+}
+
+// Updates the current assumption given the new assumption
+inline void IdempotentOperationChecker::UpdateAssumption(Assumption &A,
+ const Assumption &New) {
+ switch (A) {
+ // If we don't currently have an assumption, set it
+ case Possible:
+ A = New;
+ return;
+
+ // If we have determined that a valid state happened, ignore the new
+ // assumption.
+ case Impossible:
+ return;
+
+ // Any other case means that we had a different assumption last time. We don't
+ // currently support mixing assumptions for diagnostic reasons, so we set
+ // our assumption to be impossible.
+ default:
+ A = Impossible;
+ return;
+ }
+}
+
+// Recursively find any substatements containing macros
+bool IdempotentOperationChecker::containsMacro(const Stmt *S) {
+ if (S->getLocStart().isMacroID())
+ return true;
+
+ if (S->getLocEnd().isMacroID())
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsMacro(child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing enum constants
+bool IdempotentOperationChecker::containsEnum(const Stmt *S) {
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
+
+ if (DR && isa<EnumConstantDecl>(DR->getDecl()))
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsEnum(child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing __builtin_offsetof
+bool IdempotentOperationChecker::containsBuiltinOffsetOf(const Stmt *S) {
+ const UnaryOperator *UO = dyn_cast<UnaryOperator>(S);
+
+ if (UO && UO->getOpcode() == UnaryOperator::OffsetOf)
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsBuiltinOffsetOf(child))
+ return true;
+
+ return false;
+}
+
+bool IdempotentOperationChecker::containsZeroConstant(const Stmt *S) {
+ const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(S);
+ if (IL && IL->getValue() == 0)
+ return true;
+
+ const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(S);
+ if (FL && FL->getValue().isZero())
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsZeroConstant(child))
+ return true;
+
+ return false;
+}
+
+bool IdempotentOperationChecker::containsOneConstant(const Stmt *S) {
+ const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(S);
+ if (IL && IL->getValue() == 1)
+ return true;
+
+ const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(S);
+ const llvm::APFloat one(1.0);
+ if (FL && FL->getValue().compare(one) == llvm::APFloat::cmpEqual)
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsOneConstant(child))
+ return true;
+
+ return false;
+}
+
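The table in the header above drives the checker: operands that are literal 0 or 1 are deliberately skipped (containsZeroConstant/containsOneConstant exist precisely to suppress those common false positives), so reports come only from operands that are equal or constant on every analyzed path, and only once the whole graph is exhausted (VisitEndAnalysis bails out while hasWorkRemaining). Two cases the checker as written would flag, as a sketch:

    // Both operands carry the same symbolic value on every path.
    int difference(int x) {
      int y = x;
      return x - y;   // "idempotent operation; both operands are always
    }                 //  equal in value" -- the result is always 0

    // The left operand is the concrete value 0 on every path, with no
    // literal 0 appearing inside the flagged expression itself.
    int scale(int v) {
      int k = 0;
      return k * v;   // "idempotent operation; the left operand is always 0"
    }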
diff --git a/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp
index 39ded43..c121257 100644
--- a/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp
@@ -34,13 +34,15 @@ static bool IsLLVMStringRef(QualType T) {
"class llvm::StringRef";
}
-static bool InStdNamespace(const Decl *D) {
+/// Check whether the declaration is semantically inside the top-level
+/// namespace named by NS.
+static bool InNamespace(const Decl *D, llvm::StringRef NS) {
const DeclContext *DC = D->getDeclContext();
const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext());
if (!ND)
return false;
const IdentifierInfo *II = ND->getIdentifier();
- if (!II || II->getName() != "std")
+ if (!II || !II->getName().equals(NS))
return false;
DC = ND->getDeclContext();
return isa<TranslationUnitDecl>(DC);
@@ -56,50 +58,26 @@ static bool IsStdString(QualType T) {
const TypedefDecl *TD = TT->getDecl();
- if (!InStdNamespace(TD))
+ if (!InNamespace(TD, "std"))
return false;
return TD->getName() == "string";
}
-static bool InClangNamespace(const Decl *D) {
- const DeclContext *DC = D->getDeclContext();
- const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext());
- if (!ND)
- return false;
- const IdentifierInfo *II = ND->getIdentifier();
- if (!II || II->getName() != "clang")
- return false;
- DC = ND->getDeclContext();
- return isa<TranslationUnitDecl>(DC);
-}
-
-static bool InLLVMNamespace(const Decl *D) {
- const DeclContext *DC = D->getDeclContext();
- const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext());
- if (!ND)
- return false;
- const IdentifierInfo *II = ND->getIdentifier();
- if (!II || II->getName() != "llvm")
- return false;
- DC = ND->getDeclContext();
- return isa<TranslationUnitDecl>(DC);
-}
-
static bool IsClangType(const RecordDecl *RD) {
- return RD->getName() == "Type" && InClangNamespace(RD);
+ return RD->getName() == "Type" && InNamespace(RD, "clang");
}
static bool IsClangDecl(const RecordDecl *RD) {
- return RD->getName() == "Decl" && InClangNamespace(RD);
+ return RD->getName() == "Decl" && InNamespace(RD, "clang");
}
static bool IsClangStmt(const RecordDecl *RD) {
- return RD->getName() == "Stmt" && InClangNamespace(RD);
+ return RD->getName() == "Stmt" && InNamespace(RD, "clang");
}
-static bool isClangAttr(const RecordDecl *RD) {
- return RD->getName() == "Attr" && InClangNamespace(RD);
+static bool IsClangAttr(const RecordDecl *RD) {
+ return RD->getName() == "Attr" && InNamespace(RD, "clang");
}
static bool IsStdVector(QualType T) {
@@ -110,7 +88,7 @@ static bool IsStdVector(QualType T) {
TemplateName TM = TS->getTemplateName();
TemplateDecl *TD = TM.getAsTemplateDecl();
- if (!TD || !InStdNamespace(TD))
+ if (!TD || !InNamespace(TD, "std"))
return false;
return TD->getName() == "vector";
@@ -124,7 +102,7 @@ static bool IsSmallVector(QualType T) {
TemplateName TM = TS->getTemplateName();
TemplateDecl *TD = TM.getAsTemplateDecl();
- if (!TD || !InLLVMNamespace(TD))
+ if (!TD || !InNamespace(TD, "llvm"))
return false;
return TD->getName() == "SmallVector";
@@ -214,7 +192,7 @@ static bool AllocatesMemory(QualType T) {
// This type checking could be sped up via dynamic programming.
static bool IsPartOfAST(const CXXRecordDecl *R) {
- if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || isClangAttr(R))
+ if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || IsClangAttr(R))
return true;
for (CXXRecordDecl::base_class_const_iterator I = R->bases_begin(),
@@ -316,7 +294,7 @@ static void ScanCodeDecls(DeclContext *DC, BugReporter &BR) {
Decl *D = *I;
- if (D->getBody())
+ if (D->hasBody())
CheckStringRefAssignedTemporary(D, BR);
if (CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(D))
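InNamespace folds the three copies of the namespace test into a single helper, and like its predecessors it only matches a namespace declared directly at translation-unit scope. Roughly:

    namespace std   { class string; }    // InNamespace(D, "std")   -> true
    namespace clang { class Decl; }      // InNamespace(D, "clang") -> true

    namespace outer {
      namespace std { class string; }    // false: this "std" is nested, so its
    }                                    // parent is not the TranslationUnitDecl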
diff --git a/contrib/llvm/tools/clang/lib/Checker/Makefile b/contrib/llvm/tools/clang/lib/Checker/Makefile
index c45ab29..1bc6529 100644
--- a/contrib/llvm/tools/clang/lib/Checker/Makefile
+++ b/contrib/llvm/tools/clang/lib/Checker/Makefile
@@ -11,11 +11,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangChecker
BUILD_ARCHIVE = 1
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp
index 086dbd8..dcc21ca 100644
--- a/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp
@@ -59,15 +59,16 @@ class MallocChecker : public CheckerVisitor<MallocChecker> {
BuiltinBug *BT_DoubleFree;
BuiltinBug *BT_Leak;
BuiltinBug *BT_UseFree;
- IdentifierInfo *II_malloc, *II_free, *II_realloc;
+ BuiltinBug *BT_BadFree;
+ IdentifierInfo *II_malloc, *II_free, *II_realloc, *II_calloc;
public:
MallocChecker()
- : BT_DoubleFree(0), BT_Leak(0), BT_UseFree(0),
- II_malloc(0), II_free(0), II_realloc(0) {}
+ : BT_DoubleFree(0), BT_Leak(0), BT_UseFree(0), BT_BadFree(0),
+ II_malloc(0), II_free(0), II_realloc(0), II_calloc(0) {}
static void *getTag();
bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
- void EvalDeadSymbols(CheckerContext &C,const Stmt *S,SymbolReaper &SymReaper);
+ void EvalDeadSymbols(CheckerContext &C, SymbolReaper &SymReaper);
void EvalEndPath(GREndPathNodeBuilder &B, void *tag, GRExprEngine &Eng);
void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *S);
const GRState *EvalAssume(const GRState *state, SVal Cond, bool Assumption);
@@ -76,12 +77,24 @@ public:
private:
void MallocMem(CheckerContext &C, const CallExpr *CE);
const GRState *MallocMemAux(CheckerContext &C, const CallExpr *CE,
- const Expr *SizeEx, const GRState *state);
+ const Expr *SizeEx, SVal Init,
+ const GRState *state) {
+ return MallocMemAux(C, CE, state->getSVal(SizeEx), Init, state);
+ }
+ const GRState *MallocMemAux(CheckerContext &C, const CallExpr *CE,
+ SVal SizeEx, SVal Init,
+ const GRState *state);
+
void FreeMem(CheckerContext &C, const CallExpr *CE);
const GRState *FreeMemAux(CheckerContext &C, const CallExpr *CE,
const GRState *state);
void ReallocMem(CheckerContext &C, const CallExpr *CE);
+ void CallocMem(CheckerContext &C, const CallExpr *CE);
+
+ bool SummarizeValue(llvm::raw_ostream& os, SVal V);
+ bool SummarizeRegion(llvm::raw_ostream& os, const MemRegion *MR);
+ void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange range);
};
} // end anonymous namespace
@@ -120,6 +133,8 @@ bool MallocChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
II_free = &Ctx.Idents.get("free");
if (!II_realloc)
II_realloc = &Ctx.Idents.get("realloc");
+ if (!II_calloc)
+ II_calloc = &Ctx.Idents.get("calloc");
if (FD->getIdentifier() == II_malloc) {
MallocMem(C, CE);
@@ -136,30 +151,44 @@ bool MallocChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
return true;
}
+ if (FD->getIdentifier() == II_calloc) {
+ CallocMem(C, CE);
+ return true;
+ }
+
return false;
}
void MallocChecker::MallocMem(CheckerContext &C, const CallExpr *CE) {
- const GRState *state = MallocMemAux(C, CE, CE->getArg(0), C.getState());
+ const GRState *state = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(),
+ C.getState());
C.addTransition(state);
}
const GRState *MallocChecker::MallocMemAux(CheckerContext &C,
const CallExpr *CE,
- const Expr *SizeEx,
+ SVal Size, SVal Init,
const GRState *state) {
unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
ValueManager &ValMgr = C.getValueManager();
+ // Set the return value.
SVal RetVal = ValMgr.getConjuredSymbolVal(NULL, CE, CE->getType(), Count);
+ state = state->BindExpr(CE, RetVal);
- SVal Size = state->getSVal(SizeEx);
+ // Fill the region with the initialization value.
+ state = state->bindDefault(RetVal, Init);
- state = C.getEngine().getStoreManager().setExtent(state, RetVal.getAsRegion(),
- Size);
+ // Set the region's extent equal to the Size parameter.
+ const SymbolicRegion *R = cast<SymbolicRegion>(RetVal.getAsRegion());
+ DefinedOrUnknownSVal Extent = R->getExtent(ValMgr);
+ DefinedOrUnknownSVal DefinedSize = cast<DefinedOrUnknownSVal>(Size);
+
+ SValuator &SVator = ValMgr.getSValuator();
+ DefinedOrUnknownSVal ExtentMatchesSize =
+ SVator.EvalEQ(state, Extent, DefinedSize);
+ state = state->Assume(ExtentMatchesSize, true);
- state = state->BindExpr(CE, RetVal);
-
SymbolRef Sym = RetVal.getAsLocSymbol();
assert(Sym);
// Set the symbol's state to Allocated.
@@ -175,18 +204,59 @@ void MallocChecker::FreeMem(CheckerContext &C, const CallExpr *CE) {
const GRState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE,
const GRState *state) {
- SVal ArgVal = state->getSVal(CE->getArg(0));
+ const Expr *ArgExpr = CE->getArg(0);
+ SVal ArgVal = state->getSVal(ArgExpr);
// If ptr is NULL, no operation is performed.
if (ArgVal.isZeroConstant())
return state;
+
+ // Unknown values could easily be okay
+ // Undefined values are handled elsewhere
+ if (ArgVal.isUnknownOrUndef())
+ return state;
- SymbolRef Sym = ArgVal.getAsLocSymbol();
-
+ const MemRegion *R = ArgVal.getAsRegion();
+
+ // Nonlocs can't be freed, of course.
+ // Non-region locations (labels and fixed addresses) also shouldn't be freed.
+ if (!R) {
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
+ return NULL;
+ }
+
+ R = R->StripCasts();
+
+ // Blocks might show up as heap data, but should not be free()d
+ if (isa<BlockDataRegion>(R)) {
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
+ return NULL;
+ }
+
+ const MemSpaceRegion *MS = R->getMemorySpace();
+
+ // Parameters, locals, statics, and globals shouldn't be freed.
+ if (!(isa<UnknownSpaceRegion>(MS) || isa<HeapSpaceRegion>(MS))) {
+ // FIXME: at the time this code was written, malloc() regions were
+ // represented by conjured symbols, which are all in UnknownSpaceRegion.
+ // This means that there isn't actually anything from HeapSpaceRegion
+ // that should be freed, even though we allow it here.
+ // Of course, free() can work on memory allocated outside the current
+ // function, so UnknownSpaceRegion is always a possibility.
+ // False negatives are better than false positives.
+
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
+ return NULL;
+ }
+
+ const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
// Various cases could lead to non-symbol values here.
- if (!Sym)
+ // For now, ignore them.
+ if (!SR)
return state;
+ SymbolRef Sym = SR->getSymbol();
+
const RefState *RS = state->get<RegionState>(Sym);
// If the symbol has not been tracked, return. This is possible when free() is
@@ -214,6 +284,135 @@ const GRState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE,
return state->set<RegionState>(Sym, RefState::getReleased(CE));
}
+bool MallocChecker::SummarizeValue(llvm::raw_ostream& os, SVal V) {
+ if (nonloc::ConcreteInt *IntVal = dyn_cast<nonloc::ConcreteInt>(&V))
+ os << "an integer (" << IntVal->getValue() << ")";
+ else if (loc::ConcreteInt *ConstAddr = dyn_cast<loc::ConcreteInt>(&V))
+ os << "a constant address (" << ConstAddr->getValue() << ")";
+ else if (loc::GotoLabel *Label = dyn_cast<loc::GotoLabel>(&V))
+ os << "the address of the label '"
+ << Label->getLabel()->getID()->getName()
+ << "'";
+ else
+ return false;
+
+ return true;
+}
+
+bool MallocChecker::SummarizeRegion(llvm::raw_ostream& os,
+ const MemRegion *MR) {
+ switch (MR->getKind()) {
+ case MemRegion::FunctionTextRegionKind: {
+ const FunctionDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
+ if (FD)
+ os << "the address of the function '" << FD << "'";
+ else
+ os << "the address of a function";
+ return true;
+ }
+ case MemRegion::BlockTextRegionKind:
+ os << "block text";
+ return true;
+ case MemRegion::BlockDataRegionKind:
+ // FIXME: where did the block come from?
+ os << "a block";
+ return true;
+ default: {
+ const MemSpaceRegion *MS = MR->getMemorySpace();
+
+ switch (MS->getKind()) {
+ case MemRegion::StackLocalsSpaceRegionKind: {
+ const VarRegion *VR = dyn_cast<VarRegion>(MR);
+ const VarDecl *VD;
+ if (VR)
+ VD = VR->getDecl();
+ else
+ VD = NULL;
+
+ if (VD)
+ os << "the address of the local variable '" << VD->getName() << "'";
+ else
+ os << "the address of a local stack variable";
+ return true;
+ }
+ case MemRegion::StackArgumentsSpaceRegionKind: {
+ const VarRegion *VR = dyn_cast<VarRegion>(MR);
+ const VarDecl *VD;
+ if (VR)
+ VD = VR->getDecl();
+ else
+ VD = NULL;
+
+ if (VD)
+ os << "the address of the parameter '" << VD->getName() << "'";
+ else
+ os << "the address of a parameter";
+ return true;
+ }
+ case MemRegion::NonStaticGlobalSpaceRegionKind:
+ case MemRegion::StaticGlobalSpaceRegionKind: {
+ const VarRegion *VR = dyn_cast<VarRegion>(MR);
+ const VarDecl *VD;
+ if (VR)
+ VD = VR->getDecl();
+ else
+ VD = NULL;
+
+ if (VD) {
+ if (VD->isStaticLocal())
+ os << "the address of the static variable '" << VD->getName() << "'";
+ else
+ os << "the address of the global variable '" << VD->getName() << "'";
+ } else
+ os << "the address of a global variable";
+ return true;
+ }
+ default:
+ return false;
+ }
+ }
+ }
+}
+
+void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
+ SourceRange range) {
+ ExplodedNode *N = C.GenerateSink();
+ if (N) {
+ if (!BT_BadFree)
+ BT_BadFree = new BuiltinBug("Bad free");
+
+ llvm::SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ const MemRegion *MR = ArgVal.getAsRegion();
+ if (MR) {
+ while (const ElementRegion *ER = dyn_cast<ElementRegion>(MR))
+ MR = ER->getSuperRegion();
+
+ // Special case for alloca()
+ if (isa<AllocaRegion>(MR))
+ os << "Argument to free() was allocated by alloca(), not malloc()";
+ else {
+ os << "Argument to free() is ";
+ if (SummarizeRegion(os, MR))
+ os << ", which is not memory allocated by malloc()";
+ else
+ os << "not memory allocated by malloc()";
+ }
+ } else {
+ os << "Argument to free() is ";
+ if (SummarizeValue(os, ArgVal))
+ os << ", which is not memory allocated by malloc()";
+ else
+ os << "not memory allocated by malloc()";
+ }
+
+ EnhancedBugReport *R = new EnhancedBugReport(*BT_BadFree, os.str(), N);
+ R->addRange(range);
+ C.EmitReport(R);
+ }
+}
+
void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) {
const GRState *state = C.getState();
const Expr *Arg0 = CE->getArg(0);
@@ -234,7 +433,8 @@ void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) {
if (Sym)
stateEqual = stateEqual->set<RegionState>(Sym, RefState::getReleased(CE));
- const GRState *stateMalloc = MallocMemAux(C, CE, CE->getArg(1), stateEqual);
+ const GRState *stateMalloc = MallocMemAux(C, CE, CE->getArg(1),
+ UndefinedVal(), stateEqual);
C.addTransition(stateMalloc);
}
@@ -256,15 +456,31 @@ void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) {
if (stateFree) {
// FIXME: We should copy the content of the original buffer.
const GRState *stateRealloc = MallocMemAux(C, CE, CE->getArg(1),
- stateFree);
+ UnknownVal(), stateFree);
C.addTransition(stateRealloc);
}
}
}
}
-void MallocChecker::EvalDeadSymbols(CheckerContext &C, const Stmt *S,
- SymbolReaper &SymReaper) {
+void MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+
+ ValueManager &ValMgr = C.getValueManager();
+ SValuator &SVator = C.getSValuator();
+
+ SVal Count = state->getSVal(CE->getArg(0));
+ SVal EleSize = state->getSVal(CE->getArg(1));
+ SVal TotalSize = SVator.EvalBinOp(state, BinaryOperator::Mul, Count, EleSize,
+ ValMgr.getContext().getSizeType());
+
+ SVal Zero = ValMgr.makeZeroVal(ValMgr.getContext().CharTy);
+
+ state = MallocMemAux(C, CE, TotalSize, Zero, state);
+ C.addTransition(state);
+}
+
+void MallocChecker::EvalDeadSymbols(CheckerContext &C,SymbolReaper &SymReaper) {
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
E = SymReaper.dead_end(); I != E; ++I) {
SymbolRef Sym = *I;
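Three behavioral changes land in MallocChecker in this diff: MallocMemAux now records the allocation's size by Assume()ing the region's extent symbol equal to the size argument, calloc is modeled as a zero-initialized allocation of Count * EleSize bytes, and FreeMemAux reports arguments that cannot possibly have come from malloc(). A sketch of code exercising each path (diagnostic text quoted from the ReportBadFree/SummarizeRegion code above):

    #include <stdlib.h>

    void test(size_t n) {
      char *p = (char *)malloc(n);             // extent(p's region) == n is
                                               // assumed; contents undefined
      int *q = (int *)calloc(n, sizeof(int));  // extent == n * sizeof(int);
                                               // default binding is zero
      free(p);
      free(q);

      int local;
      free(&local);   // "Argument to free() is the address of the local
                      //  variable 'local', which is not memory allocated
                      //  by malloc()"
    }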
diff --git a/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp b/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp
index 575458c..9cfeb7a 100644
--- a/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp
@@ -14,6 +14,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Checker/PathSensitive/MemRegion.h"
+#include "clang/Checker/PathSensitive/ValueManager.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/Support/BumpVector.h"
#include "clang/AST/CharUnits.h"
@@ -29,22 +30,22 @@ template<typename RegionTy> struct MemRegionManagerTrait;
template <typename RegionTy, typename A1>
RegionTy* MemRegionManager::getRegion(const A1 a1) {
-
+
const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1);
-
+
llvm::FoldingSetNodeID ID;
RegionTy::ProfileRegion(ID, a1, superRegion);
void* InsertPos;
RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
InsertPos));
-
+
if (!R) {
R = (RegionTy*) A.Allocate<RegionTy>();
new (R) RegionTy(a1, superRegion);
Regions.InsertNode(R, InsertPos);
}
-
+
return R;
}
@@ -56,72 +57,72 @@ RegionTy* MemRegionManager::getSubRegion(const A1 a1,
void* InsertPos;
RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
InsertPos));
-
+
if (!R) {
R = (RegionTy*) A.Allocate<RegionTy>();
new (R) RegionTy(a1, superRegion);
Regions.InsertNode(R, InsertPos);
}
-
+
return R;
}
template <typename RegionTy, typename A1, typename A2>
RegionTy* MemRegionManager::getRegion(const A1 a1, const A2 a2) {
-
+
const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1, a2);
-
+
llvm::FoldingSetNodeID ID;
RegionTy::ProfileRegion(ID, a1, a2, superRegion);
void* InsertPos;
RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
InsertPos));
-
+
if (!R) {
R = (RegionTy*) A.Allocate<RegionTy>();
new (R) RegionTy(a1, a2, superRegion);
Regions.InsertNode(R, InsertPos);
}
-
+
return R;
}
template <typename RegionTy, typename A1, typename A2>
RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2,
const MemRegion *superRegion) {
-
+
llvm::FoldingSetNodeID ID;
RegionTy::ProfileRegion(ID, a1, a2, superRegion);
void* InsertPos;
RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
InsertPos));
-
+
if (!R) {
R = (RegionTy*) A.Allocate<RegionTy>();
new (R) RegionTy(a1, a2, superRegion);
Regions.InsertNode(R, InsertPos);
}
-
+
return R;
}
template <typename RegionTy, typename A1, typename A2, typename A3>
RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2, const A3 a3,
const MemRegion *superRegion) {
-
+
llvm::FoldingSetNodeID ID;
RegionTy::ProfileRegion(ID, a1, a2, a3, superRegion);
void* InsertPos;
RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
InsertPos));
-
+
if (!R) {
R = (RegionTy*) A.Allocate<RegionTy>();
new (R) RegionTy(a1, a2, a3, superRegion);
Regions.InsertNode(R, InsertPos);
}
-
+
return R;
}
@@ -171,6 +172,53 @@ const StackFrameContext *VarRegion::getStackFrame() const {
}
//===----------------------------------------------------------------------===//
+// Region extents.
+//===----------------------------------------------------------------------===//
+
+DefinedOrUnknownSVal DeclRegion::getExtent(ValueManager& ValMgr) const {
+ ASTContext& Ctx = ValMgr.getContext();
+ QualType T = getDesugaredValueType(Ctx);
+
+ if (isa<VariableArrayType>(T))
+ return nonloc::SymbolVal(ValMgr.getSymbolManager().getExtentSymbol(this));
+ if (isa<IncompleteArrayType>(T))
+ return UnknownVal();
+
+ CharUnits Size = Ctx.getTypeSizeInChars(T);
+ QualType SizeTy = Ctx.getSizeType();
+ return ValMgr.makeIntVal(Size.getQuantity(), SizeTy);
+}
+
+DefinedOrUnknownSVal FieldRegion::getExtent(ValueManager& ValMgr) const {
+ DefinedOrUnknownSVal Extent = DeclRegion::getExtent(ValMgr);
+
+ // A zero-length array at the end of a struct often stands for dynamically-
+ // allocated extra memory.
+ if (Extent.isZeroConstant()) {
+ ASTContext& Ctx = ValMgr.getContext();
+ QualType T = getDesugaredValueType(Ctx);
+
+ if (isa<ConstantArrayType>(T))
+ return UnknownVal();
+ }
+
+ return Extent;
+}
+
+DefinedOrUnknownSVal AllocaRegion::getExtent(ValueManager& ValMgr) const {
+ return nonloc::SymbolVal(ValMgr.getSymbolManager().getExtentSymbol(this));
+}
+
+DefinedOrUnknownSVal SymbolicRegion::getExtent(ValueManager& ValMgr) const {
+ return nonloc::SymbolVal(ValMgr.getSymbolManager().getExtentSymbol(this));
+}
+
+DefinedOrUnknownSVal StringRegion::getExtent(ValueManager& ValMgr) const {
+ QualType SizeTy = ValMgr.getContext().getSizeType();
+ return ValMgr.makeIntVal(getStringLiteral()->getByteLength()+1, SizeTy);
+}
+
+//===----------------------------------------------------------------------===//
// FoldingSet profiling.
//===----------------------------------------------------------------------===//
@@ -183,6 +231,11 @@ void StackSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddPointer(getStackFrame());
}
+void StaticGlobalSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned)getKind());
+ ID.AddPointer(getCodeRegion());
+}
+
void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
const StringLiteral* Str,
const MemRegion* superRegion) {
@@ -226,7 +279,7 @@ void CXXThisRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
void CXXThisRegion::Profile(llvm::FoldingSetNodeID &ID) const {
CXXThisRegion::ProfileRegion(ID, ThisPointerTy, superRegion);
}
-
+
void DeclRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl* D,
const MemRegion* superRegion, Kind k) {
ID.AddInteger((unsigned) k);
@@ -349,7 +402,6 @@ void BlockDataRegion::dumpToStream(llvm::raw_ostream& os) const {
os << "block_data{" << BC << '}';
}
-
void CompoundLiteralRegion::dumpToStream(llvm::raw_ostream& os) const {
// FIXME: More elaborate pretty-printing.
os << "{ " << (void*) CL << " }";
@@ -368,6 +420,10 @@ void FieldRegion::dumpToStream(llvm::raw_ostream& os) const {
os << superRegion << "->" << getDecl();
}
+void NonStaticGlobalSpaceRegion::dumpToStream(llvm::raw_ostream &os) const {
+ os << "NonStaticGlobalSpaceRegion";
+}
+
void ObjCIvarRegion::dumpToStream(llvm::raw_ostream& os) const {
os << "ivar{" << superRegion << ',' << getDecl() << '}';
}
@@ -392,6 +448,10 @@ void RegionRawOffset::dumpToStream(llvm::raw_ostream& os) const {
os << "raw_offset{" << getRegion() << ',' << getByteOffset() << '}';
}
+void StaticGlobalSpaceRegion::dumpToStream(llvm::raw_ostream &os) const {
+ os << "StaticGlobalsMemSpace{" << CR << '}';
+}
+
//===----------------------------------------------------------------------===//
// MemRegionManager methods.
//===----------------------------------------------------------------------===//
@@ -412,7 +472,7 @@ const REG *MemRegionManager::LazyAllocate(REG*& region, ARG a) {
region = (REG*) A.Allocate<REG>();
new (region) REG(this, a);
}
-
+
return region;
}
@@ -442,8 +502,18 @@ MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) {
return R;
}
-const GlobalsSpaceRegion *MemRegionManager::getGlobalsRegion() {
- return LazyAllocate(globals);
+const GlobalsSpaceRegion
+*MemRegionManager::getGlobalsRegion(const CodeTextRegion *CR) {
+ if (!CR)
+ return LazyAllocate(globals);
+
+ StaticGlobalSpaceRegion *&R = StaticsGlobalSpaceRegions[CR];
+ if (R)
+ return R;
+
+ R = A.Allocate<StaticGlobalSpaceRegion>();
+ new (R) StaticGlobalSpaceRegion(this, CR);
+ return R;
}
const HeapSpaceRegion *MemRegionManager::getHeapRegion() {
@@ -462,7 +532,7 @@ const MemSpaceRegion *MemRegionManager::getCodeRegion() {
// Constructing regions.
//===----------------------------------------------------------------------===//
-const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str) {
+const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str){
return getSubRegion<StringRegion>(Str, getGlobalsRegion());
}
@@ -470,7 +540,9 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
const LocationContext *LC) {
const MemRegion *sReg = 0;
- if (D->hasLocalStorage()) {
+ if (D->hasGlobalStorage() && !D->isStaticLocal())
+ sReg = getGlobalsRegion();
+ else {
// FIXME: Once we implement scope handling, we will need to properly look
// up 'D' in the proper LocationContext.
const DeclContext *DC = D->getDeclContext();
@@ -479,15 +551,32 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
if (!STC)
sReg = getUnknownRegion();
else {
- sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)
- ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC))
- : static_cast<const MemRegion*>(getStackLocalsRegion(STC));
+ if (D->hasLocalStorage()) {
+ sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)
+ ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC))
+ : static_cast<const MemRegion*>(getStackLocalsRegion(STC));
+ }
+ else {
+ assert(D->isStaticLocal());
+ const Decl *D = STC->getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ sReg = getGlobalsRegion(getFunctionTextRegion(FD));
+ else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ const BlockTextRegion *BTR =
+ getBlockTextRegion(BD,
+ C.getCanonicalType(BD->getSignatureAsWritten()->getType()),
+ STC->getAnalysisContext());
+ sReg = getGlobalsRegion(BTR);
+ }
+ else {
+ // FIXME: For ObjC-methods, we need a new CodeTextRegion. For now
+ // just use the main global memspace.
+ sReg = getGlobalsRegion();
+ }
+ }
}
}
- else {
- sReg = getGlobalsRegion();
- }
-
+
return getSubRegion<VarRegion>(D, sReg);
}
@@ -500,10 +589,10 @@ const BlockDataRegion *
MemRegionManager::getBlockDataRegion(const BlockTextRegion *BC,
const LocationContext *LC) {
const MemRegion *sReg = 0;
-
- if (LC) {
+
+ if (LC) {
// FIXME: Once we implement scope handling, we want the parent region
- // to be the scope.
+ // to be the scope.
const StackFrameContext *STC = LC->getCurrentStackFrame();
assert(STC);
sReg = getStackLocalsRegion(STC);
@@ -520,9 +609,9 @@ MemRegionManager::getBlockDataRegion(const BlockTextRegion *BC,
const CompoundLiteralRegion*
MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr* CL,
const LocationContext *LC) {
-
+
const MemRegion *sReg = 0;
-
+
if (CL->isFileScope())
sReg = getGlobalsRegion();
else {
@@ -530,7 +619,7 @@ MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr* CL,
assert(STC);
sReg = getStackLocalsRegion(STC);
}
-
+
return getSubRegion<CompoundLiteralRegion>(CL, sReg);
}
@@ -749,24 +838,24 @@ void BlockDataRegion::LazyInitializeReferencedVars() {
AnalysisContext *AC = getCodeRegion()->getAnalysisContext();
AnalysisContext::referenced_decls_iterator I, E;
llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl());
-
+
if (I == E) {
ReferencedVars = (void*) 0x1;
return;
}
-
+
MemRegionManager &MemMgr = *getMemRegionManager();
llvm::BumpPtrAllocator &A = MemMgr.getAllocator();
BumpVectorContext BC(A);
-
+
typedef BumpVector<const MemRegion*> VarVec;
VarVec *BV = (VarVec*) A.Allocate<VarVec>();
new (BV) VarVec(BC, E - I);
-
+
for ( ; I != E; ++I) {
const VarDecl *VD = *I;
const VarRegion *VR = 0;
-
+
if (!VD->getAttr<BlocksAttr>() && VD->hasLocalStorage())
VR = MemMgr.getVarRegion(VD, this);
else {
@@ -776,11 +865,11 @@ void BlockDataRegion::LazyInitializeReferencedVars() {
VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion());
}
}
-
+
assert(VR);
BV->push_back(VR, BC);
}
-
+
ReferencedVars = BV;
}
@@ -790,7 +879,7 @@ BlockDataRegion::referenced_vars_begin() const {
BumpVector<const MemRegion*> *Vec =
static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
-
+
return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ?
NULL : Vec->begin());
}
@@ -801,7 +890,7 @@ BlockDataRegion::referenced_vars_end() const {
BumpVector<const MemRegion*> *Vec =
static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
-
+
return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ?
NULL : Vec->end());
}
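The new getExtent() family gives each region kind a size in bytes without the RegionExtents side table (deleted from RegionStore.cpp below): fixed-size declarations yield a concrete value; VLAs, alloca() buffers, and symbolic heap regions yield a fresh extent symbol; and string literals yield their byte length plus the terminator. Static locals, meanwhile, now live in a per-function StaticGlobalsMemSpace rather than in the one shared globals region. In terms of analyzed C code:

    const char *greeting = "hi";   // StringRegion: extent = 2 + 1 bytes

    void f(unsigned n) {
      char fixed[16];              // DeclRegion: concrete extent of 16
      char vla[n];                 // VariableArrayType: symbolic extent
      static unsigned calls;      // placed in StaticGlobalsMemSpace{f}
      ++calls;
    }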
diff --git a/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp
index e743528..1ea1bd9 100644
--- a/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp
@@ -100,7 +100,13 @@ bool OSAtomicChecker::EvalOSAtomicCompareAndSwap(CheckerContext &C,
const GRState *state = C.getState();
ExplodedNodeSet Tmp;
SVal location = state->getSVal(theValueExpr);
- // Here we should use the value type of the region as the load type.
+ // Here we should use the value type of the region as the load type, because
+ // we are simulating the semantics of the function itself, not the semantics
+ // of passing an argument to it. The type of the theValue expression is
+ // therefore not what we are loading. The type of the VarRegion is usually
+ // not the type we want either; the store manager still has to perform a
+ // CastRetrievedVal. Specifying LoadTy could thus be omitted entirely, but we
+ // keep it here to emphasize the intended semantics.
QualType LoadTy;
if (const TypedRegion *TR =
dyn_cast_or_null<TypedRegion>(location.getAsRegion())) {
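For reference, this is the primitive whose body the checker simulates (signature per Mac OS X's libkern/OSAtomic.h); the checker loads *theValue using the pointee region's value type, as the rewritten comment explains:

    #include <libkern/OSAtomic.h>

    bool demo() {
      int32_t value = 5;
      // Atomically: if (value == 5) { value = 7; return true; } return false;
      return OSAtomicCompareAndSwap32(5, 7, &value);
    }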
diff --git a/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp
index 963923c..cf05a7d 100644
--- a/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp
@@ -107,7 +107,7 @@ void PathDiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel,
new PathDiagnosticEventPiece(Info.getLocation(), StrC.str());
for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i)
- P->addRange(Info.getRange(i));
+ P->addRange(Info.getRange(i).getAsRange());
for (unsigned i = 0, e = Info.getNumFixItHints(); i != e; ++i)
P->addFixItHint(Info.getFixItHint(i));
D->push_front(P);
@@ -181,15 +181,8 @@ PathDiagnosticRange PathDiagnosticLocation::asRange() const {
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
return MD->getSourceRange();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- // FIXME: We would like to always get the function body, even
- // when it needs to be de-serialized, but getting the
- // ASTContext here requires significant changes.
- if (Stmt *Body = FD->getBody()) {
- if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Body))
- return CS->getSourceRange();
- else
- return cast<CXXTryStmt>(Body)->getSourceRange();
- }
+ if (Stmt *Body = FD->getBody())
+ return Body->getSourceRange();
}
else {
SourceLocation L = D->getLocation();
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PlistDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Checker/PlistDiagnostics.cpp
index 5706a07..13accbb 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PlistDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/PlistDiagnostics.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/PathDiagnosticClients.h"
+#include "clang/Checker/PathDiagnosticClients.h"
#include "clang/Checker/BugReporter/PathDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
diff --git a/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp b/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp
index c904c33..2a35d32 100644
--- a/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp
@@ -105,97 +105,69 @@ public:
return ranges.isSingleton() ? ranges.begin()->getConcreteValue() : 0;
}
- /// AddEQ - Create a new RangeSet with the additional constraint that the
- /// value be equal to V.
- RangeSet AddEQ(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
- // Search for a range that includes 'V'. If so, return a new RangeSet
- // representing { [V, V] }.
- for (PrimRangeSet::iterator i = begin(), e = end(); i!=e; ++i)
- if (i->Includes(V))
- return RangeSet(F, V, V);
-
- return RangeSet(F);
- }
-
- /// AddNE - Create a new RangeSet with the additional constraint that the
- /// value be not be equal to V.
- RangeSet AddNE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
- PrimRangeSet newRanges = ranges;
-
- // FIXME: We can perhaps enhance ImmutableSet to do this search for us
- // in log(N) time using the sorted property of the internal AVL tree.
- for (iterator i = begin(), e = end(); i != e; ++i) {
- if (i->Includes(V)) {
- // Remove the old range.
- newRanges = F.Remove(newRanges, *i);
- // Split the old range into possibly one or two ranges.
- if (V != i->From())
- newRanges = F.Add(newRanges, Range(i->From(), BV.Sub1(V)));
- if (V != i->To())
- newRanges = F.Add(newRanges, Range(BV.Add1(V), i->To()));
- // All of the ranges are non-overlapping, so we can stop.
+private:
+ void IntersectInRange(BasicValueFactory &BV, Factory &F,
+ const llvm::APSInt &Lower,
+ const llvm::APSInt &Upper,
+ PrimRangeSet &newRanges,
+ PrimRangeSet::iterator &i,
+ PrimRangeSet::iterator &e) const {
+ // There are six cases for each range R in the set:
+ // 1. R is entirely before the intersection range.
+ // 2. R is entirely after the intersection range.
+ // 3. R contains the entire intersection range.
+ // 4. R starts before the intersection range and ends in the middle.
+ // 5. R starts in the middle of the intersection range and ends after it.
+ // 6. R is entirely contained in the intersection range.
+ // These correspond to each of the conditions below.
+ for (/* i = begin(), e = end() */; i != e; ++i) {
+ if (i->To() < Lower) {
+ continue;
+ }
+ if (i->From() > Upper) {
break;
}
- }
-
- return newRanges;
- }
-
- /// AddNE - Create a new RangeSet with the additional constraint that the
- /// value be less than V.
- RangeSet AddLT(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
- PrimRangeSet newRanges = F.GetEmptySet();
-
- for (iterator i = begin(), e = end() ; i != e ; ++i) {
- if (i->Includes(V) && i->From() < V)
- newRanges = F.Add(newRanges, Range(i->From(), BV.Sub1(V)));
- else if (i->To() < V)
- newRanges = F.Add(newRanges, *i);
- }
-
- return newRanges;
- }
-
- RangeSet AddLE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
- PrimRangeSet newRanges = F.GetEmptySet();
- for (iterator i = begin(), e = end(); i != e; ++i) {
- // Strictly we should test for includes *V + 1, but no harm is
- // done by this formulation
- if (i->Includes(V))
- newRanges = F.Add(newRanges, Range(i->From(), V));
- else if (i->To() <= V)
- newRanges = F.Add(newRanges, *i);
- }
-
- return newRanges;
- }
-
- RangeSet AddGT(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
- PrimRangeSet newRanges = F.GetEmptySet();
-
- for (PrimRangeSet::iterator i = begin(), e = end(); i != e; ++i) {
- if (i->Includes(V) && i->To() > V)
- newRanges = F.Add(newRanges, Range(BV.Add1(V), i->To()));
- else if (i->From() > V)
- newRanges = F.Add(newRanges, *i);
+ if (i->Includes(Lower)) {
+ if (i->Includes(Upper)) {
+ newRanges = F.Add(newRanges, Range(BV.getValue(Lower),
+ BV.getValue(Upper)));
+ break;
+ } else
+ newRanges = F.Add(newRanges, Range(BV.getValue(Lower), i->To()));
+ } else {
+ if (i->Includes(Upper)) {
+ newRanges = F.Add(newRanges, Range(i->From(), BV.getValue(Upper)));
+ break;
+ } else
+ newRanges = F.Add(newRanges, *i);
+ }
}
-
- return newRanges;
}
- RangeSet AddGE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+public:
+ // Returns a set containing the values in the receiving set, intersected with
+ // the closed range [Lower, Upper]. Unlike the Range type, this range uses
+ // modular arithmetic, corresponding to the common treatment of C integer
+ // overflow. Thus, if the Lower bound is greater than the Upper bound, the
+ // range is taken to wrap around. This is equivalent to taking the
+ // intersection with the two ranges [Min, Upper] and [Lower, Max],
+ // or, alternatively, /removing/ all integers between Upper and Lower.
+ RangeSet Intersect(BasicValueFactory &BV, Factory &F,
+ const llvm::APSInt &Lower,
+ const llvm::APSInt &Upper) const {
PrimRangeSet newRanges = F.GetEmptySet();
- for (PrimRangeSet::iterator i = begin(), e = end(); i != e; ++i) {
- // Strictly we should test for includes *V - 1, but no harm is
- // done by this formulation
- if (i->Includes(V))
- newRanges = F.Add(newRanges, Range(V, i->To()));
- else if (i->From() >= V)
- newRanges = F.Add(newRanges, *i);
+ PrimRangeSet::iterator i = begin(), e = end();
+ if (Lower <= Upper)
+ IntersectInRange(BV, F, Lower, Upper, newRanges, i, e);
+ else {
+ // The order of the next two statements is important!
+ // IntersectInRange() does not reset the iteration state for i and e.
+ // Therefore, the lower range must be handled first.
+ IntersectInRange(BV, F, BV.getMinValue(Upper), Upper, newRanges, i, e);
+ IntersectInRange(BV, F, Lower, BV.getMaxValue(Lower), newRanges, i, e);
}
-
return newRanges;
}
@@ -237,23 +209,29 @@ public:
RangeConstraintManager(GRSubEngine &subengine)
: SimpleConstraintManager(subengine) {}
- const GRState* AssumeSymNE(const GRState* St, SymbolRef sym,
- const llvm::APSInt& V);
+ const GRState* AssumeSymNE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
- const GRState* AssumeSymEQ(const GRState* St, SymbolRef sym,
- const llvm::APSInt& V);
+ const GRState* AssumeSymEQ(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
- const GRState* AssumeSymLT(const GRState* St, SymbolRef sym,
- const llvm::APSInt& V);
+ const GRState* AssumeSymLT(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
- const GRState* AssumeSymGT(const GRState* St, SymbolRef sym,
- const llvm::APSInt& V);
+ const GRState* AssumeSymGT(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
- const GRState* AssumeSymGE(const GRState* St, SymbolRef sym,
- const llvm::APSInt& V);
+ const GRState* AssumeSymGE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
- const GRState* AssumeSymLE(const GRState* St, SymbolRef sym,
- const llvm::APSInt& V);
+ const GRState* AssumeSymLE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
const llvm::APSInt* getSymVal(const GRState* St, SymbolRef sym) const;
@@ -303,10 +281,6 @@ RangeConstraintManager::RemoveDeadBindings(const GRState* state,
return state->set<ConstraintRange>(CR);
}
-//===------------------------------------------------------------------------===
-// AssumeSymX methods: public interface for RangeConstraintManager.
-//===------------------------------------------------------------------------===/
-
RangeSet
RangeConstraintManager::GetRange(const GRState *state, SymbolRef sym) {
if (ConstraintRangeTy::data_type* V = state->get<ConstraintRange>(sym))
@@ -323,20 +297,127 @@ RangeConstraintManager::GetRange(const GRState *state, SymbolRef sym) {
// AssumeSymX methods: public interface for RangeConstraintManager.
//===------------------------------------------------------------------------===/
-#define AssumeX(OP)\
-const GRState*\
-RangeConstraintManager::AssumeSym ## OP(const GRState* state, SymbolRef sym,\
- const llvm::APSInt& V){\
- const RangeSet& R = GetRange(state, sym).Add##OP(state->getBasicVals(), F, V);\
- return !R.isEmpty() ? state->set<ConstraintRange>(sym, R) : NULL;\
+// The syntax for ranges below is mathematical, using [x, y] for closed ranges
+// and (x, y) for open ranges. These ranges are modular, corresponding with
+// a common treatment of C integer overflow. This means that these methods
+// do not have to worry about overflow; RangeSet::Intersect can handle such a
+// "wraparound" range.
+// As an example, the range [UINT_MAX-1, 3) contains five values: UINT_MAX-1,
+// UINT_MAX, 0, 1, and 2.
+
+const GRState*
+RangeConstraintManager::AssumeSymNE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ llvm::APSInt Lower = Int-Adjustment;
+ llvm::APSInt Upper = Lower;
+ --Lower;
+ ++Upper;
+
+ // [Int-Adjustment+1, Int-Adjustment-1]
+ // Notice that the lower bound is greater than the upper bound.
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Upper, Lower);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
}
-AssumeX(EQ)
-AssumeX(NE)
-AssumeX(LT)
-AssumeX(GT)
-AssumeX(LE)
-AssumeX(GE)
+const GRState*
+RangeConstraintManager::AssumeSymEQ(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ // [Int-Adjustment, Int-Adjustment]
+ BasicValueFactory &BV = state->getBasicVals();
+ llvm::APSInt AdjInt = Int-Adjustment;
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, AdjInt, AdjInt);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+const GRState*
+RangeConstraintManager::AssumeSymLT(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ QualType T = state->getSymbolManager().getType(sym);
+ const llvm::APSInt &Min = BV.getMinValue(T);
+
+ // Special case for Int == Min. This is always false.
+ if (Int == Min)
+ return NULL;
+
+ llvm::APSInt Lower = Min-Adjustment;
+ llvm::APSInt Upper = Int-Adjustment;
+ --Upper;
+
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+const GRState*
+RangeConstraintManager::AssumeSymGT(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ QualType T = state->getSymbolManager().getType(sym);
+ const llvm::APSInt &Max = BV.getMaxValue(T);
+
+ // Special case for Int == Max. This is always false.
+ if (Int == Max)
+ return NULL;
+
+ llvm::APSInt Lower = Int-Adjustment;
+ llvm::APSInt Upper = Max-Adjustment;
+ ++Lower;
+
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+const GRState*
+RangeConstraintManager::AssumeSymGE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ QualType T = state->getSymbolManager().getType(sym);
+ const llvm::APSInt &Min = BV.getMinValue(T);
+
+ // Special case for Int == Min. This is always feasible.
+ if (Int == Min)
+ return state;
+
+ const llvm::APSInt &Max = BV.getMaxValue(T);
+
+ llvm::APSInt Lower = Int-Adjustment;
+ llvm::APSInt Upper = Max-Adjustment;
+
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+const GRState*
+RangeConstraintManager::AssumeSymLE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ QualType T = state->getSymbolManager().getType(sym);
+ const llvm::APSInt &Max = BV.getMaxValue(T);
+
+ // Special case for Int == Max. This is always feasible.
+ if (Int == Max)
+ return state;
+
+ const llvm::APSInt &Min = BV.getMinValue(T);
+
+ llvm::APSInt Lower = Min-Adjustment;
+ llvm::APSInt Upper = Int-Adjustment;
+
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
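The four Min/Max special cases in the methods above can be checked directly in plain C++; a small self-contained sanity check, assuming an ordinary two's-complement int:

    #include <cassert>
    #include <climits>

    int main() {
      int x = -123;            // any value of the symbol's type
      assert(x >= INT_MIN);    // AssumeSymGE with Int == Min: always feasible
      assert(x <= INT_MAX);    // AssumeSymLE with Int == Max: always feasible
      assert(!(x < INT_MIN));  // AssumeSymLT with Int == Min: always infeasible
      assert(!(x > INT_MAX));  // AssumeSymGT with Int == Max: always infeasible
    }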
//===------------------------------------------------------------------------===
// Pretty-printing.
diff --git a/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp b/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp
index c4072fd..74a7fee 100644
--- a/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp
@@ -118,22 +118,6 @@ public:
}
//===----------------------------------------------------------------------===//
-// Region "Extents"
-//===----------------------------------------------------------------------===//
-//
-// MemRegions represent chunks of memory with a size (their "extent"). This
-// GDM entry tracks the extents for regions. Extents are in bytes.
-//
-namespace { class RegionExtents {}; }
-static int RegionExtentsIndex = 0;
-namespace clang {
- template<> struct GRStateTrait<RegionExtents>
- : public GRStatePartialTrait<llvm::ImmutableMap<const MemRegion*, SVal> > {
- static void* GDMIndex() { return &RegionExtentsIndex; }
- };
-}
-
-//===----------------------------------------------------------------------===//
// Utility functions.
//===----------------------------------------------------------------------===//
@@ -244,14 +228,16 @@ public:
Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E,
unsigned Count, InvalidatedSymbols *IS) {
- return RegionStoreManager::InvalidateRegions(store, &R, &R+1, E, Count, IS);
+ return RegionStoreManager::InvalidateRegions(store, &R, &R+1, E, Count, IS,
+ false);
}
Store InvalidateRegions(Store store,
const MemRegion * const *Begin,
const MemRegion * const *End,
const Expr *E, unsigned Count,
- InvalidatedSymbols *IS);
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals);
public: // Made public for helper classes.
@@ -280,6 +266,14 @@ public: // Part of public interface to class.
Store Bind(Store store, Loc LV, SVal V);
+ // BindDefault is only used to initialize a region with a default value.
+ Store BindDefault(Store store, const MemRegion *R, SVal V) {
+ RegionBindings B = GetRegionBindings(store);
+ assert(!Lookup(B, R, BindingKey::Default));
+ assert(!Lookup(B, R, BindingKey::Direct));
+ return Add(B, R, BindingKey::Default, V).getRoot();
+ }
+
Store BindCompoundLiteral(Store store, const CompoundLiteralExpr* CL,
const LocationContext *LC, SVal V);
@@ -339,6 +333,12 @@ public: // Part of public interface to class.
SVal RetrieveArray(Store store, const TypedRegion* R);
+ /// Used to lazily generate derived symbols for bindings that are defined
+ /// implicitly by default bindings in a super region.
+ Optional<SVal> RetrieveDerivedDefaultValue(RegionBindings B,
+ const MemRegion *superR,
+ const TypedRegion *R, QualType Ty);
+
/// Get the state and region whose binding this region R corresponds to.
std::pair<Store, const MemRegion*>
GetLazyBinding(RegionBindings B, const MemRegion *R);
@@ -352,7 +352,7 @@ public: // Part of public interface to class.
/// RemoveDeadBindings - Scans the RegionStore of 'state' for dead values.
/// It returns a new Store with these values removed.
- const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
+ const GRState *RemoveDeadBindings(GRState &state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
@@ -364,18 +364,7 @@ public: // Part of public interface to class.
// Region "extents".
//===------------------------------------------------------------------===//
- const GRState *setExtent(const GRState *state,const MemRegion* R,SVal Extent){
- return state->set<RegionExtents>(R, Extent);
- }
-
- Optional<SVal> getExtent(const GRState *state, const MemRegion *R) {
- const SVal *V = state->get<RegionExtents>(R);
- if (V)
- return *V;
- else
- return Optional<SVal>();
- }
-
+ // FIXME: This method will soon be eliminated; see the note in Store.h.
DefinedOrUnknownSVal getSizeInElements(const GRState *state,
const MemRegion* R, QualType EleTy);
@@ -391,12 +380,17 @@ public: // Part of public interface to class.
const char *sep);
void iterBindings(Store store, BindingsHandler& f) {
- // FIXME: Implement.
- }
-
- // FIXME: Remove.
- BasicValueFactory& getBasicVals() {
- return StateMgr.getBasicVals();
+ RegionBindings B = GetRegionBindings(store);
+ for (RegionBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
+ const BindingKey &K = I.getKey();
+ if (!K.isDirect())
+ continue;
+ if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey().getRegion())) {
+ // FIXME: Possibly incorporate the offset?
+ if (!f.HandleBinding(*this, store, R, I.getData()))
+ return;
+ }
+ }
}
// FIXME: Remove.
@@ -483,12 +477,13 @@ public:
RegionBindings getRegionBindings() const { return B; }
- void AddToCluster(BindingKey K) {
+ RegionCluster &AddToCluster(BindingKey K) {
const MemRegion *R = K.getRegion();
const MemRegion *baseR = R->getBaseRegion();
RegionCluster &C = getCluster(baseR);
C.push_back(K, BVC);
static_cast<DERIVED*>(this)->VisitAddedToCluster(baseR, C);
+ return C;
}
bool isVisited(const MemRegion *R) {
@@ -504,15 +499,20 @@ public:
return *CRef;
}
- void GenerateClusters() {
+ void GenerateClusters(bool includeGlobals = false) {
// Scan the entire set of bindings and make the region clusters.
for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
- AddToCluster(RI.getKey());
+ RegionCluster &C = AddToCluster(RI.getKey());
if (const MemRegion *R = RI.getData().getAsRegion()) {
// Generate a cluster, but don't add the region to the cluster
// if there aren't any bindings.
getCluster(R->getBaseRegion());
}
+ if (includeGlobals) {
+ const MemRegion *R = RI.getKey().getRegion();
+ if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace()))
+ AddToWorkList(R, C);
+ }
}
}
@@ -615,8 +615,8 @@ void InvalidateRegionsWorker::VisitBinding(SVal V) {
RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore());
for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
- const MemRegion *baseR = RI.getKey().getRegion();
- if (cast<SubRegion>(baseR)->isSubRegionOf(LazyR))
+ const SubRegion *baseR = dyn_cast<SubRegion>(RI.getKey().getRegion());
+ if (baseR && baseR->isSubRegionOf(LazyR))
VisitBinding(RI.getData());
}
@@ -706,13 +706,14 @@ Store RegionStoreManager::InvalidateRegions(Store store,
const MemRegion * const *I,
const MemRegion * const *E,
const Expr *Ex, unsigned Count,
- InvalidatedSymbols *IS) {
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals) {
InvalidateRegionsWorker W(*this, StateMgr,
RegionStoreManager::GetRegionBindings(store),
Ex, Count, IS);
// Scan the bindings and generate the clusters.
- W.GenerateClusters();
+ W.GenerateClusters(invalidateGlobals);
// Add I .. E to the worklist.
for ( ; I != E; ++I)
@@ -721,7 +722,20 @@ Store RegionStoreManager::InvalidateRegions(Store store,
W.RunWorkList();
// Return the new bindings.
- return W.getRegionBindings().getRoot();
+ RegionBindings B = W.getRegionBindings();
+
+ if (invalidateGlobals) {
+ // Bind the non-static globals memory space to a new symbol that we will
+ // use to derive the bindings for all non-static globals.
+ const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion();
+ SVal V =
+ ValMgr.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, Ex,
+ /* symbol type, doesn't matter */ Ctx.IntTy,
+ Count);
+ B = Add(B, BindingKey::Make(GS, BindingKey::Default), V);
+ }
+
+ return B.getRoot();
}
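A toy model of the scheme above, with a plain std::map and ints standing in for region bindings and the conjured symbol (all names here are illustrative): invalidating globals installs one default binding on the globals space, and any global without a direct binding reads through to it.

    #include <cassert>
    #include <map>
    #include <string>

    struct ToyStore {
      std::map<std::string, int> direct; // per-region direct bindings
      int globalsDefault = 0;            // default binding on the globals space

      int read(const std::string &global) const {
        auto it = direct.find(global);
        return it != direct.end() ? it->second : globalsDefault;
      }
    };

    int main() {
      ToyStore s;
      s.globalsDefault = 42;   // "conjure" a fresh value after an opaque call
      s.direct["errno"] = 7;   // later writes shadow the default
      assert(s.read("some_global") == 42);
      assert(s.read("errno") == 7);
    }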
//===----------------------------------------------------------------------===//
@@ -731,82 +745,19 @@ Store RegionStoreManager::InvalidateRegions(Store store,
DefinedOrUnknownSVal RegionStoreManager::getSizeInElements(const GRState *state,
const MemRegion *R,
QualType EleTy) {
+ SVal Size = cast<SubRegion>(R)->getExtent(ValMgr);
+ SValuator &SVator = ValMgr.getSValuator();
+ const llvm::APSInt *SizeInt = SVator.getKnownValue(state, Size);
+ if (!SizeInt)
+ return UnknownVal();
- switch (R->getKind()) {
- case MemRegion::CXXThisRegionKind:
- assert(0 && "Cannot get size of 'this' region");
- case MemRegion::GenericMemSpaceRegionKind:
- case MemRegion::StackLocalsSpaceRegionKind:
- case MemRegion::StackArgumentsSpaceRegionKind:
- case MemRegion::HeapSpaceRegionKind:
- case MemRegion::GlobalsSpaceRegionKind:
- case MemRegion::UnknownSpaceRegionKind:
- assert(0 && "Cannot index into a MemSpace");
- return UnknownVal();
-
- case MemRegion::FunctionTextRegionKind:
- case MemRegion::BlockTextRegionKind:
- case MemRegion::BlockDataRegionKind:
- // Technically this can happen if people do funny things with casts.
- return UnknownVal();
-
- // Not yet handled.
- case MemRegion::AllocaRegionKind:
- case MemRegion::CompoundLiteralRegionKind:
- case MemRegion::ElementRegionKind:
- case MemRegion::FieldRegionKind:
- case MemRegion::ObjCIvarRegionKind:
- case MemRegion::CXXObjectRegionKind:
- return UnknownVal();
-
- case MemRegion::SymbolicRegionKind: {
- const SVal *Size = state->get<RegionExtents>(R);
- if (!Size)
- return UnknownVal();
- const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(Size);
- if (!CI)
- return UnknownVal();
-
- CharUnits RegionSize =
- CharUnits::fromQuantity(CI->getValue().getSExtValue());
- CharUnits EleSize = getContext().getTypeSizeInChars(EleTy);
- assert(RegionSize % EleSize == 0);
-
- return ValMgr.makeIntVal(RegionSize / EleSize, false);
- }
-
- case MemRegion::StringRegionKind: {
- const StringLiteral* Str = cast<StringRegion>(R)->getStringLiteral();
- // We intentionally made the size value signed because it participates in
- // operations with signed indices.
- return ValMgr.makeIntVal(Str->getByteLength()+1, false);
- }
-
- case MemRegion::VarRegionKind: {
- const VarRegion* VR = cast<VarRegion>(R);
- // Get the type of the variable.
- QualType T = VR->getDesugaredValueType(getContext());
-
- // FIXME: Handle variable-length arrays.
- if (isa<VariableArrayType>(T))
- return UnknownVal();
-
- if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(T)) {
- // return the size as signed integer.
- return ValMgr.makeIntVal(CAT->getSize(), false);
- }
-
- // Clients can reinterpret ordinary variables as arrays, possibly of
- // another type. The width is rounded down to ensure that an access is
- // entirely within bounds.
- CharUnits VarSize = getContext().getTypeSizeInChars(T);
- CharUnits EleSize = getContext().getTypeSizeInChars(EleTy);
- return ValMgr.makeIntVal(VarSize / EleSize, false);
- }
- }
+ CharUnits RegionSize = CharUnits::fromQuantity(SizeInt->getSExtValue());
+ CharUnits EleSize = getContext().getTypeSizeInChars(EleTy);
- assert(0 && "Unreachable");
- return UnknownVal();
+ // If the region's size is not an even multiple of the element size, the
+ // element count is rounded down so that an access is entirely in bounds.
+ // This is a signed value, since it's used in arithmetic with signed indices.
+ return ValMgr.makeIntVal(RegionSize / EleSize, false);
}
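A worked instance of the rounding above, assuming a typical ABI where int is 4 bytes (plain integers stand in for CharUnits):

    #include <cassert>

    int main() {
      long long regionSize = 4;    // an 'int' variable viewed as an array
      assert(regionSize / 1 == 4); // as char[]: exactly 4 elements
      assert(regionSize / 3 == 1); // as 3-byte elements: rounds down to 1
      assert(regionSize / 8 == 0); // as a larger type: 0 elements fit
    }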
//===----------------------------------------------------------------------===//
@@ -849,6 +800,19 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R,
if (!isa<loc::MemRegionVal>(L))
return UnknownVal();
+ // Special case for zero RHS.
+ if (R.isZeroConstant()) {
+ switch (Op) {
+ default:
+ // Handle it normally.
+ break;
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ // FIXME: does this need to be cast to match resultTy?
+ return L;
+ }
+ }
+
const MemRegion* MR = cast<loc::MemRegionVal>(L).getRegion();
const ElementRegion *ER = 0;
@@ -870,8 +834,7 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R,
}
case MemRegion::AllocaRegionKind: {
const AllocaRegion *AR = cast<AllocaRegion>(MR);
- QualType T = getContext().CharTy; // Create an ElementRegion of bytes.
- QualType EleTy = T->getAs<PointerType>()->getPointeeType();
+ QualType EleTy = getContext().CharTy; // Create an ElementRegion of bytes.
SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
ER = MRMgr.getElementRegion(EleTy, ZeroIdx, AR, getContext());
break;
@@ -907,7 +870,8 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R,
case MemRegion::StackLocalsSpaceRegionKind:
case MemRegion::StackArgumentsSpaceRegionKind:
case MemRegion::HeapSpaceRegionKind:
- case MemRegion::GlobalsSpaceRegionKind:
+ case MemRegion::NonStaticGlobalSpaceRegionKind:
+ case MemRegion::StaticGlobalSpaceRegionKind:
case MemRegion::UnknownSpaceRegionKind:
assert(0 && "Cannot perform pointer arithmetic on a MemSpace");
return UnknownVal();
@@ -946,7 +910,8 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R,
//===----------------------------------------------------------------------===//
Optional<SVal> RegionStoreManager::getDirectBinding(RegionBindings B,
- const MemRegion *R) {
+ const MemRegion *R) {
+
if (const SVal *V = Lookup(B, R, BindingKey::Direct))
return *V;
@@ -1009,8 +974,13 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
const MemRegion *MR = cast<loc::MemRegionVal>(L).getRegion();
- if (isa<AllocaRegion>(MR) || isa<SymbolicRegion>(MR))
+ if (isa<AllocaRegion>(MR) || isa<SymbolicRegion>(MR)) {
+ if (T.isNull()) {
+ const SymbolicRegion *SR = cast<SymbolicRegion>(MR);
+ T = SR->getSymbol()->getType(getContext());
+ }
MR = GetElementZeroRegion(MR, T);
+ }
if (isa<CodeTextRegion>(MR)) {
assert(0 && "Why load from a code text region?");
@@ -1172,27 +1142,33 @@ SVal RegionStoreManager::RetrieveElement(Store store,
}
}
- // Check if the immediate super region has a direct binding.
- if (const Optional<SVal> &V = getDirectBinding(B, superR)) {
- if (SymbolRef parentSym = V->getAsSymbol())
- return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
-
- if (V->isUnknownOrUndef())
- return *V;
-
- // Handle LazyCompoundVals for the immediate super region. Other cases
- // are handled in 'RetrieveFieldOrElementCommon'.
- if (const nonloc::LazyCompoundVal *LCV =
- dyn_cast<nonloc::LazyCompoundVal>(V)) {
-
- R = MRMgr.getElementRegionWithSuper(R, LCV->getRegion());
- return RetrieveElement(LCV->getStore(), R);
+ // Handle the case where we are indexing into a larger scalar object.
+ // For example, this handles:
+ // int x = ...
+ // char *y = &x;
+ // return *y;
+ // FIXME: This is a hack, and doesn't do anything really intelligent yet.
+ const RegionRawOffset &O = R->getAsRawOffset();
+ if (const TypedRegion *baseR = dyn_cast_or_null<TypedRegion>(O.getRegion())) {
+ QualType baseT = baseR->getValueType(Ctx);
+ if (baseT->isScalarType()) {
+ QualType elemT = R->getElementType();
+ if (elemT->isScalarType()) {
+ if (Ctx.getTypeSizeInChars(baseT) >= Ctx.getTypeSizeInChars(elemT)) {
+ if (const Optional<SVal> &V = getDirectBinding(B, superR)) {
+ if (SymbolRef parentSym = V->getAsSymbol())
+ return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (V->isUnknownOrUndef())
+ return *V;
+ // Other cases: give up. We are indexing into a larger object
+ // that has some value, but we don't know how to handle that yet.
+ return UnknownVal();
+ }
+ }
+ }
}
-
- // Other cases: give up.
- return UnknownVal();
}
-
return RetrieveFieldOrElementCommon(store, R, R->getElementType(), superR);
}
@@ -1208,6 +1184,28 @@ SVal RegionStoreManager::RetrieveField(Store store,
return RetrieveFieldOrElementCommon(store, R, Ty, R->getSuperRegion());
}
+Optional<SVal>
+RegionStoreManager::RetrieveDerivedDefaultValue(RegionBindings B,
+ const MemRegion *superR,
+ const TypedRegion *R,
+ QualType Ty) {
+
+ if (const Optional<SVal> &D = getDefaultBinding(B, superR)) {
+ if (SymbolRef parentSym = D->getAsSymbol())
+ return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (D->isZeroConstant())
+ return ValMgr.makeZeroVal(Ty);
+
+ if (D->isUnknownOrUndef())
+ return *D;
+
+ assert(0 && "Unknown default value");
+ }
+
+ return Optional<SVal>();
+}
+
SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store,
const TypedRegion *R,
QualType Ty,
@@ -1219,18 +1217,8 @@ SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store,
RegionBindings B = GetRegionBindings(store);
while (superR) {
- if (const Optional<SVal> &D = getDefaultBinding(B, superR)) {
- if (SymbolRef parentSym = D->getAsSymbol())
- return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
-
- if (D->isZeroConstant())
- return ValMgr.makeZeroVal(Ty);
-
- if (D->isUnknown())
- return *D;
-
- assert(0 && "Unknown default value");
- }
+ if (const Optional<SVal> &D = RetrieveDerivedDefaultValue(B, superR, R, Ty))
+ return *D;
// If our super region is a field or element itself, walk up the region
// hierarchy to see if there is a default value installed in an ancestor.
@@ -1311,7 +1299,7 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
return ValMgr.getRegionValueSymbolVal(R);
if (isa<GlobalsSpaceRegion>(MS)) {
- if (VD->isFileVarDecl()) {
+ if (isa<NonStaticGlobalSpaceRegion>(MS)) {
// Is 'VD' declared constant? If so, retrieve the constant value.
QualType CT = Ctx.getCanonicalType(T);
if (CT.isConstQualified()) {
@@ -1326,6 +1314,9 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
}
}
+ if (const Optional<SVal> &V = RetrieveDerivedDefaultValue(B, MS, R, CT))
+ return V.getValue();
+
return ValMgr.getRegionValueSymbolVal(R);
}
@@ -1449,6 +1440,7 @@ Store RegionStoreManager::BindCompoundLiteral(Store store,
V);
}
+
Store RegionStoreManager::setImplicitDefaultValue(Store store,
const MemRegion *R,
QualType T) {
@@ -1691,15 +1683,14 @@ class RemoveDeadBindingsWorker :
public ClusterAnalysis<RemoveDeadBindingsWorker> {
llvm::SmallVector<const SymbolicRegion*, 12> Postponed;
SymbolReaper &SymReaper;
- Stmt *Loc;
const StackFrameContext *CurrentLCtx;
-
+
public:
RemoveDeadBindingsWorker(RegionStoreManager &rm, GRStateManager &stateMgr,
RegionBindings b, SymbolReaper &symReaper,
- Stmt *loc, const StackFrameContext *LCtx)
+ const StackFrameContext *LCtx)
: ClusterAnalysis<RemoveDeadBindingsWorker>(rm, stateMgr, b),
- SymReaper(symReaper), Loc(loc), CurrentLCtx(LCtx) {}
+ SymReaper(symReaper), CurrentLCtx(LCtx) {}
// Called by ClusterAnalysis.
void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C);
@@ -1715,7 +1706,7 @@ void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
RegionCluster &C) {
if (const VarRegion *VR = dyn_cast<VarRegion>(baseR)) {
- if (SymReaper.isLive(Loc, VR))
+ if (SymReaper.isLive(VR))
AddToWorkList(baseR, C);
return;
@@ -1730,9 +1721,14 @@ void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
return;
}
+ if (isa<NonStaticGlobalSpaceRegion>(baseR)) {
+ AddToWorkList(baseR, C);
+ return;
+ }
+
// CXXThisRegion in the current or parent location context is live.
if (const CXXThisRegion *TR = dyn_cast<CXXThisRegion>(baseR)) {
- const StackArgumentsSpaceRegion *StackReg =
+ const StackArgumentsSpaceRegion *StackReg =
cast<StackArgumentsSpaceRegion>(TR->getSuperRegion());
const StackFrameContext *RegCtx = StackReg->getStackFrame();
if (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx))
@@ -1754,8 +1750,8 @@ void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
const MemRegion *LazyR = LCS->getRegion();
RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore());
for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
- const MemRegion *baseR = RI.getKey().getRegion();
- if (cast<SubRegion>(baseR)->isSubRegionOf(LazyR))
+ const SubRegion *baseR = dyn_cast<SubRegion>(RI.getKey().getRegion());
+ if (baseR && baseR->isSubRegionOf(LazyR))
VisitBinding(RI.getData());
}
return;
@@ -1822,13 +1818,13 @@ bool RemoveDeadBindingsWorker::UpdatePostponed() {
return changed;
}
-const GRState *RegionStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
+const GRState *RegionStoreManager::RemoveDeadBindings(GRState &state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
{
RegionBindings B = GetRegionBindings(state.getStore());
- RemoveDeadBindingsWorker W(*this, StateMgr, B, SymReaper, Loc, LCtx);
+ RemoveDeadBindingsWorker W(*this, StateMgr, B, SymReaper, LCtx);
W.GenerateClusters();
// Enqueue the region roots onto the worklist.
@@ -1862,13 +1858,6 @@ const GRState *RegionStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
}
state.setStore(B.getRoot());
const GRState *s = StateMgr.getPersistentState(state);
- // Remove the extents of dead symbolic regions.
- llvm::ImmutableMap<const MemRegion*,SVal> Extents = s->get<RegionExtents>();
- for (llvm::ImmutableMap<const MemRegion *, SVal>::iterator I=Extents.begin(),
- E = Extents.end(); I != E; ++I) {
- if (!W.isVisited(I->first))
- s = s->remove<RegionExtents>(I->first);
- }
return s;
}
@@ -1887,9 +1876,9 @@ GRState const *RegionStoreManager::EnterStackFrame(GRState const *state,
SVal ArgVal = state->getSVal(*AI);
store = Bind(store, ValMgr.makeLoc(MRMgr.getVarRegion(*PI,frame)),ArgVal);
}
- } else if (const CXXConstructExpr *CE =
+ } else if (const CXXConstructExpr *CE =
dyn_cast<CXXConstructExpr>(frame->getCallSite())) {
- CXXConstructExpr::const_arg_iterator AI = CE->arg_begin(),
+ CXXConstructExpr::const_arg_iterator AI = CE->arg_begin(),
AE = CE->arg_end();
// Copy the arg expression value to the arg variables.
diff --git a/contrib/llvm/tools/clang/lib/Checker/ReturnStackAddressChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/ReturnStackAddressChecker.cpp
deleted file mode 100644
index 35b1cde..0000000
--- a/contrib/llvm/tools/clang/lib/Checker/ReturnStackAddressChecker.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-//== ReturnStackAddressChecker.cpp ------------------------------*- C++ -*--==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines ReturnStackAddressChecker, which is a path-sensitive
-// check which looks for the addresses of stack variables being returned to
-// callers.
-//
-//===----------------------------------------------------------------------===//
-
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Basic/SourceManager.h"
-#include "llvm/ADT/SmallString.h"
-
-using namespace clang;
-
-namespace {
-class ReturnStackAddressChecker :
- public CheckerVisitor<ReturnStackAddressChecker> {
- BuiltinBug *BT;
-public:
- ReturnStackAddressChecker() : BT(0) {}
- static void *getTag();
- void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *RS);
-private:
- void EmitStackError(CheckerContext &C, const MemRegion *R, const Expr *RetE);
-};
-}
-
-void clang::RegisterReturnStackAddressChecker(GRExprEngine &Eng) {
- Eng.registerCheck(new ReturnStackAddressChecker());
-}
-
-void *ReturnStackAddressChecker::getTag() {
- static int x = 0; return &x;
-}
-
-void ReturnStackAddressChecker::EmitStackError(CheckerContext &C,
- const MemRegion *R,
- const Expr *RetE) {
- ExplodedNode *N = C.GenerateSink();
-
- if (!N)
- return;
-
- if (!BT)
- BT = new BuiltinBug("Return of address to stack-allocated memory");
-
- // Generate a report for this bug.
- llvm::SmallString<512> buf;
- llvm::raw_svector_ostream os(buf);
- SourceRange range;
-
- // Get the base region, stripping away fields and elements.
- R = R->getBaseRegion();
-
- // Check if the region is a compound literal.
- if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) {
- const CompoundLiteralExpr* CL = CR->getLiteralExpr();
- os << "Address of stack memory associated with a compound literal "
- "declared on line "
- << C.getSourceManager().getInstantiationLineNumber(CL->getLocStart())
- << " returned to caller";
- range = CL->getSourceRange();
- }
- else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(R)) {
- const Expr* ARE = AR->getExpr();
- SourceLocation L = ARE->getLocStart();
- range = ARE->getSourceRange();
- os << "Address of stack memory allocated by call to alloca() on line "
- << C.getSourceManager().getInstantiationLineNumber(L)
- << " returned to caller";
- }
- else if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
- const BlockDecl *BD = BR->getCodeRegion()->getDecl();
- SourceLocation L = BD->getLocStart();
- range = BD->getSourceRange();
- os << "Address of stack-allocated block declared on line "
- << C.getSourceManager().getInstantiationLineNumber(L)
- << " returned to caller";
- }
- else if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
- os << "Address of stack memory associated with local variable '"
- << VR->getString() << "' returned";
- range = VR->getDecl()->getSourceRange();
- }
- else {
- assert(false && "Invalid region in ReturnStackAddressChecker.");
- return;
- }
-
- RangedBugReport *report = new RangedBugReport(*BT, os.str(), N);
- report->addRange(RetE->getSourceRange());
- if (range.isValid())
- report->addRange(range);
-
- C.EmitReport(report);
-}
-
-void ReturnStackAddressChecker::PreVisitReturnStmt(CheckerContext &C,
- const ReturnStmt *RS) {
-
- const Expr *RetE = RS->getRetValue();
- if (!RetE)
- return;
-
- SVal V = C.getState()->getSVal(RetE);
- const MemRegion *R = V.getAsRegion();
-
- if (!R || !R->hasStackStorage())
- return;
-
- if (R->hasStackStorage()) {
- EmitStackError(C, R, RetE);
- return;
- }
-}
diff --git a/contrib/llvm/tools/clang/lib/Checker/SVals.cpp b/contrib/llvm/tools/clang/lib/Checker/SVals.cpp
index d756be7..7a99e86 100644
--- a/contrib/llvm/tools/clang/lib/Checker/SVals.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/SVals.cpp
@@ -200,15 +200,19 @@ bool SVal::isConstant() const {
return isa<nonloc::ConcreteInt>(this) || isa<loc::ConcreteInt>(this);
}
-bool SVal::isZeroConstant() const {
+bool SVal::isConstant(int I) const {
if (isa<loc::ConcreteInt>(*this))
- return cast<loc::ConcreteInt>(*this).getValue() == 0;
+ return cast<loc::ConcreteInt>(*this).getValue() == I;
else if (isa<nonloc::ConcreteInt>(*this))
- return cast<nonloc::ConcreteInt>(*this).getValue() == 0;
+ return cast<nonloc::ConcreteInt>(*this).getValue() == I;
else
return false;
}
+bool SVal::isZeroConstant() const {
+ return isConstant(0);
+}
+
//===----------------------------------------------------------------------===//
// Transfer function dispatch for Non-Locs.
diff --git a/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp b/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp
index 542fc1b..a7e15fc 100644
--- a/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp
@@ -29,15 +29,15 @@ SVal SValuator::EvalBinOp(const GRState *ST, BinaryOperator::Opcode Op,
if (isa<Loc>(L)) {
if (isa<Loc>(R))
- return EvalBinOpLL(Op, cast<Loc>(L), cast<Loc>(R), T);
+ return EvalBinOpLL(ST, Op, cast<Loc>(L), cast<Loc>(R), T);
return EvalBinOpLN(ST, Op, cast<Loc>(L), cast<NonLoc>(R), T);
}
if (isa<Loc>(R)) {
- // Support pointer arithmetic where the increment/decrement operand
- // is on the left and the pointer on the right.
- assert(Op == BinaryOperator::Add || Op == BinaryOperator::Sub);
+ // Support pointer arithmetic where the addend is on the left
+ // and the pointer on the right.
+ assert(Op == BinaryOperator::Add);
// Commute the operands.
return EvalBinOpLN(ST, Op, cast<Loc>(R), cast<NonLoc>(L), T);
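The commuted form relies on pointer addition being commutative in C; a quick standalone check (the array and pointer here are illustrative):

    #include <cassert>

    int main() {
      int a[8];
      int *p = a + 1;
      assert(3 + p == p + 3); // addend on the left commutes to the right
    }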
diff --git a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp b/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp
index 8c423a9..321381b 100644
--- a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp
@@ -35,12 +35,11 @@ bool SimpleConstraintManager::canReasonAbout(SVal X) const {
case BinaryOperator::Or:
case BinaryOperator::Xor:
return false;
- // We don't reason yet about arithmetic constraints on symbolic values.
+ // We don't reason yet about these arithmetic constraints on
+ // symbolic values.
case BinaryOperator::Mul:
case BinaryOperator::Div:
case BinaryOperator::Rem:
- case BinaryOperator::Add:
- case BinaryOperator::Sub:
case BinaryOperator::Shl:
case BinaryOperator::Shr:
return false;
@@ -90,12 +89,11 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
while (SubR) {
// FIXME: now we only find the first symbolic region.
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR)) {
+ const llvm::APSInt &zero = BasicVals.getZeroWithPtrWidth();
if (Assumption)
- return AssumeSymNE(state, SymR->getSymbol(),
- BasicVals.getZeroWithPtrWidth());
+ return AssumeSymNE(state, SymR->getSymbol(), zero, zero);
else
- return AssumeSymEQ(state, SymR->getSymbol(),
- BasicVals.getZeroWithPtrWidth());
+ return AssumeSymEQ(state, SymR->getSymbol(), zero, zero);
}
SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
}
@@ -121,11 +119,27 @@ const GRState *SimpleConstraintManager::Assume(const GRState *state,
return SU.ProcessAssume(state, cond, assumption);
}
+static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
+ // FIXME: This should probably be part of BinaryOperator, since this isn't
+ // the only place it's used. (This code was copied from SimpleSValuator.cpp.)
+ switch (op) {
+ default:
+ assert(false && "Invalid opcode.");
+ case BinaryOperator::LT: return BinaryOperator::GE;
+ case BinaryOperator::GT: return BinaryOperator::LE;
+ case BinaryOperator::LE: return BinaryOperator::GT;
+ case BinaryOperator::GE: return BinaryOperator::LT;
+ case BinaryOperator::EQ: return BinaryOperator::NE;
+ case BinaryOperator::NE: return BinaryOperator::EQ;
+ }
+}
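Negating a comparison this way matches the logical complement over all inputs; a small property check, illustrative and not part of the patch:

    #include <cassert>

    int main() {
      for (int x = -3; x <= 3; ++x)
        for (int y = -3; y <= 3; ++y) {
          assert((!(x <  y)) == (x >= y));
          assert((!(x >  y)) == (x <= y));
          assert((!(x == y)) == (x != y));
        }
    }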
+
const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
NonLoc Cond,
bool Assumption) {
- // We cannot reason about SymIntExpr and SymSymExpr.
+ // We cannot reason about SymSymExprs,
+ // and can only reason about some SymIntExprs.
if (!canReasonAbout(Cond)) {
// Just return the current state indicating that the path is feasible.
// This may be an over-approximation of what is possible.
@@ -144,30 +158,35 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
SymbolRef sym = SV.getSymbol();
QualType T = SymMgr.getType(sym);
const llvm::APSInt &zero = BasicVals.getValue(0, T);
-
- return Assumption ? AssumeSymNE(state, sym, zero)
- : AssumeSymEQ(state, sym, zero);
+ if (Assumption)
+ return AssumeSymNE(state, sym, zero, zero);
+ else
+ return AssumeSymEQ(state, sym, zero, zero);
}
case nonloc::SymExprValKind: {
nonloc::SymExprVal V = cast<nonloc::SymExprVal>(Cond);
- if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression())){
- // FIXME: This is a hack. It silently converts the RHS integer to be
- // of the same type as on the left side. This should be removed once
- // we support truncation/extension of symbolic values.
- GRStateManager &StateMgr = state->getStateManager();
- ASTContext &Ctx = StateMgr.getContext();
- QualType LHSType = SE->getLHS()->getType(Ctx);
- BasicValueFactory &BasicVals = StateMgr.getBasicVals();
- const llvm::APSInt &RHS = BasicVals.Convert(LHSType, SE->getRHS());
- SymIntExpr SENew(SE->getLHS(), SE->getOpcode(), RHS, SE->getType(Ctx));
-
- return AssumeSymInt(state, Assumption, &SENew);
+
+ // For now, we only handle expressions whose RHS is an integer.
+ // All other expressions are assumed to be feasible.
+ const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression());
+ if (!SE)
+ return state;
+
+ BinaryOperator::Opcode op = SE->getOpcode();
+ // Implicitly compare non-comparison expressions to 0.
+ if (!BinaryOperator::isComparisonOp(op)) {
+ QualType T = SymMgr.getType(SE);
+ const llvm::APSInt &zero = BasicVals.getValue(0, T);
+ op = (Assumption ? BinaryOperator::NE : BinaryOperator::EQ);
+ return AssumeSymRel(state, SE, op, zero);
}
- // For all other symbolic expressions, over-approximate and consider
- // the constraint feasible.
- return state;
+ // From here on out, op is the real comparison we'll be testing.
+ if (!Assumption)
+ op = NegateComparison(op);
+
+ return AssumeSymRel(state, SE->getLHS(), op, SE->getRHS());
}
case nonloc::ConcreteIntKind: {
@@ -182,43 +201,98 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
} // end switch
}
-const GRState *SimpleConstraintManager::AssumeSymInt(const GRState *state,
- bool Assumption,
- const SymIntExpr *SE) {
+const GRState *SimpleConstraintManager::AssumeSymRel(const GRState *state,
+ const SymExpr *LHS,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt& Int) {
+ assert(BinaryOperator::isComparisonOp(op) &&
+ "Non-comparison ops should be rewritten as comparisons to zero.");
+
+ // We only handle simple comparisons of the form "$sym == constant"
+ // or "($sym+constant1) == constant2".
+ // The adjustment is "constant1" in the above expression. It's used to
+ // "slide" the solution range around for modular arithmetic. For example,
+ // x < 4 has the solution [0, 3]. x+2 < 4 has the solution [0-2, 3-2], which
+ // in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX]. It's up to
+ // the subclasses of SimpleConstraintManager to handle the adjustment.
+ llvm::APSInt Adjustment;
+
+ // First check if the LHS is a simple symbol reference.
+ SymbolRef Sym = dyn_cast<SymbolData>(LHS);
+ if (Sym) {
+ Adjustment = 0;
+ } else {
+ // Next, see if it's a "($sym+constant1)" expression.
+ const SymIntExpr *SE = dyn_cast<SymIntExpr>(LHS);
+
+ // We don't handle "($sym1+$sym2)".
+ // Give up and assume the constraint is feasible.
+ if (!SE)
+ return state;
+
+ // We don't handle "(<expr>+constant1)".
+ // Give up and assume the constraint is feasible.
+ Sym = dyn_cast<SymbolData>(SE->getLHS());
+ if (!Sym)
+ return state;
+
+ // Get the constant out of the expression "($sym+constant1)".
+ switch (SE->getOpcode()) {
+ case BinaryOperator::Add:
+ Adjustment = SE->getRHS();
+ break;
+ case BinaryOperator::Sub:
+ Adjustment = -SE->getRHS();
+ break;
+ default:
+ // We don't handle non-additive operators.
+ // Give up and assume the constraint is feasible.
+ return state;
+ }
+ }
+
+ // FIXME: This next section is a hack. It silently converts the integers to
+ // be of the same type as the symbol, which is not always correct. Really the
+ // comparisons should be performed using the Int's type, then mapped back to
+ // the symbol's range of values.
+ GRStateManager &StateMgr = state->getStateManager();
+ ASTContext &Ctx = StateMgr.getContext();
+
+ QualType T = Sym->getType(Ctx);
+ assert(T->isIntegerType() || Loc::IsLocType(T));
+ unsigned bitwidth = Ctx.getTypeSize(T);
+ bool isSymUnsigned = T->isUnsignedIntegerType() || Loc::IsLocType(T);
+ // Convert the adjustment.
+ Adjustment.setIsUnsigned(isSymUnsigned);
+ Adjustment.extOrTrunc(bitwidth);
- // Here we assume that LHS is a symbol. This is consistent with the
- // rest of the constraint manager logic.
- SymbolRef Sym = cast<SymbolData>(SE->getLHS());
- const llvm::APSInt &Int = SE->getRHS();
+ // Convert the right-hand side integer.
+ llvm::APSInt ConvertedInt(Int, isSymUnsigned);
+ ConvertedInt.extOrTrunc(bitwidth);
- switch (SE->getOpcode()) {
+ switch (op) {
default:
// No logic yet for other operators. Assume the constraint is feasible.
return state;
case BinaryOperator::EQ:
- return Assumption ? AssumeSymEQ(state, Sym, Int)
- : AssumeSymNE(state, Sym, Int);
+ return AssumeSymEQ(state, Sym, ConvertedInt, Adjustment);
case BinaryOperator::NE:
- return Assumption ? AssumeSymNE(state, Sym, Int)
- : AssumeSymEQ(state, Sym, Int);
+ return AssumeSymNE(state, Sym, ConvertedInt, Adjustment);
+
case BinaryOperator::GT:
- return Assumption ? AssumeSymGT(state, Sym, Int)
- : AssumeSymLE(state, Sym, Int);
+ return AssumeSymGT(state, Sym, ConvertedInt, Adjustment);
case BinaryOperator::GE:
- return Assumption ? AssumeSymGE(state, Sym, Int)
- : AssumeSymLT(state, Sym, Int);
+ return AssumeSymGE(state, Sym, ConvertedInt, Adjustment);
case BinaryOperator::LT:
- return Assumption ? AssumeSymLT(state, Sym, Int)
- : AssumeSymGE(state, Sym, Int);
+ return AssumeSymLT(state, Sym, ConvertedInt, Adjustment);
case BinaryOperator::LE:
- return Assumption ? AssumeSymLE(state, Sym, Int)
- : AssumeSymGT(state, Sym, Int);
+ return AssumeSymLE(state, Sym, ConvertedInt, Adjustment);
} // end switch
}
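A worked instance of the adjustment logic, with plain 32-bit unsigned wraparound standing in for APSInt (the concrete numbers are illustrative): the constraint "$sym + 2 < 4" confines $sym to the wrapped range [UINT_MAX-1, 1].

    #include <cassert>

    int main() {
      unsigned Int = 4, Adj = 2, Min = 0;
      unsigned Lower = Min - Adj;      // wraps to UINT_MAX - 1
      unsigned Upper = Int - Adj - 1;  // 1
      assert(Lower == ~0u - 1 && Upper == 1u);

      // Every member of the wrapped range really satisfies x + 2 < 4:
      unsigned members[] = {~0u - 1, ~0u, 0u, 1u};
      for (unsigned x : members)
        assert(x + Adj < Int);
    }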
diff --git a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h b/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h
index 5f20e00..45057e6 100644
--- a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h
+++ b/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h
@@ -38,8 +38,10 @@ public:
const GRState *Assume(const GRState *state, NonLoc Cond, bool Assumption);
- const GRState *AssumeSymInt(const GRState *state, bool Assumption,
- const SymIntExpr *SE);
+ const GRState *AssumeSymRel(const GRState *state,
+ const SymExpr *LHS,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt& Int);
const GRState *AssumeInBound(const GRState *state, DefinedSVal Idx,
DefinedSVal UpperBound,
@@ -51,23 +53,31 @@ protected:
// Interface that subclasses must implement.
//===------------------------------------------------------------------===//
+ // Each of these is of the form "$sym+Adj <> V", where "<>" is the comparison
+ // operation for the method being invoked.
virtual const GRState *AssumeSymNE(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) = 0;
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
virtual const GRState *AssumeSymEQ(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) = 0;
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
virtual const GRState *AssumeSymLT(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) = 0;
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
virtual const GRState *AssumeSymGT(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) = 0;
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
virtual const GRState *AssumeSymLE(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) = 0;
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
virtual const GRState *AssumeSymGE(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) = 0;
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
//===------------------------------------------------------------------===//
// Internal implementation.
diff --git a/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp b/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp
index dd38a43..3bc4ee7 100644
--- a/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp
@@ -30,10 +30,17 @@ public:
virtual SVal EvalComplement(NonLoc val);
virtual SVal EvalBinOpNN(const GRState *state, BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs, QualType resultTy);
- virtual SVal EvalBinOpLL(BinaryOperator::Opcode op, Loc lhs, Loc rhs,
- QualType resultTy);
+ virtual SVal EvalBinOpLL(const GRState *state, BinaryOperator::Opcode op,
+ Loc lhs, Loc rhs, QualType resultTy);
virtual SVal EvalBinOpLN(const GRState *state, BinaryOperator::Opcode op,
Loc lhs, NonLoc rhs, QualType resultTy);
+
+ /// getKnownValue - Evaluates a given SVal. If the SVal has only one possible
+ /// (integer) value, that value is returned. Otherwise, returns NULL.
+ virtual const llvm::APSInt *getKnownValue(const GRState *state, SVal V);
+
+ SVal MakeSymIntVal(const SymExpr *LHS, BinaryOperator::Opcode op,
+ const llvm::APSInt &RHS, QualType resultTy);
};
} // end anonymous namespace
@@ -170,45 +177,93 @@ static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
}
}
-// Equality operators for Locs.
-// FIXME: All this logic will be revamped when we have MemRegion::getLocation()
-// implemented.
-
-static SVal EvalEquality(ValueManager &ValMgr, Loc lhs, Loc rhs, bool isEqual,
- QualType resultTy) {
+static BinaryOperator::Opcode ReverseComparison(BinaryOperator::Opcode op) {
+ switch (op) {
+ default:
+ assert(false && "Invalid opcode.");
+ case BinaryOperator::LT: return BinaryOperator::GT;
+ case BinaryOperator::GT: return BinaryOperator::LT;
+ case BinaryOperator::LE: return BinaryOperator::GE;
+ case BinaryOperator::GE: return BinaryOperator::LE;
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ return op;
+ }
+}
- switch (lhs.getSubKind()) {
- default:
- assert(false && "EQ/NE not implemented for this Loc.");
- return UnknownVal();
+SVal SimpleSValuator::MakeSymIntVal(const SymExpr *LHS,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt &RHS,
+ QualType resultTy) {
+ bool isIdempotent = false;
- case loc::ConcreteIntKind: {
- if (SymbolRef rSym = rhs.getAsSymbol())
- return ValMgr.makeNonLoc(rSym,
- isEqual ? BinaryOperator::EQ
- : BinaryOperator::NE,
- cast<loc::ConcreteInt>(lhs).getValue(),
- resultTy);
- break;
- }
- case loc::MemRegionKind: {
- if (SymbolRef lSym = lhs.getAsLocSymbol()) {
- if (isa<loc::ConcreteInt>(rhs)) {
- return ValMgr.makeNonLoc(lSym,
- isEqual ? BinaryOperator::EQ
- : BinaryOperator::NE,
- cast<loc::ConcreteInt>(rhs).getValue(),
- resultTy);
- }
- }
- break;
+ // Check for a few special cases with known reductions first.
+ switch (op) {
+ default:
+ // We can't reduce this case; just treat it normally.
+ break;
+ case BinaryOperator::Mul:
+ // a*0 and a*1
+ if (RHS == 0)
+ return ValMgr.makeIntVal(0, resultTy);
+ else if (RHS == 1)
+ isIdempotent = true;
+ break;
+ case BinaryOperator::Div:
+ // a/0 and a/1
+ if (RHS == 0)
+ // This is also handled elsewhere.
+ return UndefinedVal();
+ else if (RHS == 1)
+ isIdempotent = true;
+ break;
+ case BinaryOperator::Rem:
+ // a%0 and a%1
+ if (RHS == 0)
+ // This is also handled elsewhere.
+ return UndefinedVal();
+ else if (RHS == 1)
+ return ValMgr.makeIntVal(0, resultTy);
+ break;
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ case BinaryOperator::Xor:
+ // a+0, a-0, a<<0, a>>0, a^0
+ if (RHS == 0)
+ isIdempotent = true;
+ break;
+ case BinaryOperator::And:
+ // a&0 and a&(~0)
+ if (RHS == 0)
+ return ValMgr.makeIntVal(0, resultTy);
+ else if (RHS.isAllOnesValue())
+ isIdempotent = true;
+ break;
+ case BinaryOperator::Or:
+ // a|0 and a|(~0)
+ if (RHS == 0)
+ isIdempotent = true;
+ else if (RHS.isAllOnesValue()) {
+ BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
+ const llvm::APSInt &Result = BVF.Convert(resultTy, RHS);
+ return nonloc::ConcreteInt(Result);
}
+ break;
+ }
- case loc::GotoLabelKind:
- break;
+ // Idempotent ops (like a*1) can still change the type of an expression.
+ // Wrap the LHS up in a NonLoc again and let EvalCastNL do the dirty work.
+ if (isIdempotent) {
+ if (SymbolRef LHSSym = dyn_cast<SymbolData>(LHS))
+ return EvalCastNL(nonloc::SymbolVal(LHSSym), resultTy);
+ return EvalCastNL(nonloc::SymExprVal(LHS), resultTy);
}
- return ValMgr.makeTruthVal(isEqual ? lhs == rhs : lhs != rhs, resultTy);
+ // If we reach this point, the expression cannot be simplified.
+ // Make a SymExprVal for the entire thing.
+ return ValMgr.makeNonLoc(LHS, op, RHS, resultTy);
}
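The reductions above mirror ordinary integer identities; a standalone check of each case, illustrative only:

    #include <cassert>

    int main() {
      int a = 37;
      assert(a * 0 == 0 && a * 1 == a);        // Mul
      assert(a / 1 == a && a % 1 == 0);        // Div, Rem
      assert(a + 0 == a && a - 0 == a);        // Add, Sub
      assert((a << 0) == a && (a >> 0) == a);  // Shl, Shr
      assert((a ^ 0) == a);                    // Xor
      assert((a & 0) == 0 && (a & ~0) == a);   // And
      assert((a | 0) == a && (a | ~0) == ~0);  // Or
    }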
SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
@@ -228,6 +283,12 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
case BinaryOperator::GT:
case BinaryOperator::NE:
return ValMgr.makeTruthVal(false, resultTy);
+ case BinaryOperator::Xor:
+ case BinaryOperator::Sub:
+ return ValMgr.makeIntVal(0, resultTy);
+ case BinaryOperator::Or:
+ case BinaryOperator::And:
+ return EvalCastNL(lhs, resultTy);
}
while (1) {
@@ -238,7 +299,8 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc();
switch (rhs.getSubKind()) {
case nonloc::LocAsIntegerKind:
- return EvalBinOpLL(op, lhsL, cast<nonloc::LocAsInteger>(rhs).getLoc(),
+ return EvalBinOpLL(state, op, lhsL,
+ cast<nonloc::LocAsInteger>(rhs).getLoc(),
resultTy);
case nonloc::ConcreteIntKind: {
// Transform the integer into a location and compare.
@@ -246,7 +308,7 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
llvm::APSInt i = cast<nonloc::ConcreteInt>(rhs).getValue();
i.setIsUnsigned(true);
i.extOrTrunc(Ctx.getTypeSize(Ctx.VoidPtrTy));
- return EvalBinOpLL(op, lhsL, ValMgr.makeLoc(i), resultTy);
+ return EvalBinOpLL(state, op, lhsL, ValMgr.makeLoc(i), resultTy);
}
default:
switch (op) {
@@ -261,87 +323,136 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
}
}
case nonloc::SymExprValKind: {
- // Logical not?
- if (!(op == BinaryOperator::EQ && rhs.isZeroConstant()))
+ nonloc::SymExprVal *selhs = cast<nonloc::SymExprVal>(&lhs);
+
+ // Only handle LHS of the form "$sym op constant", at least for now.
+ const SymIntExpr *symIntExpr =
+ dyn_cast<SymIntExpr>(selhs->getSymbolicExpression());
+
+ if (!symIntExpr)
return UnknownVal();
- const SymExpr *symExpr =
- cast<nonloc::SymExprVal>(lhs).getSymbolicExpression();
+ // Is this a logical not? (!x is represented as x == 0.)
+ if (op == BinaryOperator::EQ && rhs.isZeroConstant()) {
+ // We know how to negate certain expressions. Simplify them here.
- // Only handle ($sym op constant) for now.
- if (const SymIntExpr *symIntExpr = dyn_cast<SymIntExpr>(symExpr)) {
BinaryOperator::Opcode opc = symIntExpr->getOpcode();
switch (opc) {
- case BinaryOperator::LAnd:
- case BinaryOperator::LOr:
- assert(false && "Logical operators handled by branching logic.");
- return UnknownVal();
- case BinaryOperator::Assign:
- case BinaryOperator::MulAssign:
- case BinaryOperator::DivAssign:
- case BinaryOperator::RemAssign:
- case BinaryOperator::AddAssign:
- case BinaryOperator::SubAssign:
- case BinaryOperator::ShlAssign:
- case BinaryOperator::ShrAssign:
- case BinaryOperator::AndAssign:
- case BinaryOperator::XorAssign:
- case BinaryOperator::OrAssign:
- case BinaryOperator::Comma:
- assert(false && "'=' and ',' operators handled by GRExprEngine.");
- return UnknownVal();
- case BinaryOperator::PtrMemD:
- case BinaryOperator::PtrMemI:
- assert(false && "Pointer arithmetic not handled here.");
- return UnknownVal();
- case BinaryOperator::Mul:
- case BinaryOperator::Div:
- case BinaryOperator::Rem:
- case BinaryOperator::Add:
- case BinaryOperator::Sub:
- case BinaryOperator::Shl:
- case BinaryOperator::Shr:
- case BinaryOperator::And:
- case BinaryOperator::Xor:
- case BinaryOperator::Or:
- // Not handled yet.
- return UnknownVal();
- case BinaryOperator::LT:
- case BinaryOperator::GT:
- case BinaryOperator::LE:
- case BinaryOperator::GE:
- case BinaryOperator::EQ:
- case BinaryOperator::NE:
- opc = NegateComparison(opc);
- assert(symIntExpr->getType(ValMgr.getContext()) == resultTy);
- return ValMgr.makeNonLoc(symIntExpr->getLHS(), opc,
- symIntExpr->getRHS(), resultTy);
+ default:
+ // We don't know how to negate this operation.
+ // Just handle it as if it were a normal comparison to 0.
+ break;
+ case BinaryOperator::LAnd:
+ case BinaryOperator::LOr:
+ assert(false && "Logical operators handled by branching logic.");
+ return UnknownVal();
+ case BinaryOperator::Assign:
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::RemAssign:
+ case BinaryOperator::AddAssign:
+ case BinaryOperator::SubAssign:
+ case BinaryOperator::ShlAssign:
+ case BinaryOperator::ShrAssign:
+ case BinaryOperator::AndAssign:
+ case BinaryOperator::XorAssign:
+ case BinaryOperator::OrAssign:
+ case BinaryOperator::Comma:
+ assert(false && "'=' and ',' operators handled by GRExprEngine.");
+ return UnknownVal();
+ case BinaryOperator::PtrMemD:
+ case BinaryOperator::PtrMemI:
+ assert(false && "Pointer arithmetic not handled here.");
+ return UnknownVal();
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ // Negate the comparison and make a value.
+ opc = NegateComparison(opc);
+ assert(symIntExpr->getType(ValMgr.getContext()) == resultTy);
+ return ValMgr.makeNonLoc(symIntExpr->getLHS(), opc,
+ symIntExpr->getRHS(), resultTy);
}
}
+
+ // For now, only handle expressions whose RHS is a constant.
+ const nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs);
+ if (!rhsInt)
+ return UnknownVal();
+
+ // If both the LHS and the current expression are additive,
+ // fold their constants.
+ if (BinaryOperator::isAdditiveOp(op)) {
+ BinaryOperator::Opcode lop = symIntExpr->getOpcode();
+ if (BinaryOperator::isAdditiveOp(lop)) {
+ BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
+
+ // resultTy may not be the best type to convert to, but it's
+ // probably the best choice in expressions with mixed type
+ // (such as x+1U+2LL). The rules for implicit conversions should
+ // choose a reasonable type to preserve the expression, and will
+ // at least match how the value is going to be used.
+ const llvm::APSInt &first =
+ BVF.Convert(resultTy, symIntExpr->getRHS());
+ const llvm::APSInt &second =
+ BVF.Convert(resultTy, rhsInt->getValue());
+
+ const llvm::APSInt *newRHS;
+ if (lop == op)
+ newRHS = BVF.EvaluateAPSInt(BinaryOperator::Add, first, second);
+ else
+ newRHS = BVF.EvaluateAPSInt(BinaryOperator::Sub, first, second);
+ return MakeSymIntVal(symIntExpr->getLHS(), lop, *newRHS, resultTy);
+ }
+ }
+
+ // Otherwise, make a SymExprVal out of the expression.
+ return MakeSymIntVal(symIntExpr, op, rhsInt->getValue(), resultTy);
}
case nonloc::ConcreteIntKind: {
+ const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs);
+
if (isa<nonloc::ConcreteInt>(rhs)) {
- const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs);
return lhsInt.evalBinOp(ValMgr, op, cast<nonloc::ConcreteInt>(rhs));
- }
- else {
+ } else {
+ const llvm::APSInt& lhsValue = lhsInt.getValue();
+
// Swap the left and right sides and flip the operator if doing so
// allows us to better reason about the expression (this is a form
// of expression canonicalization).
+ // While we're at it, catch some special cases for non-commutative ops.
NonLoc tmp = rhs;
rhs = lhs;
lhs = tmp;
switch (op) {
- case BinaryOperator::LT: op = BinaryOperator::GT; continue;
- case BinaryOperator::GT: op = BinaryOperator::LT; continue;
- case BinaryOperator::LE: op = BinaryOperator::GE; continue;
- case BinaryOperator::GE: op = BinaryOperator::LE; continue;
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ op = ReverseComparison(op);
+ continue;
case BinaryOperator::EQ:
case BinaryOperator::NE:
case BinaryOperator::Add:
case BinaryOperator::Mul:
+ case BinaryOperator::And:
+ case BinaryOperator::Xor:
+ case BinaryOperator::Or:
continue;
+ case BinaryOperator::Shr:
+ if (lhsValue.isAllOnesValue() && lhsValue.isSigned())
+ // At this point lhs and rhs have been swapped.
+ return rhs;
+ // FALL-THROUGH
+ case BinaryOperator::Shl:
+ if (lhsValue == 0)
+ // At this point lhs and rhs have been swapped.
+ return rhs;
+ return UnknownVal();
default:
return UnknownVal();
}
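The additive folding in this hunk is just associativity of addition over the constants; a quick standalone check with plain ints standing in for APSInts:

    #include <cassert>

    int main() {
      for (int x = -5; x <= 5; ++x) {
        assert((x + 1) + 2 == x + (1 + 2)); // same op: add the constants
        assert((x + 1) - 2 == x + (1 - 2)); // mixed ops: subtract them
      }
    }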
@@ -377,9 +488,9 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
}
if (isa<nonloc::ConcreteInt>(rhs)) {
- return ValMgr.makeNonLoc(slhs->getSymbol(), op,
- cast<nonloc::ConcreteInt>(rhs).getValue(),
- resultTy);
+ return MakeSymIntVal(slhs->getSymbol(), op,
+ cast<nonloc::ConcreteInt>(rhs).getValue(),
+ resultTy);
}
return UnknownVal();
@@ -388,21 +499,301 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
}
}
-SVal SimpleSValuator::EvalBinOpLL(BinaryOperator::Opcode op, Loc lhs, Loc rhs,
+// FIXME: all this logic will change if/when we have MemRegion::getLocation().
+SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
+ BinaryOperator::Opcode op,
+ Loc lhs, Loc rhs,
QualType resultTy) {
- switch (op) {
+ // Only comparisons and subtractions are valid operations on two pointers.
+ // See [C99 6.5.5 through 6.5.14] or [C++0x 5.6 through 5.15].
+ // However, if a pointer is cast to an integer, EvalBinOpNN may end up
+ // calling this function with another operation (PR7527). We don't attempt to
+ // model this for now, but it could be useful, particularly when the
+ // "location" is actually an integer value that's been passed through a void*.
+ if (!(BinaryOperator::isComparisonOp(op) || op == BinaryOperator::Sub))
+ return UnknownVal();
+
+ // Special cases for when both sides are identical.
+ if (lhs == rhs) {
+ switch (op) {
default:
+ assert(false && "Unimplemented operation for two identical values");
return UnknownVal();
+ case BinaryOperator::Sub:
+ return ValMgr.makeZeroVal(resultTy);
case BinaryOperator::EQ:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ return ValMgr.makeTruthVal(true, resultTy);
case BinaryOperator::NE:
- return EvalEquality(ValMgr, lhs, rhs, op == BinaryOperator::EQ, resultTy);
case BinaryOperator::LT:
case BinaryOperator::GT:
- // FIXME: Generalize. For now, just handle the trivial case where
- // the two locations are identical.
- if (lhs == rhs)
+ return ValMgr.makeTruthVal(false, resultTy);
+ }
+ }
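These identical-operand results agree with C pointer semantics; a minimal standalone check:

    #include <cassert>

    int main() {
      int obj;
      int *p = &obj, *q = &obj;  // two handles on the same location
      assert(p - q == 0);
      assert(p == q && p <= q && p >= q);
      assert(!(p != q) && !(p < q) && !(p > q));
    }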
+
+ switch (lhs.getSubKind()) {
+ default:
+ assert(false && "Ordering not implemented for this Loc.");
+ return UnknownVal();
+
+ case loc::GotoLabelKind:
+ // The only thing we know about labels is that they're non-null.
+ if (rhs.isZeroConstant()) {
+ switch (op) {
+ default:
+ break;
+ case BinaryOperator::Sub:
+ return EvalCastL(lhs, resultTy);
+ case BinaryOperator::EQ:
+ case BinaryOperator::LE:
+ case BinaryOperator::LT:
return ValMgr.makeTruthVal(false, resultTy);
+ case BinaryOperator::NE:
+ case BinaryOperator::GT:
+ case BinaryOperator::GE:
+ return ValMgr.makeTruthVal(true, resultTy);
+ }
+ }
+ // There may be two labels for the same location, and a function region may
+ // have the same address as a label at the start of the function (depending
+ // on the ABI).
+ // FIXME: we can probably do a comparison against other MemRegions, though.
+ // FIXME: is there a way to tell if two labels refer to the same location?
+ return UnknownVal();
+
+ case loc::ConcreteIntKind: {
+ // If one of the operands is a symbol and the other is a constant,
+ // build an expression for use by the constraint manager.
+ if (SymbolRef rSym = rhs.getAsLocSymbol()) {
+ // We can only build expressions with symbols on the left,
+ // so we need a reversible operator.
+ if (!BinaryOperator::isComparisonOp(op))
+ return UnknownVal();
+
+ const llvm::APSInt &lVal = cast<loc::ConcreteInt>(lhs).getValue();
+ return ValMgr.makeNonLoc(rSym, ReverseComparison(op), lVal, resultTy);
+ }
+
+ // If both operands are constants, just perform the operation.
+ if (loc::ConcreteInt *rInt = dyn_cast<loc::ConcreteInt>(&rhs)) {
+ BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
+ SVal ResultVal = cast<loc::ConcreteInt>(lhs).EvalBinOp(BVF, op, *rInt);
+ if (Loc *Result = dyn_cast<Loc>(&ResultVal))
+ return EvalCastL(*Result, resultTy);
+ else
+ return UnknownVal();
+ }
+
+ // Special case comparisons against NULL.
+ // This must come after the check for a symbolic RHS, which is used to
+ // build constraints. The address of any non-symbolic region is guaranteed
+ // to be non-NULL, as is any label.
+ assert(isa<loc::MemRegionVal>(rhs) || isa<loc::GotoLabel>(rhs));
+ if (lhs.isZeroConstant()) {
+ switch (op) {
+ default:
+ break;
+ case BinaryOperator::EQ:
+ case BinaryOperator::GT:
+ case BinaryOperator::GE:
+ return ValMgr.makeTruthVal(false, resultTy);
+ case BinaryOperator::NE:
+ case BinaryOperator::LT:
+ case BinaryOperator::LE:
+ return ValMgr.makeTruthVal(true, resultTy);
+ }
+ }
+
+ // Comparing an arbitrary integer to a region or label address is
+ // completely unknowable.
+ return UnknownVal();
+ }
+ case loc::MemRegionKind: {
+ if (loc::ConcreteInt *rInt = dyn_cast<loc::ConcreteInt>(&rhs)) {
+ // If one of the operands is a symbol and the other is a constant,
+ // build an expression for use by the constraint manager.
+ if (SymbolRef lSym = lhs.getAsLocSymbol())
+ return MakeSymIntVal(lSym, op, rInt->getValue(), resultTy);
+
+ // Special case comparisons to NULL.
+      // This check must come after the test for a symbolic LHS, which is
+      // used to build constraints. The address of any non-symbolic region
+      // is guaranteed to be non-NULL.
+ if (rInt->isZeroConstant()) {
+ switch (op) {
+ default:
+ break;
+ case BinaryOperator::Sub:
+ return EvalCastL(lhs, resultTy);
+ case BinaryOperator::EQ:
+ case BinaryOperator::LT:
+ case BinaryOperator::LE:
+ return ValMgr.makeTruthVal(false, resultTy);
+ case BinaryOperator::NE:
+ case BinaryOperator::GT:
+ case BinaryOperator::GE:
+ return ValMgr.makeTruthVal(true, resultTy);
+ }
+ }
+
+ // Comparing a region to an arbitrary integer is completely unknowable.
+ return UnknownVal();
+ }
+
+ // Get both values as regions, if possible.
+ const MemRegion *LeftMR = lhs.getAsRegion();
+ assert(LeftMR && "MemRegionKind SVal doesn't have a region!");
+
+ const MemRegion *RightMR = rhs.getAsRegion();
+ if (!RightMR)
+ // The RHS is probably a label, which in theory could address a region.
+ // FIXME: we can probably make a more useful statement about non-code
+ // regions, though.
+ return UnknownVal();
+
+ // If both values wrap regions, see if they're from different base regions.
+ const MemRegion *LeftBase = LeftMR->getBaseRegion();
+ const MemRegion *RightBase = RightMR->getBaseRegion();
+ if (LeftBase != RightBase &&
+ !isa<SymbolicRegion>(LeftBase) && !isa<SymbolicRegion>(RightBase)) {
+ switch (op) {
+ default:
+ return UnknownVal();
+ case BinaryOperator::EQ:
+ return ValMgr.makeTruthVal(false, resultTy);
+ case BinaryOperator::NE:
+ return ValMgr.makeTruthVal(true, resultTy);
+ }
+ }
+
+ // The two regions are from the same base region. See if they're both a
+ // type of region we know how to compare.
+
+ // FIXME: If/when there is a getAsRawOffset() for FieldRegions, this
+ // ElementRegion path and the FieldRegion path below should be unified.
+ if (const ElementRegion *LeftER = dyn_cast<ElementRegion>(LeftMR)) {
+ // First see if the right region is also an ElementRegion.
+ const ElementRegion *RightER = dyn_cast<ElementRegion>(RightMR);
+ if (!RightER)
+ return UnknownVal();
+
+ // Next, see if the two ERs have the same super-region and matching types.
+ // FIXME: This should do something useful even if the types don't match,
+ // though if both indexes are constant the RegionRawOffset path will
+ // give the correct answer.
+ if (LeftER->getSuperRegion() == RightER->getSuperRegion() &&
+ LeftER->getElementType() == RightER->getElementType()) {
+ // Get the left index and cast it to the correct type.
+ // If the index is unknown or undefined, bail out here.
+ SVal LeftIndexVal = LeftER->getIndex();
+ NonLoc *LeftIndex = dyn_cast<NonLoc>(&LeftIndexVal);
+ if (!LeftIndex)
+ return UnknownVal();
+ LeftIndexVal = EvalCastNL(*LeftIndex, resultTy);
+ LeftIndex = dyn_cast<NonLoc>(&LeftIndexVal);
+ if (!LeftIndex)
+ return UnknownVal();
+
+ // Do the same for the right index.
+ SVal RightIndexVal = RightER->getIndex();
+ NonLoc *RightIndex = dyn_cast<NonLoc>(&RightIndexVal);
+ if (!RightIndex)
+ return UnknownVal();
+ RightIndexVal = EvalCastNL(*RightIndex, resultTy);
+ RightIndex = dyn_cast<NonLoc>(&RightIndexVal);
+ if (!RightIndex)
+ return UnknownVal();
+
+ // Actually perform the operation.
+ // EvalBinOpNN expects the two indexes to already be the right type.
+ return EvalBinOpNN(state, op, *LeftIndex, *RightIndex, resultTy);
+ }
+
+ // If the element indexes aren't comparable, see if the raw offsets are.
+ RegionRawOffset LeftOffset = LeftER->getAsRawOffset();
+ RegionRawOffset RightOffset = RightER->getAsRawOffset();
+
+ if (LeftOffset.getRegion() != NULL &&
+ LeftOffset.getRegion() == RightOffset.getRegion()) {
+ int64_t left = LeftOffset.getByteOffset();
+ int64_t right = RightOffset.getByteOffset();
+
+ switch (op) {
+ default:
+ return UnknownVal();
+ case BinaryOperator::LT:
+ return ValMgr.makeTruthVal(left < right, resultTy);
+ case BinaryOperator::GT:
+ return ValMgr.makeTruthVal(left > right, resultTy);
+ case BinaryOperator::LE:
+ return ValMgr.makeTruthVal(left <= right, resultTy);
+ case BinaryOperator::GE:
+ return ValMgr.makeTruthVal(left >= right, resultTy);
+ case BinaryOperator::EQ:
+ return ValMgr.makeTruthVal(left == right, resultTy);
+ case BinaryOperator::NE:
+ return ValMgr.makeTruthVal(left != right, resultTy);
+ }
+ }
+
+ // If we get here, we have no way of comparing the ElementRegions.
return UnknownVal();
+ }
+
+ // See if both regions are fields of the same structure.
+ // FIXME: This doesn't handle nesting, inheritance, or Objective-C ivars.
+ if (const FieldRegion *LeftFR = dyn_cast<FieldRegion>(LeftMR)) {
+ // Only comparisons are meaningful here!
+ if (!BinaryOperator::isComparisonOp(op))
+ return UnknownVal();
+
+ // First see if the right region is also a FieldRegion.
+ const FieldRegion *RightFR = dyn_cast<FieldRegion>(RightMR);
+ if (!RightFR)
+ return UnknownVal();
+
+ // Next, see if the two FRs have the same super-region.
+ // FIXME: This doesn't handle casts yet, and simply stripping the casts
+ // doesn't help.
+ if (LeftFR->getSuperRegion() != RightFR->getSuperRegion())
+ return UnknownVal();
+
+ const FieldDecl *LeftFD = LeftFR->getDecl();
+ const FieldDecl *RightFD = RightFR->getDecl();
+ const RecordDecl *RD = LeftFD->getParent();
+
+ // Make sure the two FRs are from the same kind of record. Just in case!
+ // FIXME: This is probably where inheritance would be a problem.
+ if (RD != RightFD->getParent())
+ return UnknownVal();
+
+ // We know for sure that the two fields are not the same, since that
+ // would have given us the same SVal.
+ if (op == BinaryOperator::EQ)
+ return ValMgr.makeTruthVal(false, resultTy);
+ if (op == BinaryOperator::NE)
+ return ValMgr.makeTruthVal(true, resultTy);
+
+ // Iterate through the fields and see which one comes first.
+ // [C99 6.7.2.1.13] "Within a structure object, the non-bit-field
+ // members and the units in which bit-fields reside have addresses that
+ // increase in the order in which they are declared."
+ bool leftFirst = (op == BinaryOperator::LT || op == BinaryOperator::LE);
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I!=E; ++I) {
+ if (*I == LeftFD)
+ return ValMgr.makeTruthVal(leftFirst, resultTy);
+ if (*I == RightFD)
+ return ValMgr.makeTruthVal(!leftFirst, resultTy);
+ }
+
+ assert(false && "Fields not found in parent record's definition");
+ }
+
+ // If we get here, we have no way of comparing the regions.
+ return UnknownVal();
+ }
}
}
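
For orientation, a minimal sketch (illustrative, not part of the patch) of the
source patterns the new EvalBinOpLL logic can now fold; all names below are
hypothetical:

    // C++ sketch of pointer comparisons the analyzer resolves symbolically.
    struct S { int a; int b; };
    int f(int *p, int arr[10], S *s) {
      if (p == p) return 0;             // identical Locs: EQ/LE/GE fold to true
      if (&arr[2] < &arr[7]) return 1;  // same base region: indexes compared
                                        // via EvalBinOpNN or raw byte offsets
      if (&s->a < &s->b) return 2;      // FieldRegions: declaration order per
                                        // C99 6.7.2.1p13 decides the result
      return p - p;                     // subtraction of identical Locs --> 0
    }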
@@ -414,7 +805,7 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
// triggered, but transfer functions like those for OSCompareAndSwapBarrier32
// can generate comparisons that trigger this code.
// FIXME: Are all locations guaranteed to have pointer width?
- if (BinaryOperator::isEqualityOp(op)) {
+ if (BinaryOperator::isComparisonOp(op)) {
if (nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs)) {
const llvm::APSInt *x = &rhsInt->getValue();
ASTContext &ctx = ValMgr.getContext();
@@ -423,7 +814,7 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
if (x->isSigned())
x = &ValMgr.getBasicValueFactory().getValue(*x, true);
- return EvalBinOpLL(op, lhs, loc::ConcreteInt(*x), resultTy);
+ return EvalBinOpLL(state, op, lhs, loc::ConcreteInt(*x), resultTy);
}
}
}
@@ -432,3 +823,21 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
return state->getStateManager().getStoreManager().EvalBinOp(op, lhs,
rhs, resultTy);
}
+
+const llvm::APSInt *SimpleSValuator::getKnownValue(const GRState *state,
+ SVal V) {
+ if (V.isUnknownOrUndef())
+ return NULL;
+
+ if (loc::ConcreteInt* X = dyn_cast<loc::ConcreteInt>(&V))
+ return &X->getValue();
+
+ if (nonloc::ConcreteInt* X = dyn_cast<nonloc::ConcreteInt>(&V))
+ return &X->getValue();
+
+ if (SymbolRef Sym = V.getAsSymbol())
+ return state->getSymVal(Sym);
+
+ // FIXME: Add support for SymExprs.
+ return NULL;
+}
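
A usage sketch for getKnownValue (illustrative; 'SVator' and 'state' stand for
a SimpleSValuator instance and a GRState obtained elsewhere):

    // Recovers a concrete integer when the state pins 'val' to one value,
    // e.g. after state->Assume(...) has constrained its symbol.
    if (const llvm::APSInt *KV = SVator.getKnownValue(state, val))
      llvm::errs() << "known value: " << *KV << "\n";
    // NULL is returned for unknown/undef values and unconstrained symbols.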
diff --git a/contrib/llvm/tools/clang/lib/Checker/StackAddrLeakChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/StackAddrLeakChecker.cpp
new file mode 100644
index 0000000..f4a9db6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/StackAddrLeakChecker.cpp
@@ -0,0 +1,204 @@
+//=== StackAddrLeakChecker.cpp ------------------------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the stack address leak checker, which checks whether an
+// invalid stack address is stored into a global or heap location. See CERT
+// DCL30-C.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+namespace {
+class StackAddrLeakChecker : public CheckerVisitor<StackAddrLeakChecker> {
+ BuiltinBug *BT_stackleak;
+ BuiltinBug *BT_returnstack;
+
+public:
+ StackAddrLeakChecker() : BT_stackleak(0), BT_returnstack(0) {}
+ static void *getTag() {
+ static int x;
+ return &x;
+ }
+ void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *RS);
+ void EvalEndPath(GREndPathNodeBuilder &B, void *tag, GRExprEngine &Eng);
+private:
+ void EmitStackError(CheckerContext &C, const MemRegion *R, const Expr *RetE);
+ SourceRange GenName(llvm::raw_ostream &os, const MemRegion *R,
+ SourceManager &SM);
+};
+}
+
+void clang::RegisterStackAddrLeakChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new StackAddrLeakChecker());
+}
+
+SourceRange StackAddrLeakChecker::GenName(llvm::raw_ostream &os,
+ const MemRegion *R,
+ SourceManager &SM) {
+ // Get the base region, stripping away fields and elements.
+ R = R->getBaseRegion();
+ SourceRange range;
+ os << "Address of ";
+
+ // Check if the region is a compound literal.
+ if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) {
+ const CompoundLiteralExpr* CL = CR->getLiteralExpr();
+ os << "stack memory associated with a compound literal "
+ "declared on line "
+ << SM.getInstantiationLineNumber(CL->getLocStart())
+ << " returned to caller";
+ range = CL->getSourceRange();
+ }
+ else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(R)) {
+ const Expr* ARE = AR->getExpr();
+ SourceLocation L = ARE->getLocStart();
+ range = ARE->getSourceRange();
+ os << "stack memory allocated by call to alloca() on line "
+ << SM.getInstantiationLineNumber(L);
+ }
+ else if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
+ const BlockDecl *BD = BR->getCodeRegion()->getDecl();
+ SourceLocation L = BD->getLocStart();
+ range = BD->getSourceRange();
+ os << "stack-allocated block declared on line "
+ << SM.getInstantiationLineNumber(L);
+ }
+ else if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << "stack memory associated with local variable '"
+ << VR->getString() << '\'';
+ range = VR->getDecl()->getSourceRange();
+ }
+ else {
+ assert(false && "Invalid region in ReturnStackAddressChecker.");
+ }
+
+ return range;
+}
+
+void StackAddrLeakChecker::EmitStackError(CheckerContext &C, const MemRegion *R,
+ const Expr *RetE) {
+ ExplodedNode *N = C.GenerateSink();
+
+ if (!N)
+ return;
+
+ if (!BT_returnstack)
+ BT_returnstack=new BuiltinBug("Return of address to stack-allocated memory");
+
+ // Generate a report for this bug.
+ llvm::SmallString<512> buf;
+ llvm::raw_svector_ostream os(buf);
+ SourceRange range = GenName(os, R, C.getSourceManager());
+ os << " returned to caller";
+ RangedBugReport *report = new RangedBugReport(*BT_returnstack, os.str(), N);
+ report->addRange(RetE->getSourceRange());
+ if (range.isValid())
+ report->addRange(range);
+
+ C.EmitReport(report);
+}
+
+void StackAddrLeakChecker::PreVisitReturnStmt(CheckerContext &C,
+ const ReturnStmt *RS) {
+
+ const Expr *RetE = RS->getRetValue();
+ if (!RetE)
+ return;
+
+ SVal V = C.getState()->getSVal(RetE);
+ const MemRegion *R = V.getAsRegion();
+
+  if (!R || !R->hasStackStorage())
+    return;
+
+  EmitStackError(C, R, RetE);
+}
+
+void StackAddrLeakChecker::EvalEndPath(GREndPathNodeBuilder &B, void *tag,
+ GRExprEngine &Eng) {
+ SaveAndRestore<bool> OldHasGen(B.HasGeneratedNode);
+ const GRState *state = B.getState();
+
+  // Iterate over all bindings to global variables and see if any of them
+  // refers to a memory region in the stack space.
+ class CallBack : public StoreManager::BindingsHandler {
+ private:
+ const StackFrameContext *CurSFC;
+ public:
+ llvm::SmallVector<std::pair<const MemRegion*, const MemRegion*>, 10> V;
+
+ CallBack(const LocationContext *LCtx)
+ : CurSFC(LCtx->getCurrentStackFrame()) {}
+
+ bool HandleBinding(StoreManager &SMgr, Store store,
+ const MemRegion *region, SVal val) {
+
+ if (!isa<GlobalsSpaceRegion>(region->getMemorySpace()))
+ return true;
+
+ const MemRegion *vR = val.getAsRegion();
+ if (!vR)
+ return true;
+
+ if (const StackSpaceRegion *SSR =
+ dyn_cast<StackSpaceRegion>(vR->getMemorySpace())) {
+ // If the global variable holds a location in the current stack frame,
+ // record the binding to emit a warning.
+ if (SSR->getStackFrame() == CurSFC)
+ V.push_back(std::make_pair(region, vR));
+ }
+
+ return true;
+ }
+ };
+
+ CallBack cb(B.getPredecessor()->getLocationContext());
+ state->getStateManager().getStoreManager().iterBindings(state->getStore(),cb);
+
+ if (cb.V.empty())
+ return;
+
+ // Generate an error node.
+ ExplodedNode *N = B.generateNode(state, tag, B.getPredecessor());
+ if (!N)
+ return;
+
+ if (!BT_stackleak)
+ BT_stackleak =
+ new BuiltinBug("Stack address stored into global variable",
+ "Stack address was saved into a global variable. "
+ "This is dangerous because the address will become "
+ "invalid after returning from the function");
+
+ for (unsigned i = 0, e = cb.V.size(); i != e; ++i) {
+ // Generate a report for this bug.
+ llvm::SmallString<512> buf;
+ llvm::raw_svector_ostream os(buf);
+ SourceRange range = GenName(os, cb.V[i].second,
+ Eng.getContext().getSourceManager());
+ os << " is still referred to by the global variable '";
+ const VarRegion *VR = cast<VarRegion>(cb.V[i].first->getBaseRegion());
+ os << VR->getDecl()->getNameAsString()
+ << "' upon returning to the caller. This will be a dangling reference";
+ RangedBugReport *report = new RangedBugReport(*BT_stackleak, os.str(), N);
+ if (range.isValid())
+ report->addRange(range);
+
+ Eng.getBugReporter().EmitReport(report);
+ }
+}
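
A hedged sketch (not from the patch) of the code patterns this checker flags:

    int *global_p;
    int *leak_examples(void) {
      int local = 42;
      global_p = &local;  // flagged by EvalEndPath: a stack address stored in
                          // a global outlives the frame (CERT DCL30-C)
      return &local;      // flagged by PreVisitReturnStmt: stack address
                          // returned to the caller
    }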
diff --git a/contrib/llvm/tools/clang/lib/Checker/Store.cpp b/contrib/llvm/tools/clang/lib/Checker/Store.cpp
index c12065b..b128331 100644
--- a/contrib/llvm/tools/clang/lib/Checker/Store.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/Store.cpp
@@ -91,7 +91,8 @@ const MemRegion *StoreManager::CastRegion(const MemRegion *R, QualType CastToTy)
case MemRegion::StackArgumentsSpaceRegionKind:
case MemRegion::HeapSpaceRegionKind:
case MemRegion::UnknownSpaceRegionKind:
- case MemRegion::GlobalsSpaceRegionKind: {
+ case MemRegion::NonStaticGlobalSpaceRegionKind:
+ case MemRegion::StaticGlobalSpaceRegionKind: {
assert(0 && "Invalid region cast");
break;
}
@@ -232,17 +233,6 @@ SVal StoreManager::CastRetrievedVal(SVal V, const TypedRegion *R,
return V;
}
-Store StoreManager::InvalidateRegions(Store store,
- const MemRegion * const *I,
- const MemRegion * const *End,
- const Expr *E, unsigned Count,
- InvalidatedSymbols *IS) {
- for ( ; I != End ; ++I)
- store = InvalidateRegion(store, *I, E, Count, IS);
-
- return store;
-}
-
SVal StoreManager::getLValueFieldOrIvar(const Decl* D, SVal Base) {
if (Base.isUnknownOrUndef())
return Base;
diff --git a/contrib/llvm/tools/clang/lib/Checker/StreamChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/StreamChecker.cpp
new file mode 100644
index 0000000..c527ca2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Checker/StreamChecker.cpp
@@ -0,0 +1,287 @@
+//===-- StreamChecker.cpp -----------------------------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines checkers that model and check stream handling functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineExperimentalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "clang/Checker/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+using namespace clang;
+
+namespace {
+
+class StreamChecker : public CheckerVisitor<StreamChecker> {
+ IdentifierInfo *II_fopen, *II_fread, *II_fwrite,
+ *II_fseek, *II_ftell, *II_rewind, *II_fgetpos, *II_fsetpos,
+ *II_clearerr, *II_feof, *II_ferror, *II_fileno;
+ BuiltinBug *BT_nullfp, *BT_illegalwhence;
+
+public:
+ StreamChecker()
+ : II_fopen(0), II_fread(0), II_fwrite(0),
+ II_fseek(0), II_ftell(0), II_rewind(0), II_fgetpos(0), II_fsetpos(0),
+ II_clearerr(0), II_feof(0), II_ferror(0), II_fileno(0),
+ BT_nullfp(0), BT_illegalwhence(0) {}
+
+ static void *getTag() {
+ static int x;
+ return &x;
+ }
+
+ virtual bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
+
+private:
+ void Fopen(CheckerContext &C, const CallExpr *CE);
+ void Fread(CheckerContext &C, const CallExpr *CE);
+ void Fwrite(CheckerContext &C, const CallExpr *CE);
+ void Fseek(CheckerContext &C, const CallExpr *CE);
+ void Ftell(CheckerContext &C, const CallExpr *CE);
+ void Rewind(CheckerContext &C, const CallExpr *CE);
+ void Fgetpos(CheckerContext &C, const CallExpr *CE);
+ void Fsetpos(CheckerContext &C, const CallExpr *CE);
+ void Clearerr(CheckerContext &C, const CallExpr *CE);
+ void Feof(CheckerContext &C, const CallExpr *CE);
+ void Ferror(CheckerContext &C, const CallExpr *CE);
+ void Fileno(CheckerContext &C, const CallExpr *CE);
+
+  // A null return value indicates that the stream pointer is NULL.
+ const GRState *CheckNullStream(SVal SV, const GRState *state,
+ CheckerContext &C);
+};
+
+} // end anonymous namespace
+
+void clang::RegisterStreamChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new StreamChecker());
+}
+
+bool StreamChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ SVal L = state->getSVal(Callee);
+ const FunctionDecl *FD = L.getAsFunctionDecl();
+ if (!FD)
+ return false;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II_fopen)
+ II_fopen = &Ctx.Idents.get("fopen");
+ if (!II_fread)
+ II_fread = &Ctx.Idents.get("fread");
+ if (!II_fwrite)
+ II_fwrite = &Ctx.Idents.get("fwrite");
+ if (!II_fseek)
+ II_fseek = &Ctx.Idents.get("fseek");
+ if (!II_ftell)
+ II_ftell = &Ctx.Idents.get("ftell");
+ if (!II_rewind)
+ II_rewind = &Ctx.Idents.get("rewind");
+ if (!II_fgetpos)
+ II_fgetpos = &Ctx.Idents.get("fgetpos");
+ if (!II_fsetpos)
+ II_fsetpos = &Ctx.Idents.get("fsetpos");
+ if (!II_clearerr)
+ II_clearerr = &Ctx.Idents.get("clearerr");
+ if (!II_feof)
+ II_feof = &Ctx.Idents.get("feof");
+ if (!II_ferror)
+ II_ferror = &Ctx.Idents.get("ferror");
+ if (!II_fileno)
+ II_fileno = &Ctx.Idents.get("fileno");
+
+ if (FD->getIdentifier() == II_fopen) {
+ Fopen(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fread) {
+ Fread(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fwrite) {
+ Fwrite(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fseek) {
+ Fseek(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_ftell) {
+ Ftell(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_rewind) {
+ Rewind(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fgetpos) {
+ Fgetpos(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fsetpos) {
+ Fsetpos(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_clearerr) {
+ Clearerr(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_feof) {
+ Feof(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_ferror) {
+ Ferror(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fileno) {
+ Fileno(C, CE);
+ return true;
+ }
+
+ return false;
+}
+
+void StreamChecker::Fopen(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
+ ValueManager &ValMgr = C.getValueManager();
+ DefinedSVal RetVal = cast<DefinedSVal>(ValMgr.getConjuredSymbolVal(0, CE,
+ Count));
+ state = state->BindExpr(CE, RetVal);
+
+ ConstraintManager &CM = C.getConstraintManager();
+ // Bifurcate the state into two: one with a valid FILE* pointer, the other
+ // with a NULL.
+ const GRState *stateNotNull, *stateNull;
+ llvm::tie(stateNotNull, stateNull) = CM.AssumeDual(state, RetVal);
+
+ C.addTransition(stateNotNull);
+ C.addTransition(stateNull);
+}
+
+void StreamChecker::Fread(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(3)), state, C))
+ return;
+}
+
+void StreamChecker::Fwrite(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(3)), state, C))
+ return;
+}
+
+void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!(state = CheckNullStream(state->getSVal(CE->getArg(0)), state, C)))
+ return;
+ // Check the legality of the 'whence' argument of 'fseek'.
+ SVal Whence = state->getSVal(CE->getArg(2));
+ bool WhenceIsLegal = true;
+  const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Whence);
+  if (!CI)
+    WhenceIsLegal = false;
+  else {
+    // Only look at the value if the cast to ConcreteInt succeeded.
+    int64_t x = CI->getValue().getSExtValue();
+    if (!(x == 0 || x == 1 || x == 2))
+      WhenceIsLegal = false;
+  }
+
+ if (!WhenceIsLegal) {
+ if (ExplodedNode *N = C.GenerateSink(state)) {
+ if (!BT_illegalwhence)
+ BT_illegalwhence = new BuiltinBug("Illegal whence argument",
+ "The whence argument to fseek() should be "
+ "SEEK_SET, SEEK_END, or SEEK_CUR.");
+ BugReport *R = new BugReport(*BT_illegalwhence,
+ BT_illegalwhence->getDescription(), N);
+ C.EmitReport(R);
+ }
+ return;
+ }
+
+ C.addTransition(state);
+}
+
+void StreamChecker::Ftell(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Rewind(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Fgetpos(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Fsetpos(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Clearerr(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Feof(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Ferror(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Fileno(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+const GRState *StreamChecker::CheckNullStream(SVal SV, const GRState *state,
+ CheckerContext &C) {
+ const DefinedSVal *DV = dyn_cast<DefinedSVal>(&SV);
+ if (!DV)
+ return 0;
+
+ ConstraintManager &CM = C.getConstraintManager();
+ const GRState *stateNotNull, *stateNull;
+ llvm::tie(stateNotNull, stateNull) = CM.AssumeDual(state, *DV);
+
+ if (!stateNotNull && stateNull) {
+ if (ExplodedNode *N = C.GenerateSink(stateNull)) {
+ if (!BT_nullfp)
+ BT_nullfp = new BuiltinBug("NULL stream pointer",
+ "Stream pointer might be NULL.");
+ BugReport *R =new BugReport(*BT_nullfp, BT_nullfp->getDescription(), N);
+ C.EmitReport(R);
+ }
+ return 0;
+ }
+ return stateNotNull;
+}
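
A sketch of the stream misuse this checker models (illustrative only):

    #include <stdio.h>
    long stream_examples(const char *path) {
      FILE *fp = fopen(path, "r");  // Fopen(): the state bifurcates into
                                    // fp != NULL and fp == NULL successors
      fseek(fp, 0L, SEEK_END);      // CheckNullStream(): fp might be NULL
      fseek(fp, 0L, 3);             // Fseek(): whence is a concrete int that
                                    // is none of SEEK_SET/SEEK_CUR/SEEK_END
      return ftell(fp);
    }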
diff --git a/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp b/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp
index f3a803c..c2b557e 100644
--- a/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp
@@ -74,6 +74,10 @@ void SymbolDerived::dumpToStream(llvm::raw_ostream& os) const {
<< getParentSymbol() << ',' << getRegion() << '}';
}
+void SymbolExtent::dumpToStream(llvm::raw_ostream& os) const {
+ os << "extent_$" << getSymbolID() << '{' << getRegion() << '}';
+}
+
void SymbolRegionValue::dumpToStream(llvm::raw_ostream& os) const {
os << "reg_$" << getSymbolID() << "<" << R << ">";
}
@@ -130,6 +134,22 @@ SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
return cast<SymbolDerived>(SD);
}
+const SymbolExtent*
+SymbolManager::getExtentSymbol(const SubRegion *R) {
+ llvm::FoldingSetNodeID profile;
+ SymbolExtent::Profile(profile, R);
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolExtent>();
+ new (SD) SymbolExtent(SymbolCounter, R);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolExtent>(SD);
+}
+
const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
BinaryOperator::Opcode op,
const llvm::APSInt& v,
@@ -170,11 +190,14 @@ QualType SymbolConjured::getType(ASTContext&) const {
return T;
}
-
QualType SymbolDerived::getType(ASTContext& Ctx) const {
return R->getValueType(Ctx);
}
+QualType SymbolExtent::getType(ASTContext& Ctx) const {
+ return Ctx.getSizeType();
+}
+
QualType SymbolRegionValue::getType(ASTContext& C) const {
return R->getValueType(C);
}
@@ -210,16 +233,25 @@ bool SymbolReaper::isLive(SymbolRef sym) {
return false;
}
+ if (const SymbolExtent *extent = dyn_cast<SymbolExtent>(sym)) {
+ const MemRegion *Base = extent->getRegion()->getBaseRegion();
+ if (const VarRegion *VR = dyn_cast<VarRegion>(Base))
+ return isLive(VR);
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Base))
+ return isLive(SR->getSymbol());
+ return false;
+ }
+
// Interrogate the symbol. It may derive from an input value to
// the analyzed function/method.
return isa<SymbolRegionValue>(sym);
}
-bool SymbolReaper::isLive(const Stmt* Loc, const Stmt* ExprVal) const {
+bool SymbolReaper::isLive(const Stmt* ExprVal) const {
return LCtx->getLiveVariables()->isLive(Loc, ExprVal);
}
-bool SymbolReaper::isLive(const Stmt *Loc, const VarRegion *VR) const {
+bool SymbolReaper::isLive(const VarRegion *VR) const {
const StackFrameContext *SFC = VR->getStackFrame();
if (SFC == LCtx->getCurrentStackFrame())
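
For reference, an extent symbol denotes a region's size in bytes and has type
size_t; per the dumpToStream() change above, a region 'vla' bound to symbol ID
5 would print as extent_$5{vla} (the ID and region name here are made up).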
diff --git a/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp
index cea9d19..936991d 100644
--- a/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp
+++ b/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp
@@ -9,10 +9,13 @@
//
// This defines VLASizeChecker, a builtin check in GRExprEngine that
// performs checks for declaration of VLA of undefined or zero size.
+// In addition, VLASizeChecker is responsible for defining the extent
+// of the MemRegion that represents a VLA.
//
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
+#include "clang/AST/CharUnits.h"
#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
@@ -42,9 +45,9 @@ void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) {
const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
if (!VD)
return;
-
- const VariableArrayType *VLA
- = C.getASTContext().getAsVariableArrayType(VD->getType());
+
+ ASTContext &Ctx = C.getASTContext();
+ const VariableArrayType *VLA = Ctx.getAsVariableArrayType(VD->getType());
if (!VLA)
return;
@@ -70,9 +73,14 @@ void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) {
C.EmitReport(report);
return;
}
+
+ // See if the size value is known. It can't be undefined because we would have
+ // warned about that already.
+ if (sizeV.isUnknown())
+ return;
// Check if the size is zero.
- DefinedOrUnknownSVal sizeD = cast<DefinedOrUnknownSVal>(sizeV);
+ DefinedSVal sizeD = cast<DefinedSVal>(sizeV);
const GRState *stateNotZero, *stateZero;
llvm::tie(stateNotZero, stateZero) = state->Assume(sizeD);
@@ -92,5 +100,36 @@ void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) {
}
// From this point on, assume that the size is not zero.
- C.addTransition(stateNotZero);
+ state = stateNotZero;
+
+ // VLASizeChecker is responsible for defining the extent of the array being
+ // declared. We do this by multiplying the array length by the element size,
+ // then matching that with the array region's extent symbol.
+
+ // Convert the array length to size_t.
+ ValueManager &ValMgr = C.getValueManager();
+ SValuator &SV = ValMgr.getSValuator();
+ QualType SizeTy = Ctx.getSizeType();
+ NonLoc ArrayLength = cast<NonLoc>(SV.EvalCast(sizeD, SizeTy, SE->getType()));
+
+ // Get the element size.
+ CharUnits EleSize = Ctx.getTypeSizeInChars(VLA->getElementType());
+ SVal EleSizeVal = ValMgr.makeIntVal(EleSize.getQuantity(), SizeTy);
+
+ // Multiply the array length by the element size.
+ SVal ArraySizeVal = SV.EvalBinOpNN(state, BinaryOperator::Mul, ArrayLength,
+ cast<NonLoc>(EleSizeVal), SizeTy);
+
+  // Finally, assume that the array's extent matches the given size.
+ const LocationContext *LC = C.getPredecessor()->getLocationContext();
+ DefinedOrUnknownSVal Extent = state->getRegion(VD, LC)->getExtent(ValMgr);
+ DefinedOrUnknownSVal ArraySize = cast<DefinedOrUnknownSVal>(ArraySizeVal);
+ DefinedOrUnknownSVal SizeIsKnown = SV.EvalEQ(state, Extent, ArraySize);
+ state = state->Assume(SizeIsKnown, true);
+
+ // Assume should not fail at this point.
+ assert(state);
+
+ // Remember our assumptions!
+ C.addTransition(state);
}
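
A worked example of the new extent logic (hedged sketch, not part of the patch):

    void f(unsigned n) {
      int vla[n];  // after the zero-size check, the checker assumes n != 0,
                   // casts the length to size_t, and records
                   //   extent(vla) == n * sizeof(int)
    }              // e.g. with 4-byte int and n == 5, the extent is 20 bytes

Later consumers such as bounds checkers can then compare byte offsets against
the region's extent symbol instead of treating the VLA's size as unknown.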
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
index 1ab2f55..85524ac 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
@@ -11,11 +11,9 @@
#define CLANG_CODEGEN_ABIINFO_H
#include "clang/AST/Type.h"
-
-#include <cassert>
+#include "llvm/Type.h"
namespace llvm {
- class Type;
class Value;
class LLVMContext;
}
@@ -70,7 +68,7 @@ namespace clang {
private:
Kind TheKind;
- const llvm::Type *TypeData;
+ llvm::PATypeHolder TypeData;
unsigned UIntData;
bool BoolData;
@@ -136,7 +134,11 @@ namespace clang {
virtual void computeInfo(CodeGen::CGFunctionInfo &FI,
ASTContext &Ctx,
- llvm::LLVMContext &VMContext) const = 0;
+ llvm::LLVMContext &VMContext,
+                           // These are the preferred types for argument
+                           // lowering, which can be used to generate better IR.
+ const llvm::Type *const *PrefTypes = 0,
+ unsigned NumPrefTypes = 0) const = 0;
/// EmitVAArg - Emit the target dependent code to load a value of
/// \arg Ty from the va_list pointed to by \arg VAListAddr.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
new file mode 100644
index 0000000..69efe43
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
@@ -0,0 +1,339 @@
+//===--- BackendUtil.cpp - LLVM Backend Utilities -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/StandardPasses.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/SubtargetFeature.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegistry.h"
+using namespace clang;
+using namespace llvm;
+
+namespace {
+
+class EmitAssemblyHelper {
+ Diagnostic &Diags;
+ const CodeGenOptions &CodeGenOpts;
+ const TargetOptions &TargetOpts;
+ Module *TheModule;
+
+ Timer CodeGenerationTime;
+
+ mutable FunctionPassManager *CodeGenPasses;
+ mutable PassManager *PerModulePasses;
+ mutable FunctionPassManager *PerFunctionPasses;
+
+private:
+ FunctionPassManager *getCodeGenPasses() const {
+ if (!CodeGenPasses) {
+ CodeGenPasses = new FunctionPassManager(TheModule);
+ CodeGenPasses->add(new TargetData(TheModule));
+ }
+ return CodeGenPasses;
+ }
+
+ PassManager *getPerModulePasses() const {
+ if (!PerModulePasses) {
+ PerModulePasses = new PassManager();
+ PerModulePasses->add(new TargetData(TheModule));
+ }
+ return PerModulePasses;
+ }
+
+ FunctionPassManager *getPerFunctionPasses() const {
+ if (!PerFunctionPasses) {
+ PerFunctionPasses = new FunctionPassManager(TheModule);
+ PerFunctionPasses->add(new TargetData(TheModule));
+ }
+ return PerFunctionPasses;
+ }
+
+ void CreatePasses();
+
+ /// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR.
+ ///
+ /// \return True on success.
+ bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS);
+
+public:
+ EmitAssemblyHelper(Diagnostic &_Diags,
+ const CodeGenOptions &CGOpts, const TargetOptions &TOpts,
+ Module *M)
+ : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts),
+ TheModule(M), CodeGenerationTime("Code Generation Time"),
+ CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) {}
+
+ ~EmitAssemblyHelper() {
+ delete CodeGenPasses;
+ delete PerModulePasses;
+ delete PerFunctionPasses;
+ }
+
+ void EmitAssembly(BackendAction Action, raw_ostream *OS);
+};
+
+}
+
+void EmitAssemblyHelper::CreatePasses() {
+ unsigned OptLevel = CodeGenOpts.OptimizationLevel;
+ CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
+
+ // Handle disabling of LLVM optimization, where we want to preserve the
+ // internal module before any optimization.
+ if (CodeGenOpts.DisableLLVMOpts) {
+ OptLevel = 0;
+ Inlining = CodeGenOpts.NoInlining;
+ }
+
+  // At -O0, if checking is disabled, we don't even have per-function passes.
+ if (CodeGenOpts.VerifyModule)
+ getPerFunctionPasses()->add(createVerifierPass());
+
+ // Assume that standard function passes aren't run for -O0.
+ if (OptLevel > 0)
+ llvm::createStandardFunctionPasses(getPerFunctionPasses(), OptLevel);
+
+ llvm::Pass *InliningPass = 0;
+ switch (Inlining) {
+ case CodeGenOptions::NoInlining: break;
+ case CodeGenOptions::NormalInlining: {
+ // Set the inline threshold following llvm-gcc.
+ //
+ // FIXME: Derive these constants in a principled fashion.
+ unsigned Threshold = 225;
+ if (CodeGenOpts.OptimizeSize)
+ Threshold = 75;
+ else if (OptLevel > 2)
+ Threshold = 275;
+ InliningPass = createFunctionInliningPass(Threshold);
+ break;
+ }
+ case CodeGenOptions::OnlyAlwaysInlining:
+ InliningPass = createAlwaysInlinerPass(); // Respect always_inline
+ break;
+ }
+
+ // For now we always create per module passes.
+ llvm::createStandardModulePasses(getPerModulePasses(), OptLevel,
+ CodeGenOpts.OptimizeSize,
+ CodeGenOpts.UnitAtATime,
+ CodeGenOpts.UnrollLoops,
+ CodeGenOpts.SimplifyLibCalls,
+ /*HaveExceptions=*/true,
+ InliningPass);
+}
+
+bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
+ formatted_raw_ostream &OS) {
+ // Create the TargetMachine for generating code.
+ std::string Error;
+ std::string Triple = TheModule->getTargetTriple();
+ const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
+ if (!TheTarget) {
+ Diags.Report(diag::err_fe_unable_to_create_target) << Error;
+ return false;
+ }
+
+ // FIXME: Expose these capabilities via actual APIs!!!! Aside from just
+ // being gross, this is also totally broken if we ever care about
+ // concurrency.
+
+ // Set frame pointer elimination mode.
+ if (!CodeGenOpts.DisableFPElim) {
+ llvm::NoFramePointerElim = false;
+ llvm::NoFramePointerElimNonLeaf = false;
+ } else if (CodeGenOpts.OmitLeafFramePointer) {
+ llvm::NoFramePointerElim = false;
+ llvm::NoFramePointerElimNonLeaf = true;
+ } else {
+ llvm::NoFramePointerElim = true;
+ llvm::NoFramePointerElimNonLeaf = true;
+ }
+
+ // Set float ABI type.
+ if (CodeGenOpts.FloatABI == "soft")
+ llvm::FloatABIType = llvm::FloatABI::Soft;
+ else if (CodeGenOpts.FloatABI == "hard")
+ llvm::FloatABIType = llvm::FloatABI::Hard;
+ else {
+ assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
+ llvm::FloatABIType = llvm::FloatABI::Default;
+ }
+
+ NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
+ llvm::UseSoftFloat = CodeGenOpts.SoftFloat;
+ UnwindTablesMandatory = CodeGenOpts.UnwindTables;
+
+ TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose);
+
+ TargetMachine::setFunctionSections(CodeGenOpts.FunctionSections);
+ TargetMachine::setDataSections (CodeGenOpts.DataSections);
+
+ // FIXME: Parse this earlier.
+ if (CodeGenOpts.RelocationModel == "static") {
+ TargetMachine::setRelocationModel(llvm::Reloc::Static);
+ } else if (CodeGenOpts.RelocationModel == "pic") {
+ TargetMachine::setRelocationModel(llvm::Reloc::PIC_);
+ } else {
+ assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
+ "Invalid PIC model!");
+ TargetMachine::setRelocationModel(llvm::Reloc::DynamicNoPIC);
+ }
+ // FIXME: Parse this earlier.
+ if (CodeGenOpts.CodeModel == "small") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Small);
+ } else if (CodeGenOpts.CodeModel == "kernel") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Kernel);
+ } else if (CodeGenOpts.CodeModel == "medium") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Medium);
+ } else if (CodeGenOpts.CodeModel == "large") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Large);
+ } else {
+ assert(CodeGenOpts.CodeModel.empty() && "Invalid code model!");
+ TargetMachine::setCodeModel(llvm::CodeModel::Default);
+ }
+
+ std::vector<const char *> BackendArgs;
+ BackendArgs.push_back("clang"); // Fake program name.
+ if (!CodeGenOpts.DebugPass.empty()) {
+ BackendArgs.push_back("-debug-pass");
+ BackendArgs.push_back(CodeGenOpts.DebugPass.c_str());
+ }
+ if (!CodeGenOpts.LimitFloatPrecision.empty()) {
+ BackendArgs.push_back("-limit-float-precision");
+ BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str());
+ }
+ if (llvm::TimePassesIsEnabled)
+ BackendArgs.push_back("-time-passes");
+ BackendArgs.push_back(0);
+ llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
+ const_cast<char **>(&BackendArgs[0]));
+
+ std::string FeaturesStr;
+ if (TargetOpts.CPU.size() || TargetOpts.Features.size()) {
+ SubtargetFeatures Features;
+ Features.setCPU(TargetOpts.CPU);
+ for (std::vector<std::string>::const_iterator
+ it = TargetOpts.Features.begin(),
+ ie = TargetOpts.Features.end(); it != ie; ++it)
+ Features.AddFeature(*it);
+ FeaturesStr = Features.getString();
+ }
+ TargetMachine *TM = TheTarget->createTargetMachine(Triple, FeaturesStr);
+
+ if (CodeGenOpts.RelaxAll)
+ TM->setMCRelaxAll(true);
+
+ // Create the code generator passes.
+ FunctionPassManager *PM = getCodeGenPasses();
+ CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+
+ switch (CodeGenOpts.OptimizationLevel) {
+ default: break;
+ case 0: OptLevel = CodeGenOpt::None; break;
+ case 3: OptLevel = CodeGenOpt::Aggressive; break;
+ }
+
+  // Normal mode, emit a .s or .o file by running the code generator. Note
+  // that this also adds code-generator-level optimization passes.
+ TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile;
+ if (Action == Backend_EmitObj)
+ CGFT = TargetMachine::CGFT_ObjectFile;
+ else if (Action == Backend_EmitMCNull)
+ CGFT = TargetMachine::CGFT_Null;
+ else
+ assert(Action == Backend_EmitAssembly && "Invalid action!");
+ if (TM->addPassesToEmitFile(*PM, OS, CGFT, OptLevel,
+ /*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
+ Diags.Report(diag::err_fe_unable_to_interface_with_target);
+ return false;
+ }
+
+ return true;
+}
+
+void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
+ TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : 0);
+ llvm::formatted_raw_ostream FormattedOS;
+
+ CreatePasses();
+ switch (Action) {
+ case Backend_EmitNothing:
+ break;
+
+ case Backend_EmitBC:
+ getPerModulePasses()->add(createBitcodeWriterPass(*OS));
+ break;
+
+ case Backend_EmitLL:
+ FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
+ getPerModulePasses()->add(createPrintModulePass(&FormattedOS));
+ break;
+
+ default:
+ FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
+ if (!AddEmitPasses(Action, FormattedOS))
+ return;
+ }
+
+ // Run passes. For now we do all passes at once, but eventually we
+ // would like to have the option of streaming code generation.
+
+ if (PerFunctionPasses) {
+ PrettyStackTraceString CrashInfo("Per-function optimization");
+
+ PerFunctionPasses->doInitialization();
+ for (Module::iterator I = TheModule->begin(),
+ E = TheModule->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ PerFunctionPasses->run(*I);
+ PerFunctionPasses->doFinalization();
+ }
+
+ if (PerModulePasses) {
+ PrettyStackTraceString CrashInfo("Per-module optimization passes");
+ PerModulePasses->run(*TheModule);
+ }
+
+ if (CodeGenPasses) {
+ PrettyStackTraceString CrashInfo("Code generation");
+
+ CodeGenPasses->doInitialization();
+ for (Module::iterator I = TheModule->begin(),
+ E = TheModule->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ CodeGenPasses->run(*I);
+ CodeGenPasses->doFinalization();
+ }
+}
+
+void clang::EmitBackendOutput(Diagnostic &Diags, const CodeGenOptions &CGOpts,
+ const TargetOptions &TOpts, Module *M,
+ BackendAction Action, raw_ostream *OS) {
+ EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, M);
+
+ AsmHelper.EmitAssembly(Action, OS);
+}
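
A minimal driver sketch for this entry point (assumes an initialized
Diagnostic, CodeGenOptions, TargetOptions, and llvm::Module; error handling
elided):

    std::string Err;
    llvm::raw_fd_ostream OS("out.o", Err, llvm::raw_fd_ostream::F_Binary);
    if (Err.empty())
      clang::EmitBackendOutput(Diags, CGOpts, TOpts, M, clang::Backend_EmitObj,
                               &OS);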
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
index de58597..cb9e636 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
@@ -228,7 +228,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
// block literal.
// __invoke
llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
+ = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, BE, Info, CurFuncDecl,
LocalDeclMap);
BlockHasCopyDispose |= Info.BlockHasCopyDispose;
Elts[3] = Fn;
@@ -253,7 +253,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args,
FunctionType::ExtInfo());
- if (CGM.ReturnTypeUsesSret(FnInfo))
+ if (CGM.ReturnTypeUsesSRet(FnInfo))
flags |= BLOCK_USE_STRET;
}
const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
@@ -296,8 +296,11 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
QualType Ty = E->getType();
if (BDRE && BDRE->isByRef()) {
- Types[i+BlockFields] = llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
- } else
+ Types[i+BlockFields] =
+ llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
+ } else if (BDRE && BDRE->getDecl()->getType()->isReferenceType()) {
+ Types[i+BlockFields] = llvm::PointerType::get(ConvertType(Ty), 0);
+ } else
Types[i+BlockFields] = ConvertType(Ty);
}
@@ -358,11 +361,23 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
Builder.CreateStore(Loc, Addr);
continue;
} else {
- E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
- VD->getType(),
- SourceLocation());
+ if (BDRE->getCopyConstructorExpr()) {
+ E = BDRE->getCopyConstructorExpr();
+ PushDestructorCleanup(E->getType(), Addr);
+ }
+ else {
+ E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
+ VD->getType().getNonReferenceType(),
+ SourceLocation());
+ if (VD->getType()->isReferenceType()) {
+ E = new (getContext())
+ UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf,
+ getContext().getPointerType(E->getType()),
+ SourceLocation());
+ }
+ }
+ }
}
- }
if (BDRE->isByRef()) {
E = new (getContext())
@@ -386,8 +401,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
llvm::Value *BlockLiteral = LoadBlockStruct();
Loc = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- offset.getQuantity()),
+ llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
"block.literal");
Ty = llvm::PointerType::get(Ty, 0);
Loc = Builder.CreateBitCast(Loc, Ty);
@@ -599,13 +613,13 @@ void CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
bool IsByRef) {
+
CharUnits offset = BlockDecls[VD];
assert(!offset.isZero() && "getting address of unallocated decl");
llvm::Value *BlockLiteral = LoadBlockStruct();
llvm::Value *V = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- offset.getQuantity()),
+ llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
"block.literal");
if (IsByRef) {
const llvm::Type *PtrStructTy
@@ -626,9 +640,10 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
V = Builder.CreateLoad(V);
} else {
const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());
-
Ty = llvm::PointerType::get(Ty, 0);
V = Builder.CreateBitCast(V, Ty);
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "ref.tmp");
}
return V;
}
@@ -680,7 +695,7 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
CGBlockInfo Info(n);
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap);
+ = CodeGenFunction(CGM).GenerateBlockFunction(GlobalDecl(), BE, Info, 0, LocalDeclMap);
assert(Info.BlockSize == BlockLiteralSize
&& "no imports allowed for global block");
@@ -719,7 +734,7 @@ llvm::Value *CodeGenFunction::LoadBlockStruct() {
}
llvm::Function *
-CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
+CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
CGBlockInfo &Info,
const Decl *OuterFuncDecl,
llvm::DenseMap<const Decl*, llvm::Value*> ldm) {
@@ -792,18 +807,29 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
MangleBuffer Name;
- CGM.getMangledName(Name, BD);
+ CGM.getMangledName(GD, Name, BD);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
Name.getString(), &CGM.getModule());
CGM.SetInternalFunctionAttributes(BD, Fn, FI);
+ QualType FnType(BlockFunctionType, 0);
+ bool HasPrototype = isa<FunctionProtoType>(BlockFunctionType);
+
+ IdentifierInfo *ID = &getContext().Idents.get(Name.getString());
+ CurCodeDecl = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), ID, FnType,
+ 0,
+ FunctionDecl::Static,
+ FunctionDecl::None,
+ false, HasPrototype);
+
StartFunction(BD, ResultType, Fn, Args,
BExpr->getBody()->getLocEnd());
CurFuncDecl = OuterFuncDecl;
- CurCodeDecl = BD;
// If we have a C++ 'this' reference, go ahead and force it into
// existence now.
@@ -985,8 +1011,7 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
- llvm::Value *N = llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(T->getContext()), flag);
+ llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
llvm::Value *F = getBlockObjectAssign();
Builder.CreateCall3(F, Dstv, Srcv, N);
}
@@ -1138,8 +1163,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
flag |= BLOCK_BYREF_CALLER;
- llvm::Value *N = llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(T->getContext()), flag);
+ llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
llvm::Value *F = getBlockObjectAssign();
Builder.CreateCall3(F, DstObj, SrcObj, N);
@@ -1241,7 +1265,7 @@ llvm::Value *BlockFunction::getBlockObjectDispose() {
std::vector<const llvm::Type*> ArgTys;
const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+ ArgTys.push_back(CGF.Int32Ty);
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
CGM.BlockObjectDispose
= CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
@@ -1256,7 +1280,7 @@ llvm::Value *BlockFunction::getBlockObjectAssign() {
const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
ArgTys.push_back(PtrToInt8Ty);
ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+ ArgTys.push_back(CGF.Int32Ty);
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
CGM.BlockObjectAssign
= CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
@@ -1268,7 +1292,7 @@ void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
llvm::Value *F = getBlockObjectDispose();
llvm::Value *N;
V = Builder.CreateBitCast(V, PtrToInt8Ty);
- N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag);
+ N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
Builder.CreateCall2(F, V, N);
}
@@ -1276,7 +1300,7 @@ ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
CGBuilderTy &B)
- : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) {
+ : CGM(cgm), VMContext(cgm.getLLVMContext()), CGF(cgf), Builder(B) {
PtrToInt8Ty = llvm::PointerType::getUnqual(
llvm::Type::getInt8Ty(VMContext));
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
index e9b2bd5..772a62c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
@@ -99,7 +99,7 @@ public:
llvm::Value *BlockObjectAssign;
llvm::Value *BlockObjectDispose;
- const llvm::Type *PtrToInt8Ty;
+ const llvm::PointerType *PtrToInt8Ty;
std::map<uint64_t, llvm::Constant *> AssignCache;
std::map<uint64_t, llvm::Constant *> DestroyCache;
@@ -121,13 +121,14 @@ public:
class BlockFunction : public BlockBase {
CodeGenModule &CGM;
- CodeGenFunction &CGF;
ASTContext &getContext() const;
protected:
llvm::LLVMContext &VMContext;
public:
+ CodeGenFunction &CGF;
+
const llvm::PointerType *PtrToInt8Ty;
struct HelperInfo {
int index;
@@ -180,7 +181,7 @@ public:
/// BlockDecls - Offsets for all Decls in BlockDeclRefExprs.
llvm::DenseMap<const Decl*, CharUnits> BlockDecls;
-
+
/// BlockCXXThisOffset - The offset of the C++ 'this' value within
/// the block structure.
CharUnits BlockCXXThisOffset;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
index ed56bd9..8120217 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
@@ -14,12 +14,14 @@
namespace clang {
namespace CodeGen {
- // Don't preserve names on values in an optimized build.
+
+// Don't preserve names on values in an optimized build.
#ifdef NDEBUG
- typedef llvm::IRBuilder<false> CGBuilderTy;
+typedef llvm::IRBuilder<false> CGBuilderTy;
#else
- typedef llvm::IRBuilder<> CGBuilderTy;
+typedef llvm::IRBuilder<> CGBuilderTy;
#endif
+
} // end namespace CodeGen
} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
index dd505c2..fff4bac 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -14,6 +14,7 @@
#include "TargetInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
@@ -84,11 +85,6 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Args[1]));
}
-static llvm::ConstantInt *getInt32(llvm::LLVMContext &Context, int32_t Value) {
- return llvm::ConstantInt::get(llvm::Type::getInt32Ty(Context), Value);
-}
-
-
/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
@@ -283,9 +279,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
// FIXME: Technically these constants should be of type 'int', yes?
RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ llvm::ConstantInt::get(Int32Ty, 0);
Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3);
+ llvm::ConstantInt::get(Int32Ty, 3);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
}
@@ -395,12 +391,68 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
V = Builder.CreateAnd(Eq, IsNotInf, "and");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
+
+ case Builtin::BI__builtin_fpclassify: {
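+    // __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+    // FP_ZERO, x) evaluates to whichever of its first five arguments matches
+    // the classification of x; the blocks below test zero, NaN, infinity,
+    // then normal vs. subnormal, in that order.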
+ Value *V = EmitScalarExpr(E->getArg(5));
+ const llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
+
+ // Create Result
+ BasicBlock *Begin = Builder.GetInsertBlock();
+ BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
+ Builder.SetInsertPoint(End);
+ PHINode *Result =
+ Builder.CreatePHI(ConvertType(E->getArg(0)->getType()),
+ "fpclassify_result");
+
+ // if (V==0) return FP_ZERO
+ Builder.SetInsertPoint(Begin);
+ Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
+ "iszero");
+ Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
+ BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
+ Builder.CreateCondBr(IsZero, End, NotZero);
+ Result->addIncoming(ZeroLiteral, Begin);
+
+ // if (V != V) return FP_NAN
+ Builder.SetInsertPoint(NotZero);
+ Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
+ Value *NanLiteral = EmitScalarExpr(E->getArg(0));
+ BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
+ Builder.CreateCondBr(IsNan, End, NotNan);
+ Result->addIncoming(NanLiteral, NotZero);
+
+ // if (fabs(V) == infinity) return FP_INFINITY
+ Builder.SetInsertPoint(NotNan);
+ Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
+ Value *IsInf =
+ Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
+ "isinf");
+ Value *InfLiteral = EmitScalarExpr(E->getArg(1));
+ BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
+ Builder.CreateCondBr(IsInf, End, NotInf);
+ Result->addIncoming(InfLiteral, NotNan);
+
+ // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
+ Builder.SetInsertPoint(NotInf);
+ APFloat Smallest = APFloat::getSmallestNormalized(
+ getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
+ Value *IsNormal =
+ Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
+ "isnormal");
+ Value *NormalResult =
+ Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)));
+ Builder.CreateBr(End);
+ Result->addIncoming(NormalResult, NotInf);
+
+ // return Result
+ Builder.SetInsertPoint(End);
+ return RValue::get(Result);
+ }
case Builtin::BIalloca:
case Builtin::BI__builtin_alloca: {
- // FIXME: LLVM IR Should allow alloca with an i64 size!
Value *Size = EmitScalarExpr(E->getArg(0));
- Size = Builder.CreateIntCast(Size, llvm::Type::getInt32Ty(VMContext), false, "tmp");
return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), Size, "tmp"));
}
case Builtin::BIbzero:
@@ -411,7 +463,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Address,
llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
SizeVal,
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+ llvm::ConstantInt::get(Int32Ty, 1),
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
return RValue::get(Address);
}
@@ -423,10 +475,20 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateCall5(CGM.getMemCpyFn(Address->getType(), SrcAddr->getType(),
SizeVal->getType()),
Address, SrcAddr, SizeVal,
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+ llvm::ConstantInt::get(Int32Ty, 1),
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
return RValue::get(Address);
}
+
+ case Builtin::BI__builtin_objc_memmove_collectable: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
+ Address, SrcAddr, SizeVal);
+ return RValue::get(Address);
+ }
+
case Builtin::BImemmove:
case Builtin::BI__builtin_memmove: {
Value *Address = EmitScalarExpr(E->getArg(0));
@@ -435,7 +497,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateCall5(CGM.getMemMoveFn(Address->getType(), SrcAddr->getType(),
SizeVal->getType()),
Address, SrcAddr, SizeVal,
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+ llvm::ConstantInt::get(Int32Ty, 1),
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
return RValue::get(Address);
}
@@ -448,7 +510,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
llvm::Type::getInt8Ty(VMContext)),
SizeVal,
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+ llvm::ConstantInt::get(Int32Ty, 1),
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
return RValue::get(Address);
}
@@ -464,21 +526,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
int32_t Offset = 0;
Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa, 0, 0);
- return RValue::get(Builder.CreateCall(F, getInt32(VMContext, Offset)));
+ return RValue::get(Builder.CreateCall(F,
+ llvm::ConstantInt::get(Int32Ty, Offset)));
}
case Builtin::BI__builtin_return_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
- Depth = Builder.CreateIntCast(Depth,
- llvm::Type::getInt32Ty(VMContext),
- false, "tmp");
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_frame_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
- Depth = Builder.CreateIntCast(Depth,
- llvm::Type::getInt32Ty(VMContext),
- false, "tmp");
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
return RValue::get(Builder.CreateCall(F, Depth));
}
@@ -551,36 +610,45 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
// Otherwise, ask the codegen data what to do.
- const llvm::IntegerType *Int64Ty = llvm::IntegerType::get(C, 64);
if (getTargetHooks().extendPointerWithSExt())
return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
else
return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
}
-#if 0
- // FIXME: Finish/enable when LLVM backend support stabilizes
case Builtin::BI__builtin_setjmp: {
+ // Buffer is a void**.
Value *Buf = EmitScalarExpr(E->getArg(0));
- // Store the frame pointer to the buffer
- Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+
+ // Store the frame pointer to the setjmp buffer.
Value *FrameAddr =
- Builder.CreateCall(FrameAddrF,
- Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)));
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
+ ConstantInt::get(Int32Ty, 0));
Builder.CreateStore(FrameAddr, Buf);
- // Call the setjmp intrinsic
- Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0);
- const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
- Buf = Builder.CreateBitCast(Buf, DestType);
+
+ // Store the stack pointer to the setjmp buffer.
+ Value *StackAddr =
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
+ Value *StackSaveSlot =
+ Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
+ Builder.CreateStore(StackAddr, StackSaveSlot);
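+
+    // Buffer layout (assumption, per LLVM's SjLj lowering): slot 0 holds the
+    // frame pointer and slot 2 the stack pointer; slot 1 is reserved for the
+    // resume address, which the backend fills in.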
+
+ // Call LLVM's EH setjmp, which is lightweight.
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
+ Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext));
return RValue::get(Builder.CreateCall(F, Buf));
}
case Builtin::BI__builtin_longjmp: {
- Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
Value *Buf = EmitScalarExpr(E->getArg(0));
- const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
- Buf = Builder.CreateBitCast(Buf, DestType);
- return RValue::get(Builder.CreateCall(F, Buf));
+ Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext));
+
+ // Call LLVM's EH longjmp, which is lightweight.
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
+
+ // longjmp doesn't return; mark this as unreachable
+ Value *V = Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ return RValue::get(V);
}
-#endif
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_sub:
case Builtin::BI__sync_fetch_and_or:
@@ -870,14 +938,703 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
}
}
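+
+// GetNeonType - Map the low three bits of a NEON type code to an LLVM vector
+// type (0 -> i8, 1 -> i16, 2 -> i32, 3 -> i64, 4 -> f32; 5-7, the polynomial
+// and half-float codes, reuse the i8/i16 lanes); 'q' doubles the lane count
+// for quad registers, e.g. GetNeonType(C, 2, true) is <4 x i32>.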
+const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
+ switch (type) {
+ default: break;
+ case 0:
+ case 5: return llvm::VectorType::get(llvm::Type::getInt8Ty(C), 8 << (int)q);
+ case 6:
+ case 7:
+ case 1: return llvm::VectorType::get(llvm::Type::getInt16Ty(C),4 << (int)q);
+ case 2: return llvm::VectorType::get(llvm::Type::getInt32Ty(C),2 << (int)q);
+ case 3: return llvm::VectorType::get(llvm::Type::getInt64Ty(C),1 << (int)q);
+ case 4: return llvm::VectorType::get(llvm::Type::getFloatTy(C),2 << (int)q);
+  }
+ return 0;
+}
+
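+// EmitNeonSplat - Broadcast lane C of V across every lane; e.g. splatting
+// lane 2 of a <4 x i16> builds the shuffle mask <2, 2, 2, 2>.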
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
+ unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
+ SmallVector<Constant*, 16> Indices(nElts, C);
+ Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ return Builder.CreateShuffleVector(V, V, SV, "lane");
+}
+
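+// EmitNeonCall - Bitcast each operand to the intrinsic's expected parameter
+// type. If 'shift' names an operand index, that constant scalar shift amount
+// is expanded to a vector instead (negated for right shifts); if 'splat', the
+// lane picked by the trailing operand is broadcast across the operand before
+// it and the lane index is dropped.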
+Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
+ const char *name, bool splat,
+ unsigned shift, bool rightshift) {
+ unsigned j = 0;
+ for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
+ ai != ae; ++ai, ++j)
+ if (shift > 0 && shift == j)
+ Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
+ else
+ Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
+
+ if (splat) {
+ Ops[j-1] = EmitNeonSplat(Ops[j-1], cast<Constant>(Ops[j]));
+ Ops.resize(j);
+ }
+ return Builder.CreateCall(F, Ops.begin(), Ops.end(), name);
+}
+
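+// EmitNeonShiftVector - Expand a constant scalar shift amount into a vector
+// of identical lanes, negated when 'neg' (the NEON shift intrinsics encode
+// right shifts as negative shift counts).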
+Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
+ bool neg) {
+ ConstantInt *CI = cast<ConstantInt>(V);
+ int SV = CI->getSExtValue();
+
+ const llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
+ SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
+ return llvm::ConstantVector::get(CV.begin(), CV.size());
+}
+
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
+ if (BuiltinID == ARM::BI__clear_cache) {
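+    // There is no LLVM intrinsic for this; emit a call to a runtime function
+    // with the declaration's own name and type.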
+ const FunctionDecl *FD = E->getDirectCallee();
+ Value *a = EmitScalarExpr(E->getArg(0));
+ Value *b = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
+ const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
+ llvm::StringRef Name = FD->getName();
+ return Builder.CreateCall2(CGM.CreateRuntimeFunction(FTy, Name),
+ a, b);
+ }
+
+ // Determine the type of this overloaded NEON intrinsic.
+ assert(BuiltinID > ARM::BI__builtin_thread_pointer);
+
+ llvm::SmallVector<Value*, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ llvm::APSInt Result;
+ const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ if (!Arg->isIntegerConstantExpr(Result, getContext()))
+ return 0;
+
+ unsigned type = Result.getZExtValue();
+ bool usgn = type & 0x08;
+ bool quad = type & 0x10;
+ bool poly = (type & 0x7) == 5 || (type & 0x7) == 6;
+ bool splat = false;
+
+ const llvm::VectorType *VTy = GetNeonType(VMContext, type & 0x7, quad);
+ const llvm::Type *Ty = VTy;
+ if (!Ty)
+ return 0;
+
+ unsigned Int;
switch (BuiltinID) {
default: return 0;
+ case ARM::BI__builtin_neon_vaba_v:
+ case ARM::BI__builtin_neon_vabaq_v:
+ Int = usgn ? Intrinsic::arm_neon_vabau : Intrinsic::arm_neon_vabas;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vaba");
+ case ARM::BI__builtin_neon_vabal_v:
+ Int = usgn ? Intrinsic::arm_neon_vabalu : Intrinsic::arm_neon_vabals;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabal");
+ case ARM::BI__builtin_neon_vabd_v:
+ case ARM::BI__builtin_neon_vabdq_v:
+ Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabd");
+ case ARM::BI__builtin_neon_vabdl_v:
+ Int = usgn ? Intrinsic::arm_neon_vabdlu : Intrinsic::arm_neon_vabdls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabdl");
+ case ARM::BI__builtin_neon_vabs_v:
+ case ARM::BI__builtin_neon_vabsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, &Ty, 1),
+ Ops, "vabs");
+ case ARM::BI__builtin_neon_vaddhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, &Ty, 1),
+ Ops, "vaddhn");
+ case ARM::BI__builtin_neon_vaddl_v:
+ Int = usgn ? Intrinsic::arm_neon_vaddlu : Intrinsic::arm_neon_vaddls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vaddl");
+ case ARM::BI__builtin_neon_vaddw_v:
+    Int = usgn ? Intrinsic::arm_neon_vaddwu : Intrinsic::arm_neon_vaddws;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vaddw");
+ case ARM::BI__builtin_neon_vcale_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcage_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcage");
+ }
+ case ARM::BI__builtin_neon_vcaleq_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcageq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcage");
+ }
+ case ARM::BI__builtin_neon_vcalt_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcagt_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcagt");
+ }
+ case ARM::BI__builtin_neon_vcaltq_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcagtq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcagt");
+ }
+ case ARM::BI__builtin_neon_vcls_v:
+ case ARM::BI__builtin_neon_vclsq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcls");
+ }
+ case ARM::BI__builtin_neon_vclz_v:
+ case ARM::BI__builtin_neon_vclzq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vclz");
+ }
+ case ARM::BI__builtin_neon_vcnt_v:
+ case ARM::BI__builtin_neon_vcntq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, &Ty, 1);
+ return EmitNeonCall(F, Ops, "vcnt");
+ }
+ // FIXME: intrinsics for f16<->f32 convert missing from ARM target.
+ case ARM::BI__builtin_neon_vcvt_f32_v:
+ case ARM::BI__builtin_neon_vcvtq_f32_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ty = GetNeonType(VMContext, 4, quad);
+ return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+ : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_s32_v:
+ case ARM::BI__builtin_neon_vcvt_u32_v:
+ case ARM::BI__builtin_neon_vcvtq_s32_v:
+ case ARM::BI__builtin_neon_vcvtq_u32_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(VMContext, 4, quad));
+ return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
+ : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_n_f32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
+ const llvm::Type *Tys[2] = { GetNeonType(VMContext, 4, quad), Ty };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp;
+ Function *F = CGM.getIntrinsic(Int, Tys, 2);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case ARM::BI__builtin_neon_vcvt_n_s32_v:
+ case ARM::BI__builtin_neon_vcvt_n_u32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_s32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
+ const llvm::Type *Tys[2] = { Ty, GetNeonType(VMContext, 4, quad) };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu : Intrinsic::arm_neon_vcvtfp2fxs;
+ Function *F = CGM.getIntrinsic(Int, Tys, 2);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case ARM::BI__builtin_neon_vext_v:
+ case ARM::BI__builtin_neon_vextq_v: {
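+    // Concatenate-and-extract: e.g. (sketch) vext(a, b, 1) on <8 x i8> picks
+    // lanes 1..8 of the pair a:b with a single shuffle.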
+    ConstantInt *C = cast<ConstantInt>(Ops[2]);
+ int CV = C->getSExtValue();
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
+ }
+ case ARM::BI__builtin_neon_vget_lane_i8:
+ case ARM::BI__builtin_neon_vget_lane_i16:
+ case ARM::BI__builtin_neon_vget_lane_i32:
+ case ARM::BI__builtin_neon_vget_lane_i64:
+ case ARM::BI__builtin_neon_vget_lane_f32:
+ case ARM::BI__builtin_neon_vgetq_lane_i8:
+ case ARM::BI__builtin_neon_vgetq_lane_i16:
+ case ARM::BI__builtin_neon_vgetq_lane_i32:
+ case ARM::BI__builtin_neon_vgetq_lane_i64:
+ case ARM::BI__builtin_neon_vgetq_lane_f32:
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case ARM::BI__builtin_neon_vhadd_v:
+ case ARM::BI__builtin_neon_vhaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhadd");
+ case ARM::BI__builtin_neon_vhsub_v:
+ case ARM::BI__builtin_neon_vhsubq_v:
+ Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhsub");
+ case ARM::BI__builtin_neon_vld1_v:
+ case ARM::BI__builtin_neon_vld1q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, &Ty, 1),
+ Ops, "vld1");
+ case ARM::BI__builtin_neon_vld1_lane_v:
+ case ARM::BI__builtin_neon_vld1q_lane_v:
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[0] = Builder.CreateLoad(Ops[0]);
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
+ case ARM::BI__builtin_neon_vld1_dup_v:
+ case ARM::BI__builtin_neon_vld1q_dup_v: {
+ Value *V = UndefValue::get(Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[0] = Builder.CreateLoad(Ops[0]);
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
+ return EmitNeonSplat(Ops[0], CI);
+ }
+ case ARM::BI__builtin_neon_vld2_v:
+ case ARM::BI__builtin_neon_vld2q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, &Ty, 1);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld3_v:
+ case ARM::BI__builtin_neon_vld3q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, &Ty, 1);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld4_v:
+ case ARM::BI__builtin_neon_vld4q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, &Ty, 1);
+ Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld2_lane_v:
+ case ARM::BI__builtin_neon_vld2q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, &Ty, 1);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld2_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld3_lane_v:
+ case ARM::BI__builtin_neon_vld3q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, &Ty, 1);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+ Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld4_lane_v:
+ case ARM::BI__builtin_neon_vld4q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, &Ty, 1);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+ Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
+    Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld4_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ case ARM::BI__builtin_neon_vld4_dup_v: {
+ switch (BuiltinID) {
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2lane;
+ break;
+    case ARM::BI__builtin_neon_vld3_dup_v:
+      Int = Intrinsic::arm_neon_vld3lane;
+      break;
+    case ARM::BI__builtin_neon_vld4_dup_v:
+      Int = Intrinsic::arm_neon_vld4lane;
+      break;
+ default: assert(0 && "unknown vld_dup intrinsic?");
+ }
+ Function *F = CGM.getIntrinsic(Int, &Ty, 1);
+ const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
+
+ SmallVector<Value*, 6> Args;
+ Args.push_back(Ops[1]);
+ Args.append(STy->getNumElements(), UndefValue::get(Ty));
- case ARM::BI__builtin_thread_pointer: {
- Value *AtomF = CGM.getIntrinsic(Intrinsic::arm_thread_pointer, 0, 0);
- return Builder.CreateCall(AtomF);
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Args.push_back(CI);
+
+ Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup");
+ // splat lane 0 to all elts in each vector of the result.
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ Value *Val = Builder.CreateExtractValue(Ops[1], i);
+ Value *Elt = Builder.CreateBitCast(Val, Ty);
+ Elt = EmitNeonSplat(Elt, CI);
+ Elt = Builder.CreateBitCast(Elt, Val->getType());
+ Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
+ }
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vmax_v:
+ case ARM::BI__builtin_neon_vmaxq_v:
+ Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmax");
+ case ARM::BI__builtin_neon_vmin_v:
+ case ARM::BI__builtin_neon_vminq_v:
+ Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin");
+ case ARM::BI__builtin_neon_vmlal_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vmlal_v:
+ Int = usgn ? Intrinsic::arm_neon_vmlalu : Intrinsic::arm_neon_vmlals;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlal", splat);
+ case ARM::BI__builtin_neon_vmlsl_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vmlsl_v:
+ Int = usgn ? Intrinsic::arm_neon_vmlslu : Intrinsic::arm_neon_vmlsls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlsl", splat);
+ case ARM::BI__builtin_neon_vmovl_v:
+ Int = usgn ? Intrinsic::arm_neon_vmovlu : Intrinsic::arm_neon_vmovls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmovl");
+ case ARM::BI__builtin_neon_vmovn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmovn, &Ty, 1),
+ Ops, "vmovn");
+ case ARM::BI__builtin_neon_vmull_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vmull_v:
+ Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
+ Int = poly ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlal", splat);
+ case ARM::BI__builtin_neon_vpadal_v:
+ case ARM::BI__builtin_neon_vpadalq_v:
+ Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpadal");
+ case ARM::BI__builtin_neon_vpadd_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, &Ty, 1),
+ Ops, "vpadd");
+ case ARM::BI__builtin_neon_vpaddl_v:
+ case ARM::BI__builtin_neon_vpaddlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpaddl");
+ case ARM::BI__builtin_neon_vpmax_v:
+ Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmax");
+ case ARM::BI__builtin_neon_vpmin_v:
+ Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmin");
+ case ARM::BI__builtin_neon_vqabs_v:
+ case ARM::BI__builtin_neon_vqabsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, &Ty, 1),
+ Ops, "vqabs");
+ case ARM::BI__builtin_neon_vqadd_v:
+ case ARM::BI__builtin_neon_vqaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqadd");
+ case ARM::BI__builtin_neon_vqdmlal_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqdmlal_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, &Ty, 1),
+ Ops, "vqdmlal", splat);
+ case ARM::BI__builtin_neon_vqdmlsl_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqdmlsl_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, &Ty, 1),
+ Ops, "vqdmlsl", splat);
+ case ARM::BI__builtin_neon_vqdmulh_lane_v:
+ case ARM::BI__builtin_neon_vqdmulhq_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqdmulh_v:
+ case ARM::BI__builtin_neon_vqdmulhq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, &Ty, 1),
+ Ops, "vqdmulh", splat);
+ case ARM::BI__builtin_neon_vqdmull_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqdmull_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, &Ty, 1),
+ Ops, "vqdmull", splat);
+ case ARM::BI__builtin_neon_vqmovn_v:
+ Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqmovn");
+ case ARM::BI__builtin_neon_vqmovun_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, &Ty, 1),
+                        Ops, "vqmovun");
+ case ARM::BI__builtin_neon_vqneg_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, &Ty, 1),
+ Ops, "vqneg");
+ case ARM::BI__builtin_neon_vqrdmulh_lane_v:
+ case ARM::BI__builtin_neon_vqrdmulhq_lane_v:
+ splat = true;
+ case ARM::BI__builtin_neon_vqrdmulh_v:
+ case ARM::BI__builtin_neon_vqrdmulhq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, &Ty, 1),
+ Ops, "vqrdmulh", splat);
+ case ARM::BI__builtin_neon_vqrshl_v:
+ case ARM::BI__builtin_neon_vqrshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshl");
+ case ARM::BI__builtin_neon_vqrshrn_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n", false,
+ 1, true);
+ case ARM::BI__builtin_neon_vqrshrun_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, &Ty, 1),
+ Ops, "vqrshrun_n", false, 1, true);
+ case ARM::BI__builtin_neon_vqshl_v:
+ case ARM::BI__builtin_neon_vqshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl");
+ case ARM::BI__builtin_neon_vqshl_n_v:
+ case ARM::BI__builtin_neon_vqshlq_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n", false,
+ 1, false);
+ case ARM::BI__builtin_neon_vqshlu_n_v:
+ case ARM::BI__builtin_neon_vqshluq_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, &Ty, 1),
+                        Ops, "vqshlu_n", false, 1, false);
+ case ARM::BI__builtin_neon_vqshrn_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n", false,
+ 1, true);
+ case ARM::BI__builtin_neon_vqshrun_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, &Ty, 1),
+ Ops, "vqshrun_n", false, 1, true);
+ case ARM::BI__builtin_neon_vqsub_v:
+ case ARM::BI__builtin_neon_vqsubq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqsub");
+ case ARM::BI__builtin_neon_vraddhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, &Ty, 1),
+ Ops, "vraddhn");
+ case ARM::BI__builtin_neon_vrecpe_v:
+ case ARM::BI__builtin_neon_vrecpeq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, &Ty, 1),
+ Ops, "vrecpe");
+ case ARM::BI__builtin_neon_vrecps_v:
+ case ARM::BI__builtin_neon_vrecpsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, &Ty, 1),
+ Ops, "vrecps");
+ case ARM::BI__builtin_neon_vrhadd_v:
+ case ARM::BI__builtin_neon_vrhaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrhadd");
+ case ARM::BI__builtin_neon_vrshl_v:
+ case ARM::BI__builtin_neon_vrshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshl");
+ case ARM::BI__builtin_neon_vrshrn_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, &Ty, 1),
+ Ops, "vrshrn_n", false, 1, true);
+ case ARM::BI__builtin_neon_vrshr_n_v:
+ case ARM::BI__builtin_neon_vrshrq_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", false,
+ 1, true);
+ case ARM::BI__builtin_neon_vrsqrte_v:
+ case ARM::BI__builtin_neon_vrsqrteq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, &Ty, 1),
+ Ops, "vrsqrte");
+ case ARM::BI__builtin_neon_vrsqrts_v:
+ case ARM::BI__builtin_neon_vrsqrtsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, &Ty, 1),
+ Ops, "vrsqrts");
+ case ARM::BI__builtin_neon_vrsra_n_v:
+ case ARM::BI__builtin_neon_vrsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, &Ty, 1), Ops[1], Ops[2]);
+ return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
+ case ARM::BI__builtin_neon_vrsubhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, &Ty, 1),
+ Ops, "vrsubhn");
+ case ARM::BI__builtin_neon_vset_lane_i8:
+ case ARM::BI__builtin_neon_vset_lane_i16:
+ case ARM::BI__builtin_neon_vset_lane_i32:
+ case ARM::BI__builtin_neon_vset_lane_i64:
+ case ARM::BI__builtin_neon_vset_lane_f32:
+ case ARM::BI__builtin_neon_vsetq_lane_i8:
+ case ARM::BI__builtin_neon_vsetq_lane_i16:
+ case ARM::BI__builtin_neon_vsetq_lane_i32:
+ case ARM::BI__builtin_neon_vsetq_lane_i64:
+ case ARM::BI__builtin_neon_vsetq_lane_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+ case ARM::BI__builtin_neon_vshl_v:
+ case ARM::BI__builtin_neon_vshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshl");
+ case ARM::BI__builtin_neon_vshll_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", false, 1);
+ case ARM::BI__builtin_neon_vshl_n_v:
+ case ARM::BI__builtin_neon_vshlq_n_v:
+ Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
+                             "vshl_n");
+ case ARM::BI__builtin_neon_vshrn_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, &Ty, 1),
+ Ops, "vshrn_n", false, 1, true);
+ case ARM::BI__builtin_neon_vshr_n_v:
+ case ARM::BI__builtin_neon_vshrq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+ if (usgn)
+ return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
+ else
+ return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
+ case ARM::BI__builtin_neon_vsri_n_v:
+ case ARM::BI__builtin_neon_vsriq_n_v:
+ poly = true;
+ case ARM::BI__builtin_neon_vsli_n_v:
+ case ARM::BI__builtin_neon_vsliq_n_v:
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, poly);
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, &Ty, 1),
+ Ops, "vsli_n");
+ case ARM::BI__builtin_neon_vsra_n_v:
+ case ARM::BI__builtin_neon_vsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
+ if (usgn)
+ Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
+ else
+ Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
+ return Builder.CreateAdd(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vst1_v:
+ case ARM::BI__builtin_neon_vst1q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst1_lane_v:
+ case ARM::BI__builtin_neon_vst1q_lane_v:
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
+ case ARM::BI__builtin_neon_vst2_v:
+ case ARM::BI__builtin_neon_vst2q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst2_lane_v:
+ case ARM::BI__builtin_neon_vst2q_lane_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst3_v:
+ case ARM::BI__builtin_neon_vst3q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst3_lane_v:
+ case ARM::BI__builtin_neon_vst3q_lane_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst4_v:
+ case ARM::BI__builtin_neon_vst4q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst4_lane_v:
+ case ARM::BI__builtin_neon_vst4q_lane_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, &Ty, 1),
+ Ops, "");
+ case ARM::BI__builtin_neon_vsubhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, &Ty, 1),
+ Ops, "vsubhn");
+ case ARM::BI__builtin_neon_vsubl_v:
+ Int = usgn ? Intrinsic::arm_neon_vsublu : Intrinsic::arm_neon_vsubls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vsubl");
+ case ARM::BI__builtin_neon_vsubw_v:
+    Int = usgn ? Intrinsic::arm_neon_vsubwu : Intrinsic::arm_neon_vsubws;
+ return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vsubw");
+ case ARM::BI__builtin_neon_vtbl1_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
+ Ops, "vtbl1");
+ case ARM::BI__builtin_neon_vtbl2_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
+ Ops, "vtbl2");
+ case ARM::BI__builtin_neon_vtbl3_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
+ Ops, "vtbl3");
+ case ARM::BI__builtin_neon_vtbl4_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
+ Ops, "vtbl4");
+ case ARM::BI__builtin_neon_vtbx1_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
+ Ops, "vtbx1");
+ case ARM::BI__builtin_neon_vtbx2_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
+ Ops, "vtbx2");
+ case ARM::BI__builtin_neon_vtbx3_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
+ Ops, "vtbx3");
+ case ARM::BI__builtin_neon_vtbx4_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
+ Ops, "vtbx4");
+ case ARM::BI__builtin_neon_vtst_v:
+ case ARM::BI__builtin_neon_vtstq_v: {
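+    // Per-lane test: (a & b) != 0, sign-extended so each lane becomes an
+    // all-ones or all-zero mask (e.g. 0xff per byte lane).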
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
+ Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
+ ConstantAggregateZero::get(Ty));
+ return Builder.CreateSExt(Ops[0], Ty, "vtst");
+ }
+ case ARM::BI__builtin_neon_vtrn_v:
+ case ARM::BI__builtin_neon_vtrnq_v: {
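+    // vtrn/vuzp/vzip produce two result vectors, returned through the sret
+    // pointer in Ops[0]; each iteration shuffles one result and stores it at
+    // Ops[0][vi].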
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV;
+
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
+ Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
+ case ARM::BI__builtin_neon_vuzp_v:
+ case ARM::BI__builtin_neon_vuzpq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV;
+
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
+
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
+ case ARM::BI__builtin_neon_vzip_v:
+ case ARM::BI__builtin_neon_vzipq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV;
+
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(ConstantInt::get(Int32Ty, (i >> 1)));
+ Indices.push_back(ConstantInt::get(Int32Ty, (i >> 1)+e));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
}
}
}
@@ -900,9 +1657,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrldi128:
case X86::BI__builtin_ia32_psrlqi128:
case X86::BI__builtin_ia32_psrlwi128: {
- Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
- const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 2);
- llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
+ llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
Ops[1], Zero, "insert");
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
@@ -955,8 +1712,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrldi:
case X86::BI__builtin_ia32_psrlqi:
case X86::BI__builtin_ia32_psrlwi: {
- Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
- const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 1);
+ Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
const char *name = 0;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
@@ -1009,16 +1766,16 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_ldmxcsr: {
const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
- Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Builder.CreateBitCast(Tmp, PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
- Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
Builder.CreateBitCast(Tmp, PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
@@ -1033,16 +1790,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
- const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
- llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
- llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
    // cast val to v2i64
Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
// extract (0, 1)
unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
- llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index);
+ llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
// cast pointer to i64 & store
@@ -1055,11 +1811,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors less than 9 bytes,
// emit a shuffle instruction.
if (shiftVal <= 8) {
- const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
-
llvm::SmallVector<llvm::Constant*, 8> Indices;
for (unsigned i = 0; i != 8; ++i)
- Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
@@ -1069,8 +1823,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// than 16 bytes, emit a logical right shift of the destination.
if (shiftVal < 16) {
// MMX has these as 1 x i64 vectors for some odd optimization reasons.
- const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
- const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 1);
+ const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
@@ -1089,11 +1842,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors less than 17 bytes,
// emit a shuffle instruction.
if (shiftVal <= 16) {
- const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
-
llvm::SmallVector<llvm::Constant*, 16> Indices;
for (unsigned i = 0; i != 16; ++i)
- Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
@@ -1102,12 +1853,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors more than 16 but less
// than 32 bytes, emit a logical right shift of the destination.
if (shiftVal < 32) {
- const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
- const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
- const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+ const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
- Ops[1] = llvm::ConstantInt::get(IntTy, (shiftVal-16) * 8);
+ Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
@@ -1132,6 +1881,48 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
switch (BuiltinID) {
default: return 0;
+ // vec_ld, vec_lvsl, vec_lvsr
+ case PPC::BI__builtin_altivec_lvx:
+ case PPC::BI__builtin_altivec_lvxl:
+ case PPC::BI__builtin_altivec_lvebx:
+ case PPC::BI__builtin_altivec_lvehx:
+ case PPC::BI__builtin_altivec_lvewx:
+ case PPC::BI__builtin_altivec_lvsl:
+ case PPC::BI__builtin_altivec_lvsr:
+ {
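+    // These builtins take (offset, pointer); form the effective address with
+    // an i8* gep before selecting the matching AltiVec intrinsic (e.g.
+    // vec_ld lowers through lvx).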
+ Ops[1] = Builder.CreateBitCast(Ops[1], llvm::Type::getInt8PtrTy(VMContext));
+
+ Ops[0] = Builder.CreateGEP(Ops[1], Ops[0], "tmp");
+ Ops.pop_back();
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unsupported ld/lvsl/lvsr intrinsic!");
+ case PPC::BI__builtin_altivec_lvx:
+ ID = Intrinsic::ppc_altivec_lvx;
+ break;
+ case PPC::BI__builtin_altivec_lvxl:
+ ID = Intrinsic::ppc_altivec_lvxl;
+ break;
+ case PPC::BI__builtin_altivec_lvebx:
+ ID = Intrinsic::ppc_altivec_lvebx;
+ break;
+ case PPC::BI__builtin_altivec_lvehx:
+ ID = Intrinsic::ppc_altivec_lvehx;
+ break;
+ case PPC::BI__builtin_altivec_lvewx:
+ ID = Intrinsic::ppc_altivec_lvewx;
+ break;
+ case PPC::BI__builtin_altivec_lvsl:
+ ID = Intrinsic::ppc_altivec_lvsl;
+ break;
+ case PPC::BI__builtin_altivec_lvsr:
+ ID = Intrinsic::ppc_altivec_lvsr;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
+ }
+
// vec_st
case PPC::BI__builtin_altivec_stvx:
case PPC::BI__builtin_altivec_stvxl:
@@ -1140,12 +1931,11 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_stvewx:
{
Ops[2] = Builder.CreateBitCast(Ops[2], llvm::Type::getInt8PtrTy(VMContext));
- Ops[1] = !isa<Constant>(Ops[1]) || !cast<Constant>(Ops[1])->isNullValue()
- ? Builder.CreateGEP(Ops[2], Ops[1], "tmp") : Ops[2];
+ Ops[1] = Builder.CreateGEP(Ops[2], Ops[1], "tmp");
Ops.pop_back();
switch (BuiltinID) {
- default: assert(0 && "Unsupported vavg intrinsic!");
+ default: assert(0 && "Unsupported st intrinsic!");
case PPC::BI__builtin_altivec_stvx:
ID = Intrinsic::ppc_altivec_stvx;
break;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
index 5258779..7b7be9a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
@@ -23,7 +23,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtCXX.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace CodeGen;
@@ -97,8 +97,8 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
/// If we don't have a definition for the destructor yet, don't
/// emit. We can't emit aliases to declarations; that's just not
/// how aliases work.
- const CXXDestructorDecl *BaseD = UniqueBase->getDestructor(getContext());
- if (!BaseD->isImplicit() && !BaseD->getBody())
+ const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
+ if (!BaseD->isImplicit() && !BaseD->hasBody())
return true;
// If the base is at a non-zero offset, give up.
@@ -166,8 +166,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
new llvm::GlobalAlias(AliasType, Linkage, "", Aliasee, &getModule());
// Switch any previous uses to the alias.
- MangleBuffer MangledName;
- getMangledName(MangledName, AliasDecl);
+ llvm::StringRef MangledName = getMangledName(AliasDecl);
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
assert(Entry->isDeclaration() && "definition already exists for alias");
@@ -177,7 +176,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
Entry->replaceAllUsesWith(Alias);
Entry->eraseFromParent();
} else {
- Alias->setName(MangledName.getString());
+ Alias->setName(MangledName);
}
// Finally, set up the alias with its proper name and attributes.
@@ -218,8 +217,9 @@ void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
llvm::GlobalValue *
CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
CXXCtorType Type) {
- MangleBuffer Name;
- getMangledCXXCtorName(Name, D, Type);
+ GlobalDecl GD(D, Type);
+
+ llvm::StringRef Name = getMangledName(GD);
if (llvm::GlobalValue *V = GetGlobalValue(Name))
return V;
@@ -227,18 +227,7 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type),
FPT->isVariadic());
- return cast<llvm::Function>(
- GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
-}
-
-void CodeGenModule::getMangledName(MangleBuffer &Buffer, const BlockDecl *BD) {
- getMangleContext().mangleBlock(BD, Buffer.getBuffer());
-}
-
-void CodeGenModule::getMangledCXXCtorName(MangleBuffer &Name,
- const CXXConstructorDecl *D,
- CXXCtorType Type) {
- getMangleContext().mangleCXXCtor(D, Type, Name.getBuffer());
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD));
}
void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
@@ -286,22 +275,54 @@ void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
llvm::GlobalValue *
CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type) {
- MangleBuffer Name;
- getMangledCXXDtorName(Name, D, Type);
+ GlobalDecl GD(D, Type);
+
+ llvm::StringRef Name = getMangledName(GD);
if (llvm::GlobalValue *V = GetGlobalValue(Name))
return V;
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type), false);
- return cast<llvm::Function>(
- GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD));
}
-void CodeGenModule::getMangledCXXDtorName(MangleBuffer &Name,
- const CXXDestructorDecl *D,
- CXXDtorType Type) {
- getMangleContext().mangleCXXDtor(D, Type, Name.getBuffer());
+llvm::Constant *
+CodeGenModule::GetCXXMemberFunctionPointerValue(const CXXMethodDecl *MD) {
+ assert(MD->isInstance() && "Member function must not be static!");
+
+ MD = MD->getCanonicalDecl();
+
+ const llvm::Type *PtrDiffTy = Types.ConvertType(Context.getPointerDiffType());
+
+ // Get the function pointer (or index if this is a virtual function).
+ if (MD->isVirtual()) {
+ uint64_t Index = VTables.getMethodVTableIndex(MD);
+
+ // FIXME: We shouldn't use / 8 here.
+ uint64_t PointerWidthInBytes = Context.Target.getPointerWidth(0) / 8;
+
+ // Itanium C++ ABI 2.3:
+ // For a non-virtual function, this field is a simple function pointer.
+ // For a virtual function, it is 1 plus the virtual table offset
+ // (in bytes) of the function, represented as a ptrdiff_t.
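+  //   e.g. (sketch): vtable index 2 with 4-byte pointers encodes as 2*4+1 = 9.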
+ return llvm::ConstantInt::get(PtrDiffTy, (Index * PointerWidthInBytes) + 1);
+ }
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty;
+ // Check whether the function has a computable LLVM signature.
+ if (!CodeGenTypes::VerifyFuncTypeComplete(FPT)) {
+ // The function has a computable LLVM signature; use the correct type.
+ Ty = Types.GetFunctionType(Types.getFunctionInfo(MD), FPT->isVariadic());
+ } else {
+ // Use an arbitrary non-function type to tell GetAddrOfFunction that the
+ // function type is incomplete.
+ Ty = PtrDiffTy;
+ }
+
+ llvm::Constant *FuncPtr = GetAddrOfFunction(MD, Ty);
+ return llvm::ConstantExpr::getPtrToInt(FuncPtr, PtrDiffTy);
}
static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
index a7e1871..e1bbb0a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
@@ -31,6 +31,7 @@ public:
/// Creates an instance of a C++ ABI class.
CXXABI *CreateItaniumCXXABI(CodeGenModule &CGM);
+CXXABI *CreateMicrosoftCXXABI(CodeGenModule &CGM);
}
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
index 73cee3c..3d1e143 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -13,26 +13,22 @@
//===----------------------------------------------------------------------===//
#include "CGCall.h"
+#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
-
-#include "ABIInfo.h"
-
using namespace clang;
using namespace CodeGen;
/***/
-// FIXME: Use iterator and sidestep silly type array creation.
-
static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
switch (CC) {
default: return llvm::CallingConv::C;
@@ -65,29 +61,31 @@ static CanQualType GetReturnType(QualType RetTy) {
}
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
+CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
+ bool IsRecursive) {
return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
llvm::SmallVector<CanQualType, 16>(),
- FTNP->getExtInfo());
+ FTNP->getExtInfo(), IsRecursive);
}
/// \param Args - contains any initial parameters besides those
/// in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
llvm::SmallVectorImpl<CanQualType> &ArgTys,
- CanQual<FunctionProtoType> FTP) {
+ CanQual<FunctionProtoType> FTP,
+ bool IsRecursive = false) {
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
ArgTys.push_back(FTP->getArgType(i));
CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
- return CGT.getFunctionInfo(ResTy, ArgTys,
- FTP->getExtInfo());
+ return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
}
const CGFunctionInfo &
-CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
+CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
+ bool IsRecursive) {
llvm::SmallVector<CanQualType, 16> ArgTys;
- return ::getFunctionInfo(*this, ArgTys, FTP);
+ return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
}
static CallingConv getCallingConventionForDecl(const Decl *D) {
@@ -220,7 +218,8 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info) {
+ const FunctionType::ExtInfo &Info,
+ bool IsRecursive) {
#ifndef NDEBUG
for (llvm::SmallVectorImpl<CanQualType>::const_iterator
I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
@@ -240,35 +239,65 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
return *FI;
// Construct the function info.
- FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy, ArgTys);
+ FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy,
+ ArgTys.data(), ArgTys.size());
FunctionInfos.InsertNode(FI, InsertPos);
+ // ABI lowering wants to know what our preferred type for the argument is in
+  // various situations; pass it in.
+ llvm::SmallVector<const llvm::Type *, 8> PreferredArgTypes;
+ for (llvm::SmallVectorImpl<CanQualType>::const_iterator
+ I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I) {
+ // If this is being called from the guts of the ConvertType loop, make sure
+ // to call ConvertTypeRecursive so we don't get into issues with cyclic
+ // pointer type structures.
+ PreferredArgTypes.push_back(ConvertTypeRecursive(*I));
+ }
+
// Compute ABI information.
- getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());
-
+ getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext(),
+ PreferredArgTypes.data(), PreferredArgTypes.size());
+
+ // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
+ // types, resolve them now. These pointers may point to this function, which
+ // we *just* filled in the FunctionInfo for.
+ if (!IsRecursive && !PointersToResolve.empty()) {
+    // Use PATypeHolders so that our preferred types don't dangle under
+ // refinement.
+ llvm::SmallVector<llvm::PATypeHolder, 8> Handles(PreferredArgTypes.begin(),
+ PreferredArgTypes.end());
+ HandleLateResolvedPointers();
+ PreferredArgTypes.clear();
+ PreferredArgTypes.append(Handles.begin(), Handles.end());
+ }
+
return *FI;
}
CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
- bool _NoReturn,
- unsigned _RegParm,
+ bool _NoReturn, unsigned _RegParm,
CanQualType ResTy,
- const llvm::SmallVectorImpl<CanQualType> &ArgTys)
+ const CanQualType *ArgTys,
+ unsigned NumArgTys)
: CallingConvention(_CallingConvention),
EffectiveCallingConvention(_CallingConvention),
NoReturn(_NoReturn), RegParm(_RegParm)
{
- NumArgs = ArgTys.size();
- Args = new ArgInfo[1 + NumArgs];
+ NumArgs = NumArgTys;
+
+ // FIXME: Coallocate with the CGFunctionInfo object.
+ Args = new ArgInfo[1 + NumArgTys];
Args[0].type = ResTy;
- for (unsigned i = 0; i < NumArgs; ++i)
+ for (unsigned i = 0; i != NumArgTys; ++i)
Args[1 + i].type = ArgTys[i];
}
/***/
void CodeGenTypes::GetExpandedTypes(QualType Ty,
- std::vector<const llvm::Type*> &ArgTys) {
+ std::vector<const llvm::Type*> &ArgTys,
+ bool IsRecursive) {
const RecordType *RT = Ty->getAsStructureType();
assert(RT && "Can only expand structure types.");
const RecordDecl *RD = RT->getDecl();
@@ -283,9 +312,9 @@ void CodeGenTypes::GetExpandedTypes(QualType Ty,
QualType FT = FD->getType();
if (CodeGenFunction::hasAggregateLLVMType(FT)) {
- GetExpandedTypes(FT, ArgTys);
+ GetExpandedTypes(FT, ArgTys, IsRecursive);
} else {
- ArgTys.push_back(ConvertType(FT));
+ ArgTys.push_back(ConvertType(FT, IsRecursive));
}
}
}
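
For reference, the Expand rule implemented here recursively splits a record into one IR argument per scalar field. A hedged source-level illustration (exact IR types are target-dependent):

    // Illustrative only: assumes a typical 32-bit int / 64-bit double target.
    struct Inner { int a; float b; };
    struct Outer { Inner i; double d; };
    // GetExpandedTypes(Outer) conceptually yields: i32, float, double
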
@@ -345,6 +374,71 @@ CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
}
}
+/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
+/// are accessing some number of bytes, try to gep into the struct to get at
+/// its inner goodness. Dive as deep as possible without entering an element
+/// with an in-memory size smaller than DstSize.
+static llvm::Value *
+EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
+ const llvm::StructType *SrcSTy,
+ uint64_t DstSize, CodeGenFunction &CGF) {
+ // We can't dive into a zero-element struct.
+ if (SrcSTy->getNumElements() == 0) return SrcPtr;
+
+ const llvm::Type *FirstElt = SrcSTy->getElementType(0);
+
+ // If the first elt is at least as large as what we're looking for, or if the
+ // first element is the same size as the whole struct, we can enter it.
+ uint64_t FirstEltSize =
+ CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
+ if (FirstEltSize < DstSize &&
+ FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
+ return SrcPtr;
+
+ // GEP into the first element.
+ SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
+
+ // If the first element is a struct, recurse.
+ const llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
+ return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
+
+ return SrcPtr;
+}
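
A standalone sketch of the dive rule above, with plain byte sizes standing in for getTypeAllocSize() (assumption: only the first field is ever entered, as in the real code):

    #include <cstdint>
    #include <vector>

    struct ToyType {
      uint64_t Size;                         // in-memory size in bytes
      std::vector<const ToyType *> Elements; // empty => not a struct
    };

    // Descend into the first field while it still covers DstSize bytes or
    // spans the whole struct; stop once entering would lose needed bytes.
    const ToyType *enterForCoercedAccess(const ToyType *T, uint64_t DstSize) {
      while (!T->Elements.empty()) {
        const ToyType *First = T->Elements[0];
        if (First->Size < DstSize && First->Size < T->Size)
          break;
        T = First; // the "GEP into the first element" step
      }
      return T;
    }
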
+
+/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specified type Ty,
+/// where both are either integers or pointers. This does a truncation of the
+/// value if it is too large or a zero extension if it is too small.
+static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
+ const llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ if (Val->getType() == Ty)
+ return Val;
+
+ if (isa<llvm::PointerType>(Val->getType())) {
+ // If this is Pointer->Pointer avoid conversion to and from int.
+ if (isa<llvm::PointerType>(Ty))
+ return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
+
+ // Convert the pointer to an integer so we can play with its width.
+ Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
+ }
+
+ const llvm::Type *DestIntTy = Ty;
+ if (isa<llvm::PointerType>(DestIntTy))
+ DestIntTy = CGF.IntPtrTy;
+
+ if (Val->getType() != DestIntTy)
+ Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
+
+ if (isa<llvm::PointerType>(Ty))
+ Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
+ return Val;
+}
+
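
The rule above reduces every int/pointer pair to an integer resize through a pointer-width integer. A host-side sketch using bit widths in place of llvm::Type (illustrative; zero extension only, matching the unsigned CreateIntCast call):

    #include <cstdint>

    // Model Val as held in a uint64_t; truncate by masking, zero-extend free.
    uint64_t coerceToWidth(uint64_t Val, unsigned DstBits) {
      if (DstBits < 64)
        Val &= (uint64_t(1) << DstBits) - 1; // truncation to DstBits
      return Val;                            // zext is a no-op here
    }
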
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
@@ -356,9 +450,28 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
CodeGenFunction &CGF) {
const llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
- uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+
+ // If SrcTy and Ty are the same, just do a load.
+ if (SrcTy == Ty)
+ return CGF.Builder.CreateLoad(SrcPtr);
+
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
+
+ if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
+ SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
+ SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ }
+
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+ // If the source and destination are integer or pointer types, just do an
+ // extension or truncation to the desired type.
+ if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
+ (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
+ return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
+ }
+
// If load is legal, just bitcast the src pointer.
if (SrcSize >= DstSize) {
// Generally SrcSize is never greater than DstSize, since this means we are
@@ -373,18 +486,18 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
// FIXME: Use better alignment / avoid requiring aligned load.
Load->setAlignment(1);
return Load;
- } else {
- // Otherwise do coercion through memory. This is stupid, but
- // simple.
- llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
- llvm::Value *Casted =
- CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
- llvm::StoreInst *Store =
- CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
- // FIXME: Use better alignment / avoid requiring aligned store.
- Store->setAlignment(1);
- return CGF.Builder.CreateLoad(Tmp);
}
+
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
+ llvm::StoreInst *Store =
+ CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ Store->setAlignment(1);
+ return CGF.Builder.CreateLoad(Tmp);
}
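
The through-memory fallback at the end amounts to a typed spill-and-reload. A hedged host analogue, with sizeof standing in for getTypeAllocSize:

    #include <cstring>

    template <typename Dst, typename Src>
    Dst coerceThroughMemory(const Src &S) {
      Dst Tmp{};                            // CreateTempAlloca(Ty)
      std::memcpy(&Tmp, &S,                 // store src bytes over the temp
                  sizeof(S) < sizeof(Dst) ? sizeof(S) : sizeof(Dst));
      return Tmp;                           // CreateLoad(Tmp)
    }
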
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
@@ -399,8 +512,27 @@ static void CreateCoercedStore(llvm::Value *Src,
const llvm::Type *SrcTy = Src->getType();
const llvm::Type *DstTy =
cast<llvm::PointerType>(DstPtr->getType())->getElementType();
-
+ if (SrcTy == DstTy) {
+ CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
+ return;
+ }
+
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+
+ if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
+ DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
+ DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+ }
+
+ // If the source and destination are integer or pointer types, just do an
+ // extension or truncation to the desired type.
+ if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
+ (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
+ Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
+ CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
+ return;
+ }
+
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
// If store is legal, just bitcast the src pointer.
@@ -432,10 +564,28 @@ static void CreateCoercedStore(llvm::Value *Src,
/***/
-bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
+bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
return FI.getReturnInfo().isIndirect();
}
+bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
+ if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ default:
+ return false;
+ case BuiltinType::Float:
+ return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
+ case BuiltinType::Double:
+ return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
+ case BuiltinType::LongDouble:
+ return getContext().Target.useObjCFPRetForRealType(
+ TargetInfo::LongDouble);
+ }
+ }
+
+ return false;
+}
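
ReturnTypeUsesFPRet exists for Objective-C message sends: on targets where useObjCFPRetForRealType() is true (classically, x87 floating-point returns on 32-bit x86), the caller must select the _fpret messenger variant. A hedged sketch of the intended dispatch (the real call sites live in the ObjC runtime codegen):

    enum class Messenger { objc_msgSend, objc_msgSend_fpret };

    Messenger pickMessenger(bool ReturnUsesFPRet) {
      return ReturnUsesFPRet ? Messenger::objc_msgSend_fpret
                             : Messenger::objc_msgSend;
    }
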
+
const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
const CGFunctionInfo &FI = getFunctionInfo(GD);
@@ -445,11 +595,12 @@ const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
Variadic = FPT->isVariadic();
- return GetFunctionType(FI, Variadic);
+ return GetFunctionType(FI, Variadic, false);
}
const llvm::FunctionType *
-CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
+ bool IsRecursive) {
std::vector<const llvm::Type*> ArgTys;
const llvm::Type *ResultType = 0;
@@ -462,13 +613,13 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
- ResultType = ConvertType(RetTy);
+ ResultType = ConvertType(RetTy, IsRecursive);
break;
case ABIArgInfo::Indirect: {
assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
ResultType = llvm::Type::getVoidTy(getLLVMContext());
- const llvm::Type *STy = ConvertType(RetTy);
+ const llvm::Type *STy = ConvertType(RetTy, IsRecursive);
ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
break;
}
@@ -490,24 +641,34 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
case ABIArgInfo::Ignore:
break;
- case ABIArgInfo::Coerce:
- ArgTys.push_back(AI.getCoerceToType());
+ case ABIArgInfo::Coerce: {
+ // If the coerce-to type is a first class aggregate, flatten it. Either
+ // way is semantically identical, but fast-isel and the optimizer
+ // generally like scalar values better than FCAs.
+ const llvm::Type *ArgTy = AI.getCoerceToType();
+ if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) {
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
+ ArgTys.push_back(STy->getElementType(i));
+ } else {
+ ArgTys.push_back(ArgTy);
+ }
break;
+ }
case ABIArgInfo::Indirect: {
// indirect arguments are always on the stack, which is addr space #0.
- const llvm::Type *LTy = ConvertTypeForMem(it->type);
+ const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
break;
}
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
- ArgTys.push_back(ConvertType(it->type));
+ ArgTys.push_back(ConvertType(it->type, IsRecursive));
break;
case ABIArgInfo::Expand:
- GetExpandedTypes(it->type, ArgTys);
+ GetExpandedTypes(it->type, ArgTys, IsRecursive);
break;
}
}
@@ -515,28 +676,12 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
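
Concretely, with the flattening above a small aggregate whose coerce-to type is a two-element struct turns into two scalar parameters rather than one first-class aggregate (illustrative; the coerce-to type itself is chosen by the target ABI):

    struct P { double x, y; };
    void takeP(P p);
    // Coerce-to type { double, double } now lowers to:
    //   void @takeP(double, double)
    // instead of:
    //   void @takeP({ double, double })
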
-static bool HasIncompleteReturnTypeOrArgumentTypes(const FunctionProtoType *T) {
- if (const TagType *TT = T->getResultType()->getAs<TagType>()) {
- if (!TT->getDecl()->isDefinition())
- return true;
- }
-
- for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
- if (const TagType *TT = T->getArgType(i)->getAs<TagType>()) {
- if (!TT->getDecl()->isDefinition())
- return true;
- }
- }
-
- return false;
-}
-
const llvm::Type *
CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- if (!HasIncompleteReturnTypeOrArgumentTypes(FPT))
- return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());
+ if (!VerifyFuncTypeComplete(FPT))
+ return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic(), false);
return llvm::OpaqueType::get(getLLVMContext());
}
@@ -557,6 +702,12 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
if (TargetDecl) {
if (TargetDecl->hasAttr<NoThrowAttr>())
FuncAttrs |= llvm::Attribute::NoUnwind;
+ else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
+ const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
+ if (FPT && FPT->hasEmptyExceptionSpec())
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ }
+
if (TargetDecl->hasAttr<NoReturnAttr>())
FuncAttrs |= llvm::Attribute::NoReturn;
if (TargetDecl->hasAttr<ConstAttr>())
@@ -626,7 +777,12 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
switch (AI.getKind()) {
case ABIArgInfo::Coerce:
- break;
+ if (const llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(AI.getCoerceToType()))
+ Index += STy->getNumElements();
+ else
+ ++Index;
+ continue; // Skip index increment.
case ABIArgInfo::Indirect:
if (AI.getIndirectByVal())
@@ -666,7 +822,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
// FIXME: This is rather inefficient. Do we ever actually need to do
// anything here? The result should be just reconstructed on the other
// side, so extension should be a non-issue.
- getTypes().GetExpandedTypes(ParamType, Tys);
+ getTypes().GetExpandedTypes(ParamType, Tys, false);
Index += Tys.size();
continue;
}
@@ -687,7 +843,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// initialize the return value. TODO: it might be nice to have
// a more general mechanism for this that didn't require synthesized
// return statements.
- if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
if (FD->hasImplicitReturnZero()) {
QualType RetTy = FD->getResultType().getUnqualifiedType();
const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
@@ -703,7 +859,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Function::arg_iterator AI = Fn->arg_begin();
// Name the struct return argument.
- if (CGM.ReturnTypeUsesSret(FI)) {
+ if (CGM.ReturnTypeUsesSRet(FI)) {
AI->setName("agg.result");
++AI;
}
@@ -719,7 +875,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
switch (ArgI.getKind()) {
case ABIArgInfo::Indirect: {
- llvm::Value* V = AI;
+ llvm::Value *V = AI;
if (hasAggregateLLVMType(Ty)) {
// Do nothing, aggregates and complex variables are accessed by
// reference.
@@ -739,7 +895,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
assert(AI != Fn->arg_end() && "Argument mismatch!");
- llvm::Value* V = AI;
+ llvm::Value *V = AI;
if (hasAggregateLLVMType(Ty)) {
// Create a temporary alloca to hold the argument; the rest of
// codegen expects to access aggregates & complex values by
@@ -789,12 +945,35 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
continue;
case ABIArgInfo::Coerce: {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
// FIXME: This is very wasteful; EmitParmDecl is just going to drop the
// result in a new alloca anyway, so we could just store into that
// directly if we broke the abstraction down more.
- llvm::Value *V = CreateMemTemp(Ty, "coerce");
- CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this);
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
+ Alloca->setAlignment(getContext().getDeclAlign(Arg).getQuantity());
+ llvm::Value *V = Alloca;
+
+ // If the coerce-to type is a first class aggregate, we flatten it and
+ // pass the elements. Either way is semantically identical, but fast-isel
+ // and the optimizer generally like scalar values better than FCAs.
+ if (const llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
+ llvm::Value *Ptr = V;
+ Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
+ } else {
+ // Simple case, just do a coerced store of the argument into the alloca.
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce");
+ CreateCoercedStore(AI++, V, /*DestIsVolatile=*/false, *this);
+ }
+
// Match to what EmitParmDecl is expecting for this type.
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
V = EmitLoadOfScalar(V, false, Ty);
@@ -805,7 +984,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
}
}
EmitParmDecl(*Arg, V);
- break;
+ continue; // Skip ++AI increment, already done.
}
}
@@ -814,52 +993,73 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(AI == Fn->arg_end() && "Argument mismatch!");
}
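
On the receiving side, the flattened pieces are stored back into one alloca through a bitcast pointer; the .coerce suffixes above name the split IR arguments. A sketch of the prolog IR for a { i64, i64 } coercion (illustrative only):

    // define void @f(i64 %p.coerce0, i64 %p.coerce1) {
    //   %coerce = alloca %struct.P              ; CreateMemTemp(Ty, "coerce")
    //   %ptr = bitcast %struct.P* %coerce to { i64, i64 }*
    //   %elt0 = getelementptr { i64, i64 }* %ptr, i32 0, i32 0
    //   store i64 %p.coerce0, i64* %elt0
    //   %elt1 = getelementptr { i64, i64 }* %ptr, i32 0, i32 1
    //   store i64 %p.coerce1, i64* %elt1
    //   ...
    // }
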
-void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
- llvm::Value *ReturnValue) {
- llvm::Value *RV = 0;
-
+void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
// Functions with no result always return void.
- if (ReturnValue) {
- QualType RetTy = FI.getReturnType();
- const ABIArgInfo &RetAI = FI.getReturnInfo();
-
- switch (RetAI.getKind()) {
- case ABIArgInfo::Indirect:
- if (RetTy->isAnyComplexType()) {
- ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
- StoreComplexToAddr(RT, CurFn->arg_begin(), false);
- } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
- // Do nothing; aggregrates get evaluated directly into the destination.
- } else {
- EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
- false, RetTy);
- }
- break;
-
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- // The internal return value temp always will have
- // pointer-to-return-type type.
- RV = Builder.CreateLoad(ReturnValue);
- break;
+ if (ReturnValue == 0) {
+ Builder.CreateRetVoid();
+ return;
+ }
- case ABIArgInfo::Ignore:
- break;
+ llvm::MDNode *RetDbgInfo = 0;
+ llvm::Value *RV = 0;
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
- case ABIArgInfo::Coerce:
- RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
- break;
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect:
+ if (RetTy->isAnyComplexType()) {
+ ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
+ StoreComplexToAddr(RT, CurFn->arg_begin(), false);
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ // Do nothing; aggregates get evaluated directly into the destination.
+ } else {
+ EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
+ false, RetTy);
+ }
+ break;
- case ABIArgInfo::Expand:
- assert(0 && "Invalid ABI kind for return argument");
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // The internal return value temp will always have pointer-to-return-type
+ // type; just do a load.
+
+ // If the instruction right before the insertion point is a store to the
+ // return value, we can elide the load, zap the store, and usually zap the
+ // alloca.
+ llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
+ llvm::StoreInst *SI = 0;
+ if (InsertBB->empty() ||
+ !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
+ SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
+ RV = Builder.CreateLoad(ReturnValue);
+ } else {
+ // Get the stored value and nuke the now-dead store.
+ RetDbgInfo = SI->getDbgMetadata();
+ RV = SI->getValueOperand();
+ SI->eraseFromParent();
+
+ // If that was the only use of the return value, nuke it as well now.
+ if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
+ cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
+ ReturnValue = 0;
+ }
}
+ break;
}
+ case ABIArgInfo::Ignore:
+ break;
- if (RV) {
- Builder.CreateRet(RV);
- } else {
- Builder.CreateRetVoid();
+ case ABIArgInfo::Coerce:
+ RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
+ break;
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
}
+
+ llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
+ if (RetDbgInfo)
+ Ret->setDbgMetadata(RetDbgInfo);
}
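
A toy model of the new return peephole: if the current block ends in a non-volatile store to the return slot, return the stored value directly and delete the store (and the alloca too, once unused). Plain structs stand in for LLVM IR here:

    #include <vector>

    struct ToyStore { int *Ptr; int Val; bool Volatile; };

    int emitReturnValue(std::vector<ToyStore> &Block, int *ReturnSlot) {
      if (!Block.empty()) {
        const ToyStore &SI = Block.back();
        if (SI.Ptr == ReturnSlot && !SI.Volatile) {
          int RV = SI.Val;  // take the value straight from the store
          Block.pop_back(); // "SI->eraseFromParent()"
          return RV;
        }
      }
      return *ReturnSlot;   // otherwise fall back to an explicit load
    }
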
RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
@@ -894,11 +1094,29 @@ RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
if (ArgType->isReferenceType())
- return EmitReferenceBindingToExpr(E);
+ return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
return EmitAnyExprToTemp(E);
}
+/// Emits a call or invoke instruction to the given function, depending
+/// on the current state of the EH stack.
+llvm::CallSite
+CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
+ llvm::Value * const *ArgBegin,
+ llvm::Value * const *ArgEnd,
+ const llvm::Twine &Name) {
+ llvm::BasicBlock *InvokeDest = getInvokeDest();
+ if (!InvokeDest)
+ return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name);
+
+ llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
+ llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
+ ArgBegin, ArgEnd, Name);
+ EmitBlock(ContBB);
+ return Invoke;
+}
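
Callers of the new helper no longer branch on getInvokeDest() themselves. A hedged usage sketch (the callee and operand names are hypothetical; CallSite works uniformly for call and invoke):

    // llvm::Value *Args[] = { Dst, Src, Size };
    // llvm::CallSite CS =
    //     CGF.EmitCallOrInvoke(Callee, Args, Args + 3, "call");
    // CS.setCallingConv(llvm::CallingConv::C);
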
+
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
@@ -916,7 +1134,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
- if (CGM.ReturnTypeUsesSret(CallInfo)) {
+ if (CGM.ReturnTypeUsesSRet(CallInfo)) {
llvm::Value *Value = ReturnValue.getValue();
if (!Value)
Value = CreateMemTemp(RetTy);
@@ -973,8 +1191,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
} else
SrcPtr = RV.getAggregateAddr();
- Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
- *this));
+
+ // If the coerce-to type is a first class aggregate, we flatten it and
+ // pass the elements. Either way is semantically identical, but fast-isel
+ // and the optimizer generally like scalar values better than FCAs.
+ if (const llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(STy));
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
+ Args.push_back(Builder.CreateLoad(EltPtr));
+ }
+ } else {
+ // In the simple case, just pass the coerced loaded value.
+ Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
+ *this));
+ }
+
break;
}
@@ -1014,15 +1248,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
- llvm::BasicBlock *InvokeDest = getInvokeDest();
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
AttributeList.end());
+ llvm::BasicBlock *InvokeDest = 0;
+ if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
+ InvokeDest = getInvokeDest();
+
llvm::CallSite CS;
- if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
+ if (!InvokeDest) {
CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
@@ -1030,9 +1267,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Args.data(), Args.data()+Args.size());
EmitBlock(Cont);
}
- if (callOrInvoke) {
+ if (callOrInvoke)
*callOrInvoke = CS.getInstruction();
- }
CS.setAttributes(Attrs);
CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
index 31c8aac..41e707a 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
@@ -83,11 +83,9 @@ namespace CodeGen {
typedef const ArgInfo *const_arg_iterator;
typedef ArgInfo *arg_iterator;
- CGFunctionInfo(unsigned CallingConvention,
- bool NoReturn,
- unsigned RegParm,
- CanQualType ResTy,
- const llvm::SmallVectorImpl<CanQualType> &ArgTys);
+ CGFunctionInfo(unsigned CallingConvention, bool NoReturn,
+ unsigned RegParm, CanQualType ResTy,
+ const CanQualType *ArgTys, unsigned NumArgTys);
~CGFunctionInfo() { delete[] Args; }
const_arg_iterator arg_begin() const { return Args + 1; }
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
index bebea54..c50fe90 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
@@ -340,9 +340,9 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
if (CGF.Exceptions && !BaseClassDecl->hasTrivialDestructor()) {
// FIXME: Is this OK for C++0x delegating constructors?
- CodeGenFunction::EHCleanupBlock Cleanup(CGF);
+ CodeGenFunction::CleanupBlock Cleanup(CGF, EHCleanup);
- CXXDestructorDecl *DD = BaseClassDecl->getDestructor(CGF.getContext());
+ CXXDestructorDecl *DD = BaseClassDecl->getDestructor();
CGF.EmitCXXDestructorCall(DD, Dtor_Base, isBaseVirtual, V);
}
}
@@ -354,7 +354,7 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
QualType T,
unsigned Index) {
if (Index == MemberInit->getNumArrayIndices()) {
- CodeGenFunction::CleanupScope Cleanups(CGF);
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
llvm::Value *Dest = LHS.getAddress();
if (ArrayIndexVar) {
@@ -410,7 +410,7 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
{
- CodeGenFunction::CleanupScope Cleanups(CGF);
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
// Inside the loop body recurse to emit the inner loop or, eventually, the
// constructor call.
@@ -461,13 +461,12 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// was implicitly generated, we shouldn't be zeroing memory.
RValue RHS;
if (FieldType->isReferenceType()) {
- RHS = CGF.EmitReferenceBindingToExpr(MemberInit->getInit(),
- /*IsInitializer=*/true);
+ RHS = CGF.EmitReferenceBindingToExpr(MemberInit->getInit(), Field);
CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
} else if (FieldType->isArrayType() && !MemberInit->getInit()) {
CGF.EmitNullInitialization(LHS.getAddress(), Field->getType());
} else if (!CGF.hasAggregateLLVMType(Field->getType())) {
- RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit(), true));
+ RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit()));
CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
} else if (MemberInit->getInit()->getType()->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(),
@@ -535,12 +534,12 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
if (!RD->hasTrivialDestructor()) {
// FIXME: Is this OK for C++0x delegating constructors?
- CodeGenFunction::EHCleanupBlock Cleanup(CGF);
+ CodeGenFunction::CleanupBlock Cleanup(CGF, EHCleanup);
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
- CXXDestructorDecl *DD = RD->getDestructor(CGF.getContext());
+ CXXDestructorDecl *DD = RD->getDestructor();
CGF.EmitCXXDestructorCall(DD, Dtor_Complete, /*ForVirtualBase=*/false,
LHS.getAddress());
}
@@ -607,13 +606,11 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// Enter the function-try-block before the constructor prologue if
// applicable.
- CXXTryStmtInfo TryInfo;
bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
-
if (IsTryBody)
- TryInfo = EnterCXXTryStmt(*cast<CXXTryStmt>(Body));
+ EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
- unsigned CleanupStackSize = CleanupEntries.size();
+ EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
// Emit the constructor prologue, i.e. the base and member
// initializers.
@@ -629,10 +626,10 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// initializers, which includes (along the exceptional path) the
// destructors for those members and bases that were fully
// constructed.
- EmitCleanupBlocks(CleanupStackSize);
+ PopCleanupBlocks(CleanupDepth);
if (IsTryBody)
- ExitCXXTryStmt(*cast<CXXTryStmt>(Body), TryInfo);
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}
/// EmitCtorPrologue - This routine generates necessary code to initialize
@@ -649,9 +646,6 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
B != E; ++B) {
CXXBaseOrMemberInitializer *Member = (*B);
- assert(LiveTemporaries.empty() &&
- "Should not have any live temporaries at initializer start!");
-
if (Member->isBaseInitializer())
EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
else
@@ -660,12 +654,8 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
InitializeVTablePointers(ClassDecl);
- for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I) {
- assert(LiveTemporaries.empty() &&
- "Should not have any live temporaries at initializer start!");
-
+ for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I)
EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args);
- }
}
/// EmitDestructorBody - Emits the body of the current destructor.
@@ -679,14 +669,33 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// anything else --- unless we're in a deleting destructor, in which
// case we're just going to call the complete destructor and then
// call operator delete() on the way out.
- CXXTryStmtInfo TryInfo;
bool isTryBody = (DtorType != Dtor_Deleting &&
Body && isa<CXXTryStmt>(Body));
if (isTryBody)
- TryInfo = EnterCXXTryStmt(*cast<CXXTryStmt>(Body));
+ EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
- llvm::BasicBlock *DtorEpilogue = createBasicBlock("dtor.epilogue");
- PushCleanupBlock(DtorEpilogue);
+ // Emit the destructor epilogue now. If this is a complete
+ // destructor with a function-try-block, perform the base epilogue
+ // as well.
+ //
+ // FIXME: This isn't really right, because an exception in the
+ // non-EH epilogue should jump to the appropriate place in the
+ // EH epilogue.
+ {
+ CleanupBlock Cleanup(*this, NormalCleanup);
+
+ if (isTryBody && DtorType == Dtor_Complete)
+ EmitDtorEpilogue(Dtor, Dtor_Base);
+ EmitDtorEpilogue(Dtor, DtorType);
+
+ if (Exceptions) {
+ Cleanup.beginEHCleanup();
+
+ if (isTryBody && DtorType == Dtor_Complete)
+ EmitDtorEpilogue(Dtor, Dtor_Base);
+ EmitDtorEpilogue(Dtor, DtorType);
+ }
+ }
bool SkipBody = false; // should get jump-threaded
@@ -725,27 +734,12 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// nothing to do besides what's in the epilogue
}
- // Jump to the cleanup block.
- CleanupBlockInfo Info = PopCleanupBlock();
- assert(Info.CleanupBlock == DtorEpilogue && "Block mismatch!");
- EmitBlock(DtorEpilogue);
-
- // Emit the destructor epilogue now. If this is a complete
- // destructor with a function-try-block, perform the base epilogue
- // as well.
- if (isTryBody && DtorType == Dtor_Complete)
- EmitDtorEpilogue(Dtor, Dtor_Base);
- EmitDtorEpilogue(Dtor, DtorType);
-
- // Link up the cleanup information.
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
+ // We're done with the epilogue cleanup.
+ PopCleanupBlock();
// Exit the try if applicable.
if (isTryBody)
- ExitCXXTryStmt(*cast<CXXTryStmt>(Body), TryInfo);
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}
/// EmitDtorEpilogue - Emit all code that comes at the end of class's
@@ -784,7 +778,7 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
// Ignore trivial destructors.
if (BaseClassDecl->hasTrivialDestructor())
continue;
- const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
+ const CXXDestructorDecl *D = BaseClassDecl->getDestructor();
llvm::Value *V =
GetAddressOfDirectBaseInCompleteClass(LoadCXXThis(),
ClassDecl, BaseClassDecl,
@@ -839,10 +833,10 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
BasePtr = llvm::PointerType::getUnqual(BasePtr);
llvm::Value *BaseAddrPtr =
Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(),
Array, BaseAddrPtr);
} else
- EmitCXXDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ EmitCXXDestructorCall(FieldClassDecl->getDestructor(),
Dtor_Complete, /*ForVirtualBase=*/false,
LHS.getAddress());
}
@@ -863,7 +857,7 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
if (BaseClassDecl->hasTrivialDestructor())
continue;
- const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
+ const CXXDestructorDecl *D = BaseClassDecl->getDestructor();
llvm::Value *V =
GetAddressOfDirectBaseInCompleteClass(LoadCXXThis(), ClassDecl,
BaseClassDecl,
@@ -940,7 +934,7 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
// Keep track of the current number of live temporaries.
{
- CXXTemporariesCleanupScope Scope(*this);
+ RunCleanupsScope Scope(*this);
EmitCXXConstructorCall(D, Ctor_Complete, /*ForVirtualBase=*/false, Address,
ArgBeg, ArgEnd);
@@ -1033,51 +1027,6 @@ CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
EmitBlock(AfterFor, true);
}
-/// GenerateCXXAggrDestructorHelper - Generates a helper function which when
-/// invoked, calls the default destructor on array elements in reverse order of
-/// construction.
-llvm::Constant *
-CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This) {
- FunctionArgList Args;
- ImplicitParamDecl *Dst =
- ImplicitParamDecl::Create(getContext(), 0,
- SourceLocation(), 0,
- getContext().getPointerType(getContext().VoidTy));
- Args.push_back(std::make_pair(Dst, Dst->getType()));
-
- llvm::SmallString<16> Name;
- llvm::raw_svector_ostream(Name) << "__tcf_" << (++UniqueAggrDestructorCount);
- QualType R = getContext().VoidTy;
- const CGFunctionInfo &FI
- = CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
- const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
- llvm::Function *Fn =
- llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- Name.str(),
- &CGM.getModule());
- IdentifierInfo *II = &CGM.getContext().Idents.get(Name.str());
- FunctionDecl *FD = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R, 0,
- FunctionDecl::Static,
- FunctionDecl::None,
- false, true);
- StartFunction(FD, R, Fn, Args, SourceLocation());
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr);
- EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
- FinishFunction();
- llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),
- 0);
- llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
- return m;
-}
-
-
void
CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CXXCtorType Type, bool ForVirtualBase,
@@ -1160,6 +1109,23 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
}
+void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
+ CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
+ if (!ClassDecl) return;
+ if (ClassDecl->hasTrivialDestructor()) return;
+
+ const CXXDestructorDecl *D = ClassDecl->getDestructor();
+
+ CleanupBlock Scope(*this, NormalCleanup);
+
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Addr);
+
+ if (Exceptions) {
+ Scope.beginEHCleanup();
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Addr);
+ }
+}
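
The source-level situation the new PushDestructorCleanup serves, as a hedged example: a non-trivially-destructible local whose destructor must run on both the normal exit and the unwinding path.

    struct Guard { Guard() {} ~Guard() {} };

    void f(bool fail) {
      Guard g;     // conceptually: PushDestructorCleanup(type of g, &g)
      if (fail)
        throw 0;   // the EH branch of the cleanup runs ~Guard()
    }              // the normal branch of the cleanup runs ~Guard()
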
+
llvm::Value *
CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
const CXXRecordDecl *ClassDecl,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
index c9bcb1b..4e15895 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -21,7 +21,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/Version.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
@@ -536,6 +536,19 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
Context.getPointerType(Context.getTagDeclType(Method->getParent()));
llvm::DIType ThisPtrType =
DebugFactory.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
+
+ unsigned Quals = Method->getTypeQualifiers();
+ if (Quals & Qualifiers::Const)
+ ThisPtrType =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_const_type,
+ Unit, "", Unit,
+ 0, 0, 0, 0, 0, ThisPtrType);
+ if (Quals & Qualifiers::Volatile)
+ ThisPtrType =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_volatile_type,
+ Unit, "", Unit,
+ 0, 0, 0, 0, 0, ThisPtrType);
+
TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
Elts.push_back(ThisPtrType);
@@ -567,9 +580,9 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
// Since a single ctor/dtor corresponds to multiple functions, it doesn't
// make sense to give a single ctor/dtor a linkage name.
- MangleBuffer MethodLinkageName;
+ llvm::StringRef MethodLinkageName;
if (!IsCtorOrDtor)
- CGM.getMangledName(MethodLinkageName, Method);
+ MethodLinkageName = CGM.getMangledName(Method);
// Get the location for the method.
llvm::DIFile MethodDefUnit = getOrCreateFile(Method->getLocation());
@@ -598,7 +611,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
MethodLinkageName,
MethodDefUnit, MethodLine,
MethodTy, /*isLocalToUnit=*/false,
- Method->isThisDeclarationADefinition(),
+ /* isDefinition=*/ false,
Virtuality, VIndex, ContainingType);
// Don't cache ctors or dtors since we have to emit multiple functions for
@@ -758,22 +771,30 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
// its members. Finally, we create a descriptor for the complete type (which
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
+ llvm::DIDescriptor FDContext =
+ getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
+
+ // If this is just a forward declaration, construct an appropriately
+ // marked node and just return it.
+ if (!RD->getDefinition()) {
+ llvm::DICompositeType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag, FDContext, RD->getName(),
+ DefUnit, Line, 0, 0, 0,
+ llvm::DIType::FlagFwdDecl,
+ llvm::DIType(), llvm::DIArray());
+
+ return FwdDecl;
+ }
// RD->getName() is not unique. However, the debug info descriptors
// are uniqued, so use the type name to ensure uniqueness.
llvm::SmallString<128> FwdDeclName;
llvm::raw_svector_ostream(FwdDeclName) << "fwd.type." << FwdDeclCount++;
- llvm::DIDescriptor FDContext =
- getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
llvm::DICompositeType FwdDecl =
DebugFactory.CreateCompositeType(Tag, FDContext, FwdDeclName,
DefUnit, Line, 0, 0, 0, 0,
llvm::DIType(), llvm::DIArray());
- // If this is just a forward declaration, return it.
- if (!RD->getDefinition())
- return FwdDecl;
-
llvm::MDNode *MN = FwdDecl;
llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
// Otherwise, insert it into the TypeCache so that recursive uses will find
@@ -1289,7 +1310,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
CGBuilderTy &Builder) {
llvm::StringRef Name;
- MangleBuffer LinkageName;
+ llvm::StringRef LinkageName;
const Decl *D = GD.getDecl();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
@@ -1307,11 +1328,11 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
}
Name = getFunctionName(FD);
// Use mangled name as linkage name for c/c++ functions.
- CGM.getMangledName(LinkageName, GD);
+ LinkageName = CGM.getMangledName(GD);
} else {
// Use llvm function name as linkage name.
Name = Fn->getName();
- LinkageName.setString(Name);
+ LinkageName = Name;
}
if (!Name.empty() && Name[0] == '\01')
Name = Name.substr(1);
@@ -1477,7 +1498,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::DIVariable D =
DebugFactory.CreateVariable(Tag, llvm::DIDescriptor(RegionStack.back()),
VD->getName(),
- Unit, Line, Ty);
+ Unit, Line, Ty, CGM.getLangOptions().Optimize);
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
index 07edca0..1a62ea9 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
@@ -20,7 +20,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
@@ -38,7 +38,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::ClassTemplatePartialSpecialization:
case Decl::TemplateTypeParm:
case Decl::UnresolvedUsingValue:
- case Decl::NonTypeTemplateParm:
+ case Decl::NonTypeTemplateParm:
case Decl::CXXMethod:
case Decl::CXXConstructor:
case Decl::CXXDestructor:
@@ -59,6 +59,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::ObjCImplementation:
case Decl::ObjCProperty:
case Decl::ObjCCompatibleAlias:
+ case Decl::AccessSpec:
case Decl::LinkageSpec:
case Decl::ObjCPropertyImpl:
case Decl::ObjCClass:
@@ -138,16 +139,14 @@ static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
const char *Separator) {
CodeGenModule &CGM = CGF.CGM;
if (CGF.getContext().getLangOptions().CPlusPlus) {
- MangleBuffer Name;
- CGM.getMangledName(Name, &D);
- return Name.getString().str();
+ llvm::StringRef Name = CGM.getMangledName(&D);
+ return Name.str();
}
std::string ContextName;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
- MangleBuffer Name;
- CGM.getMangledName(Name, FD);
- ContextName = Name.getString().str();
+ llvm::StringRef Name = CGM.getMangledName(FD);
+ ContextName = Name.str();
} else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
ContextName = CGF.CurFn->getName();
else
@@ -328,10 +327,10 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
// int32_t __flags;
- Types.push_back(llvm::Type::getInt32Ty(VMContext));
+ Types.push_back(Int32Ty);
// int32_t __size;
- Types.push_back(llvm::Type::getInt32Ty(VMContext));
+ Types.push_back(Int32Ty);
bool HasCopyAndDispose = BlockRequiresCopying(Ty);
if (HasCopyAndDispose) {
@@ -389,10 +388,63 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
return Info.first;
}
+namespace {
+ struct CallArrayDtor : EHScopeStack::LazyCleanup {
+ CallArrayDtor(const CXXDestructorDecl *Dtor,
+ const ConstantArrayType *Type,
+ llvm::Value *Loc)
+ : Dtor(Dtor), Type(Type), Loc(Loc) {}
+
+ const CXXDestructorDecl *Dtor;
+ const ConstantArrayType *Type;
+ llvm::Value *Loc;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Type);
+ const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(Loc, BasePtr);
+ CGF.EmitCXXAggrDestructorCall(Dtor, Type, BaseAddrPtr);
+ }
+ };
+
+ struct CallVarDtor : EHScopeStack::LazyCleanup {
+ CallVarDtor(const CXXDestructorDecl *Dtor,
+ llvm::Value *NRVOFlag,
+ llvm::Value *Loc)
+ : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(Loc) {}
+
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *NRVOFlag;
+ llvm::Value *Loc;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // Along the exceptions path we always execute the dtor.
+ bool NRVO = !IsForEH && NRVOFlag;
+
+ llvm::BasicBlock *SkipDtorBB = 0;
+ if (NRVO) {
+ // If we exited via NRVO, we skip the destructor call.
+ llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
+ SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
+ llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val");
+ CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
+ CGF.EmitBlock(RunDtorBB);
+ }
+
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Loc);
+
+ if (NRVO) CGF.EmitBlock(SkipDtorBB);
+ }
+ };
+}
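
The two structs above show the general lazy-cleanup pattern: capture state in members, then emit the cleanup code later, once per edge kind. A minimal sketch under the same assumed interface (the free-function callee is hypothetical):

    // struct CallFree : EHScopeStack::LazyCleanup {
    //   llvm::Value *Ptr;
    //   explicit CallFree(llvm::Value *P) : Ptr(P) {}
    //   void Emit(CodeGenFunction &CGF, bool IsForEH) {
    //     CGF.Builder.CreateCall(FreeFn, Ptr); // FreeFn: hypothetical callee
    //   }
    // };
    // EHStack.pushLazyCleanup<CallFree>(NormalAndEHCleanup, Ptr);
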
+
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
-void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
+void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
+ SpecialInitFn *SpecialInit) {
QualType Ty = D.getType();
bool isByRef = D.hasAttr<BlocksAttr>();
bool needsDispose = false;
@@ -490,7 +542,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
{
// Push a cleanup block and restore the stack there.
- DelayedCleanupBlock scope(*this);
+ CleanupBlock scope(*this, NormalCleanup);
V = Builder.CreateLoad(Stack, "tmp");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
@@ -505,10 +557,6 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
llvm::Value *VLASize = EmitVLASize(Ty);
- // Downcast the VLA size expression
- VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
- false, "tmp");
-
// Allocate memory for the array.
llvm::AllocaInst *VLA =
Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
@@ -573,18 +621,18 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
int isa = 0;
if (flag&BLOCK_FIELD_IS_WEAK)
isa = 1;
- V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
+ V = llvm::ConstantInt::get(Int32Ty, isa);
V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
Builder.CreateStore(V, isa_field);
Builder.CreateStore(DeclPtr, forwarding_field);
- V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
+ V = llvm::ConstantInt::get(Int32Ty, flags);
Builder.CreateStore(V, flags_field);
const llvm::Type *V1;
V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
- V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ V = llvm::ConstantInt::get(Int32Ty,
CGM.GetTargetTypeStoreSize(V1).getQuantity());
Builder.CreateStore(V, size_field);
@@ -602,7 +650,9 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
}
}
- if (Init) {
+ if (SpecialInit) {
+ SpecialInit(*this, D, DeclPtr);
+ } else if (Init) {
llvm::Value *Loc = DeclPtr;
if (isByRef)
Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
@@ -618,8 +668,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
assert(Init != 0 && "Wasn't a simple constant init?");
llvm::Value *AlignVal =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- Align.getQuantity());
+ llvm::ConstantInt::get(Int32Ty, Align.getQuantity());
const llvm::Type *IntPtr =
llvm::IntegerType::get(VMContext, LLVMPointerWidth);
llvm::Value *SizeVal =
@@ -658,7 +707,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
Loc, SrcPtr, SizeVal, AlignVal, NotVolatile);
}
} else if (Ty->isReferenceType()) {
- RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true);
+ RValue RV = EmitReferenceBindingToExpr(Init, &D);
EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
} else if (!hasAggregateLLVMType(Init->getType())) {
llvm::Value *V = EmitScalarExpr(Init);
@@ -669,7 +718,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
EmitAggExpr(Init, Loc, isVolatile);
}
}
-
+
// Handle CXX destruction of variables.
QualType DtorTy(Ty);
while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
@@ -684,60 +733,16 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
D.getNameAsString());
- const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
+ const CXXDestructorDecl *D = ClassDecl->getDestructor();
assert(D && "EmitLocalBlockVarDecl - destructor is nul");
if (const ConstantArrayType *Array =
getContext().getAsConstantArrayType(Ty)) {
- {
- DelayedCleanupBlock Scope(*this);
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Loc, BasePtr);
- EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
-
- // Make sure to jump to the exit block.
- EmitBranch(Scope.getCleanupExitBlock());
- }
- if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Loc, BasePtr);
- EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
- }
+ EHStack.pushLazyCleanup<CallArrayDtor>(NormalAndEHCleanup,
+ D, Array, Loc);
} else {
- {
- // Normal destruction.
- DelayedCleanupBlock Scope(*this);
-
- if (NRVO) {
- // If we exited via NRVO, we skip the destructor call.
- llvm::BasicBlock *NoNRVO = createBasicBlock("nrvo.unused");
- Builder.CreateCondBr(Builder.CreateLoad(NRVOFlag, "nrvo.val"),
- Scope.getCleanupExitBlock(),
- NoNRVO);
- EmitBlock(NoNRVO);
- }
-
- // We don't call the destructor along the normal edge if we're
- // applying the NRVO.
- EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
- Loc);
-
- // Make sure to jump to the exit block.
- EmitBranch(Scope.getCleanupExitBlock());
- }
-
- if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
- EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
- Loc);
- }
+ EHStack.pushLazyCleanup<CallVarDtor>(NormalAndEHCleanup,
+ D, NRVOFlag, Loc);
}
}
}
@@ -758,17 +763,19 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
//
// To fix this we insert a bitcast here.
QualType ArgTy = Info.arg_begin()->type;
- {
- DelayedCleanupBlock scope(*this);
- CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
- ConvertType(ArgTy))),
- getContext().getPointerType(D.getType())));
- EmitCall(Info, F, ReturnValueSlot(), Args);
- }
+ CleanupBlock CleanupScope(*this, NormalCleanup);
+
+ // Normal cleanup.
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
+ ConvertType(ArgTy))),
+ getContext().getPointerType(D.getType())));
+ EmitCall(Info, F, ReturnValueSlot(), Args);
+
+ // EH cleanup.
if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
+ CleanupScope.beginEHCleanup();
CallArgList Args;
Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
@@ -779,15 +786,16 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
}
if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
- {
- DelayedCleanupBlock scope(*this);
- llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
- V = Builder.CreateLoad(V);
- BuildBlockRelease(V);
- }
+ CleanupBlock CleanupScope(*this, NormalCleanup);
+
+ llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
+ V = Builder.CreateLoad(V);
+ BuildBlockRelease(V);
+
// FIXME: Turn this on and audit the codegen
if (0 && Exceptions) {
- EHCleanupBlock Cleanup(*this);
+ CleanupScope.beginEHCleanup();
+
llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
V = Builder.CreateLoad(V);
BuildBlockRelease(V);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
index f94ddd9..ec3f386 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
using namespace clang;
@@ -66,16 +66,15 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
if (RD->hasTrivialDestructor())
return;
- CXXDestructorDecl *Dtor = RD->getDestructor(Context);
+ CXXDestructorDecl *Dtor = RD->getDestructor();
llvm::Constant *DtorFn;
if (Array) {
DtorFn =
- CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor,
- Array,
- DeclPtr);
+ CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor, Array,
+ DeclPtr);
const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
DeclPtr = llvm::Constant::getNullValue(Int8PtrTy);
} else
DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);
@@ -94,13 +93,9 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
EmitDeclDestroy(*this, D, DeclPtr);
return;
}
- if (Init->isLvalue(getContext()) == Expr::LV_Valid) {
- RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true);
- EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, T);
- return;
- }
- ErrorUnsupported(Init,
- "global variable that binds reference to a non-lvalue");
+
+ RValue RV = EmitReferenceBindingToExpr(Init, &D);
+ EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, T);
}
void
@@ -144,6 +139,25 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
Builder.CreateCall(AtExitFn, &Args[0], llvm::array_endof(Args));
}
+static llvm::Function *
+CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
+ const llvm::FunctionType *FTy,
+ llvm::StringRef Name) {
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ Name, &CGM.getModule());
+
+ // Set the section if needed.
+ if (const char *Section =
+ CGM.getContext().Target.getStaticInitSectionSpecifier())
+ Fn->setSection(Section);
+
+ if (!CGM.getLangOptions().Exceptions)
+ Fn->setDoesNotThrow();
+
+ return Fn;
+}
+
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
const llvm::FunctionType *FTy
@@ -152,17 +166,22 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
// Create a variable initialization function.
llvm::Function *Fn =
- llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- "__cxx_global_var_init", &TheModule);
+ CreateGlobalInitOrDestructFunction(*this, FTy, "__cxx_global_var_init");
CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D);
- CXXGlobalInits.push_back(Fn);
+ if (D->hasAttr<InitPriorityAttr>()) {
+ unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority();
+ OrderGlobalInits Key(order, PrioritizedCXXGlobalInits.size());
+ PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
+ }
+ else
+ CXXGlobalInits.push_back(Fn);
}
void
CodeGenModule::EmitCXXGlobalInitFunc() {
- if (CXXGlobalInits.empty())
+ if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
return;
const llvm::FunctionType *FTy
@@ -170,21 +189,30 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
false);
// Create our global initialization function.
- llvm::Function *Fn =
- llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- "_GLOBAL__I_a", &TheModule);
-
- CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
- &CXXGlobalInits[0],
- CXXGlobalInits.size());
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a");
+
+ if (!PrioritizedCXXGlobalInits.empty()) {
+ llvm::SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits;
+ llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
+ PrioritizedCXXGlobalInits.end());
+ for (unsigned i = 0; i < PrioritizedCXXGlobalInits.size(); i++) {
+ llvm::Function *Fn = PrioritizedCXXGlobalInits[i].second;
+ LocalCXXGlobalInits.push_back(Fn);
+ }
+ for (unsigned i = 0; i < CXXGlobalInits.size(); i++)
+ LocalCXXGlobalInits.push_back(CXXGlobalInits[i]);
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &LocalCXXGlobalInits[0],
+ LocalCXXGlobalInits.size());
+ }
+ else
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &CXXGlobalInits[0],
+ CXXGlobalInits.size());
AddGlobalCtor(Fn);
}
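
A source-level example the prioritized path now handles (GNU init_priority attribute; lower numbers run earlier, and the second sort key preserves declaration order among equal priorities):

    struct T { T(int) {} };
    T a __attribute__((init_priority(200))) = T(1); // runs first
    T b __attribute__((init_priority(300))) = T(2); // runs second
    T c = T(3); // unprioritized: emitted after all prioritized initializers
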
-void CodeGenModule::AddCXXDtorEntry(llvm::Constant *DtorFn,
- llvm::Constant *Object) {
- CXXGlobalDtors.push_back(std::make_pair(DtorFn, Object));
-}
-
void CodeGenModule::EmitCXXGlobalDtorFunc() {
if (CXXGlobalDtors.empty())
return;
@@ -195,8 +223,7 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() {
// Create our global destructor function.
llvm::Function *Fn =
- llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- "_GLOBAL__D_a", &TheModule);
+ CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__D_a");
CodeGenFunction(*this).GenerateCXXGlobalDtorFunc(Fn, CXXGlobalDtors);
AddGlobalDtor(Fn);
@@ -226,14 +253,14 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
}
void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
- const std::vector<std::pair<llvm::Constant*, llvm::Constant*> >
+ const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
&DtorsAndObjects) {
StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
SourceLocation());
// Emit the dtors, in reverse order from construction.
for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
- llvm::Constant *Callee = DtorsAndObjects[e - i - 1].first;
+ llvm::Value *Callee = DtorsAndObjects[e - i - 1].first;
llvm::CallInst *CI = Builder.CreateCall(Callee,
DtorsAndObjects[e - i - 1].second);
// Make sure the call and the callee agree on calling convention.
@@ -301,7 +328,6 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
CGM.getMangleContext().mangleGuardVariable(&D, GuardVName);
// Create the guard variable.
- const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(VMContext);
llvm::GlobalValue *GuardVariable =
new llvm::GlobalVariable(CGM.getModule(), Int64Ty,
false, GV->getLinkage(),
@@ -324,8 +350,6 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
EmitBlock(InitCheckBlock);
// Variables used when coping with thread-safe statics and exceptions.
- llvm::BasicBlock *SavedLandingPad = 0;
- llvm::BasicBlock *LandingPad = 0;
if (ThreadsafeStatics) {
// Call __cxa_guard_acquire.
V = Builder.CreateCall(getGuardAcquireFn(*this), GuardVariable);
@@ -335,10 +359,10 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
InitBlock, EndBlock);
+ // Call __cxa_guard_abort along the exceptional edge.
if (Exceptions) {
- SavedLandingPad = getInvokeDest();
- LandingPad = createBasicBlock("guard.lpad");
- setInvokeDest(LandingPad);
+ CleanupBlock Cleanup(*this, EHCleanup);
+ Builder.CreateCall(getGuardAbortFn(*this), GuardVariable);
}
EmitBlock(InitBlock);
@@ -346,17 +370,14 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
if (D.getType()->isReferenceType()) {
QualType T = D.getType();
- // We don't want to pass true for IsInitializer here, because a static
- // reference to a temporary does not extend its lifetime.
- RValue RV = EmitReferenceBindingToExpr(D.getInit(),
- /*IsInitializer=*/false);
+ RValue RV = EmitReferenceBindingToExpr(D.getInit(), &D);
EmitStoreOfScalar(RV.getScalarVal(), GV, /*Volatile=*/false, T);
} else
EmitDeclInit(*this, D, GV);
if (ThreadsafeStatics) {
- // Call __cxa_guard_release.
+ // Call __cxa_guard_release. This cannot throw.
Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);
} else {
llvm::Value *One =
@@ -368,57 +389,39 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
if (!D.getType()->isReferenceType())
EmitDeclDestroy(*this, D, GV);
- if (ThreadsafeStatics && Exceptions) {
- // If an exception is thrown during initialization, call __cxa_guard_abort
- // along the exceptional edge.
- EmitBranch(EndBlock);
-
- // Construct the landing pad.
- EmitBlock(LandingPad);
-
- // Personality function and LLVM intrinsics.
- llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
- (VMContext),
- true),
- "__gxx_personality_v0");
- Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty);
- llvm::Value *llvm_eh_exception =
- CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
-
- // Exception object
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
-
- // Call the selector function.
- const llvm::PointerType *PtrToInt8Ty
- = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
- llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
- llvm::Value* SelectorArgs[3] = { Exc, Personality, Null };
- Builder.CreateCall(llvm_eh_selector, SelectorArgs, SelectorArgs + 3,
- "selector");
- Builder.CreateStore(Exc, RethrowPtr);
-
- // Call __cxa_guard_abort along the exceptional edge.
- Builder.CreateCall(getGuardAbortFn(*this), GuardVariable);
-
- setInvokeDest(SavedLandingPad);
-
- // Rethrow the current exception.
- if (getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getUnwindResumeOrRethrowFn(), Cont,
- getInvokeDest(),
- Builder.CreateLoad(RethrowPtr));
- EmitBlock(Cont);
- } else
- Builder.CreateCall(getUnwindResumeOrRethrowFn(),
- Builder.CreateLoad(RethrowPtr));
-
- Builder.CreateUnreachable();
- }
-
EmitBlock(EndBlock);
}
+
+/// GenerateCXXAggrDestructorHelper - Generates a helper function which when
+/// invoked, calls the default destructor on array elements in reverse order of
+/// construction.
+llvm::Function *
+CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This) {
+ FunctionArgList Args;
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args,
+ FunctionType::ExtInfo());
+ const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
+
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn, Args, SourceLocation());
+
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy)->getPointerTo();
+ llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr);
+
+ EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+
+ FinishFunction();
+
+ return Fn;
+}
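
GenerateCXXAggrDestructorHelper's contract is simpler than the emission machinery: run the element destructor over the array in reverse order of construction. The same behavior in plain C++ (a hand-written illustration, not the code clang emits):

    // Sketch: destroy array elements in reverse construction order,
    // the behavior __cxx_global_array_dtor implements for the module.
    #include <cstddef>

    template <class T>
    void destroy_array_reverse(T *Base, std::size_t N) {
      while (N != 0)
        Base[--N].~T();  // last-constructed element goes first
    }
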
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
index ddc1c77..4980aad 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
@@ -14,11 +14,194 @@
#include "clang/AST/StmtCXX.h"
#include "llvm/Intrinsics.h"
+#include "llvm/Support/CallSite.h"
#include "CodeGenFunction.h"
+#include "CGException.h"
+
using namespace clang;
using namespace CodeGen;
+/// Push an entry of the given size onto this protected-scope stack.
+char *EHScopeStack::allocate(size_t Size) {
+ if (!StartOfBuffer) {
+ unsigned Capacity = 1024;
+ while (Capacity < Size) Capacity *= 2;
+ StartOfBuffer = new char[Capacity];
+ StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
+ } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
+ unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
+ unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);
+
+ unsigned NewCapacity = CurrentCapacity;
+ do {
+ NewCapacity *= 2;
+ } while (NewCapacity < UsedCapacity + Size);
+
+ char *NewStartOfBuffer = new char[NewCapacity];
+ char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
+ char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
+ memcpy(NewStartOfData, StartOfData, UsedCapacity);
+ delete [] StartOfBuffer;
+ StartOfBuffer = NewStartOfBuffer;
+ EndOfBuffer = NewEndOfBuffer;
+ StartOfData = NewStartOfData;
+ }
+
+ assert(StartOfBuffer + Size <= StartOfData);
+ StartOfData -= Size;
+ return StartOfData;
+}
+
+EHScopeStack::stable_iterator
+EHScopeStack::getEnclosingEHCleanup(iterator it) const {
+ assert(it != end());
+ do {
+ if (isa<EHCleanupScope>(*it)) {
+ if (cast<EHCleanupScope>(*it).isEHCleanup())
+ return stabilize(it);
+ return cast<EHCleanupScope>(*it).getEnclosingEHCleanup();
+ }
+ if (isa<EHLazyCleanupScope>(*it)) {
+ if (cast<EHLazyCleanupScope>(*it).isEHCleanup())
+ return stabilize(it);
+ return cast<EHLazyCleanupScope>(*it).getEnclosingEHCleanup();
+ }
+ ++it;
+ } while (it != end());
+ return stable_end();
+}
+
+
+void *EHScopeStack::pushLazyCleanup(CleanupKind Kind, size_t Size) {
+ assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
+ char *Buffer = allocate(EHLazyCleanupScope::getSizeForCleanupSize(Size));
+ bool IsNormalCleanup = Kind != EHCleanup;
+ bool IsEHCleanup = Kind != NormalCleanup;
+ EHLazyCleanupScope *Scope =
+ new (Buffer) EHLazyCleanupScope(IsNormalCleanup,
+ IsEHCleanup,
+ Size,
+ BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHCleanup);
+ if (IsNormalCleanup)
+ InnermostNormalCleanup = stable_begin();
+ if (IsEHCleanup)
+ InnermostEHCleanup = stable_begin();
+
+ return Scope->getCleanupBuffer();
+}
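
Two details above are worth keeping in mind. First, allocate returns StartOfData after growing the buffer downward, so the innermost scope always sits at the lowest used address and iteration from begin() walks innermost-to-outermost. Second, pushLazyCleanup placement-news the scope header into that raw storage. A simplified standalone model (illustrative only; the real stack also threads innermost-cleanup links and fixup depths):

    #include <cstddef>
    #include <cstring>
    #include <new>

    class ScopeArena {
      char *StartOfBuffer, *StartOfData, *EndOfBuffer;
    public:
      ScopeArena() : StartOfBuffer(0), StartOfData(0), EndOfBuffer(0) {}
      ~ScopeArena() { delete [] StartOfBuffer; }

      void *allocate(std::size_t Size) {
        if (!StartOfBuffer) {
          std::size_t Capacity = 1024;
          while (Capacity < Size) Capacity *= 2;
          StartOfBuffer = new char[Capacity];
          StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
        } else if (std::size_t(StartOfData - StartOfBuffer) < Size) {
          // Grow by copying the used tail to the tail of a larger buffer,
          // exactly as EHScopeStack::allocate does above.
          std::size_t Used = EndOfBuffer - StartOfData;
          std::size_t NewCap = EndOfBuffer - StartOfBuffer;
          do NewCap *= 2; while (NewCap < Used + Size);
          char *NewBuf = new char[NewCap];
          std::memcpy(NewBuf + NewCap - Used, StartOfData, Used);
          delete [] StartOfBuffer;
          StartOfBuffer = NewBuf;
          EndOfBuffer = NewBuf + NewCap;
          StartOfData = EndOfBuffer - Used;
        }
        StartOfData -= Size;  // the innermost scope lives at StartOfData
        return StartOfData;
      }

      template <class ScopeT> ScopeT *push() {
        return new (allocate(sizeof(ScopeT))) ScopeT();  // cf. pushLazyCleanup
      }
    };
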
+
+void EHScopeStack::pushCleanup(llvm::BasicBlock *NormalEntry,
+ llvm::BasicBlock *NormalExit,
+ llvm::BasicBlock *EHEntry,
+ llvm::BasicBlock *EHExit) {
+ char *Buffer = allocate(EHCleanupScope::getSize());
+ new (Buffer) EHCleanupScope(BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHCleanup,
+ NormalEntry, NormalExit, EHEntry, EHExit);
+ if (NormalEntry)
+ InnermostNormalCleanup = stable_begin();
+ if (EHEntry)
+ InnermostEHCleanup = stable_begin();
+}
+
+void EHScopeStack::popCleanup() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ if (isa<EHLazyCleanupScope>(*begin())) {
+ EHLazyCleanupScope &Cleanup = cast<EHLazyCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
+ StartOfData += Cleanup.getAllocatedSize();
+ } else {
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
+ StartOfData += EHCleanupScope::getSize();
+ }
+
+ // Check whether we can shrink the branch-fixups stack.
+ if (!BranchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups())
+ BranchFixups.clear();
+
+ // Otherwise we can still trim out unnecessary nulls.
+ else
+ popNullFixups();
+ }
+}
+
+EHFilterScope *EHScopeStack::pushFilter(unsigned NumFilters) {
+ char *Buffer = allocate(EHFilterScope::getSizeForNumFilters(NumFilters));
+ CatchDepth++;
+ return new (Buffer) EHFilterScope(NumFilters);
+}
+
+void EHScopeStack::popFilter() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHFilterScope &Filter = cast<EHFilterScope>(*begin());
+ StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters());
+
+ assert(CatchDepth > 0 && "mismatched filter push/pop");
+ CatchDepth--;
+}
+
+EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) {
+ char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers));
+ CatchDepth++;
+ return new (Buffer) EHCatchScope(NumHandlers);
+}
+
+void EHScopeStack::pushTerminate() {
+ char *Buffer = allocate(EHTerminateScope::getSize());
+ CatchDepth++;
+ new (Buffer) EHTerminateScope();
+}
+
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ assert(hasNormalCleanups());
+
+ EHScopeStack::iterator it = find(InnermostNormalCleanup);
+ unsigned MinSize;
+ if (isa<EHCleanupScope>(*it))
+ MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
+ else
+ MinSize = cast<EHLazyCleanupScope>(*it).getFixupDepth();
+ assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
+
+ while (BranchFixups.size() > MinSize &&
+ BranchFixups.back().Destination == 0)
+ BranchFixups.pop_back();
+}
+
+void EHScopeStack::resolveBranchFixups(llvm::BasicBlock *Dest) {
+ assert(Dest && "null block passed to resolveBranchFixups");
+
+ if (BranchFixups.empty()) return;
+ assert(hasNormalCleanups() &&
+ "branch fixups exist with no normal cleanups on stack");
+
+ for (unsigned I = 0, E = BranchFixups.size(); I != E; ++I)
+ if (BranchFixups[I].Destination == Dest)
+ BranchFixups[I].Destination = 0;
+
+ popNullFixups();
+}
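
Seen in isolation, the fixup bookkeeping is: every pending branch to a not-yet-resolvable destination is recorded as a BranchFixup; resolveBranchFixups nulls the entries for a destination once it is known; and only trailing nulls may then be popped, because earlier entries must keep their indices below every enclosing cleanup's recorded fixup depth. A toy model of just that invariant (hypothetical types):

    #include <vector>

    struct Fixup { const void *Destination; };  // null once resolved

    struct FixupStack {
      std::vector<Fixup> BranchFixups;

      void resolve(const void *Dest) {
        for (unsigned I = 0, E = BranchFixups.size(); I != E; ++I)
          if (BranchFixups[I].Destination == Dest)
            BranchFixups[I].Destination = 0;
        // Trim trailing nulls only; interior entries keep their indices
        // so enclosing cleanups' fixup depths stay valid.
        while (!BranchFixups.empty() && !BranchFixups.back().Destination)
          BranchFixups.pop_back();
      }
    };
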
+
static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
// void *__cxa_allocate_exception(size_t thrown_size);
const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
@@ -66,8 +249,19 @@ static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
}
+static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
+ // void *__cxa_get_exception_ptr(void*);
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(Int8PtrTy, Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
+}
+
static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
- // void* __cxa_begin_catch();
+ // void *__cxa_begin_catch(void*);
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
std::vector<const llvm::Type*> Args(1, Int8PtrTy);
@@ -123,25 +317,114 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
CGF.CGM.getLangOptions().CPlusPlus ? "_ZSt9terminatev" : "abort");
}
-static llvm::Constant *getPersonalityFn(CodeGenModule &CGM) {
- const char *PersonalityFnName = "__gcc_personality_v0";
- LangOptions Opts = CGM.getLangOptions();
- if (Opts.CPlusPlus)
- PersonalityFnName = "__gxx_personality_v0";
- else if (Opts.ObjC1) {
- if (Opts.NeXTRuntime) {
- if (Opts.ObjCNonFragileABI)
- PersonalityFnName = "__gcc_personality_v0";
- } else
- PersonalityFnName = "__gnu_objc_personality_v0";
+static const char *getCPersonalityFn(CodeGenFunction &CGF) {
+ return "__gcc_personality_v0";
+}
+
+static const char *getObjCPersonalityFn(CodeGenFunction &CGF) {
+ if (CGF.CGM.getLangOptions().NeXTRuntime) {
+ if (CGF.CGM.getLangOptions().ObjCNonFragileABI)
+ return "__objc_personality_v0";
+ else
+ return getCPersonalityFn(CGF);
+ } else {
+ return "__gnu_objc_personality_v0";
}
+}
+
+static const char *getCXXPersonalityFn(CodeGenFunction &CGF) {
+ if (CGF.CGM.getLangOptions().SjLjExceptions)
+ return "__gxx_personality_sj0";
+ else
+ return "__gxx_personality_v0";
+}
+
+/// Determines the personality function to use when both C++
+/// and Objective-C exceptions are being caught.
+static const char *getObjCXXPersonalityFn(CodeGenFunction &CGF) {
+ // The ObjC personality defers to the C++ personality for non-ObjC
+ // handlers. Unlike the C++ case, we use the same personality
+ // function on targets using (backend-driven) SJLJ EH.
+ if (CGF.CGM.getLangOptions().NeXTRuntime) {
+ if (CGF.CGM.getLangOptions().ObjCNonFragileABI)
+ return "__objc_personality_v0";
+
+ // In the fragile ABI, just use C++ exception handling and hope
+ // they're not doing crazy exception mixing.
+ else
+ return getCXXPersonalityFn(CGF);
+ }
+
+ // I'm pretty sure the GNU runtime doesn't support mixed EH.
+ // TODO: we don't necessarily need mixed EH here; remember what
+ // kind of exceptions we actually try to catch in this function.
+ CGF.CGM.ErrorUnsupported(CGF.CurCodeDecl,
+ "the GNU Objective C runtime does not support "
+ "catching C++ and Objective C exceptions in the "
+ "same function");
+ // Use the C++ personality just to avoid returning null.
+ return getCXXPersonalityFn(CGF);
+}
+
+static llvm::Constant *getPersonalityFn(CodeGenFunction &CGF) {
+ const char *Name;
+ const LangOptions &Opts = CGF.CGM.getLangOptions();
+ if (Opts.CPlusPlus && Opts.ObjC1)
+ Name = getObjCXXPersonalityFn(CGF);
+ else if (Opts.CPlusPlus)
+ Name = getCXXPersonalityFn(CGF);
+ else if (Opts.ObjC1)
+ Name = getObjCPersonalityFn(CGF);
+ else
+ Name = getCPersonalityFn(CGF);
llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(
- CGM.getLLVMContext()),
- true),
- PersonalityFnName);
- return llvm::ConstantExpr::getBitCast(Personality, CGM.PtrToInt8Ty);
+ CGF.CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::getInt32Ty(
+ CGF.CGM.getLLVMContext()),
+ true),
+ Name);
+ return llvm::ConstantExpr::getBitCast(Personality, CGF.CGM.PtrToInt8Ty);
+}
+
+/// Returns the value to inject into a selector to indicate the
+/// presence of a catch-all.
+static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
+ // Possibly we should use @llvm.eh.catch.all.value here.
+ return llvm::ConstantPointerNull::get(CGF.CGM.PtrToInt8Ty);
+}
+
+/// Returns the value to inject into a selector to indicate the
+/// presence of a cleanup.
+static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) {
+ return llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
+}
+
+namespace {
+ /// A cleanup to free the exception object if its initialization
+ /// throws.
+ struct FreeExceptionCleanup : EHScopeStack::LazyCleanup {
+ FreeExceptionCleanup(llvm::Value *ShouldFreeVar,
+ llvm::Value *ExnLocVar)
+ : ShouldFreeVar(ShouldFreeVar), ExnLocVar(ExnLocVar) {}
+
+ llvm::Value *ShouldFreeVar;
+ llvm::Value *ExnLocVar;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
+ llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
+
+ llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar,
+ "should-free-exnobj");
+ CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
+ CGF.EmitBlock(FreeBB);
+ llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
+ CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal)
+ ->setDoesNotThrow();
+ CGF.EmitBlock(DoneBB);
+ }
+ };
}
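
FreeExceptionCleanup is the EH-stack form of a commit/rollback guard: the flag variable starts true, the happy path flips it once initialization completes, and the cleanup frees the exception memory only if the flag is still set. The same shape in ordinary C++ (an analogy, not clang's API):

    #include <cstdlib>

    // Frees the buffer on unwind unless the initializer committed,
    // cf. ShouldFreeVar above.
    struct ExnGuard {
      void *Ptr;
      bool ShouldFree;
      explicit ExnGuard(void *P) : Ptr(P), ShouldFree(true) {}
      void commit() { ShouldFree = false; }
      ~ExnGuard() { if (ShouldFree) std::free(Ptr); }
    };

    void constructInto(void *Mem);  // hypothetical; may throw

    void demo() {
      void *Mem = std::malloc(64);
      ExnGuard Guard(Mem);
      constructInto(Mem);  // throws => ~ExnGuard frees Mem
      Guard.commit();      // success => ownership passes on
    }
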
// Emits an exception expression into the given location. This
@@ -166,21 +449,14 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
llvm::AllocaInst *ExnLocVar =
CGF.CreateTempAlloca(ExnLoc->getType(), "exnobj.var");
- llvm::BasicBlock *SavedInvokeDest = CGF.getInvokeDest();
- {
- CodeGenFunction::EHCleanupBlock Cleanup(CGF);
- llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
- llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
-
- llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar,
- "should-free-exnobj");
- CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
- CGF.EmitBlock(FreeBB);
- llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
- CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal);
- CGF.EmitBlock(DoneBB);
- }
- llvm::BasicBlock *Cleanup = CGF.getInvokeDest();
+ // Make sure the exception object is cleaned up if there's an
+ // exception during initialization.
+ // FIXME: stmt expressions might require this to be a normal
+ // cleanup, too.
+ CGF.EHStack.pushLazyCleanup<FreeExceptionCleanup>(EHCleanup,
+ ShouldFreeVar,
+ ExnLocVar);
+ EHScopeStack::stable_iterator Cleanup = CGF.EHStack.stable_begin();
CGF.Builder.CreateStore(ExnLoc, ExnLocVar);
CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(CGF.getLLVMContext()),
@@ -203,74 +479,38 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
ShouldFreeVar);
- // Pop the cleanup block if it's still the top of the cleanup stack.
- // Otherwise, temporaries have been created and our cleanup will get
- // properly removed in time.
- // TODO: this is not very resilient.
- if (CGF.getInvokeDest() == Cleanup)
- CGF.setInvokeDest(SavedInvokeDest);
-}
-
-// CopyObject - Utility to copy an object. Calls copy constructor as necessary.
-// N is casted to the right type.
-static void CopyObject(CodeGenFunction &CGF, QualType ObjectType,
- bool WasPointer, bool WasPointerReference,
- llvm::Value *E, llvm::Value *N) {
- // Store the throw exception in the exception object.
- if (WasPointer || !CGF.hasAggregateLLVMType(ObjectType)) {
- llvm::Value *Value = E;
- if (!WasPointer)
- Value = CGF.Builder.CreateLoad(Value);
- const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0);
- if (WasPointerReference) {
- llvm::Value *Tmp = CGF.CreateTempAlloca(Value->getType(), "catch.param");
- CGF.Builder.CreateStore(Value, Tmp);
- Value = Tmp;
- ValuePtrTy = Value->getType()->getPointerTo(0);
- }
- N = CGF.Builder.CreateBitCast(N, ValuePtrTy);
- CGF.Builder.CreateStore(Value, N);
- } else {
- const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo(0);
- const CXXRecordDecl *RD;
- RD = cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl());
- llvm::Value *This = CGF.Builder.CreateBitCast(N, Ty);
- if (RD->hasTrivialCopyConstructor()) {
- CGF.EmitAggregateCopy(This, E, ObjectType);
- } else if (CXXConstructorDecl *CopyCtor
- = RD->getCopyConstructor(CGF.getContext(), 0)) {
- llvm::Value *Src = E;
-
- // Stolen from EmitClassAggrMemberwiseCopy
- llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor,
- Ctor_Complete);
- CallArgList CallArgs;
- CallArgs.push_back(std::make_pair(RValue::get(This),
- CopyCtor->getThisType(CGF.getContext())));
-
- // Push the Src ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Src),
- CopyCtor->getParamDecl(0)->getType()));
-
- const FunctionProtoType *FPT
- = CopyCtor->getType()->getAs<FunctionProtoType>();
- CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
- Callee, ReturnValueSlot(), CallArgs, CopyCtor);
- } else
- llvm_unreachable("uncopyable object");
+ // Technically, the exception object is like a temporary; it has to
+ // be cleaned up when its full-expression is complete.
+ // Unfortunately, the AST represents full-expressions by creating a
+ // CXXExprWithTemporaries, which it only does when there are actually
+ // temporaries.
+ //
+ // If any cleanups have been added since we pushed ours, they must
+ // be from temporaries; this will get popped at the same time.
+ // Otherwise we need to pop ours off. FIXME: this is very brittle.
+ if (Cleanup == CGF.EHStack.stable_begin())
+ CGF.PopCleanupBlock();
+}
+
+llvm::Value *CodeGenFunction::getExceptionSlot() {
+ if (!ExceptionSlot) {
+ const llvm::Type *i8p = llvm::Type::getInt8PtrTy(getLLVMContext());
+ ExceptionSlot = CreateTempAlloca(i8p, "exn.slot");
}
+ return ExceptionSlot;
}
void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
if (!E->getSubExpr()) {
if (getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getReThrowFn(*this), Cont, getInvokeDest())
+ Builder.CreateInvoke(getReThrowFn(*this),
+ getUnreachableBlock(),
+ getInvokeDest())
->setDoesNotReturn();
- EmitBlock(Cont);
- } else
+ } else {
Builder.CreateCall(getReThrowFn(*this))->setDoesNotReturn();
- Builder.CreateUnreachable();
+ Builder.CreateUnreachable();
+ }
// Clear the insertion point to indicate we are in unreachable code.
Builder.ClearInsertionPoint();
@@ -284,10 +524,11 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
- llvm::Value *ExceptionPtr =
+ llvm::CallInst *ExceptionPtr =
Builder.CreateCall(AllocExceptionFn,
llvm::ConstantInt::get(SizeTy, TypeSize),
"exception");
+ ExceptionPtr->setDoesNotThrow();
EmitAnyExprToExn(*this, E->getSubExpr(), ExceptionPtr);
@@ -301,7 +542,7 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
if (!Record->hasTrivialDestructor()) {
- CXXDestructorDecl *DtorD = Record->getDestructor(getContext());
+ CXXDestructorDecl *DtorD = Record->getDestructor();
Dtor = CGM.GetAddrOfCXXDestructor(DtorD, Dtor_Complete);
Dtor = llvm::ConstantExpr::getBitCast(Dtor, Int8PtrTy);
}
@@ -309,18 +550,17 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
if (!Dtor) Dtor = llvm::Constant::getNullValue(Int8PtrTy);
if (getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
llvm::InvokeInst *ThrowCall =
- Builder.CreateInvoke3(getThrowFn(*this), Cont, getInvokeDest(),
+ Builder.CreateInvoke3(getThrowFn(*this),
+ getUnreachableBlock(), getInvokeDest(),
ExceptionPtr, TypeInfo, Dtor);
ThrowCall->setDoesNotReturn();
- EmitBlock(Cont);
} else {
llvm::CallInst *ThrowCall =
Builder.CreateCall3(getThrowFn(*this), ExceptionPtr, TypeInfo, Dtor);
ThrowCall->setDoesNotReturn();
+ Builder.CreateUnreachable();
}
- Builder.CreateUnreachable();
// Clear the insertion point to indicate we are in unreachable code.
Builder.ClearInsertionPoint();
@@ -346,80 +586,15 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
if (!Proto->hasExceptionSpec())
return;
- llvm::Constant *Personality = getPersonalityFn(CGM);
- llvm::Value *llvm_eh_exception =
- CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- const llvm::IntegerType *Int8Ty;
- const llvm::PointerType *PtrToInt8Ty;
- Int8Ty = llvm::Type::getInt8Ty(VMContext);
- // C string type. Used in lots of places.
- PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
- llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
- llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
-
- llvm::BasicBlock *PrevLandingPad = getInvokeDest();
- llvm::BasicBlock *EHSpecHandler = createBasicBlock("ehspec.handler");
- llvm::BasicBlock *Match = createBasicBlock("match");
- llvm::BasicBlock *Unwind = 0;
-
- assert(PrevLandingPad == 0 && "EHSpec has invoke context");
- (void)PrevLandingPad;
-
- llvm::BasicBlock *Cont = createBasicBlock("cont");
-
- EmitBranchThroughCleanup(Cont);
-
- // Emit the statements in the try {} block
- setInvokeDest(EHSpecHandler);
-
- EmitBlock(EHSpecHandler);
- // Exception object
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
-
- SelectorArgs.push_back(Exc);
- SelectorArgs.push_back(Personality);
- SelectorArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- Proto->getNumExceptions()+1));
-
- for (unsigned i = 0; i < Proto->getNumExceptions(); ++i) {
- QualType Ty = Proto->getExceptionType(i);
- QualType ExceptType
- = Ty.getNonReferenceType().getUnqualifiedType();
- llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true);
- SelectorArgs.push_back(EHType);
- }
- if (Proto->getNumExceptions())
- SelectorArgs.push_back(Null);
-
- // Find which handler was matched.
- llvm::Value *Selector
- = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(),
- SelectorArgs.end(), "selector");
- if (Proto->getNumExceptions()) {
- Unwind = createBasicBlock("Unwind");
-
- Builder.CreateStore(Exc, RethrowPtr);
- Builder.CreateCondBr(Builder.CreateICmpSLT(Selector,
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- 0)),
- Match, Unwind);
-
- EmitBlock(Match);
- }
- Builder.CreateCall(getUnexpectedFn(*this), Exc)->setDoesNotReturn();
- Builder.CreateUnreachable();
+ unsigned NumExceptions = Proto->getNumExceptions();
+ EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
- if (Proto->getNumExceptions()) {
- EmitBlock(Unwind);
- Builder.CreateCall(getUnwindResumeOrRethrowFn(),
- Builder.CreateLoad(RethrowPtr));
- Builder.CreateUnreachable();
+ for (unsigned I = 0; I != NumExceptions; ++I) {
+ QualType Ty = Proto->getExceptionType(I);
+ QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
+ llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true);
+ Filter->setFilter(I, EHType);
}
-
- EmitBlock(Cont);
}
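
What the filter implements, at the source level, is the pre-C++17 dynamic exception specification: an exception type not listed in the throw() clause is routed to __cxa_call_unexpected, which runs the unexpected handler. For example (compile in C++03/C++11 mode; dynamic specifications were removed in C++17):

    #include <cstdio>
    #include <cstdlib>
    #include <exception>

    void onUnexpected() {
      std::puts("unexpected");  // entered via the EH filter
      std::exit(0);
    }

    void f() throw(int) {  // filter admits only 'int'
      throw 3.14;          // double violates the specification
    }

    int main() {
      std::set_unexpected(onUnexpected);
      f();                 // prints "unexpected" and exits
    }
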
void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
@@ -436,317 +611,936 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
if (!Proto->hasExceptionSpec())
return;
- setInvokeDest(0);
+ EHStack.popFilter();
}
void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
- CXXTryStmtInfo Info = EnterCXXTryStmt(S);
+ EnterCXXTryStmt(S);
EmitStmt(S.getTryBlock());
- ExitCXXTryStmt(S, Info);
+ ExitCXXTryStmt(S);
+}
+
+void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers);
+
+ for (unsigned I = 0; I != NumHandlers; ++I) {
+ const CXXCatchStmt *C = S.getHandler(I);
+
+ llvm::BasicBlock *Handler = createBasicBlock("catch");
+ if (C->getExceptionDecl()) {
+      // FIXME: Dropping the reference type on the type info makes it
+ // impossible to correctly implement catch-by-reference
+ // semantics for pointers. Unfortunately, this is what all
+ // existing compilers do, and it's not clear that the standard
+ // personality routine is capable of doing this right. See C++ DR 388:
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388
+ QualType CaughtType = C->getCaughtType();
+ CaughtType = CaughtType.getNonReferenceType().getUnqualifiedType();
+ llvm::Value *TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, true);
+ CatchScope->setHandler(I, TypeInfo, Handler);
+ } else {
+ // No exception decl indicates '...', a catch-all.
+ CatchScope->setCatchAllHandler(I, Handler);
+ }
+ }
}
-CodeGenFunction::CXXTryStmtInfo
-CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S) {
- CXXTryStmtInfo Info;
- Info.SavedLandingPad = getInvokeDest();
- Info.HandlerBlock = createBasicBlock("try.handler");
- Info.FinallyBlock = createBasicBlock("finally");
+/// Check whether this is a non-EH scope, i.e. a scope which doesn't
+/// affect exception handling. Currently, the only non-EH scopes are
+/// normal-only cleanup scopes.
+static bool isNonEHScope(const EHScope &S) {
+ switch (S.getKind()) {
+ case EHScope::Cleanup:
+ return !cast<EHCleanupScope>(S).isEHCleanup();
+ case EHScope::LazyCleanup:
+ return !cast<EHLazyCleanupScope>(S).isEHCleanup();
+ case EHScope::Filter:
+ case EHScope::Catch:
+ case EHScope::Terminate:
+ return false;
+ }
- PushCleanupBlock(Info.FinallyBlock);
- setInvokeDest(Info.HandlerBlock);
+ // Suppress warning.
+ return false;
+}
- return Info;
+llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
+ assert(EHStack.requiresLandingPad());
+ assert(!EHStack.empty());
+
+ if (!Exceptions)
+ return 0;
+
+ // Check the innermost scope for a cached landing pad. If this is
+ // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
+ llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
+ if (LP) return LP;
+
+ // Build the landing pad for this scope.
+ LP = EmitLandingPad();
+ assert(LP);
+
+ // Cache the landing pad on the innermost scope. If this is a
+ // non-EH scope, cache the landing pad on the enclosing scope, too.
+ for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) {
+ ir->setCachedLandingPad(LP);
+ if (!isNonEHScope(*ir)) break;
+ }
+
+ return LP;
}
-void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S,
- CXXTryStmtInfo TryInfo) {
- // Pointer to the personality function
- llvm::Constant *Personality = getPersonalityFn(CGM);
- llvm::Value *llvm_eh_exception =
- CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
-
- llvm::BasicBlock *PrevLandingPad = TryInfo.SavedLandingPad;
- llvm::BasicBlock *TryHandler = TryInfo.HandlerBlock;
- llvm::BasicBlock *FinallyBlock = TryInfo.FinallyBlock;
- llvm::BasicBlock *FinallyRethrow = createBasicBlock("finally.throw");
- llvm::BasicBlock *FinallyEnd = createBasicBlock("finally.end");
-
- // Jump to end if there is no exception
- EmitBranchThroughCleanup(FinallyEnd);
-
- llvm::BasicBlock *TerminateHandler = getTerminateHandler();
-
- // Emit the handlers
- EmitBlock(TryHandler);
-
- const llvm::IntegerType *Int8Ty;
- const llvm::PointerType *PtrToInt8Ty;
- Int8Ty = llvm::Type::getInt8Ty(VMContext);
- // C string type. Used in lots of places.
- PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
- llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
- llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
+llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
+ assert(EHStack.requiresLandingPad());
+
+ // This function contains a hack to work around a design flaw in
+ // LLVM's EH IR which breaks semantics after inlining. This same
+ // hack is implemented in llvm-gcc.
+ //
+ // The LLVM EH abstraction is basically a thin veneer over the
+ // traditional GCC zero-cost design: for each range of instructions
+ // in the function, there is (at most) one "landing pad" with an
+ // associated chain of EH actions. A language-specific personality
+ // function interprets this chain of actions and (1) decides whether
+ // or not to resume execution at the landing pad and (2) if so,
+ // provides an integer indicating why it's stopping. In LLVM IR,
+ // the association of a landing pad with a range of instructions is
+ // achieved via an invoke instruction, the chain of actions becomes
+ // the arguments to the @llvm.eh.selector call, and the selector
+ // call returns the integer indicator. Other than the required
+ // presence of two intrinsic function calls in the landing pad,
+ // the IR exactly describes the layout of the output code.
+ //
+ // A principal advantage of this design is that it is completely
+ // language-agnostic; in theory, the LLVM optimizers can treat
+ // landing pads neutrally, and targets need only know how to lower
+ // the intrinsics to have a functioning exceptions system (assuming
+ // that platform exceptions follow something approximately like the
+ // GCC design). Unfortunately, landing pads cannot be combined in a
+ // language-agnostic way: given selectors A and B, there is no way
+ // to make a single landing pad which faithfully represents the
+ // semantics of propagating an exception first through A, then
+ // through B, without knowing how the personality will interpret the
+ // (lowered form of the) selectors. This means that inlining has no
+ // choice but to crudely chain invokes (i.e., to ignore invokes in
+ // the inlined function, but to turn all unwindable calls into
+ // invokes), which is only semantically valid if every unwind stops
+ // at every landing pad.
+ //
+ // Therefore, the invoke-inline hack is to guarantee that every
+ // landing pad has a catch-all.
+ const bool UseInvokeInlineHack = true;
+
+ for (EHScopeStack::iterator ir = EHStack.begin(); ; ) {
+ assert(ir != EHStack.end() &&
+ "stack requiring landing pad is nothing but non-EH scopes?");
+
+ // If this is a terminate scope, just use the singleton terminate
+ // landing pad.
+ if (isa<EHTerminateScope>(*ir))
+ return getTerminateLandingPad();
+
+ // If this isn't an EH scope, iterate; otherwise break out.
+ if (!isNonEHScope(*ir)) break;
+ ++ir;
+
+ // We haven't checked this scope for a cached landing pad yet.
+ if (llvm::BasicBlock *LP = ir->getCachedLandingPad())
+ return LP;
+ }
+
+ // Save the current IR generation state.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // Create and configure the landing pad.
+ llvm::BasicBlock *LP = createBasicBlock("lpad");
+ EmitBlock(LP);
+
+ // Save the exception pointer. It's safe to use a single exception
+ // pointer per function because EH cleanups can never have nested
+ // try/catches.
+ llvm::CallInst *Exn =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn");
+ Exn->setDoesNotThrow();
+ Builder.CreateStore(Exn, getExceptionSlot());
+
+ // Build the selector arguments.
+ llvm::SmallVector<llvm::Value*, 8> EHSelector;
+ EHSelector.push_back(Exn);
+ EHSelector.push_back(getPersonalityFn(*this));
+
+ // Accumulate all the handlers in scope.
+ llvm::DenseMap<llvm::Value*, JumpDest> EHHandlers;
+ JumpDest CatchAll;
+ bool HasEHCleanup = false;
+ bool HasEHFilter = false;
+ llvm::SmallVector<llvm::Value*, 8> EHFilters;
+ for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end();
+ I != E; ++I) {
+
+ switch (I->getKind()) {
+ case EHScope::LazyCleanup:
+ if (!HasEHCleanup)
+ HasEHCleanup = cast<EHLazyCleanupScope>(*I).isEHCleanup();
+ // We otherwise don't care about cleanups.
+ continue;
+
+ case EHScope::Cleanup:
+ if (!HasEHCleanup)
+ HasEHCleanup = cast<EHCleanupScope>(*I).isEHCleanup();
+ // We otherwise don't care about cleanups.
+ continue;
+
+ case EHScope::Filter: {
+ assert(I.next() == EHStack.end() && "EH filter is not end of EH stack");
+ assert(!CatchAll.Block && "EH filter reached after catch-all");
+
+      // Filter scopes get added to the selector in weird ways.
+ EHFilterScope &Filter = cast<EHFilterScope>(*I);
+ HasEHFilter = true;
+
+ // Add all the filter values which we aren't already explicitly
+ // catching.
+ for (unsigned I = 0, E = Filter.getNumFilters(); I != E; ++I) {
+ llvm::Value *FV = Filter.getFilter(I);
+ if (!EHHandlers.count(FV))
+ EHFilters.push_back(FV);
+ }
+ goto done;
+ }
+
+ case EHScope::Terminate:
+ // Terminate scopes are basically catch-alls.
+ assert(!CatchAll.Block);
+ CatchAll.Block = getTerminateHandler();
+ CatchAll.ScopeDepth = EHStack.getEnclosingEHCleanup(I);
+ goto done;
+
+ case EHScope::Catch:
+ break;
+ }
+
+ EHCatchScope &Catch = cast<EHCatchScope>(*I);
+ for (unsigned HI = 0, HE = Catch.getNumHandlers(); HI != HE; ++HI) {
+ EHCatchScope::Handler Handler = Catch.getHandler(HI);
+
+ // Catch-all. We should only have one of these per catch.
+ if (!Handler.Type) {
+ assert(!CatchAll.Block);
+ CatchAll.Block = Handler.Block;
+ CatchAll.ScopeDepth = EHStack.getEnclosingEHCleanup(I);
+ continue;
+ }
+
+ // Check whether we already have a handler for this type.
+ JumpDest &Dest = EHHandlers[Handler.Type];
+ if (Dest.Block) continue;
+
+ EHSelector.push_back(Handler.Type);
+ Dest.Block = Handler.Block;
+ Dest.ScopeDepth = EHStack.getEnclosingEHCleanup(I);
+ }
+
+ // Stop if we found a catch-all.
+ if (CatchAll.Block) break;
+ }
+
+ done:
+ unsigned LastToEmitInLoop = EHSelector.size();
+
+ // If we have a catch-all, add null to the selector.
+ if (CatchAll.Block) {
+    EHSelector.push_back(getCatchAllValue(*this));
+
+ // If we have an EH filter, we need to add those handlers in the
+ // right place in the selector, which is to say, at the end.
+ } else if (HasEHFilter) {
+ // Create a filter expression: an integer constant saying how many
+ // filters there are (+1 to avoid ambiguity with 0 for cleanup),
+ // followed by the filter types. The personality routine only
+ // lands here if the filter doesn't match.
+ EHSelector.push_back(llvm::ConstantInt::get(Builder.getInt32Ty(),
+ EHFilters.size() + 1));
+ EHSelector.append(EHFilters.begin(), EHFilters.end());
+
+ // Also check whether we need a cleanup.
+ if (UseInvokeInlineHack || HasEHCleanup)
+      EHSelector.push_back(UseInvokeInlineHack
+                           ? getCatchAllValue(*this)
+                           : getCleanupValue(*this));
+
+ // Otherwise, signal that we at least have cleanups.
+ } else if (UseInvokeInlineHack || HasEHCleanup) {
+    EHSelector.push_back(UseInvokeInlineHack
+                         ? getCatchAllValue(*this)
+                         : getCleanupValue(*this));
+ } else {
+ assert(LastToEmitInLoop > 2);
+ LastToEmitInLoop--;
+ }
+
+ assert(EHSelector.size() >= 3 && "selector call has only two arguments!");
+
+ // Tell the backend how to generate the landing pad.
+ llvm::CallInst *Selection =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
+ EHSelector.begin(), EHSelector.end(), "eh.selector");
+ Selection->setDoesNotThrow();
+
+ // Select the right handler.
llvm::Value *llvm_eh_typeid_for =
CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
- // Exception object
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
-
- SelectorArgs.push_back(Exc);
- SelectorArgs.push_back(Personality);
-
- bool HasCatchAll = false;
- for (unsigned i = 0; i<S.getNumHandlers(); ++i) {
- const CXXCatchStmt *C = S.getHandler(i);
- VarDecl *CatchParam = C->getExceptionDecl();
- if (CatchParam) {
- // C++ [except.handle]p3 indicates that top-level cv-qualifiers
- // are ignored.
- QualType CaughtType = C->getCaughtType().getNonReferenceType();
- llvm::Value *EHTypeInfo
- = CGM.GetAddrOfRTTIDescriptor(CaughtType.getUnqualifiedType(), true);
- SelectorArgs.push_back(EHTypeInfo);
+
+ // The results of llvm_eh_typeid_for aren't reliable --- at least
+ // not locally --- so we basically have to do this as an 'if' chain.
+ // We walk through the first N-1 catch clauses, testing and chaining,
+ // and then fall into the final clause (which is either a cleanup, a
+ // filter (possibly with a cleanup), a catch-all, or another catch).
+ for (unsigned I = 2; I != LastToEmitInLoop; ++I) {
+ llvm::Value *Type = EHSelector[I];
+ JumpDest Dest = EHHandlers[Type];
+ assert(Dest.Block && "no handler entry for value in selector?");
+
+ // Figure out where to branch on a match. As a debug code-size
+ // optimization, if the scope depth matches the innermost cleanup,
+ // we branch directly to the catch handler.
+ llvm::BasicBlock *Match = Dest.Block;
+ bool MatchNeedsCleanup = Dest.ScopeDepth != EHStack.getInnermostEHCleanup();
+ if (MatchNeedsCleanup)
+ Match = createBasicBlock("eh.match");
+
+ llvm::BasicBlock *Next = createBasicBlock("eh.next");
+
+ // Check whether the exception matches.
+ llvm::CallInst *Id
+ = Builder.CreateCall(llvm_eh_typeid_for,
+ Builder.CreateBitCast(Type, CGM.PtrToInt8Ty));
+ Id->setDoesNotThrow();
+ Builder.CreateCondBr(Builder.CreateICmpEQ(Selection, Id),
+ Match, Next);
+
+ // Emit match code if necessary.
+ if (MatchNeedsCleanup) {
+ EmitBlock(Match);
+ EmitBranchThroughEHCleanup(Dest);
+ }
+
+ // Continue to the next match.
+ EmitBlock(Next);
+ }
+
+ // Emit the final case in the selector.
+ // This might be a catch-all....
+ if (CatchAll.Block) {
+ assert(isa<llvm::ConstantPointerNull>(EHSelector.back()));
+ EmitBranchThroughEHCleanup(CatchAll);
+
+ // ...or an EH filter...
+ } else if (HasEHFilter) {
+ llvm::Value *SavedSelection = Selection;
+
+ // First, unwind out to the outermost scope if necessary.
+ if (EHStack.hasEHCleanups()) {
+ // The end here might not dominate the beginning, so we might need to
+ // save the selector if we need it.
+ llvm::AllocaInst *SelectorVar = 0;
+ if (HasEHCleanup) {
+ SelectorVar = CreateTempAlloca(Builder.getInt32Ty(), "selector.var");
+ Builder.CreateStore(Selection, SelectorVar);
+ }
+
+ llvm::BasicBlock *CleanupContBB = createBasicBlock("ehspec.cleanup.cont");
+ EmitBranchThroughEHCleanup(JumpDest(CleanupContBB, EHStack.stable_end()));
+ EmitBlock(CleanupContBB);
+
+ if (HasEHCleanup)
+ SavedSelection = Builder.CreateLoad(SelectorVar, "ehspec.saved-selector");
+ }
+
+ // If there was a cleanup, we'll need to actually check whether we
+ // landed here because the filter triggered.
+ if (UseInvokeInlineHack || HasEHCleanup) {
+ llvm::BasicBlock *RethrowBB = createBasicBlock("cleanup");
+ llvm::BasicBlock *UnexpectedBB = createBasicBlock("ehspec.unexpected");
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(Builder.getInt32Ty(), 0);
+ llvm::Value *FailsFilter =
+ Builder.CreateICmpSLT(SavedSelection, Zero, "ehspec.fails");
+ Builder.CreateCondBr(FailsFilter, UnexpectedBB, RethrowBB);
+
+ // The rethrow block is where we land if this was a cleanup.
+ // TODO: can this be _Unwind_Resume if the InvokeInlineHack is off?
+ EmitBlock(RethrowBB);
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ EmitBlock(UnexpectedBB);
+ }
+
+ // Call __cxa_call_unexpected. This doesn't need to be an invoke
+ // because __cxa_call_unexpected magically filters exceptions
+ // according to the last landing pad the exception was thrown
+ // into. Seriously.
+ Builder.CreateCall(getUnexpectedFn(*this),
+ Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
+
+ // ...or a normal catch handler...
+ } else if (!UseInvokeInlineHack && !HasEHCleanup) {
+ llvm::Value *Type = EHSelector.back();
+ EmitBranchThroughEHCleanup(EHHandlers[Type]);
+
+ // ...or a cleanup.
+ } else {
+ // We emit a jump to a notional label at the outermost unwind state.
+ llvm::BasicBlock *Unwind = createBasicBlock("eh.resume");
+ JumpDest Dest(Unwind, EHStack.stable_end());
+ EmitBranchThroughEHCleanup(Dest);
+
+ // The unwind block. We have to reload the exception here because
+ // we might have unwound through arbitrary blocks, so the landing
+ // pad might not dominate.
+ EmitBlock(Unwind);
+
+ // This can always be a call because we necessarily didn't find
+ // anything on the EH stack which needs our help.
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
+ }
+
+ // Restore the old IR generation state.
+ Builder.restoreIP(SavedIP);
+
+ return LP;
+}
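
To make the selector layout concrete: for a try block with one typed handler and a catch-all, the loop above emits a single landing pad whose llvm.eh.selector call receives the exception, the personality, the handler's RTTI, and finally the null catch-all value. Sketched from the source side (the comment describes the expected shape, not verified compiler output):

    // Selector arguments for the one landing pad below, in order:
    //   exn, personality, typeinfo-for-A, null (catch-all)
    struct A {};

    void mayThrow();  // hypothetical external function

    void demo() {
      try {
        mayThrow();    // lowered to an invoke unwinding to the landing pad
      } catch (A &) {  // matched via llvm.eh.typeid.for on A's type info
      } catch (...) {  // the null entry at the end of the selector
      }
    }
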
+
+namespace {
+ /// A cleanup to call __cxa_end_catch. In many cases, the caught
+ /// exception type lets us state definitively that the thrown exception
+ /// type does not have a destructor. In particular:
+ /// - Catch-alls tell us nothing, so we have to conservatively
+ /// assume that the thrown exception might have a destructor.
+ /// - Catches by reference behave according to their base types.
+ /// - Catches of non-record types will only trigger for exceptions
+ /// of non-record types, which never have destructors.
+ /// - Catches of record types can trigger for arbitrary subclasses
+ /// of the caught type, so we have to assume the actual thrown
+ /// exception type might have a throwing destructor, even if the
+ /// caught type's destructor is trivial or nothrow.
+ struct CallEndCatch : EHScopeStack::LazyCleanup {
+ CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
+ bool MightThrow;
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(getEndCatchFn(CGF))->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(getEndCatchFn(CGF), 0, 0);
+ }
+ };
+}
+
+/// Emits a call to __cxa_begin_catch and enters a cleanup to call
+/// __cxa_end_catch.
+///
+/// \param EndMightThrow - true if __cxa_end_catch might throw
+static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
+ llvm::Value *Exn,
+ bool EndMightThrow) {
+ llvm::CallInst *Call = CGF.Builder.CreateCall(getBeginCatchFn(CGF), Exn);
+ Call->setDoesNotThrow();
+
+ CGF.EHStack.pushLazyCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
+
+ return Call;
+}
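
CallBeginCatch and the CallEndCatch cleanup bracket the handler with the Itanium C++ ABI entry points. Their ABI-level shape, written out by hand purely for illustration:

    // Itanium C++ ABI declarations (per the ABI document; not meant to
    // be called by hand in real code).
    extern "C" void *__cxa_begin_catch(void *exceptionObject);
    extern "C" void __cxa_end_catch();

    // What an emitted 'catch (int n)' handler effectively performs:
    //   void *adj = __cxa_begin_catch(exn); // adjusted object pointer
    //   int n = *static_cast<int *>(adj);   // copy the scalar out
    //   ...handler body...
    //   __cxa_end_catch();                  // may destroy the exception
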
+
+/// A "special initializer" callback for initializing a catch
+/// parameter during catch initialization.
+static void InitCatchParam(CodeGenFunction &CGF,
+ const VarDecl &CatchParam,
+ llvm::Value *ParamAddr) {
+ // Load the exception from where the landing pad saved it.
+ llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn");
+
+ CanQualType CatchType =
+ CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
+ const llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
+
+ // If we're catching by reference, we can just cast the object
+ // pointer to the appropriate pointer.
+ if (isa<ReferenceType>(CatchType)) {
+ bool EndCatchMightThrow = cast<ReferenceType>(CatchType)->getPointeeType()
+ ->isRecordType();
+
+ // __cxa_begin_catch returns the adjusted object pointer.
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
+ llvm::Value *ExnCast =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
+ CGF.Builder.CreateStore(ExnCast, ParamAddr);
+ return;
+ }
+
+ // Non-aggregates (plus complexes).
+ bool IsComplex = false;
+ if (!CGF.hasAggregateLLVMType(CatchType) ||
+ (IsComplex = CatchType->isAnyComplexType())) {
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
+
+ // If the catch type is a pointer type, __cxa_begin_catch returns
+ // the pointer by value.
+ if (CatchType->hasPointerRepresentation()) {
+ llvm::Value *CastExn =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
+ CGF.Builder.CreateStore(CastExn, ParamAddr);
+ return;
+ }
+
+ // Otherwise, it returns a pointer into the exception object.
+
+ const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+
+ if (IsComplex) {
+ CGF.StoreComplexToAddr(CGF.LoadComplexFromAddr(Cast, /*volatile*/ false),
+ ParamAddr, /*volatile*/ false);
} else {
- // null indicates catch all
- SelectorArgs.push_back(Null);
- HasCatchAll = true;
+ llvm::Value *ExnLoad = CGF.Builder.CreateLoad(Cast, "exn.scalar");
+ CGF.EmitStoreOfScalar(ExnLoad, ParamAddr, /*volatile*/ false, CatchType);
}
+ return;
}
- // We use a cleanup unless there was already a catch all.
- if (!HasCatchAll) {
- SelectorArgs.push_back(Null);
+ // FIXME: this *really* needs to be done via a proper, Sema-emitted
+ // initializer expression.
+
+ CXXRecordDecl *RD = CatchType.getTypePtr()->getAsCXXRecordDecl();
+ assert(RD && "aggregate catch type was not a record!");
+
+ const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+
+ if (RD->hasTrivialCopyConstructor()) {
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, true);
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+ CGF.EmitAggregateCopy(ParamAddr, Cast, CatchType);
+ return;
}
- // Find which handler was matched.
- llvm::Value *Selector
- = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(),
- SelectorArgs.end(), "selector");
- for (unsigned i = 0; i<S.getNumHandlers(); ++i) {
- const CXXCatchStmt *C = S.getHandler(i);
- VarDecl *CatchParam = C->getExceptionDecl();
- Stmt *CatchBody = C->getHandlerBlock();
-
- llvm::BasicBlock *Next = 0;
-
- if (SelectorArgs[i+2] != Null) {
- llvm::BasicBlock *Match = createBasicBlock("match");
- Next = createBasicBlock("catch.next");
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
- llvm::Value *Id
- = Builder.CreateCall(llvm_eh_typeid_for,
- Builder.CreateBitCast(SelectorArgs[i+2],
- Int8PtrTy));
- Builder.CreateCondBr(Builder.CreateICmpEQ(Selector, Id),
- Match, Next);
- EmitBlock(Match);
+ // We have to call __cxa_get_exception_ptr to get the adjusted
+ // pointer before copying.
+ llvm::CallInst *AdjustedExn =
+ CGF.Builder.CreateCall(getGetExceptionPtrFn(CGF), Exn);
+ AdjustedExn->setDoesNotThrow();
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+
+ CXXConstructorDecl *CD = RD->getCopyConstructor(CGF.getContext(), 0);
+ assert(CD && "record has no copy constructor!");
+ llvm::Value *CopyCtor = CGF.CGM.GetAddrOfCXXConstructor(CD, Ctor_Complete);
+
+ CallArgList CallArgs;
+ CallArgs.push_back(std::make_pair(RValue::get(ParamAddr),
+ CD->getThisType(CGF.getContext())));
+ CallArgs.push_back(std::make_pair(RValue::get(Cast),
+ CD->getParamDecl(0)->getType()));
+
+ const FunctionProtoType *FPT
+ = CD->getType()->getAs<FunctionProtoType>();
+
+ // Call the copy ctor in a terminate scope.
+ CGF.EHStack.pushTerminate();
+ CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
+ CopyCtor, ReturnValueSlot(), CallArgs, CD);
+ CGF.EHStack.popTerminate();
+
+ // Finally we can call __cxa_begin_catch.
+ CallBeginCatch(CGF, Exn, true);
+}
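
The record path matters because a handler can be entered for any subclass of the caught type: the copy constructor runs against the adjusted pointer returned by __cxa_get_exception_ptr, inside a terminate scope. Observable from the source level (illustrative; the copy into the exception object at the throw itself is normally elided):

    #include <cstdio>

    struct Base {
      Base() {}
      Base(const Base &) { std::puts("Base copy-ctor"); }
      virtual ~Base() {}
    };
    struct Derived : Base {};

    int main() {
      try {
        throw Derived();
      } catch (Base B) {       // copy-constructs B from the Base
        std::puts("handler");  // subobject of the Derived exception
      }
      return 0;
    }
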
+
+/// Begins a catch statement by initializing the catch variable and
+/// calling __cxa_begin_catch.
+static void BeginCatch(CodeGenFunction &CGF,
+ const CXXCatchStmt *S) {
+ // We have to be very careful with the ordering of cleanups here:
+ // C++ [except.throw]p4:
+ // The destruction [of the exception temporary] occurs
+ // immediately after the destruction of the object declared in
+ // the exception-declaration in the handler.
+ //
+ // So the precise ordering is:
+ // 1. Construct catch variable.
+ // 2. __cxa_begin_catch
+ // 3. Enter __cxa_end_catch cleanup
+ // 4. Enter dtor cleanup
+ //
+ // We do this by initializing the exception variable with a
+ // "special initializer", InitCatchParam. Delegation sequence:
+ // - ExitCXXTryStmt opens a RunCleanupsScope
+ // - EmitLocalBlockVarDecl creates the variable and debug info
+ // - InitCatchParam initializes the variable from the exception
+ // - CallBeginCatch calls __cxa_begin_catch
+ // - CallBeginCatch enters the __cxa_end_catch cleanup
+ // - EmitLocalBlockVarDecl enters the variable destructor cleanup
+  //   - ExitCXXTryStmt emits the code for the catch body
+  //   - ExitCXXTryStmt closes the RunCleanupsScope
+
+ VarDecl *CatchParam = S->getExceptionDecl();
+ if (!CatchParam) {
+ llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn");
+ CallBeginCatch(CGF, Exn, true);
+ return;
+ }
+
+ // Emit the local.
+ CGF.EmitLocalBlockVarDecl(*CatchParam, &InitCatchParam);
+}
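
The ordering constraint quoted above from [except.throw]p4 is observable: the handler's variable is destroyed first, and the exception object only afterwards, when the __cxa_end_catch cleanup runs. For instance (assuming the usual copy elision at the throw):

    #include <cstdio>

    struct E {
      const char *Name;
      explicit E(const char *N) : Name(N) {}
      E(const E &) : Name("handler copy") {}
      ~E() { std::printf("~E(%s)\n", Name); }
    };

    int main() {
      try {
        throw E("exception object");
      } catch (E Param) {  // steps 1-4 above: construct, begin-catch,
      }                    // end-catch cleanup, dtor cleanup
      // Prints ~E(handler copy) first, then ~E(exception object).
      return 0;
    }
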
+
+namespace {
+ struct CallRethrow : EHScopeStack::LazyCleanup {
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.EmitCallOrInvoke(getReThrowFn(CGF), 0, 0);
+ }
+ };
+}
+
+void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
+ assert(CatchScope.getNumHandlers() == NumHandlers);
+
+ // Copy the handler blocks off before we pop the EH stack. Emitting
+ // the handlers might scribble on this memory.
+ llvm::SmallVector<EHCatchScope::Handler, 8> Handlers(NumHandlers);
+ memcpy(Handlers.data(), CatchScope.begin(),
+ NumHandlers * sizeof(EHCatchScope::Handler));
+ EHStack.popCatch();
+
+ // The fall-through block.
+ llvm::BasicBlock *ContBB = createBasicBlock("try.cont");
+
+ // We just emitted the body of the try; jump to the continue block.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+
+ // Determine if we need an implicit rethrow for all these catch handlers.
+ bool ImplicitRethrow = false;
+ if (IsFnTryBlock)
+ ImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
+ isa<CXXConstructorDecl>(CurCodeDecl);
+
+ for (unsigned I = 0; I != NumHandlers; ++I) {
+ llvm::BasicBlock *CatchBlock = Handlers[I].Block;
+ EmitBlock(CatchBlock);
+
+ // Catch the exception if this isn't a catch-all.
+ const CXXCatchStmt *C = S.getHandler(I);
+
+ // Enter a cleanup scope, including the catch variable and the
+ // end-catch.
+ RunCleanupsScope CatchScope(*this);
+
+ // Initialize the catch variable and set up the cleanups.
+ BeginCatch(*this, C);
+
+ // If there's an implicit rethrow, push a normal "cleanup" to call
+    // __cxa_rethrow. This needs to happen before __cxa_end_catch is
+ // called, and so it is pushed after BeginCatch.
+ if (ImplicitRethrow)
+ EHStack.pushLazyCleanup<CallRethrow>(NormalCleanup);
+
+ // Perform the body of the catch.
+ EmitStmt(C->getHandlerBlock());
+
+ // Fall out through the catch cleanups.
+ CatchScope.ForceCleanup();
+
+ // Branch out of the try.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+ }
+
+ EmitBlock(ContBB);
+}
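
ImplicitRethrow corresponds to function-try-blocks on constructors and destructors, whose handlers may not swallow the exception: flowing off the end of the handler rethrows, which is exactly what the CallRethrow cleanup arranges. In source terms:

    #include <cstdio>

    struct Member { Member() { throw 1; } };

    struct Widget {
      Member M;
      Widget() try : M() {
        std::puts("body");          // never reached
      } catch (int) {
        std::puts("ctor handler");  // runs, then implicitly rethrows
      }
    };

    int main() {
      try { Widget W; (void)W; }
      catch (int) { std::puts("caller handler"); }  // sees the rethrow
      return 0;
    }
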
+
+/// Enters a finally block for an implementation using zero-cost
+/// exceptions. This is mostly general, but hard-codes some
+/// language/ABI-specific behavior in the catch-all sections.
+CodeGenFunction::FinallyInfo
+CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
+ llvm::Constant *BeginCatchFn,
+ llvm::Constant *EndCatchFn,
+ llvm::Constant *RethrowFn) {
+ assert((BeginCatchFn != 0) == (EndCatchFn != 0) &&
+ "begin/end catch functions not paired");
+ assert(RethrowFn && "rethrow function is required");
+
+ // The rethrow function has one of the following two types:
+ // void (*)()
+ // void (*)(void*)
+ // In the latter case we need to pass it the exception object.
+ // But we can't use the exception slot because the @finally might
+ // have a landing pad (which would overwrite the exception slot).
+ const llvm::FunctionType *RethrowFnTy =
+ cast<llvm::FunctionType>(
+ cast<llvm::PointerType>(RethrowFn->getType())
+ ->getElementType());
+ llvm::Value *SavedExnVar = 0;
+ if (RethrowFnTy->getNumParams())
+ SavedExnVar = CreateTempAlloca(Builder.getInt8PtrTy(), "finally.exn");
+
+ // A finally block is a statement which must be executed on any edge
+ // out of a given scope. Unlike a cleanup, the finally block may
+ // contain arbitrary control flow leading out of itself. In
+ // addition, finally blocks should always be executed, even if there
+ // are no catch handlers higher on the stack. Therefore, we
+ // surround the protected scope with a combination of a normal
+ // cleanup (to catch attempts to break out of the block via normal
+ // control flow) and an EH catch-all (semantically "outside" any try
+ // statement to which the finally block might have been attached).
+ // The finally block itself is generated in the context of a cleanup
+ // which conditionally leaves the catch-all.
+
+ FinallyInfo Info;
+
+ // Jump destination for performing the finally block on an exception
+ // edge. We'll never actually reach this block, so unreachable is
+ // fine.
+ JumpDest RethrowDest = getJumpDestInCurrentScope(getUnreachableBlock());
+
+ // Whether the finally block is being executed for EH purposes.
+  llvm::AllocaInst *ForEHVar = CreateTempAlloca(Builder.getInt1Ty(),
+                                                "finally.for-eh");
+ InitTempAlloca(ForEHVar, llvm::ConstantInt::getFalse(getLLVMContext()));
+
+ // Enter a normal cleanup which will perform the @finally block.
+ {
+ CodeGenFunction::CleanupBlock Cleanup(*this, NormalCleanup);
+
+ // Enter a cleanup to call the end-catch function if one was provided.
+ if (EndCatchFn) {
+      CodeGenFunction::CleanupBlock FinallyExitCleanup(*this, NormalAndEHCleanup);
+
+ llvm::BasicBlock *EndCatchBB = createBasicBlock("finally.endcatch");
+ llvm::BasicBlock *CleanupContBB = createBasicBlock("finally.cleanup.cont");
+
+ llvm::Value *ShouldEndCatch =
+ Builder.CreateLoad(ForEHVar, "finally.endcatch");
+ Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
+ EmitBlock(EndCatchBB);
+ EmitCallOrInvoke(EndCatchFn, 0, 0); // catch-all, so might throw
+ EmitBlock(CleanupContBB);
}
- llvm::BasicBlock *MatchEnd = createBasicBlock("match.end");
- llvm::BasicBlock *MatchHandler = createBasicBlock("match.handler");
-
- PushCleanupBlock(MatchEnd);
- setInvokeDest(MatchHandler);
-
- llvm::Value *ExcObject = Builder.CreateCall(getBeginCatchFn(*this), Exc);
-
- {
- CleanupScope CatchScope(*this);
- // Bind the catch parameter if it exists.
- if (CatchParam) {
- QualType CatchType = CatchParam->getType().getNonReferenceType();
- setInvokeDest(TerminateHandler);
- bool WasPointer = true;
- bool WasPointerReference = false;
- CatchType = CGM.getContext().getCanonicalType(CatchType);
- if (CatchType.getTypePtr()->isPointerType()) {
- if (isa<ReferenceType>(CatchParam->getType()))
- WasPointerReference = true;
- } else {
- if (!isa<ReferenceType>(CatchParam->getType()))
- WasPointer = false;
- CatchType = getContext().getPointerType(CatchType);
- }
- ExcObject = Builder.CreateBitCast(ExcObject, ConvertType(CatchType));
- EmitLocalBlockVarDecl(*CatchParam);
- // FIXME: we need to do this sooner so that the EH region for the
- // cleanup doesn't start until after the ctor completes, use a decl
- // init?
- CopyObject(*this, CatchParam->getType().getNonReferenceType(),
- WasPointer, WasPointerReference, ExcObject,
- GetAddrOfLocalVar(CatchParam));
- setInvokeDest(MatchHandler);
+ // Emit the finally block.
+ EmitStmt(Body);
+
+ // If the end of the finally is reachable, check whether this was
+ // for EH. If so, rethrow.
+ if (HaveInsertPoint()) {
+ llvm::BasicBlock *RethrowBB = createBasicBlock("finally.rethrow");
+ llvm::BasicBlock *ContBB = createBasicBlock("finally.cont");
+
+ llvm::Value *ShouldRethrow =
+ Builder.CreateLoad(ForEHVar, "finally.shouldthrow");
+ Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
+
+ EmitBlock(RethrowBB);
+ if (SavedExnVar) {
+ llvm::Value *Args[] = { Builder.CreateLoad(SavedExnVar) };
+ EmitCallOrInvoke(RethrowFn, Args, Args+1);
+ } else {
+ EmitCallOrInvoke(RethrowFn, 0, 0);
}
+ Builder.CreateUnreachable();
- EmitStmt(CatchBody);
+ EmitBlock(ContBB);
}
- EmitBranchThroughCleanup(FinallyEnd);
-
- EmitBlock(MatchHandler);
-
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- llvm::Value *Args[] = {
- Exc, Personality,
- llvm::ConstantInt::getNullValue(llvm::Type::getInt32Ty(VMContext))
- };
- Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
- Builder.CreateStore(Exc, RethrowPtr);
- EmitBranchThroughCleanup(FinallyRethrow);
-
- CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock();
-
- EmitBlock(MatchEnd);
-
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getEndCatchFn(*this),
- Cont, TerminateHandler,
- &Args[0], &Args[0]);
- EmitBlock(Cont);
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
-
- Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- Builder.CreateStore(Exc, RethrowPtr);
- EmitBranchThroughCleanup(FinallyRethrow);
-
- if (Next)
- EmitBlock(Next);
+ // Leave the end-catch cleanup. As an optimization, pretend that
+ // the fallthrough path was inaccessible; we've dynamically proven
+ // that we're not in the EH case along that path.
+ if (EndCatchFn) {
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ PopCleanupBlock();
+ Builder.restoreIP(SavedIP);
+ }
+
+ // Now make sure we actually have an insertion point or the
+ // cleanup gods will hate us.
+ EnsureInsertPoint();
}
- if (!HasCatchAll) {
- Builder.CreateStore(Exc, RethrowPtr);
- EmitBranchThroughCleanup(FinallyRethrow);
+
+ // Enter a catch-all scope.
+ llvm::BasicBlock *CatchAllBB = createBasicBlock("finally.catchall");
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
+ Builder.SetInsertPoint(CatchAllBB);
+
+ // If there's a begin-catch function, call it.
+ if (BeginCatchFn) {
+ Builder.CreateCall(BeginCatchFn, Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotThrow();
}
- CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock();
+ // If we need to remember the exception pointer to rethrow later, do so.
+ if (SavedExnVar) {
+ llvm::Value *SavedExn = Builder.CreateLoad(getExceptionSlot());
+ Builder.CreateStore(SavedExn, SavedExnVar);
+ }
- setInvokeDest(PrevLandingPad);
+ // Tell the finally block that we're in EH.
+ Builder.CreateStore(llvm::ConstantInt::getTrue(getLLVMContext()), ForEHVar);
- EmitBlock(FinallyBlock);
+ // Thread a jump through the finally cleanup.
+ EmitBranchThroughCleanup(RethrowDest);
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
+ Builder.restoreIP(SavedIP);
- // Branch around the rethrow code.
- EmitBranch(FinallyEnd);
+ EHCatchScope *CatchScope = EHStack.pushCatch(1);
+ CatchScope->setCatchAllHandler(0, CatchAllBB);
- EmitBlock(FinallyRethrow);
- // FIXME: Eventually we can chain the handlers together and just do a call
- // here.
- if (getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getUnwindResumeOrRethrowFn(), Cont,
- getInvokeDest(),
- Builder.CreateLoad(RethrowPtr));
- EmitBlock(Cont);
- } else
- Builder.CreateCall(getUnwindResumeOrRethrowFn(),
- Builder.CreateLoad(RethrowPtr));
+ return Info;
+}
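A minimal sketch of the scheme described in the long comment above, written as source-level C++ rather than IR (illustration only; work, finally_body, and rethrow_current are placeholders):

    void work();
    void finally_body();
    void rethrow_current();            // stands in for RethrowFn

    void demo_finally() {
      bool for_eh = false;             // the "finally.for-eh" flag
      try {
        work();                        // the protected scope
      } catch (...) {                  // the catch-all, "outside" any user try
        for_eh = true;                 // tell the finally block we're in EH
      }
      finally_body();                  // the cleanup: runs on every exit edge
      if (for_eh)
        rethrow_current();             // "finally.rethrow"; never returns
    }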
- Builder.CreateUnreachable();
+void CodeGenFunction::ExitFinallyBlock(FinallyInfo &Info) {
+ // Leave the finally catch-all.
+ EHCatchScope &Catch = cast<EHCatchScope>(*EHStack.begin());
+ llvm::BasicBlock *CatchAllBB = Catch.getHandler(0).Block;
+ EHStack.popCatch();
+
+ // And leave the normal cleanup.
+ PopCleanupBlock();
- EmitBlock(FinallyEnd);
-}
-
-CodeGenFunction::EHCleanupBlock::~EHCleanupBlock() {
- CGF.setInvokeDest(PreviousInvokeDest);
-
- llvm::BasicBlock *EndOfCleanup = CGF.Builder.GetInsertBlock();
-
- // Jump to the beginning of the cleanup.
- CGF.Builder.SetInsertPoint(CleanupHandler, CleanupHandler->begin());
-
- // The libstdc++ personality function.
- // TODO: generalize to work with other libraries.
- llvm::Constant *Personality = getPersonalityFn(CGF.CGM);
-
- // %exception = call i8* @llvm.eh.exception()
- // Magic intrinsic which gives us a handle to the caught
- // exception.
- llvm::Value *llvm_eh_exception =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
-
- llvm::Constant *Null = llvm::ConstantPointerNull::get(CGF.PtrToInt8Ty);
-
- // %ignored = call i32 @llvm.eh.selector(i8* %exception,
- // i8* @__gxx_personality_v0,
- // i8* null)
- // Magic intrinsic which tells LLVM that this invoke landing pad is
- // just a cleanup block.
- llvm::Value *Args[] = { Exc, Personality, Null };
- llvm::Value *llvm_eh_selector =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- CGF.Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
-
- // And then we fall through into the code that the user put there.
- // Jump back to the end of the cleanup.
- CGF.Builder.SetInsertPoint(EndOfCleanup);
-
- // Rethrow the exception.
- if (CGF.getInvokeDest()) {
- llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
- CGF.Builder.CreateInvoke(CGF.getUnwindResumeOrRethrowFn(), Cont,
- CGF.getInvokeDest(), Exc);
- CGF.EmitBlock(Cont);
- } else
- CGF.Builder.CreateCall(CGF.getUnwindResumeOrRethrowFn(), Exc);
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ EmitBlock(CatchAllBB, true);
+
+ Builder.restoreIP(SavedIP);
+}
+
+llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
+ if (TerminateLandingPad)
+ return TerminateLandingPad;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // This will get inserted at the end of the function.
+ TerminateLandingPad = createBasicBlock("terminate.lpad");
+ Builder.SetInsertPoint(TerminateLandingPad);
+
+ // Tell the backend that this is a landing pad.
+ llvm::CallInst *Exn =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn");
+ Exn->setDoesNotThrow();
+
+ // Tell the backend what the exception table should be:
+ // nothing but a catch-all.
+ llvm::Value *Args[3] = { Exn, getPersonalityFn(*this),
+ getCatchAllValue(*this) };
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
+ Args, Args+3, "eh.selector")
+ ->setDoesNotThrow();
+
+ llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
+ TerminateCall->setDoesNotReturn();
+ TerminateCall->setDoesNotThrow();
- CGF.Builder.CreateUnreachable();
+ Builder.CreateUnreachable();
- // Resume inserting where we started, but put the new cleanup
- // handler in place.
- if (PreviousInsertionBlock)
- CGF.Builder.SetInsertPoint(PreviousInsertionBlock);
- else
- CGF.Builder.ClearInsertionPoint();
+ // Restore the saved insertion state.
+ Builder.restoreIP(SavedIP);
- if (CGF.Exceptions)
- CGF.setInvokeDest(CleanupHandler);
+ return TerminateLandingPad;
}
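Roughly the landing pad this helper builds, in the IR-comment style used elsewhere in this file (a sketch; the exact symbol names are assumed):

    // terminate.lpad:
    //   %exn = call i8* @llvm.eh.exception()              ; nounwind
    //   %eh.selector = call i32 @llvm.eh.selector(
    //                    i8* %exn, i8* <personality>, i8* <catch-all>)
    //   call void @_ZSt9terminatev()                      ; noreturn, nounwind
    //   unreachable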
llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
if (TerminateHandler)
return TerminateHandler;
- // We don't want to change anything at the current location, so
- // save it aside and clear the insert point.
- llvm::BasicBlock *SavedInsertBlock = Builder.GetInsertBlock();
- llvm::BasicBlock::iterator SavedInsertPoint = Builder.GetInsertPoint();
- Builder.ClearInsertionPoint();
-
- llvm::Constant *Personality = getPersonalityFn(CGM);
- llvm::Value *llvm_eh_exception =
- CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
- // Set up terminate handler
+ // Set up the terminate handler. This block is inserted at the very
+ // end of the function by FinishFunction.
TerminateHandler = createBasicBlock("terminate.handler");
- EmitBlock(TerminateHandler);
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- llvm::Value *Args[] = {
- Exc, Personality,
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1)
- };
- Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
- llvm::CallInst *TerminateCall =
- Builder.CreateCall(getTerminateFn(*this));
+ Builder.SetInsertPoint(TerminateHandler);
+ llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
TerminateCall->setDoesNotReturn();
TerminateCall->setDoesNotThrow();
Builder.CreateUnreachable();
// Restore the saved insertion state.
- Builder.SetInsertPoint(SavedInsertBlock, SavedInsertPoint);
+ Builder.restoreIP(SavedIP);
return TerminateHandler;
}
+
+CodeGenFunction::CleanupBlock::CleanupBlock(CodeGenFunction &CGF,
+ CleanupKind Kind)
+ : CGF(CGF), SavedIP(CGF.Builder.saveIP()), NormalCleanupExitBB(0) {
+ llvm::BasicBlock *EntryBB = CGF.createBasicBlock("cleanup");
+ CGF.Builder.SetInsertPoint(EntryBB);
+
+ switch (Kind) {
+ case NormalAndEHCleanup:
+ NormalCleanupEntryBB = EHCleanupEntryBB = EntryBB;
+ break;
+
+ case NormalCleanup:
+ NormalCleanupEntryBB = EntryBB;
+ EHCleanupEntryBB = 0;
+ break;
+
+ case EHCleanup:
+ NormalCleanupEntryBB = 0;
+ EHCleanupEntryBB = EntryBB;
+ CGF.EHStack.pushTerminate();
+ break;
+ }
+}
+
+void CodeGenFunction::CleanupBlock::beginEHCleanup() {
+ assert(EHCleanupEntryBB == 0 && "already started an EH cleanup");
+ NormalCleanupExitBB = CGF.Builder.GetInsertBlock();
+ assert(NormalCleanupExitBB && "end of normal cleanup is unreachable");
+
+ EHCleanupEntryBB = CGF.createBasicBlock("eh.cleanup");
+ CGF.Builder.SetInsertPoint(EHCleanupEntryBB);
+ CGF.EHStack.pushTerminate();
+}
+
+CodeGenFunction::CleanupBlock::~CleanupBlock() {
+ llvm::BasicBlock *EHCleanupExitBB = 0;
+
+ // If we're currently writing the EH cleanup...
+ if (EHCleanupEntryBB) {
+ // Set the EH cleanup exit block.
+ EHCleanupExitBB = CGF.Builder.GetInsertBlock();
+ assert(EHCleanupExitBB && "end of EH cleanup is unreachable");
+
+ // If we're actually writing both at once, set the normal exit, too.
+ if (EHCleanupEntryBB == NormalCleanupEntryBB)
+ NormalCleanupExitBB = EHCleanupExitBB;
+
+ // Otherwise, we must have pushed a terminate handler.
+ else
+ CGF.EHStack.popTerminate();
+
+ // Otherwise, just set the normal cleanup exit block.
+ } else {
+ NormalCleanupExitBB = CGF.Builder.GetInsertBlock();
+ assert(NormalCleanupExitBB && "end of normal cleanup is unreachable");
+ }
+
+ CGF.EHStack.pushCleanup(NormalCleanupEntryBB, NormalCleanupExitBB,
+ EHCleanupEntryBB, EHCleanupExitBB);
+
+ CGF.Builder.restoreIP(SavedIP);
+}
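The intended usage is the RAII pattern this same commit adopts in CGExpr.cpp further down (a sketch, assuming a CodeGenFunction &CGF with Exceptions set):

    {
      CodeGenFunction::CleanupBlock Cleanup(CGF, NormalCleanup);
      // ... emit the normal-path cleanup at CGF.Builder's insert point ...
      if (CGF.Exceptions) {
        Cleanup.beginEHCleanup();
        // ... emit the EH-path copy of the same cleanup ...
      }
    } // ~CleanupBlock pushes the finished entry/exit blocks onto CGF.EHStack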
+
+EHScopeStack::LazyCleanup::~LazyCleanup() {
+ llvm_unreachable("LazyCleanup is indestructable");
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.h b/contrib/llvm/tools/clang/lib/CodeGen/CGException.h
new file mode 100644
index 0000000..80739cd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.h
@@ -0,0 +1,428 @@
+//===-- CGException.h - Classes for exceptions IR generation ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of LLVM IR for exceptions in
+// C++ and Objective-C.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGEXCEPTION_H
+#define CLANG_CODEGEN_CGEXCEPTION_H
+
+/// EHScopeStack is defined in CodeGenFunction.h, but its
+/// implementation is in this file and in CGException.cpp.
+#include "CodeGenFunction.h"
+
+namespace llvm {
+ class Value;
+ class BasicBlock;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// A protected scope for zero-cost EH handling.
+class EHScope {
+ llvm::BasicBlock *CachedLandingPad;
+
+ unsigned K : 3;
+
+protected:
+ enum { BitsRemaining = 29 };
+
+public:
+ enum Kind { Cleanup, LazyCleanup, Catch, Terminate, Filter };
+
+ EHScope(Kind K) : CachedLandingPad(0), K(K) {}
+
+ Kind getKind() const { return static_cast<Kind>(K); }
+
+ llvm::BasicBlock *getCachedLandingPad() const {
+ return CachedLandingPad;
+ }
+
+ void setCachedLandingPad(llvm::BasicBlock *Block) {
+ CachedLandingPad = Block;
+ }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective-C @finally blocks are represented using a cleanup scope
+/// after the catch scope.
+class EHCatchScope : public EHScope {
+ unsigned NumHandlers : BitsRemaining;
+
+ // In effect, we have a flexible array member
+ // Handler Handlers[0];
+ // But that's only standard in C99, not C++, so we have to do
+ // annoying pointer arithmetic instead.
+
+public:
+ struct Handler {
+ /// A type info value, or null (C++ null, not an LLVM null pointer)
+ /// for a catch-all.
+ llvm::Value *Type;
+
+ /// The catch handler for this type.
+ llvm::BasicBlock *Block;
+
+ static Handler make(llvm::Value *Type, llvm::BasicBlock *Block) {
+ Handler Temp;
+ Temp.Type = Type;
+ Temp.Block = Block;
+ return Temp;
+ }
+ };
+
+private:
+ Handler *getHandlers() {
+ return reinterpret_cast<Handler*>(this+1);
+ }
+
+ const Handler *getHandlers() const {
+ return reinterpret_cast<const Handler*>(this+1);
+ }
+
+public:
+ static size_t getSizeForNumHandlers(unsigned N) {
+ return sizeof(EHCatchScope) + N * sizeof(Handler);
+ }
+
+ EHCatchScope(unsigned NumHandlers)
+ : EHScope(Catch), NumHandlers(NumHandlers) {
+ }
+
+ unsigned getNumHandlers() const {
+ return NumHandlers;
+ }
+
+ void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
+ setHandler(I, /*catchall*/ 0, Block);
+ }
+
+ void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) {
+ assert(I < getNumHandlers());
+ getHandlers()[I] = Handler::make(Type, Block);
+ }
+
+ const Handler &getHandler(unsigned I) const {
+ assert(I < getNumHandlers());
+ return getHandlers()[I];
+ }
+
+ typedef const Handler *iterator;
+ iterator begin() const { return getHandlers(); }
+ iterator end() const { return getHandlers() + getNumHandlers(); }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Catch;
+ }
+};
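A sketch of the layout trick described above (the allocation itself is done by EHScopeStack::pushCatch):

    // One contiguous allocation: [ EHCatchScope | Handler 0 | Handler 1 | ... ]
    size_t Bytes = EHCatchScope::getSizeForNumHandlers(2);
    // Bytes == sizeof(EHCatchScope) + 2 * sizeof(EHCatchScope::Handler),
    // and getHandlers() recovers the array as reinterpret_cast<Handler*>(this+1).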
+
+/// A cleanup scope which generates the cleanup blocks lazily.
+class EHLazyCleanupScope : public EHScope {
+ /// Whether this cleanup needs to be run along normal edges.
+ bool IsNormalCleanup : 1;
+
+ /// Whether this cleanup needs to be run along exception edges.
+ bool IsEHCleanup : 1;
+
+ /// The amount of extra storage needed by the LazyCleanup.
+ /// Always a multiple of the scope-stack alignment.
+ unsigned CleanupSize : 12;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned FixupDepth : BitsRemaining - 14;
+
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingNormal;
+
+ /// The nearest EH cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingEH;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *NormalBlock;
+
+ /// The dual entry/exit block along the EH edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *EHBlock;
+
+public:
+ /// Gets the size required for a lazy cleanup scope with the given
+ /// cleanup-data requirements.
+ static size_t getSizeForCleanupSize(size_t Size) {
+ return sizeof(EHLazyCleanupScope) + Size;
+ }
+
+ size_t getAllocatedSize() const {
+ return sizeof(EHLazyCleanupScope) + CleanupSize;
+ }
+
+ EHLazyCleanupScope(bool IsNormal, bool IsEH, unsigned CleanupSize,
+ unsigned FixupDepth,
+ EHScopeStack::stable_iterator EnclosingNormal,
+ EHScopeStack::stable_iterator EnclosingEH)
+ : EHScope(EHScope::LazyCleanup),
+ IsNormalCleanup(IsNormal), IsEHCleanup(IsEH),
+ CleanupSize(CleanupSize), FixupDepth(FixupDepth),
+ EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
+ NormalBlock(0), EHBlock(0)
+ {}
+
+ bool isNormalCleanup() const { return IsNormalCleanup; }
+ llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
+ void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
+
+ bool isEHCleanup() const { return IsEHCleanup; }
+ llvm::BasicBlock *getEHBlock() const { return EHBlock; }
+ void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; }
+
+ unsigned getFixupDepth() const { return FixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return EnclosingNormal;
+ }
+ EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
+ return EnclosingEH;
+ }
+
+ size_t getCleanupSize() const { return CleanupSize; }
+ void *getCleanupBuffer() { return this + 1; }
+
+ EHScopeStack::LazyCleanup *getCleanup() {
+ return reinterpret_cast<EHScopeStack::LazyCleanup*>(getCleanupBuffer());
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return (Scope->getKind() == LazyCleanup);
+ }
+};
+
+/// A scope which needs to execute some code if we try to unwind ---
+/// either normally, via the EH mechanism, or both --- through it.
+class EHCleanupScope : public EHScope {
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned FixupDepth : BitsRemaining;
+
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingNormal;
+
+ /// The nearest EH cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingEH;
+
+ llvm::BasicBlock *NormalEntry;
+ llvm::BasicBlock *NormalExit;
+ llvm::BasicBlock *EHEntry;
+ llvm::BasicBlock *EHExit;
+
+public:
+ static size_t getSize() { return sizeof(EHCleanupScope); }
+
+ EHCleanupScope(unsigned FixupDepth,
+ EHScopeStack::stable_iterator EnclosingNormal,
+ EHScopeStack::stable_iterator EnclosingEH,
+ llvm::BasicBlock *NormalEntry, llvm::BasicBlock *NormalExit,
+ llvm::BasicBlock *EHEntry, llvm::BasicBlock *EHExit)
+ : EHScope(Cleanup), FixupDepth(FixupDepth),
+ EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
+ NormalEntry(NormalEntry), NormalExit(NormalExit),
+ EHEntry(EHEntry), EHExit(EHExit) {
+ assert((NormalEntry != 0) == (NormalExit != 0));
+ assert((EHEntry != 0) == (EHExit != 0));
+ }
+
+ bool isNormalCleanup() const { return NormalEntry != 0; }
+ bool isEHCleanup() const { return EHEntry != 0; }
+
+ llvm::BasicBlock *getNormalEntry() const { return NormalEntry; }
+ llvm::BasicBlock *getNormalExit() const { return NormalExit; }
+ llvm::BasicBlock *getEHEntry() const { return EHEntry; }
+ llvm::BasicBlock *getEHExit() const { return EHExit; }
+ unsigned getFixupDepth() const { return FixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return EnclosingNormal;
+ }
+ EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
+ return EnclosingEH;
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Cleanup;
+ }
+};
+
+/// An exceptions scope which filters exceptions thrown through it.
+/// Only exceptions matching the filter types will be permitted to be
+/// thrown.
+///
+/// This is used to implement C++ exception specifications.
+class EHFilterScope : public EHScope {
+ unsigned NumFilters : BitsRemaining;
+
+ // Essentially ends in a flexible array member:
+ // llvm::Value *FilterTypes[0];
+
+ llvm::Value **getFilters() {
+ return reinterpret_cast<llvm::Value**>(this+1);
+ }
+
+ llvm::Value * const *getFilters() const {
+ return reinterpret_cast<llvm::Value* const *>(this+1);
+ }
+
+public:
+ EHFilterScope(unsigned NumFilters) :
+ EHScope(Filter), NumFilters(NumFilters) {}
+
+ static size_t getSizeForNumFilters(unsigned NumFilters) {
+ return sizeof(EHFilterScope) + NumFilters * sizeof(llvm::Value*);
+ }
+
+ unsigned getNumFilters() const { return NumFilters; }
+
+ void setFilter(unsigned I, llvm::Value *FilterValue) {
+ assert(I < getNumFilters());
+ getFilters()[I] = FilterValue;
+ }
+
+ llvm::Value *getFilter(unsigned I) const {
+ assert(I < getNumFilters());
+ return getFilters()[I];
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Filter;
+ }
+};
+
+/// An exceptions scope which calls std::terminate if any exception
+/// reaches it.
+class EHTerminateScope : public EHScope {
+public:
+ EHTerminateScope() : EHScope(Terminate) {}
+ static size_t getSize() { return sizeof(EHTerminateScope); }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Terminate;
+ }
+};
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+ char *Ptr;
+
+ friend class EHScopeStack;
+ explicit iterator(char *Ptr) : Ptr(Ptr) {}
+
+public:
+ iterator() : Ptr(0) {}
+
+ EHScope *get() const {
+ return reinterpret_cast<EHScope*>(Ptr);
+ }
+
+ EHScope *operator->() const { return get(); }
+ EHScope &operator*() const { return *get(); }
+
+ iterator &operator++() {
+ switch (get()->getKind()) {
+ case EHScope::Catch:
+ Ptr += EHCatchScope::getSizeForNumHandlers(
+ static_cast<const EHCatchScope*>(get())->getNumHandlers());
+ break;
+
+ case EHScope::Filter:
+ Ptr += EHFilterScope::getSizeForNumFilters(
+ static_cast<const EHFilterScope*>(get())->getNumFilters());
+ break;
+
+ case EHScope::LazyCleanup:
+ Ptr += static_cast<const EHLazyCleanupScope*>(get())
+ ->getAllocatedSize();
+ break;
+
+ case EHScope::Cleanup:
+ Ptr += EHCleanupScope::getSize();
+ break;
+
+ case EHScope::Terminate:
+ Ptr += EHTerminateScope::getSize();
+ break;
+ }
+
+ return *this;
+ }
+
+ iterator next() {
+ iterator copy = *this;
+ ++copy;
+ return copy;
+ }
+
+ iterator operator++(int) {
+ iterator copy = *this;
+ operator++();
+ return copy;
+ }
+
+ bool operator==(iterator other) const { return Ptr == other.Ptr; }
+ bool operator!=(iterator other) const { return Ptr != other.Ptr; }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+ return iterator(StartOfData);
+}
+
+inline EHScopeStack::iterator EHScopeStack::end() const {
+ return iterator(EndOfBuffer);
+}
+
+inline void EHScopeStack::popCatch() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCatchScope>(*begin()));
+ StartOfData += EHCatchScope::getSizeForNumHandlers(
+ cast<EHCatchScope>(*begin()).getNumHandlers());
+
+ assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
+ CatchDepth--;
+}
+
+inline void EHScopeStack::popTerminate() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHTerminateScope>(*begin()));
+ StartOfData += EHTerminateScope::getSize();
+
+ assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
+ CatchDepth--;
+}
+
+inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
+ assert(sp.isValid() && "finding invalid savepoint");
+ assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
+ return iterator(EndOfBuffer - sp.Size);
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::stabilize(iterator ir) const {
+ assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
+ return stable_iterator(EndOfBuffer - ir.Ptr);
+}
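A sketch of why these offsets are stable (EHStack stands for the current function's EHScopeStack): a stable_iterator records a distance from EndOfBuffer rather than a pointer, so it keeps naming the same scope while scopes above it come and go.

    EHScopeStack::stable_iterator Saved = EHStack.stable_begin();
    // ... push and pop any number of scopes above the saved point ...
    EHScopeStack::iterator I = EHStack.find(Saved);  // still the same scope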
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
index d67618b..43bab9f 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
@@ -19,7 +19,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Intrinsics.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
@@ -44,8 +44,8 @@ void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}
-llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty,
- const llvm::Twine &Name) {
+llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
+ const llvm::Twine &Name) {
llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
// FIXME: Should we prefer the preferred type alignment here?
CharUnits Align = getContext().getTypeAlignInChars(Ty);
@@ -53,8 +53,8 @@ llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty,
return Alloc;
}
-llvm::Value *CodeGenFunction::CreateMemTemp(QualType Ty,
- const llvm::Twine &Name) {
+llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
+ const llvm::Twine &Name) {
llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
// FIXME: Should we prefer the preferred type alignment here?
CharUnits Align = getContext().getTypeAlignInChars(Ty);
@@ -168,49 +168,62 @@ struct SubobjectAdjustment {
}
};
-RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
- bool IsInitializer) {
- bool ShouldDestroyTemporaries = false;
- unsigned OldNumLiveTemporaries = 0;
+static llvm::Value *
+CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
+ const NamedDecl *InitializedDecl) {
+ if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (VD->hasGlobalStorage()) {
+ llvm::SmallString<256> Name;
+ CGF.CGM.getMangleContext().mangleReferenceTemporary(VD, Name);
+
+ const llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
+
+ // Create the reference temporary.
+ llvm::GlobalValue *RefTemp =
+ new llvm::GlobalVariable(CGF.CGM.getModule(),
+ RefTempTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::Constant::getNullValue(RefTempTy),
+ Name.str());
+ return RefTemp;
+ }
+ }
+
+ return CGF.CreateMemTemp(Type, "ref.tmp");
+}
+static llvm::Value *
+EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
+ llvm::Value *&ReferenceTemporary,
+ const CXXDestructorDecl *&ReferenceTemporaryDtor,
+ const NamedDecl *InitializedDecl) {
if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
E = DAE->getExpr();
-
+
if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
- ShouldDestroyTemporaries = true;
-
- // Keep track of the current cleanup stack depth.
- OldNumLiveTemporaries = LiveTemporaries.size();
-
- E = TE->getSubExpr();
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+
+ return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
+ ReferenceTemporary,
+ ReferenceTemporaryDtor,
+ InitializedDecl);
}
-
- RValue Val;
- if (E->isLvalue(getContext()) == Expr::LV_Valid) {
- // Emit the expr as an lvalue.
- LValue LV = EmitLValue(E);
- if (LV.isSimple()) {
- if (ShouldDestroyTemporaries) {
- // Pop temporaries.
- while (LiveTemporaries.size() > OldNumLiveTemporaries)
- PopCXXTemporary();
- }
-
- return RValue::get(LV.getAddress());
- }
-
- Val = EmitLoadOfLValue(LV, E->getType());
+
+ RValue RV;
+ if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid) {
+ // Emit the expression as an lvalue.
+ LValue LV = CGF.EmitLValue(E);
+
+ if (LV.isSimple())
+ return LV.getAddress();
- if (ShouldDestroyTemporaries) {
- // Pop temporaries.
- while (LiveTemporaries.size() > OldNumLiveTemporaries)
- PopCXXTemporary();
- }
+ // We have to load the lvalue.
+ RV = CGF.EmitLoadOfLValue(LV, E->getType());
} else {
QualType ResultTy = E->getType();
-
+
llvm::SmallVector<SubobjectAdjustment, 2> Adjustments;
- do {
+ while (true) {
if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
E = PE->getSubExpr();
continue;
@@ -233,7 +246,7 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
continue;
}
} else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- if (ME->getBase()->isLvalue(getContext()) != Expr::LV_Valid &&
+ if (ME->getBase()->isLvalue(CGF.getContext()) != Expr::LV_Valid &&
ME->getBase()->getType()->isRecordType()) {
if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
E = ME->getBase();
@@ -246,63 +259,46 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
// Nothing changed.
break;
- } while (true);
-
- Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false,
- IsInitializer);
-
- if (ShouldDestroyTemporaries) {
- // Pop temporaries.
- while (LiveTemporaries.size() > OldNumLiveTemporaries)
- PopCXXTemporary();
- }
+ }
- if (IsInitializer) {
- // We might have to destroy the temporary variable.
+ // Create a reference temporary if necessary.
+ if (CGF.hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType())
+ ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
+ InitializedDecl);
+
+ RV = CGF.EmitAnyExpr(E, ReferenceTemporary, /*IsAggLocVolatile=*/false,
+ /*IgnoreResult=*/false, InitializedDecl);
+
+ if (InitializedDecl) {
+ // Get the destructor for the reference temporary.
if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
- if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- if (!ClassDecl->hasTrivialDestructor()) {
- const CXXDestructorDecl *Dtor =
- ClassDecl->getDestructor(getContext());
-
- {
- DelayedCleanupBlock Scope(*this);
- EmitCXXDestructorCall(Dtor, Dtor_Complete,
- /*ForVirtualBase=*/false,
- Val.getAggregateAddr());
-
- // Make sure to jump to the exit block.
- EmitBranch(Scope.getCleanupExitBlock());
- }
- if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
- EmitCXXDestructorCall(Dtor, Dtor_Complete,
- /*ForVirtualBase=*/false,
- Val.getAggregateAddr());
- }
- }
- }
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!ClassDecl->hasTrivialDestructor())
+ ReferenceTemporaryDtor = ClassDecl->getDestructor();
}
}
-
+
// Check if we need to perform derived-to-base casts and/or field accesses, to
// get from the temporary object we created (and, potentially, for which we
// extended the lifetime) to the subobject we're binding the reference to.
if (!Adjustments.empty()) {
- llvm::Value *Object = Val.getAggregateAddr();
+ llvm::Value *Object = RV.getAggregateAddr();
for (unsigned I = Adjustments.size(); I != 0; --I) {
SubobjectAdjustment &Adjustment = Adjustments[I-1];
switch (Adjustment.Kind) {
case SubobjectAdjustment::DerivedToBaseAdjustment:
- Object = GetAddressOfBaseClass(Object,
- Adjustment.DerivedToBase.DerivedClass,
- *Adjustment.DerivedToBase.BasePath,
- /*NullCheckValue=*/false);
+ Object =
+ CGF.GetAddressOfBaseClass(Object,
+ Adjustment.DerivedToBase.DerivedClass,
+ *Adjustment.DerivedToBase.BasePath,
+ /*NullCheckValue=*/false);
break;
case SubobjectAdjustment::FieldAdjustment: {
unsigned CVR = Adjustment.Field.CVRQualifiers;
- LValue LV = EmitLValueForField(Object, Adjustment.Field.Field, CVR);
+ LValue LV =
+ CGF.EmitLValueForField(Object, Adjustment.Field.Field, CVR);
if (LV.isSimple()) {
Object = LV.getAddress();
break;
@@ -312,36 +308,72 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
// the object we're binding to.
QualType T = Adjustment.Field.Field->getType().getNonReferenceType()
.getUnqualifiedType();
- Object = CreateTempAlloca(ConvertType(T), "lv");
- EmitStoreThroughLValue(EmitLoadOfLValue(LV, T),
- LValue::MakeAddr(Object,
- Qualifiers::fromCVRMask(CVR)),
- T);
+ Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
+ LValue TempLV = LValue::MakeAddr(Object,
+ Qualifiers::fromCVRMask(CVR));
+ CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV, T), TempLV, T);
break;
}
+
}
}
- const llvm::Type *ResultPtrTy
- = llvm::PointerType::get(ConvertType(ResultTy), 0);
- Object = Builder.CreateBitCast(Object, ResultPtrTy, "temp");
- return RValue::get(Object);
+ const llvm::Type *ResultPtrTy = CGF.ConvertType(ResultTy)->getPointerTo();
+ return CGF.Builder.CreateBitCast(Object, ResultPtrTy, "temp");
}
}
- if (Val.isAggregate()) {
- Val = RValue::get(Val.getAggregateAddr());
- } else {
- // Create a temporary variable that we can bind the reference to.
- llvm::Value *Temp = CreateMemTemp(E->getType(), "reftmp");
- if (Val.isScalar())
- EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
- else
- StoreComplexToAddr(Val.getComplexVal(), Temp, false);
- Val = RValue::get(Temp);
+ if (RV.isAggregate())
+ return RV.getAggregateAddr();
+
+ // Create a temporary variable that we can bind the reference to.
+ ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
+ InitializedDecl);
+
+ if (RV.isScalar())
+ CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
+ /*Volatile=*/false, E->getType());
+ else
+ CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
+ /*Volatile=*/false);
+ return ReferenceTemporary;
+}
+
+RValue
+CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
+ const NamedDecl *InitializedDecl) {
+ llvm::Value *ReferenceTemporary = 0;
+ const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
+ llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
+ ReferenceTemporaryDtor,
+ InitializedDecl);
+
+ if (!ReferenceTemporaryDtor)
+ return RValue::get(Value);
+
+ // Make sure to call the destructor for the reference temporary.
+ if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (VD->hasGlobalStorage()) {
+ llvm::Constant *DtorFn =
+ CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
+ EmitCXXGlobalDtorRegistration(DtorFn,
+ cast<llvm::Constant>(ReferenceTemporary));
+
+ return RValue::get(Value);
+ }
+ }
+
+ CleanupBlock Cleanup(*this, NormalCleanup);
+ EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, ReferenceTemporary);
+
+ if (Exceptions) {
+ Cleanup.beginEHCleanup();
+ EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, ReferenceTemporary);
}
- return Val;
+ return RValue::get(Value);
}
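For orientation, the two temporary kinds the code above distinguishes (a sketch; S and make are placeholders):

    struct S { ~S(); };
    S make();

    void local_binding() {
      const S &r = make();    // CreateMemTemp "ref.tmp"; dtor via CleanupBlock
    }
    const S &global_binding = make();
    // global storage: an internal, mangled reference-temporary variable, with
    // the destructor registered through EmitCXXGlobalDtorRegistration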
@@ -359,118 +391,28 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
if (!CatchUndefined)
return;
- const llvm::Type *Size_tTy
- = llvm::IntegerType::get(VMContext, LLVMPointerWidth);
Address = Builder.CreateBitCast(Address, PtrToInt8Ty);
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &Size_tTy, 1);
- const llvm::IntegerType *Int1Ty = llvm::IntegerType::get(VMContext, 1);
+ const llvm::Type *IntPtrT = IntPtrTy;
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1);
+ const llvm::IntegerType *Int1Ty = llvm::Type::getInt1Ty(VMContext);
// In time, people may want to control this and use a 1 here.
llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0);
llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
llvm::BasicBlock *Cont = createBasicBlock();
llvm::BasicBlock *Check = createBasicBlock();
- llvm::Value *NegativeOne = llvm::ConstantInt::get(Size_tTy, -1ULL);
+ llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
EmitBlock(Check);
Builder.CreateCondBr(Builder.CreateICmpUGE(C,
- llvm::ConstantInt::get(Size_tTy, Size)),
+ llvm::ConstantInt::get(IntPtrTy, Size)),
Cont, getTrapBB());
EmitBlock(Cont);
}
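At the source level, the check amounts to the following (a sketch; the IR names in the comments are approximate):

    void demo_check() {
      char buf[4];
      char *p = buf;
      // Conceptually, EmitCheck(p, N) guards an N-byte access through p with:
      //   %size = llvm.objectsize(p)
      //   %size == -1  -> continue (size unknown, let it through)
      //   %size >= N   -> continue; otherwise branch to the trap block
      p[0] = 0;   // an in-bounds access; a known-too-small one would trap
    }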
-llvm::Value *CodeGenFunction::
-EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
- bool isInc, bool isPre) {
- QualType ValTy = E->getSubExpr()->getType();
- llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy).getScalarVal();
-
- int AmountVal = isInc ? 1 : -1;
-
- if (ValTy->isPointerType() &&
- ValTy->getAs<PointerType>()->isVariableArrayType()) {
- // The amount of the addition/subtraction needs to account for the VLA size
- ErrorUnsupported(E, "VLA pointer inc/dec");
- }
-
- llvm::Value *NextVal;
- if (const llvm::PointerType *PT =
- dyn_cast<llvm::PointerType>(InVal->getType())) {
- llvm::Constant *Inc =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal);
- if (!isa<llvm::FunctionType>(PT->getElementType())) {
- QualType PTEE = ValTy->getPointeeType();
- if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) {
- // Handle interface types, which are not represented with a concrete
- // type.
- int size = getContext().getTypeSize(OIT) / 8;
- if (!isInc)
- size = -size;
- Inc = llvm::ConstantInt::get(Inc->getType(), size);
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- InVal = Builder.CreateBitCast(InVal, i8Ty);
- NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
- llvm::Value *lhs = LV.getAddress();
- lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
- LV = LValue::MakeAddr(lhs, MakeQualifiers(ValTy));
- } else
- NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
- } else {
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
- NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
- NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
- NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
- }
- } else if (InVal->getType() == llvm::Type::getInt1Ty(VMContext) && isInc) {
- // Bool++ is an interesting case, due to promotion rules, we get:
- // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
- // Bool = ((int)Bool+1) != 0
- // An interesting aspect of this is that increment is always true.
- // Decrement does not have this property.
- NextVal = llvm::ConstantInt::getTrue(VMContext);
- } else if (isa<llvm::IntegerType>(InVal->getType())) {
- NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
-
- // Signed integer overflow is undefined behavior.
- if (ValTy->isSignedIntegerType())
- NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
- else
- NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
- } else {
- // Add the inc/dec to the real part.
- if (InVal->getType()->isFloatTy())
- NextVal =
- llvm::ConstantFP::get(VMContext,
- llvm::APFloat(static_cast<float>(AmountVal)));
- else if (InVal->getType()->isDoubleTy())
- NextVal =
- llvm::ConstantFP::get(VMContext,
- llvm::APFloat(static_cast<double>(AmountVal)));
- else {
- llvm::APFloat F(static_cast<float>(AmountVal));
- bool ignored;
- F.convert(Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
- &ignored);
- NextVal = llvm::ConstantFP::get(VMContext, F);
- }
- NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
- }
-
- // Store the updated result through the lvalue.
- if (LV.isBitField())
- EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
- else
- EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
-
- // If this is a postinc, return the value read from memory, otherwise use the
- // updated value.
- return isPre ? NextVal : InVal;
-}
-
-
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre) {
@@ -568,6 +510,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
switch (E->getStmtClass()) {
default: return EmitUnsupportedLValue(E, "l-value expression");
+ case Expr::ObjCSelectorExprClass:
+ return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
case Expr::ObjCIsaExprClass:
return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
case Expr::BinaryOperatorClass:
@@ -600,8 +544,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
case Expr::CXXExprWithTemporariesClass:
return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
- case Expr::CXXZeroInitValueExprClass:
- return EmitNullInitializationLValue(cast<CXXZeroInitValueExpr>(E));
+ case Expr::CXXScalarValueInitExprClass:
+ return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
case Expr::CXXDefaultArgExprClass:
return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
case Expr::CXXTypeidExprClass:
@@ -816,8 +760,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
const VectorType *ExprVT = ExprType->getAs<VectorType>();
if (!ExprVT) {
unsigned InIdx = getAccessedFieldNo(0, Elts);
- llvm::Value *Elt = llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), InIdx);
+ llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
}
@@ -827,8 +770,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
llvm::SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumResultElts; ++i) {
unsigned InIdx = getAccessedFieldNo(i, Elts);
- Mask.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), InIdx));
+ Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
}
llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
@@ -1044,8 +986,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
for (unsigned i = 0; i != NumSrcElts; ++i) {
unsigned InIdx = getAccessedFieldNo(i, Elts);
- Mask[InIdx] = llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), i);
+ Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
}
llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
@@ -1058,7 +999,6 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// FIXME: since we're shuffling with undef, can we just use the indices
// into that? This could be simpler.
llvm::SmallVector<llvm::Constant*, 4> ExtMask;
- const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
unsigned i;
for (i = 0; i != NumSrcElts; ++i)
ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
@@ -1089,7 +1029,6 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
} else {
// If the Src is a scalar (not a vector) it must be updating one element.
unsigned InIdx = getAccessedFieldNo(0, Elts);
- const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
}
@@ -1401,6 +1340,22 @@ llvm::BasicBlock *CodeGenFunction::getTrapBB() {
return TrapBB;
}
+/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
+/// array to pointer, return the array subexpression.
+static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
+ // If this isn't just an array->pointer decay, bail out.
+ const CastExpr *CE = dyn_cast<CastExpr>(E);
+ if (CE == 0 || CE->getCastKind() != CastExpr::CK_ArrayToPointerDecay)
+ return 0;
+
+ // If this is a decay from variable width array, bail out.
+ const Expr *SubExpr = CE->getSubExpr();
+ if (SubExpr->getType()->isVariableArrayType())
+ return 0;
+
+ return SubExpr;
+}
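The two GEP shapes being compared, as a sketch (IR spellings approximate):

    int A[16];
    int demo_index(int i) {
      return A[i];
      // decay-then-index:  %p = gep [16 x i32]* @A, i32 0, i32 0
      //                    %e = gep i32* %p, i32 %i
      // combined form:     %e = gep [16 x i32]* @A, i32 0, i32 %i
    }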
+
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// The index must always be an integer, which is not an aggregate. Emit it.
llvm::Value *Idx = EmitScalarExpr(E->getIdx());
@@ -1413,25 +1368,19 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Emit the vector as an lvalue to get its address.
LValue LHS = EmitLValue(E->getBase());
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
- Idx = Builder.CreateIntCast(Idx,
- llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx");
+ Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
E->getBase()->getType().getCVRQualifiers());
}
- // The base must be a pointer, which is not an aggregate. Emit it.
- llvm::Value *Base = EmitScalarExpr(E->getBase());
-
// Extend or truncate the index type to 32 or 64-bits.
- unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
- if (IdxBitwidth != LLVMPointerWidth)
- Idx = Builder.CreateIntCast(Idx,
- llvm::IntegerType::get(VMContext, LLVMPointerWidth),
+ if (!Idx->getType()->isIntegerTy(LLVMPointerWidth))
+ Idx = Builder.CreateIntCast(Idx, IntPtrTy,
IdxSigned, "idxprom");
-
+
// FIXME: As llvm implements the object size checking, this can come out.
if (CatchUndefined) {
- if (const ImplicitCastExpr *ICE=dyn_cast<ImplicitCastExpr>(E->getBase())) {
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
if (ICE->getCastKind() == CastExpr::CK_ArrayToPointerDecay) {
if (const ConstantArrayType *CAT
@@ -1463,9 +1412,13 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
Idx = Builder.CreateUDiv(Idx,
llvm::ConstantInt::get(Idx->getType(),
BaseTypeSize.getQuantity()));
+
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
+
Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
- } else if (const ObjCObjectType *OIT =
- E->getType()->getAs<ObjCObjectType>()) {
+ } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
+ // Indexing over an interface, as in "NSString *P; P[4];"
llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
getContext().getTypeSizeInChars(OIT).getQuantity());
@@ -1473,10 +1426,27 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
Idx = Builder.CreateMul(Idx, InterfaceSize);
const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
Idx, "arrayidx");
Address = Builder.CreateBitCast(Address, Base->getType());
+ } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
+ // If this is A[i] where A is an array, the frontend will have decayed the
+ // base to be an ArrayToPointerDecay implicit cast. While correct, it is
+ // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
+ // "gep x, i" here. Emit one "gep A, 0, i".
+ assert(Array->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+ llvm::Value *ArrayPtr = EmitLValue(Array).getAddress();
+ llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
+ llvm::Value *Args[] = { Zero, Idx };
+
+ Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, Args+2, "arrayidx");
} else {
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
}
@@ -1501,17 +1471,15 @@ llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
llvm::SmallVector<unsigned, 4> &Elts) {
llvm::SmallVector<llvm::Constant*, 4> CElts;
+ const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
- CElts.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), Elts[i]));
+ CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
return llvm::ConstantVector::get(&CElts[0], CElts.size());
}
LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
- const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
-
// Emit the base vector as an l-value.
LValue Base;
@@ -1816,10 +1784,18 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
cast<CXXRecordDecl>(DerivedClassTy->getDecl());
LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *This;
+ if (LV.isPropertyRef()) {
+ RValue RV = EmitLoadOfPropertyRefLValue(LV, E->getSubExpr()->getType());
+ assert (!RV.isScalar() && "EmitCastLValue");
+ This = RV.getAggregateAddr();
+ }
+ else
+ This = LV.getAddress();
// Perform the derived-to-base conversion
llvm::Value *Base =
- GetAddressOfBaseClass(LV.getAddress(), DerivedClassDecl,
+ GetAddressOfBaseClass(This, DerivedClassDecl,
E->getBasePath(), /*NullCheckValue=*/false);
return LValue::MakeAddr(Base, MakeQualifiers(E->getType()));
@@ -1840,7 +1816,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
return LValue::MakeAddr(Derived, MakeQualifiers(E->getType()));
}
- case CastExpr::CK_BitCast: {
+ case CastExpr::CK_LValueBitCast: {
// This must be a reinterpret_cast (or c-style equivalent).
const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
@@ -1853,7 +1829,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
}
LValue CodeGenFunction::EmitNullInitializationLValue(
- const CXXZeroInitValueExpr *E) {
+ const CXXScalarValueInitExpr *E) {
QualType Ty = E->getType();
LValue LV = LValue::MakeAddr(CreateMemTemp(Ty), MakeQualifiers(Ty));
EmitNullInitialization(LV.getAddress(), Ty);
@@ -1966,15 +1942,28 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
LValue LV = EmitLValue(E->getSubExpr());
- PushCXXTemporary(E->getTemporary(), LV.getAddress());
+ EmitCXXTemporary(E->getTemporary(), LV.getAddress());
return LV;
}
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
- // Can only get l-value for message expression returning aggregate type
RValue RV = EmitObjCMessageExpr(E);
- // FIXME: can this be volatile?
- return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
+
+ if (!RV.isScalar())
+ return LValue::MakeAddr(RV.getAggregateAddr(),
+ MakeQualifiers(E->getType()));
+
+ assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
+}
+
+LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
+ llvm::Value *V =
+ CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
+ return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
}
llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
index a4e64fb..219a5f9 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
@@ -127,7 +127,7 @@ public:
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
void VisitCXXConstructExpr(const CXXConstructExpr *E);
void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
- void VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E);
+ void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
void VisitVAArgExpr(VAArgExpr *E);
@@ -177,11 +177,16 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
/// directly into the return value slot. If GC does interfere, a final
/// move will be performed.
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
- if (!RequiresGCollection) return;
-
- CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr,
+ if (RequiresGCollection) {
+ std::pair<uint64_t, unsigned> TypeInfo =
+ CGF.getContext().getTypeInfo(E->getType());
+ unsigned long size = TypeInfo.first/8;
+ const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr,
Src.getAggregateAddr(),
- E->getType());
+ SizeVal);
+ }
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
@@ -198,9 +203,14 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
}
if (RequiresGCollection) {
+ std::pair<uint64_t, unsigned> TypeInfo =
+ CGF.getContext().getTypeInfo(E->getType());
+ unsigned long size = TypeInfo.first/8;
+ const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
DestPtr, Src.getAggregateAddr(),
- E->getType());
+ SizeVal);
return;
}
// If the result of the assignment is used, copy the LHS there also.
@@ -297,6 +307,10 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
break;
}
+ case CastExpr::CK_LValueBitCast:
+ llvm_unreachable("there are no lvalue bit-casts on aggregates");
+ break;
+
case CastExpr::CK_BitCast: {
// This must be a member function pointer cast.
Visit(E->getSubExpr());
@@ -396,35 +410,11 @@ void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) {
const llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
-
llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr");
- llvm::Value *FuncPtr;
-
- if (MD->isVirtual()) {
- int64_t Index = CGF.CGM.getVTables().getMethodVTableIndex(MD);
-
- // FIXME: We shouldn't use / 8 here.
- uint64_t PointerWidthInBytes =
- CGF.CGM.getContext().Target.getPointerWidth(0) / 8;
-
- // Itanium C++ ABI 2.3:
- // For a non-virtual function, this field is a simple function pointer.
- // For a virtual function, it is 1 plus the virtual table offset
- // (in bytes) of the function, represented as a ptrdiff_t.
- FuncPtr = llvm::ConstantInt::get(PtrDiffTy,
- (Index * PointerWidthInBytes) + 1);
- } else {
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty =
- CGF.CGM.getTypes().GetFunctionType(CGF.CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
- llvm::Constant *Fn = CGF.CGM.GetAddrOfFunction(MD, Ty);
- FuncPtr = llvm::ConstantExpr::getPtrToInt(Fn, PtrDiffTy);
- }
+ llvm::Value *FuncPtr = CGF.CGM.GetCXXMemberFunctionPointerValue(MD);
Builder.CreateStore(FuncPtr, DstPtr, VolatileDest);
llvm::Value *AdjPtr = Builder.CreateStructGEP(DestPtr, 1, "dst.adj");
-
// The adjustment will always be 0.
Builder.CreateStore(llvm::ConstantInt::get(PtrDiffTy, 0), AdjPtr,
VolatileDest);
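A worked instance of the Itanium rule quoted in the removed block (illustrative numbers, 8-byte pointers assumed):

    // Virtual method at vtable index 3 with 8-byte pointers:
    //   ptr field = 3 * 8 + 1 = 25    (odd value: virtual, byte offset 24)
    // Non-virtual method:
    //   ptr field = the function's address, stored as a plain ptrdiff_t
    // The adjustment field stays 0 in both cases here.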
@@ -546,17 +536,15 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
// Don't make this a live temporary if we're emitting an initializer expr.
if (!IsInitializer)
- CGF.PushCXXTemporary(E->getTemporary(), Val);
+ CGF.EmitCXXTemporary(E->getTemporary(), Val);
}
void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
llvm::Value *Val = DestPtr;
- if (!Val) {
- // Create a temporary variable.
+ if (!Val) // Create a temporary variable.
Val = CGF.CreateMemTemp(E->getType(), "tmp");
- }
if (E->requiresZeroInitialization())
EmitNullInitializationToLValue(LValue::MakeAddr(Val,
@@ -573,7 +561,7 @@ void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
CGF.EmitCXXExprWithTemporaries(E, Val, VolatileDest, IsInitializer);
}
-void AggExprEmitter::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
llvm::Value *Val = DestPtr;
if (!Val) {
@@ -602,7 +590,7 @@ AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
if (isa<ImplicitValueInitExpr>(E)) {
EmitNullInitializationToLValue(LV, T);
} else if (T->isReferenceType()) {
- RValue RV = CGF.EmitReferenceBindingToExpr(E, /*IsInitializer=*/false);
+ RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
CGF.EmitStoreThroughLValue(RV, LV, T);
} else if (T->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
@@ -822,18 +810,11 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// equal, but other compilers do this optimization, and almost every memcpy
// implementation handles this case safely. If there is a libc that does not
// safely handle this, we can add a target hook.
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
- if (DestPtr->getType() != BP)
- DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
- if (SrcPtr->getType() != BP)
- SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
// Get size and alignment info for this aggregate.
std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
// FIXME: Handle variable sized types.
- const llvm::Type *IntPtr =
- llvm::IntegerType::get(VMContext, LLVMPointerWidth);
// FIXME: If we have a volatile struct, the optimizer can remove what might
// appear to be `extra' memory ops:
@@ -847,25 +828,46 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
//
// we need to use a different call here. We use isVolatile to indicate when
// either the source or the destination is volatile.
- const llvm::Type *I1Ty = llvm::Type::getInt1Ty(VMContext);
- const llvm::Type *I8Ty = llvm::Type::getInt8Ty(VMContext);
- const llvm::Type *I32Ty = llvm::Type::getInt32Ty(VMContext);
const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
- const llvm::Type *DBP = llvm::PointerType::get(I8Ty, DPT->getAddressSpace());
- if (DestPtr->getType() != DBP)
- DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");
+ const llvm::Type *DBP =
+ llvm::Type::getInt8PtrTy(VMContext, DPT->getAddressSpace());
+ DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");
const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
- const llvm::Type *SBP = llvm::PointerType::get(I8Ty, SPT->getAddressSpace());
- if (SrcPtr->getType() != SBP)
- SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
-
+ const llvm::Type *SBP =
+ llvm::Type::getInt8PtrTy(VMContext, SPT->getAddressSpace());
+ SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
+
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ RecordDecl *Record = RecordTy->getDecl();
+ if (Record->hasObjectMember()) {
+ unsigned long size = TypeInfo.first/8;
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+ SizeVal);
+ return;
+ }
+ } else if (getContext().getAsArrayType(Ty)) {
+ QualType BaseType = getContext().getBaseElementType(Ty);
+ if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
+ if (RecordTy->getDecl()->hasObjectMember()) {
+ unsigned long size = TypeInfo.first/8;
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size);
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+ SizeVal);
+ return;
+ }
+ }
+ }
+
Builder.CreateCall5(CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(),
- IntPtr),
+ IntPtrTy),
DestPtr, SrcPtr,
// TypeInfo.first describes size in bits.
- llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
- llvm::ConstantInt::get(I32Ty, TypeInfo.second/8),
- llvm::ConstantInt::get(I1Ty, isVolatile));
+ llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
+ Builder.getInt32(TypeInfo.second/8),
+ Builder.getInt1(isVolatile));
}
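When neither branch takes the GC-memmove path, the copy falls through to the memcpy intrinsic; in source terms the effect is a plain sized copy (a sketch, assuming no Objective-C object members):

    #include <cstring>
    void aggregate_copy(void *dst, const void *src, unsigned long nbytes) {
      // Lowers to llvm.memcpy; size and alignment come from getTypeInfo(),
      // which reports both in bits -- hence the /8 conversions above.
      std::memcpy(dst, src, nbytes);
    }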
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
index f93c79c..69e5f0e 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
@@ -275,10 +275,7 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
QualType Ty = E->getType();
- if (ClassDecl->hasObjectMember())
- CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, This, Src, Ty);
- else
- EmitAggregateCopy(This, Src, Ty);
+ EmitAggregateCopy(This, Src, Ty);
return RValue::get(This);
}
}
@@ -484,6 +481,79 @@ static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
return V;
}
+static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
+ llvm::Value *NewPtr) {
+
+ assert(E->getNumConstructorArgs() == 1 &&
+ "Can only have one argument to initializer of POD type.");
+
+ const Expr *Init = E->getConstructorArg(0);
+ QualType AllocType = E->getAllocatedType();
+
+ if (!CGF.hasAggregateLLVMType(AllocType))
+ CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
+ AllocType.isVolatileQualified(), AllocType);
+ else if (AllocType->isAnyComplexType())
+ CGF.EmitComplexExprIntoAddr(Init, NewPtr,
+ AllocType.isVolatileQualified());
+ else
+ CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
+}
+
+void
+CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements) {
+ // We have a POD type.
+ if (E->getNumConstructorArgs() == 0)
+ return;
+
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+ // Create a temporary for the loop index and initialize it with 0.
+ llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
+ llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ Builder.CreateStore(Zero, IndexPtr);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+ // Inside the loop body, emit the constructor call on the array element.
+ Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
+ "arrayidx");
+ StoreAnyExprIntoOneUnit(*this, E, Address);
+
+ EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
+ Counter = Builder.CreateLoad(IndexPtr);
+ NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
+ Builder.CreateStore(NextVal, IndexPtr);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
+
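The block structure EmitNewArrayInitializer emits (for.cond / for.body / for.inc / for.end) corresponds to this source-level pattern, with one StoreAnyExprIntoOneUnit per element (a sketch; T and init stand for the allocated type and its single POD initializer):

    #include <cstddef>
    #include <new>
    template <typename T>
    void init_new_array(T *p, std::size_t n, const T &init) {
      for (std::size_t i = 0; i != n; ++i)
        new (&p[i]) T(init);  // placement-store into element i
    }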
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
llvm::Value *NewPtr,
llvm::Value *NumElements) {
@@ -495,35 +565,32 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
E->constructor_arg_end());
return;
}
+ else {
+ CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
+ return;
+ }
}
-
- QualType AllocType = E->getAllocatedType();
if (CXXConstructorDecl *Ctor = E->getConstructor()) {
+ // Per C++ [expr.new]p15, if we have an initializer, then we're performing
+ // direct initialization. C++ [dcl.init]p5 requires that we
+ // zero-initialize storage if there are no user-declared constructors.
+ if (E->hasInitializer() &&
+ !Ctor->getParent()->hasUserDeclaredConstructor() &&
+ !Ctor->getParent()->isEmpty())
+ CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());
+
CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
NewPtr, E->constructor_arg_begin(),
E->constructor_arg_end());
return;
}
-
// We have a POD type.
if (E->getNumConstructorArgs() == 0)
return;
-
- assert(E->getNumConstructorArgs() == 1 &&
- "Can only have one argument to initializer of POD type.");
-
- const Expr *Init = E->getConstructorArg(0);
-
- if (!CGF.hasAggregateLLVMType(AllocType))
- CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
- AllocType.isVolatileQualified(), AllocType);
- else if (AllocType->isAnyComplexType())
- CGF.EmitComplexExprIntoAddr(Init, NewPtr,
- AllocType.isVolatileQualified());
- else
- CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
+
+ StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
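A source-level illustration of the C++ [dcl.init]p5 rule the new check implements (a sketch):

    struct POD  { int x; };            // no user-declared constructor
    struct User { int x; User() {} };  // user-declared constructor
    POD  *p = new POD();   // storage is zero-initialized before construction
    User *q = new User();  // no zero-init; User() runs as written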
@@ -770,7 +837,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
if (const RecordType *RT = DeleteTy->getAs<RecordType>()) {
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
if (!RD->hasTrivialDestructor()) {
- const CXXDestructorDecl *Dtor = RD->getDestructor(getContext());
+ const CXXDestructorDecl *Dtor = RD->getDestructor();
if (E->isArrayForm()) {
llvm::Value *AllocatedObjectPtr;
llvm::Value *NumElements;
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
index 0a0c914..0927319 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
@@ -131,14 +131,14 @@ public:
// FIXME: CompoundLiteralExpr
- ComplexPairTy EmitCast(Expr *Op, QualType DestTy);
+ ComplexPairTy EmitCast(CastExpr::CastKind CK, Expr *Op, QualType DestTy);
ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
// Unlike for scalars, we don't have to worry about function->ptr demotion
// here.
- return EmitCast(E->getSubExpr(), E->getType());
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
}
ComplexPairTy VisitCastExpr(CastExpr *E) {
- return EmitCast(E->getSubExpr(), E->getType());
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
}
ComplexPairTy VisitCallExpr(const CallExpr *E);
ComplexPairTy VisitStmtExpr(const StmtExpr *E);
@@ -181,7 +181,7 @@ public:
ComplexPairTy VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
return CGF.EmitCXXExprWithTemporaries(E).getComplexVal();
}
- ComplexPairTy VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+ ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
assert(E->getType()->isAnyComplexType() && "Expected complex type!");
QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
@@ -339,11 +339,22 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
return Val;
}
-ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) {
+ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
+ QualType DestTy) {
// Two cases here: cast from (complex to complex) and (scalar to complex).
if (Op->getType()->isAnyComplexType())
return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+ // FIXME: We should be looking at all of the cast kinds here, not
+ // cherry-picking the ones we have test cases for.
+ if (CK == CastExpr::CK_LValueBitCast) {
+ llvm::Value *V = CGF.EmitLValue(Op).getAddress();
+ V = Builder.CreateBitCast(V,
+ CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
+ // FIXME: Are the qualifiers correct here?
+ return EmitLoadOfComplex(V, DestTy.isVolatileQualified());
+ }
+
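One construct that reaches this CK_LValueBitCast path is a reinterpret-style cast of an lvalue to a complex type; a sketch (the struct layout here is purely illustrative):

    struct Pair { float re, im; };
    _Complex float load_as_complex(Pair &p) {
      // reinterpret_cast on an lvalue yields CK_LValueBitCast: the address is
      // bitcast to '_Complex float *' and then loaded via EmitLoadOfComplex.
      return reinterpret_cast<_Complex float &>(p);
    }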
// C99 6.3.1.7: When a value of real type is converted to a complex type, the
// real part of the complex result value is determined by the rules of
// conversion to the corresponding real type and the imaginary part of the
@@ -521,22 +532,22 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
// improve codegen a little. It is possible for the RHS to be complex or
// scalar.
OpInfo.Ty = E->getComputationResultType();
- OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty);
+ OpInfo.RHS = EmitCast(CastExpr::CK_Unknown, E->getRHS(), OpInfo.Ty);
- LValue LHSLV = CGF.EmitLValue(E->getLHS());
+ LValue LHS = CGF.EmitLValue(E->getLHS());
// We know the LHS is a complex lvalue.
ComplexPairTy LHSComplexPair;
- if (LHSLV.isPropertyRef())
- LHSComplexPair =
- CGF.EmitObjCPropertyGet(LHSLV.getPropertyRefExpr()).getComplexVal();
- else if (LHSLV.isKVCRef())
- LHSComplexPair =
- CGF.EmitObjCPropertyGet(LHSLV.getKVCRefExpr()).getComplexVal();
+ if (LHS.isPropertyRef())
+ LHSComplexPair =
+ CGF.EmitObjCPropertyGet(LHS.getPropertyRefExpr()).getComplexVal();
+ else if (LHS.isKVCRef())
+ LHSComplexPair =
+ CGF.EmitObjCPropertyGet(LHS.getKVCRefExpr()).getComplexVal();
else
- LHSComplexPair = EmitLoadOfComplex(LHSLV.getAddress(),
- LHSLV.isVolatileQualified());
+ LHSComplexPair = EmitLoadOfComplex(LHS.getAddress(),
+ LHS.isVolatileQualified());
- OpInfo.LHS=EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty);
+ OpInfo.LHS = EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty);
// Expand the binary operator.
ComplexPairTy Result = (this->*Func)(OpInfo);
@@ -545,23 +556,26 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
// Store the result value into the LHS lvalue.
- if (LHSLV.isPropertyRef())
- CGF.EmitObjCPropertySet(LHSLV.getPropertyRefExpr(),
+ if (LHS.isPropertyRef())
+ CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
RValue::getComplex(Result));
- else if (LHSLV.isKVCRef())
- CGF.EmitObjCPropertySet(LHSLV.getKVCRefExpr(), RValue::getComplex(Result));
+ else if (LHS.isKVCRef())
+ CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Result));
else
- EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified());
- // And now return the LHS
+ EmitStoreOfComplex(Result, LHS.getAddress(), LHS.isVolatileQualified());
+
+ // Restore the Ignore* flags.
IgnoreReal = ignreal;
IgnoreImag = ignimag;
IgnoreRealAssign = ignreal;
IgnoreImagAssign = ignimag;
- if (LHSLV.isPropertyRef())
- return CGF.EmitObjCPropertyGet(LHSLV.getPropertyRefExpr()).getComplexVal();
- else if (LHSLV.isKVCRef())
- return CGF.EmitObjCPropertyGet(LHSLV.getKVCRefExpr()).getComplexVal();
- return EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified());
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LHS.isPropertyRef() || LHS.isKVCRef())
+ return Result;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
}
ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
@@ -569,8 +583,8 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
TestAndClearIgnoreImag();
bool ignreal = TestAndClearIgnoreRealAssign();
bool ignimag = TestAndClearIgnoreImagAssign();
- assert(CGF.getContext().getCanonicalType(E->getLHS()->getType()) ==
- CGF.getContext().getCanonicalType(E->getRHS()->getType()) &&
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType()) &&
"Invalid assignment");
// Emit the RHS.
ComplexPairTy Val = Visit(E->getRHS());
@@ -578,31 +592,26 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// Compute the address to store into.
LValue LHS = CGF.EmitLValue(E->getLHS());
- // Store into it, if simple.
- if (LHS.isSimple()) {
- EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
-
- // And now return the LHS
- IgnoreReal = ignreal;
- IgnoreImag = ignimag;
- IgnoreRealAssign = ignreal;
- IgnoreImagAssign = ignimag;
- return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
- }
-
- // Otherwise we must have a property setter (no complex vector/bitfields).
+ // Store the result value into the LHS lvalue.
if (LHS.isPropertyRef())
CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(), RValue::getComplex(Val));
- else
+ else if (LHS.isKVCRef())
CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Val));
+ else
+ EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
- // There is no reload after a store through a method, but we need to restore
- // the Ignore* flags.
+ // Restore the Ignore* flags.
IgnoreReal = ignreal;
IgnoreImag = ignimag;
IgnoreRealAssign = ignreal;
IgnoreImagAssign = ignimag;
- return Val;
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LHS.isPropertyRef() || LHS.isKVCRef())
+ return Val;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
}
ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
index 551a47a..bbd256c 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
@@ -52,8 +52,8 @@ private:
bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::Constant *InitExpr);
- bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
- llvm::Constant *InitExpr);
+ void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::ConstantInt *InitExpr);
void AppendPadding(uint64_t NumBytes);
@@ -123,14 +123,9 @@ AppendField(const FieldDecl *Field, uint64_t FieldOffset,
return true;
}
-bool ConstStructBuilder::
- AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
- llvm::Constant *InitCst) {
- llvm::ConstantInt *CI = cast_or_null<llvm::ConstantInt>(InitCst);
- // FIXME: Can this ever happen?
- if (!CI)
- return false;
-
+void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
+ uint64_t FieldOffset,
+ llvm::ConstantInt *CI) {
if (FieldOffset > NextFieldOffsetInBytes * 8) {
// We need to add padding.
uint64_t NumBytes =
@@ -195,16 +190,43 @@ bool ConstStructBuilder::
Tmp = Tmp.shl(8 - BitsInPreviousByte);
}
- // Or in the bits that go into the previous byte.
- if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(Elements.back()))
+ // 'or' in the bits that go into the previous byte.
+ llvm::Value *LastElt = Elements.back();
+ if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
Tmp |= Val->getValue();
- else
- assert(isa<llvm::UndefValue>(Elements.back()));
+ else {
+ assert(isa<llvm::UndefValue>(LastElt));
+ // If there is an undef field that we're adding to, it can either be a
+ // scalar undef (in which case, we just replace it with our field) or it
+ // is an array. If it is an array, we have to pull one byte off the
+ // array so that the other undef bytes stay around.
+ if (!isa<llvm::IntegerType>(LastElt->getType())) {
+ // The undef padding will be a multibyte array; create a new, smaller
+ // padding and then a hole for our i8 to get plopped into.
+ assert(isa<llvm::ArrayType>(LastElt->getType()) &&
+ "Expected array padding of undefs");
+ const llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
+ assert(AT->getElementType()->isIntegerTy(8) &&
+ AT->getNumElements() != 0 &&
+ "Expected non-empty array padding of undefs");
+
+ // Remove the padding array.
+ NextFieldOffsetInBytes -= AT->getNumElements();
+ Elements.pop_back();
+
+ // Add the padding back in two chunks.
+ AppendPadding(AT->getNumElements()-1);
+ AppendPadding(1);
+ assert(isa<llvm::UndefValue>(Elements.back()) &&
+ Elements.back()->getType()->isIntegerTy(8) &&
+ "Padding addition didn't work right");
+ }
+ }
Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
if (FitsCompletelyInPreviousByte)
- return true;
+ return;
}
while (FieldValue.getBitWidth() > 8) {
@@ -248,7 +270,6 @@ bool ConstStructBuilder::
Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
FieldValue));
NextFieldOffsetInBytes++;
- return true;
}
void ConstStructBuilder::AppendPadding(uint64_t NumBytes) {
@@ -346,8 +367,8 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
return false;
} else {
// Otherwise we have a bitfield.
- if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo), EltInit))
- return false;
+ AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
+ cast<llvm::ConstantInt>(EltInit));
}
}
@@ -443,30 +464,8 @@ public:
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
llvm::Constant *Values[2];
-
- // Get the function pointer (or index if this is a virtual function).
- if (MD->isVirtual()) {
- uint64_t Index = CGM.getVTables().getMethodVTableIndex(MD);
- // FIXME: We shouldn't use / 8 here.
- uint64_t PointerWidthInBytes =
- CGM.getContext().Target.getPointerWidth(0) / 8;
-
- // Itanium C++ ABI 2.3:
- // For a non-virtual function, this field is a simple function pointer.
- // For a virtual function, it is 1 plus the virtual table offset
- // (in bytes) of the function, represented as a ptrdiff_t.
- Values[0] = llvm::ConstantInt::get(PtrDiffTy,
- (Index * PointerWidthInBytes) + 1);
- } else {
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
-
- llvm::Constant *FuncPtr = CGM.GetAddrOfFunction(MD, Ty);
- Values[0] = llvm::ConstantExpr::getPtrToInt(FuncPtr, PtrDiffTy);
- }
+ Values[0] = CGM.GetCXXMemberFunctionPointerValue(MD);
// The adjustment will always be 0.
Values[1] = llvm::ConstantInt::get(PtrDiffTy, 0);
@@ -930,7 +929,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
llvm::Constant *C = llvm::ConstantInt::get(VMContext,
Result.Val.getInt());
- if (C->getType() == llvm::Type::getInt1Ty(VMContext)) {
+ if (C->getType()->isIntegerTy(1)) {
const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
}
@@ -977,7 +976,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
}
llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
- if (C && C->getType() == llvm::Type::getInt1Ty(VMContext)) {
+ if (C && C->getType()->isIntegerTy(1)) {
const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
}
@@ -1009,7 +1008,11 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
// Go through all bases and fill in any null pointer to data members.
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
- assert(!I->isVirtual() && "Should not see virtual bases here!");
+ if (I->isVirtual()) {
+ // FIXME: We should initialize null pointer to data members in virtual
+ // bases here.
+ continue;
+ }
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
@@ -1088,7 +1091,11 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
// Go through all bases and fill in any null pointer to data members.
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
- assert(!I->isVirtual() && "Should not see virtual bases here!");
+ if (I->isVirtual()) {
+ // FIXME: We should initialize null pointer to data members in virtual
+ // bases here.
+ continue;
+ }
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
@@ -1131,6 +1138,11 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
for (RecordDecl::field_iterator I = RD->field_begin(),
E = RD->field_end(); I != E; ++I) {
const FieldDecl *FD = *I;
+
+ // Ignore bit fields.
+ if (FD->isBitField())
+ continue;
+
unsigned FieldNo = Layout.getLLVMFieldNo(FD);
Elements[FieldNo] = EmitNullConstant(FD->getType());
}
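The reason null constants need this treatment at all: under the Itanium ABI a null pointer to data member is all-ones, not all-zero, so a plain zeroinitializer would mean 'member at offset 0' (a sketch):

    struct A { int x; };
    int A::*pm = 0;  // stored as ptrdiff_t(-1): -1 is null, 0 is a real offset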
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
index 2108414..ef38209 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
@@ -40,7 +40,8 @@ struct BinOpInfo {
Value *LHS;
Value *RHS;
QualType Ty; // Computation Type.
- const BinaryOperator *E;
+ BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
+ const Expr *E; // Entire expr, for error reporting. May not be a binop.
};
namespace {
@@ -125,7 +126,7 @@ public:
Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
- Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
+ Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
return EmitNullValue(E->getType());
}
Value *VisitGNUNullExpr(const GNUNullExpr *E) {
@@ -212,22 +213,27 @@ public:
Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
// Unary Operators.
- Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre) {
- LValue LV = EmitLValue(E->getSubExpr());
- return CGF.EmitScalarPrePostIncDec(E, LV, isInc, isPre);
- }
Value *VisitUnaryPostDec(const UnaryOperator *E) {
- return VisitPrePostIncDec(E, false, false);
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, false, false);
}
Value *VisitUnaryPostInc(const UnaryOperator *E) {
- return VisitPrePostIncDec(E, true, false);
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, true, false);
}
Value *VisitUnaryPreDec(const UnaryOperator *E) {
- return VisitPrePostIncDec(E, false, true);
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, false, true);
}
Value *VisitUnaryPreInc(const UnaryOperator *E) {
- return VisitPrePostIncDec(E, true, true);
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, true, true);
}
+
+ llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+
+
Value *VisitUnaryAddrOf(const UnaryOperator *E) {
return EmitLValue(E->getSubExpr()).getAddress();
}
@@ -291,9 +297,17 @@ public:
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
- if (CGF.getContext().getLangOptions().OverflowChecking
- && Ops.Ty->isSignedIntegerType())
- return EmitOverflowCheckedBinOp(Ops);
+ if (Ops.Ty->isSignedIntegerType()) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(Ops);
+ }
+ }
+
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
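The same three-way switch now appears in EmitAdd and EmitSub below. For signed 'a * b' the usual flag mapping is -fwrapv for SOB_Defined and -ftrapv for SOB_Trapping (a sketch):

    int mul(int a, int b) {
      return a * b;
      // SOB_Undefined (default): 'mul nsw' -- overflow is undefined behavior
      // SOB_Defined   (-fwrapv): plain 'mul' -- two's-complement wraparound
      // SOB_Trapping  (-ftrapv): EmitOverflowCheckedBinOp's checked sequence
    }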
@@ -320,7 +334,7 @@ public:
BinOpInfo EmitBinOps(const BinaryOperator *E);
LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
- Value *&BitFieldResult);
+ Value *&Result);
Value *EmitCompoundAssign(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
@@ -435,8 +449,6 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (DstType->isVoidType()) return 0;
- llvm::LLVMContext &VMContext = CGF.getLLVMContext();
-
// Handle conversions to bool first, they are special: comparisons against 0.
if (DstType->isBooleanType())
return EmitConversionToBool(Src, SrcType);
@@ -458,8 +470,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
// First, convert to the correct width so that we control the kind of
// extension.
- const llvm::Type *MiddleTy =
- llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+ const llvm::Type *MiddleTy = CGF.IntPtrTy;
bool InputSigned = SrcType->isSignedIntegerType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -481,16 +492,14 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
- llvm::Value *Idx =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
for (unsigned i = 0; i < NumElements; i++)
- Args.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), 0));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
@@ -578,12 +587,104 @@ Value *ScalarExprEmitter::VisitExpr(Expr *E) {
}
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
- llvm::SmallVector<llvm::Constant*, 32> indices;
- for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
- indices.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i))));
+ // Vector Mask Case
+ if (E->getNumSubExprs() == 2 ||
+ (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) {
+ Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
+ Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
+ Value *Mask;
+
+ const llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
+ unsigned LHSElts = LTy->getNumElements();
+
+ if (E->getNumSubExprs() == 3) {
+ Mask = CGF.EmitScalarExpr(E->getExpr(2));
+
+ // Shuffle LHS & RHS into one input vector.
+ llvm::SmallVector<llvm::Constant*, 32> concat;
+ for (unsigned i = 0; i != LHSElts; ++i) {
+ concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i));
+ concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i+1));
+ }
+
+ Value* CV = llvm::ConstantVector::get(concat.begin(), concat.size());
+ LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
+ LHSElts *= 2;
+ } else {
+ Mask = RHS;
+ }
+
+ const llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
+ llvm::Constant* EltMask;
+
+ // Treat vec3 like vec4.
+ if ((LHSElts == 6) && (E->getNumSubExprs() == 3))
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts+2))-1);
+ else if ((LHSElts == 3) && (E->getNumSubExprs() == 2))
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts+1))-1);
+ else
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts))-1);
+
+ // Mask off the high bits of each shuffle index.
+ llvm::SmallVector<llvm::Constant *, 32> MaskV;
+ for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i)
+ MaskV.push_back(EltMask);
+
+ Value* MaskBits = llvm::ConstantVector::get(MaskV.begin(), MaskV.size());
+ Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
+
+ // newv = undef
+ // mask = mask & maskbits
+ // for each elt
+ // n = extract mask i
+ // x = extract val n
+ // newv = insert newv, x, i
+ const llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
+ MTy->getNumElements());
+ Value* NewV = llvm::UndefValue::get(RTy);
+ for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
+ Value *Indx = llvm::ConstantInt::get(CGF.Int32Ty, i);
+ Indx = Builder.CreateExtractElement(Mask, Indx, "shuf_idx");
+ Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext");
+
+ // Handle vec3 special since the index will be off by one for the RHS.
+ if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) {
+ Value *cmpIndx, *newIndx;
+ cmpIndx = Builder.CreateICmpUGT(Indx,
+ llvm::ConstantInt::get(CGF.Int32Ty, 3),
+ "cmp_shuf_idx");
+ newIndx = Builder.CreateSub(Indx, llvm::ConstantInt::get(CGF.Int32Ty,1),
+ "shuf_idx_adj");
+ Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx");
+ }
+ Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
+ NewV = Builder.CreateInsertElement(NewV, VExt, Indx, "shuf_ins");
+ }
+ return NewV;
}
+
Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
+
+ // Handle vec3 special since the index will be off by one for the RHS.
+ llvm::SmallVector<llvm::Constant*, 32> indices;
+ for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
+ llvm::Constant *C = cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i)));
+ const llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
+ if (VTy->getNumElements() == 3) {
+ if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C)) {
+ uint64_t cVal = CI->getZExtValue();
+ if (cVal > 3) {
+ C = llvm::ConstantInt::get(C->getType(), cVal-1);
+ }
+ }
+ }
+ indices.push_back(C);
+ }
+
Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}
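The new branch accepts the variable-mask forms of __builtin_shufflevector alongside the old constant-index form (a sketch using the ext_vector_type extension):

    typedef int v4 __attribute__((ext_vector_type(4)));
    v4 shuffles(v4 a, v4 b, v4 mask) {
      v4 c = __builtin_shufflevector(a, b, 0, 4, 1, 5); // constant indices
      v4 d = __builtin_shufflevector(a, mask);          // variable mask; indices
                                                        // are masked to the width
      return c + d;
    }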
@@ -614,10 +715,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
Value *Base = Visit(E->getBase());
Value *Idx = Visit(E->getIdx());
bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
- Idx = Builder.CreateIntCast(Idx,
- llvm::Type::getInt32Ty(CGF.getLLVMContext()),
- IdxSigned,
- "vecidxcast");
+ Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vecidxcast");
return Builder.CreateExtractElement(Base, Idx, "vecext");
}
@@ -646,7 +744,6 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
return Visit(E->getInit(0));
unsigned ResElts = VType->getNumElements();
- const llvm::Type *I32Ty = llvm::Type::getInt32Ty(CGF.getLLVMContext());
// Loop over initializers collecting the Value for each, and remembering
// whether the source was swizzle (ExtVectorElementExpr). This will allow
@@ -677,7 +774,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// insert into undef -> shuffle (src, undef)
Args.push_back(C);
for (unsigned j = 1; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(I32Ty));
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
LHS = EI->getVectorOperand();
RHS = V;
@@ -686,11 +783,11 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// insert into undefshuffle && size match -> shuffle (v, src)
llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
for (unsigned j = 0; j != CurIdx; ++j)
- Args.push_back(getMaskElt(SVV, j, 0, I32Ty));
- Args.push_back(llvm::ConstantInt::get(I32Ty,
+ Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
ResElts + C->getZExtValue()));
for (unsigned j = CurIdx + 1; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(I32Ty));
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
RHS = EI->getVectorOperand();
@@ -704,7 +801,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
}
}
}
- Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx);
+ Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
VIsUndefShuffle = false;
++CurIdx;
@@ -728,15 +825,15 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// this shuffle directly into it.
if (VIsUndefShuffle) {
Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
- I32Ty));
+ CGF.Int32Ty));
} else {
- Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
}
}
for (unsigned j = 0, je = InitElts; j != je; ++j)
- Args.push_back(getMaskElt(SVI, j, Offset, I32Ty));
+ Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(I32Ty));
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
if (VIsUndefShuffle)
V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
@@ -749,20 +846,20 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// to the vector initializer into V.
if (Args.empty()) {
for (unsigned j = 0; j != InitElts; ++j)
- Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
for (unsigned j = InitElts; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(I32Ty));
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
Mask, "vext");
Args.clear();
for (unsigned j = 0; j != CurIdx; ++j)
- Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
for (unsigned j = 0; j != InitElts; ++j)
- Args.push_back(llvm::ConstantInt::get(I32Ty, j+Offset));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j+Offset));
for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
- Args.push_back(llvm::UndefValue::get(I32Ty));
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
}
// If V is undef, make sure it ends up on the RHS of the shuffle to aid
@@ -781,7 +878,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Emit remaining default initializers
for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
- Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx);
+ Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
}
@@ -828,6 +925,15 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
//assert(0 && "Unknown cast kind!");
break;
+ case CastExpr::CK_LValueBitCast: {
+ Value *V = EmitLValue(E).getAddress();
+ V = Builder.CreateBitCast(V,
+ ConvertType(CGF.getContext().getPointerType(DestTy)));
+ // FIXME: Are the qualifiers correct here?
+ return EmitLoadOfLValue(LValue::MakeAddr(V, CGF.MakeQualifiers(DestTy)),
+ DestTy);
+ }
+
case CastExpr::CK_AnyPointerToObjCPointerCast:
case CastExpr::CK_AnyPointerToBlockPointerCast:
case CastExpr::CK_BitCast: {
@@ -905,13 +1011,13 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
std::swap(DerivedDecl, BaseDecl);
if (llvm::Constant *Adj =
- CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
- CE->getBasePath())) {
+ CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, CE->getBasePath())){
if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
- Src = Builder.CreateSub(Src, Adj, "adj");
+ Src = Builder.CreateNSWSub(Src, Adj, "adj");
else
- Src = Builder.CreateAdd(Src, Adj, "adj");
+ Src = Builder.CreateNSWAdd(Src, Adj, "adj");
}
+
return Src;
}
@@ -924,8 +1030,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// First, convert to the correct width so that we control the kind of
// extension.
- const llvm::Type *MiddleTy =
- llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+ const llvm::Type *MiddleTy = CGF.IntPtrTy;
bool InputSigned = E->getType()->isSignedIntegerType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -946,16 +1051,14 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
- llvm::Value *Idx =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
for (unsigned i = 0; i < NumElements; i++)
- Args.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), 0));
+ Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
@@ -1020,12 +1123,126 @@ Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
// Unary Operators
//===----------------------------------------------------------------------===//
+llvm::Value *ScalarExprEmitter::
+EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+
+ QualType ValTy = E->getSubExpr()->getType();
+ llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy);
+
+ int AmountVal = isInc ? 1 : -1;
+
+ if (ValTy->isPointerType() &&
+ ValTy->getAs<PointerType>()->isVariableArrayType()) {
+ // The amount of the addition/subtraction needs to account for the VLA size
+ CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
+ }
+
+ llvm::Value *NextVal;
+ if (const llvm::PointerType *PT =
+ dyn_cast<llvm::PointerType>(InVal->getType())) {
+ llvm::Constant *Inc = llvm::ConstantInt::get(CGF.Int32Ty, AmountVal);
+ if (!isa<llvm::FunctionType>(PT->getElementType())) {
+ QualType PTEE = ValTy->getPointeeType();
+ if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) {
+ // Handle interface types, which are not represented with a concrete
+ // type.
+ int size = CGF.getContext().getTypeSize(OIT) / 8;
+ if (!isInc)
+ size = -size;
+ Inc = llvm::ConstantInt::get(Inc->getType(), size);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ InVal = Builder.CreateBitCast(InVal, i8Ty);
+ NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
+ llvm::Value *lhs = LV.getAddress();
+ lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
+ LV = LValue::MakeAddr(lhs, CGF.MakeQualifiers(ValTy));
+ } else
+ NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
+ } else {
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
+ NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
+ NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
+ }
+ } else if (InVal->getType()->isIntegerTy(1) && isInc) {
+ // Bool++ is an interesting case: due to promotion rules, we get
+ // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
+ // Bool = ((int)Bool+1) != 0
+ // A notable consequence is that the result of the increment is always true;
+ // decrement does not have this property.
+ NextVal = llvm::ConstantInt::getTrue(VMContext);
+ } else if (isa<llvm::IntegerType>(InVal->getType())) {
+ NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
+
+ if (!ValTy->isSignedIntegerType())
+ // Unsigned integer inc is always two's complement.
+ NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ else {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ break;
+ case LangOptions::SOB_Defined:
+ NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ break;
+ case LangOptions::SOB_Trapping:
+ BinOpInfo BinOp;
+ BinOp.LHS = InVal;
+ BinOp.RHS = NextVal;
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BinaryOperator::Add;
+ BinOp.E = E;
+ return EmitOverflowCheckedBinOp(BinOp);
+ }
+ }
+ } else {
+ // Add the inc/dec to the real part.
+ if (InVal->getType()->isFloatTy())
+ NextVal =
+ llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<float>(AmountVal)));
+ else if (InVal->getType()->isDoubleTy())
+ NextVal =
+ llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<double>(AmountVal)));
+ else {
+ llvm::APFloat F(static_cast<float>(AmountVal));
+ bool ignored;
+ F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
+ &ignored);
+ NextVal = llvm::ConstantFP::get(VMContext, F);
+ }
+ NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ }
+
+ // Store the updated result through the lvalue.
+ if (LV.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? NextVal : InVal;
+}
+
+
+
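The Bool++ special case above, restated at the source level (a sketch; operator++ on bool is deprecated but well-defined in the C++ of this era):

    bool touch(bool b) {
      b++;  // promote to int, add 1, convert back: the result is always true
      return b;
    }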
Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
TestAndClearIgnoreResultAssign();
- Value *Op = Visit(E->getSubExpr());
- if (Op->getType()->isFPOrFPVectorTy())
- return Builder.CreateFNeg(Op, "neg");
- return Builder.CreateNeg(Op, "neg");
+ // Emit unary minus with EmitSub so we handle overflow cases etc.
+ BinOpInfo BinOp;
+ BinOp.RHS = Visit(E->getSubExpr());
+
+ if (BinOp.RHS->getType()->isFPOrFPVectorTy())
+ BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
+ else
+ BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BinaryOperator::Sub;
+ BinOp.E = E;
+ return EmitSub(BinOp);
}
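Routing unary minus through EmitSub matters mostly for the trapping mode: '0 - x' reuses the checked-subtract machinery, and negating INT_MIN is exactly the overflow it catches (a sketch):

    int negate(int x) {
      return -x;  // emitted as '0 - x': 'sub nsw' by default, an overflow-checked
                  // subtract under -ftrapv (x == INT_MIN would overflow)
    }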
Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
@@ -1126,6 +1343,7 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
Result.LHS = Visit(E->getLHS());
Result.RHS = Visit(E->getRHS());
Result.Ty = E->getType();
+ Result.Opcode = E->getOpcode();
Result.E = E;
return Result;
}
@@ -1133,9 +1351,8 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
LValue ScalarExprEmitter::EmitCompoundAssignLValue(
const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
- Value *&BitFieldResult) {
+ Value *&Result) {
QualType LHSTy = E->getLHS()->getType();
- BitFieldResult = 0;
BinOpInfo OpInfo;
if (E->getComputationResultType()->isAnyComplexType()) {
@@ -1144,7 +1361,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// actually need the imaginary part of the RHS for multiplication and
// division.)
CGF.ErrorUnsupported(E, "complex compound assignment");
- llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+ Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
return LValue();
}
@@ -1152,6 +1369,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// first, plus this should improve codegen a little.
OpInfo.RHS = Visit(E->getRHS());
OpInfo.Ty = E->getComputationResultType();
+ OpInfo.Opcode = E->getOpcode();
OpInfo.E = E;
// Load/convert the LHS.
LValue LHSLV = EmitCheckedLValue(E->getLHS());
@@ -1160,7 +1378,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
E->getComputationLHSType());
// Expand the binary operator.
- Value *Result = (this->*Func)(OpInfo);
+ Result = (this->*Func)(OpInfo);
// Convert the result back to the LHS type.
Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
@@ -1169,30 +1387,35 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// specially because the result is altered by the store, i.e., [C99 6.5.16p1]
// 'An assignment expression has the value of the left operand after the
// assignment...'.
- if (LHSLV.isBitField()) {
- if (!LHSLV.isVolatileQualified()) {
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
- &Result);
- BitFieldResult = Result;
- return LHSLV;
- } else
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy);
- } else
+ if (LHSLV.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
+ &Result);
+ else
CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
+
return LHSLV;
}
Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
bool Ignore = TestAndClearIgnoreResultAssign();
- Value *BitFieldResult;
- LValue LHSLV = EmitCompoundAssignLValue(E, Func, BitFieldResult);
- if (BitFieldResult)
- return BitFieldResult;
-
+ Value *RHS;
+ LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
+
+ // If the result is clearly ignored, return now.
if (Ignore)
return 0;
- return EmitLoadOfLValue(LHSLV, E->getType());
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LHS.isPropertyRef() || LHS.isKVCRef())
+ return RHS;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LHS.isVolatileQualified())
+ return RHS;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfLValue(LHS, E->getType());
}
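The rewritten tail implements the C99 6.5.16p1 result rules; the observable difference shows up with volatile lvalues (a sketch):

    void observe(volatile int *vp, int *ip) {
      int r1 = (*ip += 1);  // non-volatile: the computed value, no reload
      int r2 = (*vp += 1);  // volatile: the value is re-read after the store
      (void)r1; (void)r2;
    }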
@@ -1217,7 +1440,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
unsigned IID;
unsigned OpID = 0;
- switch (Ops.E->getOpcode()) {
+ switch (Ops.Opcode) {
case BinaryOperator::Add:
case BinaryOperator::AddAssign:
OpID = 1;
@@ -1265,20 +1488,20 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
// long long *__overflow_handler)(long long a, long long b, char op,
// char width)
std::vector<const llvm::Type*> handerArgTypes;
- handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
- handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
+ handerArgTypes.push_back(CGF.Int64Ty);
+ handerArgTypes.push_back(CGF.Int64Ty);
handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
- llvm::FunctionType *handlerTy = llvm::FunctionType::get(
- llvm::Type::getInt64Ty(VMContext), handerArgTypes, false);
+ llvm::FunctionType *handlerTy =
+ llvm::FunctionType::get(CGF.Int64Ty, handerArgTypes, false);
llvm::Value *handlerFunction =
CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
llvm::PointerType::getUnqual(handlerTy));
handlerFunction = Builder.CreateLoad(handlerFunction);
llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
- Builder.CreateSExt(Ops.LHS, llvm::Type::getInt64Ty(VMContext)),
- Builder.CreateSExt(Ops.RHS, llvm::Type::getInt64Ty(VMContext)),
+ Builder.CreateSExt(Ops.LHS, CGF.Int64Ty),
+ Builder.CreateSExt(Ops.RHS, CGF.Int64Ty),
llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), OpID),
llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext),
cast<llvm::IntegerType>(opTy)->getBitWidth()));
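In C terms, the global loaded and called above is the -ftrapv runtime hook, matching the signature given in the comment (a sketch of the declaration only):

    extern long long (*__overflow_handler)(long long a, long long b,
                                           char op, char width);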
@@ -1300,49 +1523,56 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
if (!Ops.Ty->isAnyPointerType()) {
- if (CGF.getContext().getLangOptions().OverflowChecking &&
- Ops.Ty->isSignedIntegerType())
- return EmitOverflowCheckedBinOp(Ops);
-
+ if (Ops.Ty->isSignedIntegerType()) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(Ops);
+ }
+ }
+
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");
- // Signed integer overflow is undefined behavior.
- if (Ops.Ty->isSignedIntegerType())
- return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
-
return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
}
+ // Must have binary (not unary) expr here. Unary pointer increment doesn't
+ // use this path.
+ const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
+
if (Ops.Ty->isPointerType() &&
Ops.Ty->getAs<PointerType>()->isVariableArrayType()) {
// The amount of the addition needs to account for the VLA size
- CGF.ErrorUnsupported(Ops.E, "VLA pointer addition");
+ CGF.ErrorUnsupported(BinOp, "VLA pointer addition");
}
+
Value *Ptr, *Idx;
Expr *IdxExp;
- const PointerType *PT = Ops.E->getLHS()->getType()->getAs<PointerType>();
+ const PointerType *PT = BinOp->getLHS()->getType()->getAs<PointerType>();
const ObjCObjectPointerType *OPT =
- Ops.E->getLHS()->getType()->getAs<ObjCObjectPointerType>();
+ BinOp->getLHS()->getType()->getAs<ObjCObjectPointerType>();
if (PT || OPT) {
Ptr = Ops.LHS;
Idx = Ops.RHS;
- IdxExp = Ops.E->getRHS();
+ IdxExp = BinOp->getRHS();
} else { // int + pointer
- PT = Ops.E->getRHS()->getType()->getAs<PointerType>();
- OPT = Ops.E->getRHS()->getType()->getAs<ObjCObjectPointerType>();
+ PT = BinOp->getRHS()->getType()->getAs<PointerType>();
+ OPT = BinOp->getRHS()->getType()->getAs<ObjCObjectPointerType>();
assert((PT || OPT) && "Invalid add expr");
Ptr = Ops.RHS;
Idx = Ops.LHS;
- IdxExp = Ops.E->getLHS();
+ IdxExp = BinOp->getLHS();
}
unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
if (Width < CGF.LLVMPointerWidth) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
- const llvm::Type *IdxType =
- llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+ const llvm::Type *IdxType = CGF.IntPtrTy;
if (IdxExp->getType()->isSignedIntegerType())
Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
else
@@ -1376,30 +1606,37 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
- if (CGF.getContext().getLangOptions().OverflowChecking
- && Ops.Ty->isSignedIntegerType())
- return EmitOverflowCheckedBinOp(Ops);
-
+ if (Ops.Ty->isSignedIntegerType()) {
+ switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(Ops);
+ }
+ }
+
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
- // Signed integer overflow is undefined behavior.
- if (Ops.Ty->isSignedIntegerType())
- return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
-
return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
}
- if (Ops.E->getLHS()->getType()->isPointerType() &&
- Ops.E->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) {
+ // Must have binary (not unary) expr here. Unary pointer decrement doesn't
+ // use this path.
+ const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E);
+
+ if (BinOp->getLHS()->getType()->isPointerType() &&
+ BinOp->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) {
// The amount of the addition needs to account for the VLA size for
// ptr-int
// The amount of the division needs to account for the VLA size for
// ptr-ptr.
- CGF.ErrorUnsupported(Ops.E, "VLA pointer subtraction");
+ CGF.ErrorUnsupported(BinOp, "VLA pointer subtraction");
}
- const QualType LHSType = Ops.E->getLHS()->getType();
+ const QualType LHSType = BinOp->getLHS()->getType();
const QualType LHSElementType = LHSType->getPointeeType();
if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
// pointer - int
@@ -1408,9 +1645,8 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
if (Width < CGF.LLVMPointerWidth) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
- const llvm::Type *IdxType =
- llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
- if (Ops.E->getRHS()->getType()->isSignedIntegerType())
+ const llvm::Type *IdxType = CGF.IntPtrTy;
+ if (BinOp->getRHS()->getType()->isSignedIntegerType())
Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
else
Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
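The index-widening logic shared by pointer addition and subtraction, at the source level (a sketch):

    char *index_both(char *p, short s, unsigned short u) {
      char *a = p + s;  // index sign-extended to the pointer width before the GEP
      char *b = p - u;  // index zero-extended to the pointer width
      return a < b ? a : b;
    }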
@@ -1615,17 +1851,25 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// because the result is altered by the store, i.e., [C99 6.5.16p1]
// 'An assignment expression has the value of the left operand after
// the assignment...'.
- if (LHS.isBitField()) {
- if (!LHS.isVolatileQualified()) {
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
- &RHS);
- return RHS;
- } else
- CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType());
- } else
+ if (LHS.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
+ &RHS);
+ else
CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
+
+ // If the result is clearly ignored, return now.
if (Ignore)
return 0;
+
+ // Objective-C property assignment never reloads the value following a store.
+ if (LHS.isPropertyRef() || LHS.isKVCRef())
+ return RHS;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LHS.isVolatileQualified())
+ return RHS;
+
+ // Otherwise, reload the value.
return EmitLoadOfLValue(LHS, E->getType());
}
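The bit-field arm relies on EmitStoreThroughBitfieldLValue handing back the truncated value, so the expression result obeys C99 6.5.16p1 (a sketch):

    struct Bits { unsigned b : 3; };
    int stored(Bits *s) {
      return (s->b = 12);  // 12 doesn't fit in 3 bits; the assignment's value
                           // is what was stored: 4
    }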
@@ -1925,6 +2169,13 @@ Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
DstTy);
}
+
+llvm::Value *CodeGenFunction::
+EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
+}
+
LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
llvm::Value *V;
// object->isa or (*object).isa
@@ -1958,12 +2209,12 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
LValue CodeGenFunction::EmitCompoundAssignOperatorLValue(
const CompoundAssignOperator *E) {
ScalarExprEmitter Scalar(*this);
- Value *BitFieldResult = 0;
+ Value *Result = 0;
switch (E->getOpcode()) {
#define COMPOUND_OP(Op) \
case BinaryOperator::Op##Assign: \
return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
- BitFieldResult)
+ Result)
COMPOUND_OP(Mul);
COMPOUND_OP(Div);
COMPOUND_OP(Rem);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
index 7c842a9..e735a61 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
@@ -90,11 +90,14 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
CallArgList Args;
EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
+ QualType ResultType =
+ E->getMethodDecl() ? E->getMethodDecl()->getResultType() : E->getType();
+
if (isSuperMessage) {
// super is only valid in an Objective-C method
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
- return Runtime.GenerateMessageSendSuper(*this, Return, E->getType(),
+ return Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
E->getSelector(),
OMD->getClassInterface(),
isCategoryImpl,
@@ -104,7 +107,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
E->getMethodDecl());
}
- return Runtime.GenerateMessageSend(*this, Return, E->getType(),
+ return Runtime.GenerateMessageSend(*this, Return, ResultType,
E->getSelector(),
Receiver, Args, OID,
E->getMethodDecl());
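The ResultType change above amounts to a small preference rule, which can be read as a helper. A sketch assuming the clang AST headers of this era (getResultType was the accessor at the time); this function is illustrative, not part of the commit:

    #include "clang/AST/DeclObjC.h"
    #include "clang/AST/ExprObjC.h"

    // Prefer the method's declared result type over the expression's
    // type, falling back when no method declaration is attached.
    static clang::QualType messageResultType(const clang::ObjCMessageExpr *E) {
      if (const clang::ObjCMethodDecl *MD = E->getMethodDecl())
        return MD->getResultType();
      return E->getType();
    }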
@@ -458,7 +461,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
LoadObjCSelf(), Ivar, 0);
const RecordType *RT = FieldType->getAs<RecordType>();
CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
- CXXDestructorDecl *Dtor = FieldClassDecl->getDestructor(getContext());
+ CXXDestructorDecl *Dtor = FieldClassDecl->getDestructor();
if (!Dtor->isTrivial()) {
if (Array) {
const llvm::Type *BasePtr = ConvertType(FieldType);
@@ -595,7 +598,8 @@ void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
Args);
} else if (const ObjCImplicitSetterGetterRefExpr *E =
dyn_cast<ObjCImplicitSetterGetterRefExpr>(Exp)) {
- Selector S = E->getSetterMethod()->getSelector();
+ const ObjCMethodDecl *SetterMD = E->getSetterMethod();
+ Selector S = SetterMD->getSelector();
CallArgList Args;
llvm::Value *Receiver;
if (E->getInterfaceDecl()) {
@@ -606,7 +610,8 @@ void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
return;
} else
Receiver = EmitScalarExpr(E->getBase());
- Args.push_back(std::make_pair(Src, E->getType()));
+ ObjCMethodDecl::param_iterator P = SetterMD->param_begin();
+ Args.push_back(std::make_pair(Src, (*P)->getType()));
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
getContext().VoidTy, S,
Receiver,
@@ -778,8 +783,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::ConstantInt::get(UnsignedLongLTy, 1));
Builder.CreateStore(Counter, CounterPtr);
- llvm::BasicBlock *LoopEnd = createBasicBlock("loopend");
- llvm::BasicBlock *AfterBody = createBasicBlock("afterbody");
+ JumpDest LoopEnd = getJumpDestInCurrentScope("loopend");
+ JumpDest AfterBody = getJumpDestInCurrentScope("afterbody");
BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
@@ -787,7 +792,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
BreakContinueStack.pop_back();
- EmitBlock(AfterBody);
+ EmitBlock(AfterBody.Block);
llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
@@ -823,11 +828,11 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
LV.getAddress());
}
- EmitBlock(LoopEnd);
+ EmitBlock(LoopEnd.Block);
}
void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
- CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+ CGM.getObjCRuntime().EmitTryStmt(*this, S);
}
void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
@@ -836,7 +841,9 @@ void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
void CodeGenFunction::EmitObjCAtSynchronizedStmt(
const ObjCAtSynchronizedStmt &S) {
- CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+ CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
}
CGObjCRuntime::~CGObjCRuntime() {}
+
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
index 6c25afe..f3c80bc 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -17,6 +17,7 @@
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
+#include "CGException.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
@@ -162,7 +163,8 @@ public:
const ObjCMethodDecl *Method);
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *OID);
- virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval = false);
virtual llvm::Value *GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
*Method);
@@ -179,8 +181,10 @@ public:
virtual llvm::Function *GetCopyStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
- virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S);
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S);
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
@@ -197,7 +201,7 @@ public:
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
- QualType Ty);
+ llvm::Value *Size);
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
QualType ObjectTy,
llvm::Value *BaseValue,
@@ -360,14 +364,16 @@ llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
return Builder.CreateCall(ClassLookupFn, ClassName);
}
-llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel) {
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval) {
llvm::GlobalAlias *&US = UntypedSelectors[Sel.getAsString()];
if (US == 0)
US = new llvm::GlobalAlias(llvm::PointerType::getUnqual(SelectorTy),
llvm::GlobalValue::PrivateLinkage,
".objc_untyped_selector_alias"+Sel.getAsString(),
NULL, &TheModule);
-
+ if (lval)
+ return US;
return Builder.CreateLoad(US);
}
@@ -624,8 +630,8 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
// to be on the stack / in those registers at the time) on most platforms,
// and generates a SegV on SPARC. With LLVM it corrupts the stack.
bool isPointerSizedReturn = false;
- if (ResultType->isAnyPointerType() || ResultType->isIntegralType() ||
- ResultType->isVoidType())
+ if (ResultType->isAnyPointerType() ||
+ ResultType->isIntegralOrEnumerationType() || ResultType->isVoidType())
isPointerSizedReturn = true;
llvm::BasicBlock *startBB = 0;
@@ -1848,245 +1854,167 @@ llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
-void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S) {
- // Pointer to the personality function
- llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
- true),
- "__gnu_objc_personality_v0");
- Personality = llvm::ConstantExpr::getBitCast(Personality, PtrTy);
- std::vector<const llvm::Type*> Params;
- Params.push_back(PtrTy);
- llvm::Value *RethrowFn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- Params, false), "_Unwind_Resume");
-
- bool isTry = isa<ObjCAtTryStmt>(S);
- llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
- llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
- llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
- llvm::BasicBlock *CatchInCatch = CGF.createBasicBlock("catch.rethrow");
- llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
- llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
- llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
-
- // @synchronized()
- if (!isTry) {
- std::vector<const llvm::Type*> Args(1, IdTy);
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
- llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
- llvm::Value *SyncArg =
- CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
- CGF.Builder.CreateCall(SyncEnter, SyncArg);
- }
+void CGObjCGNU::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ // Evaluate the lock operand. This should dominate the cleanup.
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(S.getSynchExpr());
- // Push an EH context entry, used for handling rethrows and jumps
- // through finally.
- CGF.PushCleanupBlock(FinallyBlock);
-
- // Emit the statements in the @try {} block
- CGF.setInvokeDest(TryHandler);
-
- CGF.EmitBlock(TryBlock);
- CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
- : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
-
- // Jump to @finally if there is no exception
- CGF.EmitBranchThroughCleanup(FinallyEnd);
-
- // Emit the handlers
- CGF.EmitBlock(TryHandler);
-
- // Get the correct versions of the exception handling intrinsics
- llvm::Value *llvm_eh_exception =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- llvm::Value *llvm_eh_typeid_for =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
-
- // Exception object
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
-
- llvm::SmallVector<llvm::Value*, 8> ESelArgs;
- llvm::SmallVector<std::pair<const VarDecl*, const Stmt*>, 8> Handlers;
-
- ESelArgs.push_back(Exc);
- ESelArgs.push_back(Personality);
-
- bool HasCatchAll = false;
- // Only @try blocks may have @catch blocks, but both can have @finally
- if (isTry) {
- if (cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
- const ObjCAtTryStmt &AtTry = cast<ObjCAtTryStmt>(S);
- CGF.setInvokeDest(CatchInCatch);
-
- for (unsigned I = 0, N = AtTry.getNumCatchStmts(); I != N; ++I) {
- const ObjCAtCatchStmt *CatchStmt = AtTry.getCatchStmt(I);
- const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
- Handlers.push_back(std::make_pair(CatchDecl,
- CatchStmt->getCatchBody()));
-
- // @catch() and @catch(id) both catch any ObjC exception
- if (!CatchDecl || CatchDecl->getType()->isObjCIdType()
- || CatchDecl->getType()->isObjCQualifiedIdType()) {
- // Use i8* null here to signal this is a catch all, not a cleanup.
- ESelArgs.push_back(NULLPtr);
- HasCatchAll = true;
- // No further catches after this one will ever be reached
- break;
- }
-
- // All other types should be Objective-C interface pointer types.
- const ObjCObjectPointerType *OPT =
- CatchDecl->getType()->getAs<ObjCObjectPointerType>();
- assert(OPT && "Invalid @catch type.");
- const ObjCInterfaceDecl *IDecl =
- OPT->getObjectType()->getInterface();
- assert(IDecl && "Invalid @catch type.");
- llvm::Value *EHType =
- MakeConstantString(IDecl->getNameAsString());
- ESelArgs.push_back(EHType);
- }
- }
- }
+ // Acquire the lock.
+ llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncEnter, SyncArg);
- // We use a cleanup unless there was already a catch all.
- if (!HasCatchAll) {
- ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
- Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+ // Register an all-paths cleanup to release the lock.
+ {
+ CodeGenFunction::CleanupBlock ReleaseScope(CGF, NormalAndEHCleanup);
+
+ llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncExit, SyncArg);
}
- // Find which handler was matched.
- llvm::Value *ESelector = CGF.Builder.CreateCall(llvm_eh_selector,
- ESelArgs.begin(), ESelArgs.end(), "selector");
+ // Emit the body of the statement.
+ CGF.EmitStmt(S.getSynchBody());
- for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
- const VarDecl *CatchParam = Handlers[i].first;
- const Stmt *CatchBody = Handlers[i].second;
+ // Pop the lock-release cleanup.
+ CGF.PopCleanupBlock();
+}
- llvm::BasicBlock *Next = 0;
+namespace {
+ struct CatchHandler {
+ const VarDecl *Variable;
+ const Stmt *Body;
+ llvm::BasicBlock *Block;
+ llvm::Value *TypeInfo;
+ };
+}
- // The last handler always matches.
- if (i + 1 != e) {
- assert(CatchParam && "Only last handler can be a catch all.");
+void CGObjCGNU::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+ // Unlike the Apple non-fragile runtimes, which also use
+ // unwind-based zero-cost exceptions, the GNU Objective-C runtime's
+ // EH support isn't a veneer over C++ EH. Instead, exception
+ // objects are created by __objc_exception_throw and destroyed by
+ // the personality function; this avoids the need for bracketing
+ // catch handlers with calls to __blah_begin_catch/__blah_end_catch
+ // (or even _Unwind_DeleteException), but probably doesn't
+ // interoperate very well with foreign exceptions.
+
+ // Jump destination for falling out of catch bodies.
+ CodeGenFunction::JumpDest Cont;
+ if (S.getNumCatchStmts())
+ Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+
+ // We handle @finally statements by pushing them as a cleanup
+ // before entering the catch.
+ CodeGenFunction::FinallyInfo FinallyInfo;
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt()) {
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::Constant *Rethrow =
+ CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
- // Test whether this block matches the type for the selector and branch
- // to Match if it does, or to the next BB if it doesn't.
- llvm::BasicBlock *Match = CGF.createBasicBlock("match");
- Next = CGF.createBasicBlock("catch.next");
- llvm::Value *Id = CGF.Builder.CreateCall(llvm_eh_typeid_for,
- CGF.Builder.CreateBitCast(ESelArgs[i+2], PtrTy));
- CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(ESelector, Id), Match,
- Next);
+ FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(), 0, 0,
+ Rethrow);
+ }
- CGF.EmitBlock(Match);
- }
+ llvm::SmallVector<CatchHandler, 8> Handlers;
- if (CatchBody) {
- llvm::Value *ExcObject = CGF.Builder.CreateBitCast(Exc,
- CGF.ConvertType(CatchParam->getType()));
-
- // Bind the catch parameter if it exists.
- if (CatchParam) {
- // CatchParam is a ParmVarDecl because of the grammar
- // construction used to handle this, but for codegen purposes
- // we treat this as a local decl.
- CGF.EmitLocalBlockVarDecl(*CatchParam);
- CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
- }
+ // Enter the catch, if there is one.
+ if (S.getNumCatchStmts()) {
+ for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
- CGF.ObjCEHValueStack.push_back(ExcObject);
- CGF.EmitStmt(CatchBody);
- CGF.ObjCEHValueStack.pop_back();
+ Handlers.push_back(CatchHandler());
+ CatchHandler &Handler = Handlers.back();
+ Handler.Variable = CatchDecl;
+ Handler.Body = CatchStmt->getCatchBody();
+ Handler.Block = CGF.createBasicBlock("catch");
- CGF.EmitBranchThroughCleanup(FinallyEnd);
+ // @catch() and @catch(id) both catch any ObjC exception.
+ // Treat them as catch-alls.
+ // FIXME: this is what this code was doing before, but should 'id'
+ // really be catching foreign exceptions?
+ if (!CatchDecl
+ || CatchDecl->getType()->isObjCIdType()
+ || CatchDecl->getType()->isObjCQualifiedIdType()) {
- if (Next)
- CGF.EmitBlock(Next);
- } else {
- assert(!Next && "catchup should be last handler.");
+ Handler.TypeInfo = 0; // catch-all
+
+ // Don't consider any other catches.
+ break;
+ }
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *OPT =
+ CatchDecl->getType()->getAs<ObjCObjectPointerType>();
+ assert(OPT && "Invalid @catch type.");
+ const ObjCInterfaceDecl *IDecl =
+ OPT->getObjectType()->getInterface();
+ assert(IDecl && "Invalid @catch type.");
+ Handler.TypeInfo = MakeConstantString(IDecl->getNameAsString());
}
+
+ EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
+ Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
}
- // The @finally block is a secondary landing pad for any exceptions thrown in
- // @catch() blocks
- CGF.EmitBlock(CatchInCatch);
- Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- ESelArgs.clear();
- ESelArgs.push_back(Exc);
- ESelArgs.push_back(Personality);
- // If there is a @catch or @finally clause outside of this one, then we
- // need to make sure that we catch and rethrow it.
- if (PrevLandingPad) {
- ESelArgs.push_back(NULLPtr);
- } else {
- ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
- }
- CGF.Builder.CreateCall(llvm_eh_selector, ESelArgs.begin(), ESelArgs.end(),
- "selector");
- CGF.Builder.CreateCall(llvm_eh_typeid_for,
- CGF.Builder.CreateIntToPtr(ESelArgs[2], PtrTy));
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ // Emit the try body.
+ CGF.EmitStmt(S.getTryBody());
- CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+ // Leave the try.
+ if (S.getNumCatchStmts())
+ CGF.EHStack.popCatch();
- CGF.setInvokeDest(PrevLandingPad);
+ // Remember where we were.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
- CGF.EmitBlock(FinallyBlock);
+ // Emit the handlers.
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
+ CatchHandler &Handler = Handlers[I];
+ CGF.EmitBlock(Handler.Block);
+ llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
- if (isTry) {
- if (const ObjCAtFinallyStmt* FinallyStmt =
- cast<ObjCAtTryStmt>(S).getFinallyStmt())
- CGF.EmitStmt(FinallyStmt->getFinallyBody());
- } else {
- // Emit 'objc_sync_exit(expr)' as finally's sole statement for
- // @synchronized.
- std::vector<const llvm::Type*> Args(1, IdTy);
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
- llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
- llvm::Value *SyncArg =
- CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
- CGF.Builder.CreateCall(SyncExit, SyncArg);
- }
+ // Bind the catch parameter if it exists.
+ if (const VarDecl *CatchParam = Handler.Variable) {
+ const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ Exn = CGF.Builder.CreateBitCast(Exn, CatchType);
- if (Info.SwitchBlock)
- CGF.EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- CGF.EmitBlock(Info.EndBlock);
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(Exn, CGF.GetAddrOfLocalVar(CatchParam));
+ }
- // Branch around the rethrow code.
- CGF.EmitBranch(FinallyEnd);
+ CGF.ObjCEHValueStack.push_back(Exn);
+ CGF.EmitStmt(Handler.Body);
+ CGF.ObjCEHValueStack.pop_back();
- CGF.EmitBlock(FinallyRethrow);
+ CGF.EmitBranchThroughCleanup(Cont);
+ }
- llvm::Value *ExceptionObject = CGF.Builder.CreateLoad(RethrowPtr);
- llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
- if (!UnwindBB) {
- CGF.Builder.CreateCall(RethrowFn, ExceptionObject);
- // Exception always thrown, next instruction is never reached.
- CGF.Builder.CreateUnreachable();
- } else {
- // If there is a @catch block outside this scope, we invoke instead of
- // calling because we may return to this function. This is very slow, but
- // some people still do it. It would be nice to add an optimised path for
- // this.
- CGF.Builder.CreateInvoke(RethrowFn, UnwindBB, UnwindBB, &ExceptionObject,
- &ExceptionObject+1);
- }
+ // Go back to the try-statement fallthrough.
+ CGF.Builder.restoreIP(SavedIP);
+
+ // Pop out of the finally.
+ if (S.getFinallyStmt())
+ CGF.ExitFinallyBlock(FinallyInfo);
- CGF.EmitBlock(FinallyEnd);
+ if (Cont.Block) {
+ if (Cont.Block->use_empty())
+ delete Cont.Block;
+ else {
+ CGF.EmitBranch(Cont.Block);
+ CGF.EmitBlock(Cont.Block);
+ }
+ }
}
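The handler-collection loop in EmitTryStmt has one subtlety worth isolating: a null TypeInfo marks a catch-all, and nothing after it can ever match. A minimal model, with the struct and parameter invented for illustration:

    #include <vector>

    struct HandlerModel {
      const void *TypeInfo;  // null => catch-all
    };

    // Stop collecting once a catch-all is seen; later handlers are dead.
    std::vector<HandlerModel> collectHandlers(
        const std::vector<const void *> &CatchTypes) {
      std::vector<HandlerModel> Handlers;
      for (const void *TI : CatchTypes) {
        Handlers.push_back(HandlerModel{TI});
        if (!TI)
          break;
      }
      return Handlers;
    }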
void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
@@ -2174,17 +2102,12 @@ void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
void CGObjCGNU::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
- QualType Ty) {
+ llvm::Value *Size) {
CGBuilderTy B = CGF.Builder;
DestPtr = EnforceType(B, DestPtr, IdTy);
SrcPtr = EnforceType(B, SrcPtr, PtrToIdTy);
- std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty);
- unsigned long size = TypeInfo.first/8;
- // FIXME: size_t
- llvm::Value *N = llvm::ConstantInt::get(LongTy, size);
-
- B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, N);
+ B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, Size);
}
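With the QualType parameter replaced by an explicit size, the byte count the callee used to compute now lives at the call sites. Under the old scheme it came from getTypeInfo, roughly as below; this is a sketch using the clang API of this era, with the parameters standing in for values in scope at a call site:

    #include "clang/AST/ASTContext.h"
    #include "llvm/Constants.h"

    // Old in-callee size computation, now hoisted to the callers.
    static llvm::Value *emitTypeSizeInBytes(clang::ASTContext &Context,
                                            clang::QualType Ty,
                                            const llvm::Type *SizeTy) {
      uint64_t Bits = Context.getTypeInfo(Ty).first;  // reported in bits
      return llvm::ConstantInt::get(SizeTy, Bits / 8);
    }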
llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
index d3bafd7..01ead9e 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
@@ -16,13 +16,14 @@
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
+#include "CGException.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/LangOptions.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
@@ -31,6 +32,7 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <cstdio>
@@ -440,6 +442,15 @@ public:
return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
}
+ /// ExceptionRethrowFn - LLVM objc_exception_rethrow function.
+ llvm::Constant *getExceptionRethrowFn() {
+ // void objc_exception_rethrow(void)
+ std::vector<const llvm::Type*> Args;
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, true);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_rethrow");
+ }
+
/// SyncEnterFn - LLVM objc_sync_enter function.
llvm::Constant *getSyncEnterFn() {
// void objc_sync_enter (id)
@@ -843,6 +854,9 @@ protected:
/// MethodVarNames - uniqued method variable names.
llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
+ /// DefinedCategoryNames - list of category names in the form Class_Category.
+ llvm::SetVector<std::string> DefinedCategoryNames;
+
/// MethodVarTypes - uniqued method type signatures. We have to use
/// a StringMap here because have no other unique reference.
llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes;
@@ -1120,7 +1134,8 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
- llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval=false);
public:
CGObjCMac(CodeGen::CodeGenModule &cgm);
@@ -1151,7 +1166,8 @@ public:
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
- virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval = false);
/// The NeXT/Apple runtimes do not support typed selectors; just emit an
/// untyped one.
@@ -1170,8 +1186,11 @@ public:
virtual llvm::Constant *GetCopyStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
- virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S);
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
+ void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S);
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
@@ -1187,7 +1206,7 @@ public:
llvm::Value *src, llvm::Value *dest);
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
llvm::Value *dest, llvm::Value *src,
- QualType Ty);
+ llvm::Value *size);
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
QualType ObjectTy,
@@ -1319,7 +1338,8 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
- llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval=false);
/// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
/// interface. The return value has type EHTypePtrTy.
@@ -1382,8 +1402,9 @@ public:
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
- virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel)
- { return EmitSelector(Builder, Sel); }
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lvalue = false)
+ { return EmitSelector(Builder, Sel, lvalue); }
/// The NeXT/Apple runtimes do not support typed selectors; just emit an
/// untyped one.
@@ -1412,8 +1433,10 @@ public:
return ObjCTypes.getEnumerationMutationFn();
}
- virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S);
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S);
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
@@ -1429,7 +1452,7 @@ public:
llvm::Value *src, llvm::Value *dest);
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
llvm::Value *dest, llvm::Value *src,
- QualType Ty);
+ llvm::Value *size);
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
QualType ObjectTy,
llvm::Value *BaseValue,
@@ -1483,8 +1506,9 @@ llvm::Value *CGObjCMac::GetClass(CGBuilderTy &Builder,
}
/// GetSelector - Return the pointer to the unique'd string for this selector.
-llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel) {
- return EmitSelector(Builder, Sel);
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval) {
+ return EmitSelector(Builder, Sel, lval);
}
llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
*Method) {
@@ -1620,30 +1644,23 @@ CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
const llvm::FunctionType *FTy =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
+ if (Method)
+ assert(CGM.getContext().getCanonicalType(Method->getResultType()) ==
+ CGM.getContext().getCanonicalType(ResultType) &&
+ "Result type mismatch!");
+
llvm::Constant *Fn = NULL;
- if (CGM.ReturnTypeUsesSret(FnInfo)) {
+ if (CGM.ReturnTypeUsesSRet(FnInfo)) {
Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
: ObjCTypes.getSendStretFn(IsSuper);
- } else if (ResultType->isFloatingType()) {
- if (ObjCABI == 2) {
- if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
- BuiltinType::Kind k = BT->getKind();
- Fn = (k == BuiltinType::LongDouble) ? ObjCTypes.getSendFpretFn2(IsSuper)
- : ObjCTypes.getSendFn2(IsSuper);
- } else {
- Fn = ObjCTypes.getSendFn2(IsSuper);
- }
- } else
- // FIXME. This currently matches gcc's API for x86-32. May need to change
- // for others if we have their API.
- Fn = ObjCTypes.getSendFpretFn(IsSuper);
+ } else if (CGM.ReturnTypeUsesFPRet(ResultType)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFpretFn2(IsSuper)
+ : ObjCTypes.getSendFpretFn(IsSuper);
} else {
Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
: ObjCTypes.getSendFn(IsSuper);
}
- assert(Fn && "EmitLegacyMessageSend - unknown API");
- Fn = llvm::ConstantExpr::getBitCast(Fn,
- llvm::PointerType::getUnqual(FTy));
+ Fn = llvm::ConstantExpr::getBitCast(Fn, llvm::PointerType::getUnqual(FTy));
return CGF.EmitCall(FnInfo, Fn, Return, ActualArgs);
}
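The messenger choice above reduces to a three-way decision tree; a standalone sketch of it, with all names invented for illustration:

    // Which objc_msgSend variant the legacy send path picks: struct
    // returns need the _stret entry point, x86 FP returns need _fpret,
    // and everything else takes the plain messenger.
    enum class Messenger { Plain, Stret, Fpret };

    Messenger pickMessenger(bool UsesSRet, bool UsesFPRet) {
      if (UsesSRet)
        return Messenger::Stret;
      if (UsesFPRet)
        return Messenger::Fpret;
      return Messenger::Plain;
    }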
@@ -1909,10 +1926,18 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(llvm::Twine Name,
Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
Prop));
}
- if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD))
+ if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) {
for (ObjCInterfaceDecl::protocol_iterator P = OID->protocol_begin(),
E = OID->protocol_end(); P != E; ++P)
- PushProtocolProperties(PropertySet, Properties, Container, (*P), ObjCTypes);
+ PushProtocolProperties(PropertySet, Properties, Container, (*P),
+ ObjCTypes);
+ }
+ else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(OCD)) {
+ for (ObjCCategoryDecl::protocol_iterator P = CD->protocol_begin(),
+ E = CD->protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P),
+ ObjCTypes);
+ }
// Return null for empty list.
if (Properties.empty())
@@ -2049,6 +2074,7 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
"__OBJC,__category,regular,no_dead_strip",
4, true);
DefinedCategories.push_back(GV);
+ DefinedCategoryNames.insert(ExtName.str());
}
// FIXME: Get from somewhere?
@@ -2494,11 +2520,52 @@ llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
return ObjCTypes.getEnumerationMutationFn();
}
+void CGObjCMac::EmitTryStmt(CodeGenFunction &CGF, const ObjCAtTryStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
+void CGObjCMac::EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
/*
Objective-C setjmp-longjmp (sjlj) Exception Handling
--
+ A catch buffer is a setjmp buffer plus:
+ - a pointer to the exception that was caught
+ - a pointer to the previous exception data buffer
+ - two pointers of reserved storage
+ Therefore catch buffers form a stack, with a pointer to the top
+ of the stack kept in thread-local storage.
+
+ objc_exception_try_enter pushes a catch buffer onto the EH stack.
+ objc_exception_try_exit pops the given catch buffer, which is
+ required to be the top of the EH stack.
+ objc_exception_throw pops the top of the EH stack, writes the
+ thrown exception into the appropriate field, and longjmps
+ to the setjmp buffer. It crashes the process (with a printf
+ and an abort()) if there are no catch buffers on the stack.
+ objc_exception_extract just reads the exception pointer out of the
+ catch buffer.
+
+ There's no reason an implementation couldn't use a lightweight
+ setjmp here --- something like __builtin_setjmp, but API-compatible
+ with the heavyweight setjmp. This will be more important if we ever
+ want to implement correct ObjC/C++ exception interactions for the
+ fragile ABI.
+
+ Note that for this use of setjmp/longjmp to be correct, we may need
+ to mark some local variables volatile: if a non-volatile local
+ variable is modified between the setjmp and the longjmp, it has
+ indeterminate value. For the purposes of LLVM IR, it may be
+ sufficient to make loads and stores within the @try (to variables
+ declared outside the @try) volatile. This is necessary for
+ correctness under optimization, but is not currently being done;
+ this is being tracked as rdar://problem/8160285.
+
The basic framework for a @try-catch-finally is as follows:
{
objc_exception_data d;
@@ -2560,37 +2627,33 @@ llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
Rethrows and Jumps-Through-Finally
--
- Support for implicit rethrows and jumping through the finally block is
- handled by storing the current exception-handling context in
- ObjCEHStack.
-
- In order to implement proper @finally semantics, we support one basic
- mechanism for jumping through the finally block to an arbitrary
- destination. Constructs which generate exits from a @try or @catch
- block use this mechanism to implement the proper semantics by chaining
- jumps, as necessary.
-
- This mechanism works like the one used for indirect goto: we
- arbitrarily assign an ID to each destination and store the ID for the
- destination in a variable prior to entering the finally block. At the
- end of the finally block we simply create a switch to the proper
- destination.
-
- Code gen for @synchronized(expr) stmt;
- Effectively generating code for:
- objc_sync_enter(expr);
- @try stmt @finally { objc_sync_exit(expr); }
+ '@throw;' is supported by pushing the currently-caught exception
+ onto ObjCEHStack while the @catch blocks are emitted.
+
+ Branches through the @finally block are handled with an ordinary
+ normal cleanup. We do not register an EH cleanup; fragile-ABI ObjC
+ exceptions are not compatible with C++ exceptions, and this is
+ hardly the only place where this will go wrong.
+
+ @synchronized(expr) { stmt; } is emitted as if it were:
+ id synch_value = expr;
+ objc_sync_enter(synch_value);
+ @try { stmt; } @finally { objc_sync_exit(synch_value); }
*/
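The catch-buffer layout described in the comment above can be sketched as a plain struct. The field names here are guesses; only the shape (a jmp_buf, the caught exception, a parent link, and two reserved words) is taken from the text:

    #include <setjmp.h>

    // Hypothetical model of the fragile-ABI exception data buffer.
    struct ObjCExceptionDataModel {
      jmp_buf Buffer;                   // longjmp target for the handler
      void *CaughtException;            // written by objc_exception_throw
      ObjCExceptionDataModel *Parent;   // previous buffer; they form a stack
      void *Reserved[2];                // reserved storage
    };
    // objc_exception_try_enter pushes one of these onto a thread-local
    // stack; objc_exception_throw pops the top, fills CaughtException,
    // and longjmps to Buffer.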
void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const Stmt &S) {
bool isTry = isa<ObjCAtTryStmt>(S);
- // Create various blocks we refer to for handling @finally.
- llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
- llvm::BasicBlock *FinallyExit = CGF.createBasicBlock("finally.exit");
- llvm::BasicBlock *FinallyNoExit = CGF.createBasicBlock("finally.noexit");
- llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
- llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+ // A destination for the fall-through edges of the catch handlers to
+ // jump to.
+ CodeGenFunction::JumpDest FinallyEnd =
+ CGF.getJumpDestInCurrentScope("finally.end");
+
+ // A destination for the rethrow edge of the catch handlers to jump
+ // to.
+ CodeGenFunction::JumpDest FinallyRethrow =
+ CGF.getJumpDestInCurrentScope("finally.rethrow");
// For @synchronized, call objc_sync_enter(sync.expr). The
// evaluation of the expression must occur before we enter the
@@ -2601,75 +2664,139 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
SyncArg =
CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
- CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg)
+ ->setDoesNotThrow();
}
- // Push an EH context entry, used for handling rethrows and jumps
- // through finally.
- CGF.PushCleanupBlock(FinallyBlock);
-
- if (CGF.ObjCEHValueStack.empty())
- CGF.ObjCEHValueStack.push_back(0);
- // If this is a nested @try, the caught exception is that of the enclosing @try.
- else
- CGF.ObjCEHValueStack.push_back(CGF.ObjCEHValueStack.back());
// Allocate memory for the exception data and rethrow pointer.
llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
"exceptiondata.ptr");
llvm::Value *RethrowPtr = CGF.CreateTempAlloca(ObjCTypes.ObjectPtrTy,
"_rethrow");
- llvm::Value *CallTryExitPtr = CGF.CreateTempAlloca(
- llvm::Type::getInt1Ty(VMContext),
+
+ // Create a flag indicating whether the cleanup needs to call
+ // objc_exception_try_exit. This is true except when
+ // - no catches match and we're branching through the cleanup
+ // just to rethrow the exception, or
+ // - a catch matched and we're falling out of the catch handler.
+ llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
"_call_try_exit");
CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext),
- CallTryExitPtr);
+ CallTryExitVar);
+
+ // Push a normal cleanup to leave the try scope.
+ {
+ CodeGenFunction::CleanupBlock FinallyScope(CGF, NormalCleanup);
+
+ // Check whether we need to call objc_exception_try_exit.
+ // In optimized code, this branch will always be folded.
+ llvm::BasicBlock *FinallyCallExit =
+ CGF.createBasicBlock("finally.call_exit");
+ llvm::BasicBlock *FinallyNoCallExit =
+ CGF.createBasicBlock("finally.no_call_exit");
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CallTryExitVar),
+ FinallyCallExit, FinallyNoCallExit);
+
+ CGF.EmitBlock(FinallyCallExit);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ CGF.EmitBlock(FinallyNoCallExit);
+
+ if (isTry) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt())
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+
+ // ~CleanupBlock requires there to be an exit block.
+ CGF.EnsureInsertPoint();
+ } else {
+ // Emit objc_sync_exit(expr); as finally's sole statement for
+ // @synchronized.
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
+ ->setDoesNotThrow();
+ }
+ }
- // Enter a new try block and call setjmp.
- CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
- llvm::Value *JmpBufPtr = CGF.Builder.CreateStructGEP(ExceptionData, 0,
- "jmpbufarray");
- JmpBufPtr = CGF.Builder.CreateStructGEP(JmpBufPtr, 0, "tmp");
- llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
- JmpBufPtr, "result");
+ // Enter a try block:
+ // - Call objc_exception_try_enter to push ExceptionData on top of
+ // the EH stack.
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
+ // - Call setjmp on the exception data buffer.
+ llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
+ llvm::Value *GEPIndexes[] = { Zero, Zero, Zero };
+ llvm::Value *SetJmpBuffer =
+ CGF.Builder.CreateGEP(ExceptionData, GEPIndexes, GEPIndexes+3, "setjmp_buffer");
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
+ SetJmpResult->setDoesNotThrow();
+
+ // If setjmp returned 0, enter the protected block; otherwise,
+ // branch to the handler.
llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
- CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(SetJmpResult, "threw"),
- TryHandler, TryBlock);
+ llvm::Value *DidCatch =
+ CGF.Builder.CreateIsNull(SetJmpResult, "did_catch_exception");
+ CGF.Builder.CreateCondBr(DidCatch, TryBlock, TryHandler);
- // Emit the @try block.
+ // Emit the protected block.
CGF.EmitBlock(TryBlock);
CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
- : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
CGF.EmitBranchThroughCleanup(FinallyEnd);
- // Emit the "exception in @try" block.
+ // Emit the exception handler block.
CGF.EmitBlock(TryHandler);
// Retrieve the exception object. We may emit multiple blocks but
// nothing can cross this so the value is already in SSA form.
- llvm::Value *Caught =
+ llvm::CallInst *Caught =
CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
ExceptionData, "caught");
- CGF.ObjCEHValueStack.back() = Caught;
- if (!isTry) {
- CGF.Builder.CreateStore(Caught, RethrowPtr);
+ Caught->setDoesNotThrow();
+
+ // Remember the exception to rethrow.
+ CGF.Builder.CreateStore(Caught, RethrowPtr);
+
+ // Note: at this point, objc_exception_throw already popped the
+ // catch handler, so anything that branches to the cleanup needs
+ // to set CallTryExitVar to false.
+
+ // For a @synchronized (or a @try with no catches), just branch
+ // through the cleanup to the rethrow block.
+ if (!isTry || !cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+ // Tell the cleanup not to re-pop the exit.
CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
- CallTryExitPtr);
+ CallTryExitVar);
+
CGF.EmitBranchThroughCleanup(FinallyRethrow);
- } else if (cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+
+ // Otherwise, we have to match against the caught exceptions.
+ } else {
+ // Push the exception to rethrow onto the EH value stack for the
+ // benefit of any @throws in the handlers.
+ CGF.ObjCEHValueStack.push_back(Caught);
+
const ObjCAtTryStmt* AtTryStmt = cast<ObjCAtTryStmt>(&S);
// Enter a new exception try block (in case a @catch block throws
- // an exception).
- CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
+ // an exception). Now CallTryExitVar (currently true) is back in
+ // synch with reality.
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
- llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
- JmpBufPtr, "result");
- llvm::Value *Threw = CGF.Builder.CreateIsNotNull(SetJmpResult, "threw");
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer,
+ "setjmp.result");
+ SetJmpResult->setDoesNotThrow();
+
+ llvm::Value *Threw =
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
llvm::BasicBlock *CatchBlock = CGF.createBasicBlock("catch");
- llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch.handler");
+ llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch_for_catch");
CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);
CGF.EmitBlock(CatchBlock);
@@ -2680,7 +2807,6 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
bool AllMatched = false;
for (unsigned I = 0, N = AtTryStmt->getNumCatchStmts(); I != N; ++I) {
const ObjCAtCatchStmt *CatchStmt = AtTryStmt->getCatchStmt(I);
- llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch");
const VarDecl *CatchParam = CatchStmt->getCatchParamDecl();
const ObjCObjectPointerType *OPT = 0;
@@ -2691,47 +2817,67 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
} else {
OPT = CatchParam->getType()->getAs<ObjCObjectPointerType>();
- // catch(id e) always matches.
+ // catch(id e) always matches under this ABI, since only
+ // ObjC exceptions end up here in the first place.
// FIXME: For the time being we also match id<X>; this should
// be rejected by Sema instead.
if (OPT && (OPT->isObjCIdType() || OPT->isObjCQualifiedIdType()))
AllMatched = true;
}
+ // If this is a catch-all, we don't need to test anything.
if (AllMatched) {
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
if (CatchParam) {
CGF.EmitLocalBlockVarDecl(*CatchParam);
assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ // These types work out because ConvertType(id) == i8*.
CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam));
}
CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // The scope of the catch variable ends right here.
+ CatchVarCleanups.ForceCleanup();
+
CGF.EmitBranchThroughCleanup(FinallyEnd);
break;
}
assert(OPT && "Unexpected non-object pointer type in @catch");
const ObjCObjectType *ObjTy = OPT->getObjectType();
+
+ // FIXME: @catch (Class c) ?
ObjCInterfaceDecl *IDecl = ObjTy->getInterface();
assert(IDecl && "Catch parameter must have Objective-C type!");
// Check if the @catch block matches the exception object.
llvm::Value *Class = EmitClassRef(CGF.Builder, IDecl);
- llvm::Value *Match =
+ llvm::CallInst *Match =
CGF.Builder.CreateCall2(ObjCTypes.getExceptionMatchFn(),
Class, Caught, "match");
+ Match->setDoesNotThrow();
- llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("matched");
+ llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("match");
+ llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch.next");
CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"),
MatchedBlock, NextCatchBlock);
// Emit the @catch block.
CGF.EmitBlock(MatchedBlock);
+
+ // Collect any cleanups for the catch variable. The scope lasts until
+ // the end of the catch body.
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
CGF.EmitLocalBlockVarDecl(*CatchParam);
assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+ // Initialize the catch variable.
llvm::Value *Tmp =
CGF.Builder.CreateBitCast(Caught,
CGF.ConvertType(CatchParam->getType()),
@@ -2739,11 +2885,17 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam));
CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // We're done with the catch variable.
+ CatchVarCleanups.ForceCleanup();
+
CGF.EmitBranchThroughCleanup(FinallyEnd);
CGF.EmitBlock(NextCatchBlock);
}
+ CGF.ObjCEHValueStack.pop_back();
+
if (!AllMatched) {
// None of the handlers caught the exception, so store it to be
// rethrown at the end of the @finally block.
@@ -2753,59 +2905,34 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Emit the exception handler for the @catch blocks.
CGF.EmitBlock(CatchHandler);
- CGF.Builder.CreateStore(
- CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData),
- RethrowPtr);
- CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
- CallTryExitPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
- } else {
+
+ // Rethrow the new exception, not the old one.
+ Caught = CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData);
+ Caught->setDoesNotThrow();
CGF.Builder.CreateStore(Caught, RethrowPtr);
+
+ // Don't pop the catch handler; the throw already did.
CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
- CallTryExitPtr);
+ CallTryExitVar);
CGF.EmitBranchThroughCleanup(FinallyRethrow);
}
- // Pop the exception-handling stack entry. It is important to do
- // this now, because the code in the @finally block is not in this
- // context.
- CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
-
- CGF.ObjCEHValueStack.pop_back();
-
- // Emit the @finally block.
- CGF.EmitBlock(FinallyBlock);
- llvm::Value* CallTryExit = CGF.Builder.CreateLoad(CallTryExitPtr, "tmp");
+ // Pop the cleanup.
+ CGF.PopCleanupBlock();
+ CGF.EmitBlock(FinallyEnd.Block);
- CGF.Builder.CreateCondBr(CallTryExit, FinallyExit, FinallyNoExit);
-
- CGF.EmitBlock(FinallyExit);
- CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData);
-
- CGF.EmitBlock(FinallyNoExit);
- if (isTry) {
- if (const ObjCAtFinallyStmt* FinallyStmt =
- cast<ObjCAtTryStmt>(S).getFinallyStmt())
- CGF.EmitStmt(FinallyStmt->getFinallyBody());
- } else {
- // Emit objc_sync_exit(expr); as finally's sole statement for
- // @synchronized.
- CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
+ // Emit the rethrow block.
+ CGF.Builder.ClearInsertionPoint();
+ CGF.EmitBlock(FinallyRethrow.Block, true);
+ if (CGF.HaveInsertPoint()) {
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(),
+ CGF.Builder.CreateLoad(RethrowPtr))
+ ->setDoesNotThrow();
+ CGF.Builder.CreateUnreachable();
}
- // Emit the switch block
- if (Info.SwitchBlock)
- CGF.EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- CGF.EmitBlock(Info.EndBlock);
-
- CGF.EmitBlock(FinallyRethrow);
- CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(),
- CGF.Builder.CreateLoad(RethrowPtr));
- CGF.Builder.CreateUnreachable();
-
- CGF.EmitBlock(FinallyEnd);
+ CGF.Builder.SetInsertPoint(FinallyEnd.Block);
}
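The CallTryExitVar protocol running through this function is easy to lose in the diff: the cleanup pops the runtime's EH stack on exit unless a throw already popped it. An RAII sketch of that protocol, with the runtime hook declared as an assumption:

    extern "C" void objc_exception_try_exit(void *data);  // assumed hook

    // Model of the cleanup: pop the runtime's EH stack on scope exit,
    // unless objc_exception_throw already did (the rethrow paths clear
    // the flag, mirroring the stores of false to CallTryExitVar).
    struct TryExitGuard {
      void *ExceptionData;
      bool CallTryExit = true;
      explicit TryExitGuard(void *D) : ExceptionData(D) {}
      ~TryExitGuard() {
        if (CallTryExit)
          objc_exception_try_exit(ExceptionData);
      }
    };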
void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
@@ -2822,7 +2949,8 @@ void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
ExceptionAsObject = CGF.ObjCEHValueStack.back();
}
- CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject)
+ ->setDoesNotReturn();
CGF.Builder.CreateUnreachable();
// Clear the insertion point to indicate we are in unreachable code.
@@ -2929,15 +3057,11 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
- QualType Ty) {
- // Get size info for this aggregate.
- std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty);
- unsigned long size = TypeInfo.first/8;
+ llvm::Value *size) {
SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
- llvm::Value *N = llvm::ConstantInt::get(ObjCTypes.LongTy, size);
CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
- DestPtr, SrcPtr, N);
+ DestPtr, SrcPtr, size);
return;
}
@@ -2997,12 +3121,14 @@ void CGObjCCommonMac::EmitImageInfo() {
// We never allow @synthesize of a superclass property.
flags |= eImageInfo_CorrectedSynthesize;
+ const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+
// Emitted as int[2];
llvm::Constant *values[2] = {
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), version),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags)
+ llvm::ConstantInt::get(Int32Ty, version),
+ llvm::ConstantInt::get(Int32Ty, flags)
};
- llvm::ArrayType *AT = llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext), 2);
+ llvm::ArrayType *AT = llvm::ArrayType::get(Int32Ty, 2);
const char *Section;
if (ObjCABI == 1)
@@ -3102,7 +3228,8 @@ llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
return Builder.CreateLoad(Entry, "tmp");
}
-llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel) {
+llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lvalue) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
if (!Entry) {
@@ -3115,6 +3242,8 @@ llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel) {
4, true);
}
+ if (lvalue)
+ return Entry;
return Builder.CreateLoad(Entry, "tmp");
}
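The lval/lvalue flag threaded through GetSelector and EmitSelector in this commit means the same thing everywhere: return the address of the uniqued selector slot rather than the loaded SEL. Sketched with invented names, assuming the IRBuilder header of this era:

    #include "llvm/Support/IRBuilder.h"

    // Callers that need the address of the uniqued selector slot get
    // the global itself; everyone else gets the loaded SEL value.
    llvm::Value *emitSelector(llvm::IRBuilder<> &Builder,
                              llvm::GlobalVariable *Slot, bool LValue) {
      if (LValue)
        return Slot;
      return Builder.CreateLoad(Slot, "tmp");
    }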
@@ -3632,8 +3761,14 @@ void CGObjCMac::FinishModule() {
OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n"
<< "\t.globl .objc_class_name_" << (*I)->getName() << "\n";
for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(),
- e = LazySymbols.end(); I != e; ++I)
+ e = LazySymbols.end(); I != e; ++I) {
OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
+ }
+
+ for (size_t i = 0; i < DefinedCategoryNames.size(); ++i) {
+ OS << "\t.objc_category_name_" << DefinedCategoryNames[i] << "=0\n"
+ << "\t.globl .objc_category_name_" << DefinedCategoryNames[i] << "\n";
+ }
CGM.getModule().setModuleInlineAsm(OS.str());
}
@@ -3949,8 +4084,9 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
llvm::Type::getInt8PtrTy(VMContext), 4);
ExceptionDataTy =
- llvm::StructType::get(VMContext, llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
- SetJmpBufferSize),
+ llvm::StructType::get(VMContext,
+ llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
+ SetJmpBufferSize),
StackPtrTy, NULL);
CGM.getModule().addTypeName("struct._objc_exception_data",
ExceptionDataTy);
@@ -5147,7 +5283,7 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
FunctionType::ExtInfo());
llvm::Constant *Fn = 0;
std::string Name("\01l_");
- if (CGM.ReturnTypeUsesSret(FnInfo)) {
+ if (CGM.ReturnTypeUsesSRet(FnInfo)) {
#if 0
// Unlike what is documented, gcc never generates this API!!
if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
@@ -5164,14 +5300,9 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
Fn = ObjCTypes.getMessageSendStretFixupFn();
Name += "objc_msgSend_stret_fixup";
}
- } else if (!IsSuper && ResultType->isFloatingType()) {
- if (ResultType->isSpecificBuiltinType(BuiltinType::LongDouble)) {
- Fn = ObjCTypes.getMessageSendFpretFixupFn();
- Name += "objc_msgSend_fpret_fixup";
- } else {
- Fn = ObjCTypes.getMessageSendFixupFn();
- Name += "objc_msgSend_fixup";
- }
+ } else if (!IsSuper && CGM.ReturnTypeUsesFPRet(ResultType)) {
+ Fn = ObjCTypes.getMessageSendFpretFixupFn();
+ Name += "objc_msgSend_fpret_fixup";
} else {
#if 0
// Unlike what is documented, gcc never generates this API!!
@@ -5403,7 +5534,7 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
}
llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
- Selector Sel) {
+ Selector Sel, bool lval) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
if (!Entry) {
@@ -5418,6 +5549,8 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
CGM.AddUsedGlobal(Entry);
}
+ if (lval)
+ return Entry;
return Builder.CreateLoad(Entry, "tmp");
}
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
@@ -5467,15 +5600,11 @@ void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
CodeGen::CodeGenFunction &CGF,
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
- QualType Ty) {
- // Get size info for this aggregate.
- std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty);
- unsigned long size = TypeInfo.first/8;
+ llvm::Value *Size) {
SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
- llvm::Value *N = llvm::ConstantInt::get(ObjCTypes.LongTy, size);
CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
- DestPtr, SrcPtr, N);
+ DestPtr, SrcPtr, Size);
return;
}
@@ -5535,75 +5664,92 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
}
void
-CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S) {
- bool isTry = isa<ObjCAtTryStmt>(S);
- llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
- llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
- llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
- llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
- llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
- llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ // Evaluate the lock operand. This should dominate the cleanup.
+ llvm::Value *SyncArg = CGF.EmitScalarExpr(S.getSynchExpr());
- // For @synchronized, call objc_sync_enter(sync.expr). The
- // evaluation of the expression must occur before we enter the
- // @synchronized. We can safely avoid a temp here because jumps into
- // @synchronized are illegal & this will dominate uses.
- llvm::Value *SyncArg = 0;
- if (!isTry) {
- SyncArg =
- CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
- CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+ // Acquire the lock.
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg)
+ ->setDoesNotThrow();
+
+ // Register an all-paths cleanup to release the lock.
+ {
+ CodeGenFunction::CleanupBlock ReleaseScope(CGF, NormalAndEHCleanup);
+
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
+ ->setDoesNotThrow();
}
- // Push an EH context entry, used for handling rethrows and jumps
- // through finally.
- CGF.PushCleanupBlock(FinallyBlock);
+ // Emit the body of the statement.
+ CGF.EmitStmt(S.getSynchBody());
- CGF.setInvokeDest(TryHandler);
+ // Pop the lock-release cleanup.
+ CGF.PopCleanupBlock();
+}
- CGF.EmitBlock(TryBlock);
- CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
- : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
- CGF.EmitBranchThroughCleanup(FinallyEnd);
+namespace {
+ struct CatchHandler {
+ const VarDecl *Variable;
+ const Stmt *Body;
+ llvm::BasicBlock *Block;
+ llvm::Value *TypeInfo;
+ };
- // Emit the exception handler.
+ struct CallObjCEndCatch : EHScopeStack::LazyCleanup {
+ CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
+ MightThrow(MightThrow), Fn(Fn) {}
+ bool MightThrow;
+ llvm::Value *Fn;
- CGF.EmitBlock(TryHandler);
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(Fn, 0, 0);
+ }
+ };
+}
- llvm::Value *llvm_eh_exception =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- llvm::Value *llvm_eh_typeid_for =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
-
- llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
- SelectorArgs.push_back(Exc);
- SelectorArgs.push_back(ObjCTypes.getEHPersonalityPtr());
-
- // Construct the lists of (type, catch body) to handle.
- llvm::SmallVector<std::pair<const VarDecl*, const Stmt*>, 8> Handlers;
- bool HasCatchAll = false;
- if (isTry) {
- const ObjCAtTryStmt &AtTry = cast<ObjCAtTryStmt>(S);
- for (unsigned I = 0, N = AtTry.getNumCatchStmts(); I != N; ++I) {
- const ObjCAtCatchStmt *CatchStmt = AtTry.getCatchStmt(I);
+void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+ // Jump destination for falling out of catch bodies.
+ CodeGenFunction::JumpDest Cont;
+ if (S.getNumCatchStmts())
+ Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+
+ CodeGenFunction::FinallyInfo FinallyInfo;
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
+ FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(),
+ ObjCTypes.getObjCBeginCatchFn(),
+ ObjCTypes.getObjCEndCatchFn(),
+ ObjCTypes.getExceptionRethrowFn());
+
+ llvm::SmallVector<CatchHandler, 8> Handlers;
+
+ // Enter the catch, if there is one.
+ if (S.getNumCatchStmts()) {
+ for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
- Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody()));
- // catch(...) always matches.
+ Handlers.push_back(CatchHandler());
+ CatchHandler &Handler = Handlers.back();
+ Handler.Variable = CatchDecl;
+ Handler.Body = CatchStmt->getCatchBody();
+ Handler.Block = CGF.createBasicBlock("catch");
+
+ // @catch(...) always matches.
if (!CatchDecl) {
- // Use i8* null here to signal this is a catch all, not a cleanup.
- llvm::Value *Null = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
- SelectorArgs.push_back(Null);
- HasCatchAll = true;
+ Handler.TypeInfo = 0; // catch-all
+ // Don't consider any other catches.
break;
}
+ // There's a particular fixed type info for 'id'.
if (CatchDecl->getType()->isObjCIdType() ||
CatchDecl->getType()->isObjCQualifiedIdType()) {
llvm::Value *IDEHType =
@@ -5614,7 +5760,7 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
false,
llvm::GlobalValue::ExternalLinkage,
0, "OBJC_EHTYPE_id");
- SelectorArgs.push_back(IDEHType);
+ Handler.TypeInfo = IDEHType;
} else {
// All other types should be Objective-C interface pointer types.
const ObjCObjectPointerType *PT =
@@ -5622,207 +5768,101 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
assert(PT && "Invalid @catch type.");
const ObjCInterfaceType *IT = PT->getInterfaceType();
assert(IT && "Invalid @catch type.");
- llvm::Value *EHType = GetInterfaceEHType(IT->getDecl(), false);
- SelectorArgs.push_back(EHType);
+ Handler.TypeInfo = GetInterfaceEHType(IT->getDecl(), false);
}
}
- }
- // We use a cleanup unless there was already a catch all.
- if (!HasCatchAll) {
- // Even though this is a cleanup, treat it as a catch all to avoid the C++
- // personality behavior of terminating the process if only cleanups are
- // found in the exception handling stack.
- SelectorArgs.push_back(llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy));
- Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+ EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
+ Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
}
+
+ // Emit the try body.
+ CGF.EmitStmt(S.getTryBody());
- llvm::Value *Selector =
- CGF.Builder.CreateCall(llvm_eh_selector,
- SelectorArgs.begin(), SelectorArgs.end(),
- "selector");
- for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
- const VarDecl *CatchParam = Handlers[i].first;
- const Stmt *CatchBody = Handlers[i].second;
-
- llvm::BasicBlock *Next = 0;
-
- // The last handler always matches.
- if (i + 1 != e) {
- assert(CatchParam && "Only last handler can be a catch all.");
+ // Leave the try.
+ if (S.getNumCatchStmts())
+ CGF.EHStack.popCatch();
- llvm::BasicBlock *Match = CGF.createBasicBlock("match");
- Next = CGF.createBasicBlock("catch.next");
- llvm::Value *Id =
- CGF.Builder.CreateCall(llvm_eh_typeid_for,
- CGF.Builder.CreateBitCast(SelectorArgs[i+2],
- ObjCTypes.Int8PtrTy));
- CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(Selector, Id),
- Match, Next);
+ // Remember where we were.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
- CGF.EmitBlock(Match);
- }
+ // Emit the handlers.
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
+ CatchHandler &Handler = Handlers[I];
- if (CatchBody) {
- llvm::BasicBlock *MatchEnd = CGF.createBasicBlock("match.end");
-
- // Cleanups must call objc_end_catch.
- CGF.PushCleanupBlock(MatchEnd);
-
- llvm::Value *ExcObject =
- CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), Exc);
-
- // Bind the catch parameter if it exists.
- if (CatchParam) {
- ExcObject =
- CGF.Builder.CreateBitCast(ExcObject,
- CGF.ConvertType(CatchParam->getType()));
- // CatchParam is a ParmVarDecl because of the grammar
- // construction used to handle this, but for codegen purposes
- // we treat this as a local decl.
- CGF.EmitLocalBlockVarDecl(*CatchParam);
- CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
- }
+ CGF.EmitBlock(Handler.Block);
+ llvm::Value *RawExn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
- // Exceptions inside the catch block must be rethrown. We set a special
- // purpose invoke destination for this which just collects the thrown
- // exception and overwrites the object in RethrowPtr, branches through the
- // match.end to make sure we call objc_end_catch, before branching to the
- // rethrow handler.
- llvm::BasicBlock *MatchHandler = CGF.createBasicBlock("match.handler");
- CGF.setInvokeDest(MatchHandler);
- CGF.ObjCEHValueStack.push_back(ExcObject);
- CGF.EmitStmt(CatchBody);
- CGF.ObjCEHValueStack.pop_back();
- CGF.setInvokeDest(0);
+ // Enter the catch.
+ llvm::CallInst *Exn =
+ CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), RawExn,
+ "exn.adjusted");
+ Exn->setDoesNotThrow();
- CGF.EmitBranchThroughCleanup(FinallyEnd);
+ // Add a cleanup to leave the catch.
+ bool EndCatchMightThrow = (Handler.Variable == 0);
+ CGF.EHStack.pushLazyCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
+ EndCatchMightThrow,
+ ObjCTypes.getObjCEndCatchFn());
- // Don't emit the extra match handler if there were no unprotected calls in
- // the catch block.
- if (MatchHandler->use_empty()) {
- delete MatchHandler;
- } else {
- CGF.EmitBlock(MatchHandler);
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- CGF.Builder.CreateCall3(llvm_eh_selector,
- Exc, ObjCTypes.getEHPersonalityPtr(),
- llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), 0),
- "unused_eh_selector");
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
- }
+ // Bind the catch parameter if it exists.
+ if (const VarDecl *CatchParam = Handler.Variable) {
+ const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
- CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
-
- CGF.EmitBlock(MatchEnd);
-
- // Unfortunately, we also have to generate another EH frame here
- // in case this throws.
- llvm::BasicBlock *MatchEndHandler =
- CGF.createBasicBlock("match.end.handler");
- llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
- CGF.Builder.CreateInvoke(ObjCTypes.getObjCEndCatchFn(),
- Cont, MatchEndHandler);
-
- CGF.EmitBlock(Cont);
- if (Info.SwitchBlock)
- CGF.EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- CGF.EmitBlock(Info.EndBlock);
-
- CGF.EmitBlock(MatchEndHandler);
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- CGF.Builder.CreateCall3(llvm_eh_selector,
- Exc, ObjCTypes.getEHPersonalityPtr(),
- llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), 0),
- "unused_eh_selector");
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
+ }
- if (Next)
- CGF.EmitBlock(Next);
- } else {
- assert(!Next && "catchup should be last handler.");
+ CGF.ObjCEHValueStack.push_back(Exn);
+ CGF.EmitStmt(Handler.Body);
+ CGF.ObjCEHValueStack.pop_back();
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
- }
- }
+ // Leave the earlier cleanup.
+ CGF.PopCleanupBlock();
- // Pop the cleanup entry, the @finally is outside this cleanup
- // scope.
- CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
- CGF.setInvokeDest(PrevLandingPad);
+ CGF.EmitBranchThroughCleanup(Cont);
+ }
- CGF.EmitBlock(FinallyBlock);
+ // Go back to the try-statement fallthrough.
+ CGF.Builder.restoreIP(SavedIP);
- if (isTry) {
- if (const ObjCAtFinallyStmt* FinallyStmt =
- cast<ObjCAtTryStmt>(S).getFinallyStmt())
- CGF.EmitStmt(FinallyStmt->getFinallyBody());
- } else {
- // Emit 'objc_sync_exit(expr)' as finally's sole statement for
- // @synchronized.
- CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
- }
-
- if (Info.SwitchBlock)
- CGF.EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- CGF.EmitBlock(Info.EndBlock);
-
- // Branch around the rethrow code.
- CGF.EmitBranch(FinallyEnd);
-
- // Generate the rethrow code, taking care to use an invoke if we are in a
- // nested exception scope.
- CGF.EmitBlock(FinallyRethrow);
- if (PrevLandingPad) {
- llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
- CGF.Builder.CreateInvoke(ObjCTypes.getUnwindResumeOrRethrowFn(),
- Cont, PrevLandingPad,
- CGF.Builder.CreateLoad(RethrowPtr));
- CGF.EmitBlock(Cont);
- } else {
- CGF.Builder.CreateCall(ObjCTypes.getUnwindResumeOrRethrowFn(),
- CGF.Builder.CreateLoad(RethrowPtr));
- }
- CGF.Builder.CreateUnreachable();
+ // Pop out of the normal cleanup on the finally.
+ if (S.getFinallyStmt())
+ CGF.ExitFinallyBlock(FinallyInfo);
- CGF.EmitBlock(FinallyEnd);
+ if (Cont.Block)
+ CGF.EmitBlock(Cont.Block);
}
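The CallObjCEndCatch cleanup defined earlier is worth pausing on: the same cleanup object can be asked to emit its code twice, once on the normal fallthrough path out of the @catch and once on the EH path, and it chooses a plain call or a call-or-invoke depending on whether objc_end_catch can itself throw. A minimal standalone model of that shape (hypothetical names, ordinary C++ standing in for IR emission):

    #include <iostream>

    // Toy stand-in for EHScopeStack::LazyCleanup: Emit() may run twice,
    // once for the normal exit and once for the exceptional exit.
    struct LazyCleanup {
      virtual void Emit(bool IsForEH) = 0;
      virtual ~LazyCleanup() = default;
    };

    struct CallEndCatch : LazyCleanup {
      bool MightThrow;
      explicit CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}

      void Emit(bool IsForEH) override {
        // A nothrow runtime function can be a plain call; a potentially
        // throwing one needs the invoke form so an exception from it
        // unwinds through the enclosing scopes correctly.
        if (!MightThrow)
          std::cout << "call objc_end_catch (nounwind, IsForEH="
                    << IsForEH << ")\n";
        else
          std::cout << "invoke objc_end_catch (IsForEH=" << IsForEH << ")\n";
      }
    };

    int main() {
      CallEndCatch C(/*MightThrow=*/true);
      C.Emit(/*IsForEH=*/false); // fallthrough out of the @catch body
      C.Emit(/*IsForEH=*/true);  // unwind out of the @catch body
    }

This mirrors why EndCatchMightThrow is keyed off the catch-all case above: rethrow-from-catch-all means the end-catch itself may raise.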
/// EmitThrowStmt - Generate code for a throw statement.
void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S) {
llvm::Value *Exception;
+ llvm::Constant *FunctionThrowOrRethrow;
if (const Expr *ThrowExpr = S.getThrowExpr()) {
Exception = CGF.EmitScalarExpr(ThrowExpr);
+ FunctionThrowOrRethrow = ObjCTypes.getExceptionThrowFn();
} else {
assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
"Unexpected rethrow outside @catch block.");
Exception = CGF.ObjCEHValueStack.back();
+ FunctionThrowOrRethrow = ObjCTypes.getExceptionRethrowFn();
}
llvm::Value *ExceptionAsObject =
CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
llvm::BasicBlock *InvokeDest = CGF.getInvokeDest();
if (InvokeDest) {
- llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
- CGF.Builder.CreateInvoke(ObjCTypes.getExceptionThrowFn(),
- Cont, InvokeDest,
+ CGF.Builder.CreateInvoke(FunctionThrowOrRethrow,
+ CGF.getUnreachableBlock(), InvokeDest,
&ExceptionAsObject, &ExceptionAsObject + 1);
- CGF.EmitBlock(Cont);
- } else
- CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
- CGF.Builder.CreateUnreachable();
+ } else {
+ CGF.Builder.CreateCall(FunctionThrowOrRethrow, ExceptionAsObject)
+ ->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+ }
// Clear the insertion point to indicate we are in unreachable code.
CGF.Builder.ClearInsertionPoint();
@@ -5863,7 +5903,8 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
llvm::GlobalValue::ExternalLinkage,
0, VTableName);
- llvm::Value *VTableIdx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2);
+ llvm::Value *VTableIdx =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2);
std::vector<llvm::Constant*> Values(3);
Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, &VTableIdx, 1);
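The net effect of the new EmitSynchronizedStmt is that objc_sync_exit is registered once as a normal-and-EH cleanup instead of being emitted separately at every exit point. A toy sketch of the pattern, using a destructor-driven cleanup stack in place of CodeGen's EHScopeStack (names hypothetical, not the clang API):

    #include <functional>
    #include <iostream>
    #include <vector>

    // Toy cleanup scope: cleanups run in LIFO order when the scope is
    // left, whether the exit is normal or via stack unwinding.
    struct CleanupScope {
      std::vector<std::function<void()>> cleanups;
      ~CleanupScope() {
        for (auto it = cleanups.rbegin(); it != cleanups.rend(); ++it)
          (*it)();
      }
    };

    void synchronizedBody() { std::cout << "body\n"; }

    int main() {
      std::cout << "objc_sync_enter\n";   // acquire the lock first
      CleanupScope scope;
      scope.cleanups.push_back([] {       // registered once, runs on all paths
        std::cout << "objc_sync_exit\n";
      });
      synchronizedBody();                 // may return or throw; exit still runs
    }

Returns, breaks, gotos, and exceptions out of the body all funnel through the same release, which is exactly what the old hand-built finally block was approximating.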
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
index 8de7f10..eb79f09 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
@@ -97,7 +97,7 @@ public:
/// return value should have the LLVM type for pointer-to
/// ASTContext::getObjCSelType().
virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
- Selector Sel) = 0;
+ Selector Sel, bool lval=false) = 0;
/// Get a typed selector.
virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
@@ -181,8 +181,10 @@ public:
/// compiler when a mutation is detected during foreach iteration.
virtual llvm::Constant *EnumerationMutationFunction() = 0;
- virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S) = 0;
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) = 0;
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) = 0;
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S) = 0;
virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
@@ -208,7 +210,7 @@ public:
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
- QualType Ty) = 0;
+ llvm::Value *Size) = 0;
};
/// Creates an instance of an Objective-C runtime class.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
index aec1c45..1cca977 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
@@ -271,7 +271,7 @@ static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
// Get the key function.
const CXXMethodDecl *KeyFunction = RD->getASTContext().getKeyFunction(RD);
- if (KeyFunction && !KeyFunction->getBody()) {
+ if (KeyFunction && !KeyFunction->hasBody()) {
// The class has a key function, but it is not defined in this translation
// unit, so we should use the external descriptor for it.
return true;
@@ -728,15 +728,19 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
void RTTIBuilder::BuildPointerTypeInfo(const PointerType *Ty) {
QualType PointeeTy = Ty->getPointeeType();
+ Qualifiers Quals;
+ QualType UnqualifiedPointeeTy =
+ CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
+
// Itanium C++ ABI 2.9.5p7:
// __flags is a flag word describing the cv-qualification and other
// attributes of the type pointed to
- unsigned Flags = ComputeQualifierFlags(PointeeTy.getQualifiers());
+ unsigned Flags = ComputeQualifierFlags(Quals);
// Itanium C++ ABI 2.9.5p7:
// When the abi::__pbase_type_info is for a direct or indirect pointer to an
// incomplete class type, the incomplete target type flag is set.
- if (ContainsIncompleteClassType(PointeeTy))
+ if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
Flags |= PTI_Incomplete;
const llvm::Type *UnsignedIntLTy =
@@ -747,7 +751,7 @@ void RTTIBuilder::BuildPointerTypeInfo(const PointerType *Ty) {
// __pointee is a pointer to the std::type_info derivation for the
// unqualified type being pointed to.
llvm::Constant *PointeeTypeInfo =
- RTTIBuilder(CGM).BuildTypeInfo(PointeeTy.getUnqualifiedType());
+ RTTIBuilder(CGM).BuildTypeInfo(UnqualifiedPointeeTy);
Fields.push_back(PointeeTypeInfo);
}
@@ -756,17 +760,21 @@ void RTTIBuilder::BuildPointerTypeInfo(const PointerType *Ty) {
void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
QualType PointeeTy = Ty->getPointeeType();
+ Qualifiers Quals;
+ QualType UnqualifiedPointeeTy =
+ CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
+
// Itanium C++ ABI 2.9.5p7:
// __flags is a flag word describing the cv-qualification and other
// attributes of the type pointed to.
- unsigned Flags = ComputeQualifierFlags(PointeeTy.getQualifiers());
+ unsigned Flags = ComputeQualifierFlags(Quals);
const RecordType *ClassType = cast<RecordType>(Ty->getClass());
// Itanium C++ ABI 2.9.5p7:
// When the abi::__pbase_type_info is for a direct or indirect pointer to an
// incomplete class type, the incomplete target type flag is set.
- if (ContainsIncompleteClassType(PointeeTy))
+ if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
Flags |= PTI_Incomplete;
if (IsIncompleteClassType(ClassType))
@@ -780,7 +788,7 @@ void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
// __pointee is a pointer to the std::type_info derivation for the
// unqualified type being pointed to.
llvm::Constant *PointeeTypeInfo =
- RTTIBuilder(CGM).BuildTypeInfo(PointeeTy.getUnqualifiedType());
+ RTTIBuilder(CGM).BuildTypeInfo(UnqualifiedPointeeTy);
Fields.push_back(PointeeTypeInfo);
// Itanium C++ ABI 2.9.5p9:
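Both RTTI fixes above hinge on the same point: the flag word must be computed from the qualifiers stripped off the pointee (including qualifiers buried under array types, which is what getUnqualifiedArrayType surfaces), and incompleteness must be tested on the unqualified type. For reference, the Itanium ABI's abi::__pbase_type_info flag word is a small bitmask; a sketch using the ABI's mask values (the struct and helper here are illustrative, not the clang API):

    #include <cstdio>

    // Itanium C++ ABI 2.9.5p7: abi::__pbase_type_info::__flags masks.
    enum PBaseFlags {
      PTI_Const      = 0x1,  // pointee is const-qualified
      PTI_Volatile   = 0x2,  // pointee is volatile-qualified
      PTI_Restrict   = 0x4,  // pointee is restrict-qualified
      PTI_Incomplete = 0x8,  // pointee is (or contains) an incomplete type
      PTI_ContainingClassIncomplete = 0x10 // member pointer's class incomplete
    };

    struct Quals { bool Const, Volatile, Restrict; };

    unsigned computeQualifierFlags(Quals Q, bool PointeeIncomplete) {
      unsigned Flags = 0;
      if (Q.Const)    Flags |= PTI_Const;
      if (Q.Volatile) Flags |= PTI_Volatile;
      if (Q.Restrict) Flags |= PTI_Restrict;
      if (PointeeIncomplete) Flags |= PTI_Incomplete;
      return Flags;
    }

    int main() {
      // e.g. a pointer to "const volatile Incomplete": 0x1 | 0x2 | 0x8
      std::printf("0x%x\n", computeQualifierFlags({true, true, false}, true));
    }

Before the fix, a type like "const int (*)[4]" would miss the const flag, since the qualifier sits on the array element rather than the array itself.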
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
index efde380..b72725e 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
@@ -79,11 +79,8 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
// Expression emitters don't handle unreachable blocks yet, so look for one
// explicitly here. This handles the common case of a call to a noreturn
// function.
- // We can't erase blocks with an associated cleanup size here since the
- // memory might be reused, leaving the old cleanup info pointing at a new
- // block.
if (llvm::BasicBlock *CurBB = Builder.GetInsertBlock()) {
- if (CurBB->empty() && CurBB->use_empty() && !BlockScopes.count(CurBB)) {
+ if (CurBB->empty() && CurBB->use_empty()) {
CurBB->eraseFromParent();
Builder.ClearInsertionPoint();
}
@@ -159,7 +156,7 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
}
// Keep track of the current cleanup stack depth.
- CleanupScope Scope(*this);
+ RunCleanupsScope Scope(*this);
for (CompoundStmt::const_body_iterator I = S.body_begin(),
E = S.body_end()-GetLast; I != E; ++I)
@@ -198,7 +195,7 @@ void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
// If there is a cleanup stack, then it isn't worth trying to
// simplify this block (we would need to remove it from the scope map
// and cleanup entry).
- if (!CleanupEntries.empty())
+ if (!EHStack.empty())
return;
// Can only simplify direct branches.
@@ -221,18 +218,6 @@ void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
return;
}
- // If necessary, associate the block with the cleanup stack size.
- if (!CleanupEntries.empty()) {
- // Check if the basic block has already been inserted.
- BlockScopeMap::iterator I = BlockScopes.find(BB);
- if (I != BlockScopes.end()) {
- assert(I->second == CleanupEntries.size() - 1);
- } else {
- BlockScopes[BB] = CleanupEntries.size() - 1;
- CleanupEntries.back().Blocks.push_back(BB);
- }
- }
-
// Place the block after the current block, if possible, or else at
// the end of the function.
if (CurBB && CurBB->getParent())
@@ -259,8 +244,35 @@ void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
Builder.ClearInsertionPoint();
}
+CodeGenFunction::JumpDest
+CodeGenFunction::getJumpDestForLabel(const LabelStmt *S) {
+ JumpDest &Dest = LabelMap[S];
+ if (Dest.Block) return Dest;
+
+ // Create, but don't insert, the new block.
+ Dest.Block = createBasicBlock(S->getName());
+ Dest.ScopeDepth = EHScopeStack::stable_iterator::invalid();
+ return Dest;
+}
+
void CodeGenFunction::EmitLabel(const LabelStmt &S) {
- EmitBlock(getBasicBlockForLabel(&S));
+ JumpDest &Dest = LabelMap[&S];
+
+ // If we didn't need a forward reference to this label, just go
+ // ahead and create a destination at the current scope.
+ if (!Dest.Block) {
+ Dest = getJumpDestInCurrentScope(S.getName());
+
+ // Otherwise, we need to give this label a target depth and remove
+ // it from the branch-fixups list.
+ } else {
+ assert(!Dest.ScopeDepth.isValid() && "already emitted label!");
+ Dest.ScopeDepth = EHStack.stable_begin();
+
+ EHStack.resolveBranchFixups(Dest.Block);
+ }
+
+ EmitBlock(Dest.Block);
}
@@ -276,7 +288,7 @@ void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
if (HaveInsertPoint())
EmitStopPoint(&S);
- EmitBranchThroughCleanup(getBasicBlockForLabel(S.getLabel()));
+ EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}
@@ -301,7 +313,7 @@ void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// C99 6.8.4.1: The first substatement is executed if the expression compares
// unequal to 0. The condition must be a scalar type.
- CleanupScope ConditionScope(*this);
+ RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
EmitLocalBlockVarDecl(*S.getConditionVariable());
@@ -318,7 +330,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// This avoids emitting dead code and simplifies the CFG substantially.
if (!ContainsLabel(Skipped)) {
if (Executed) {
- CleanupScope ExecutedScope(*this);
+ RunCleanupsScope ExecutedScope(*this);
EmitStmt(Executed);
}
return;
@@ -337,7 +349,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// Emit the 'then' code.
EmitBlock(ThenBlock);
{
- CleanupScope ThenScope(*this);
+ RunCleanupsScope ThenScope(*this);
EmitStmt(S.getThen());
}
EmitBranch(ContBlock);
@@ -346,7 +358,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
if (const Stmt *Else = S.getElse()) {
EmitBlock(ElseBlock);
{
- CleanupScope ElseScope(*this);
+ RunCleanupsScope ElseScope(*this);
EmitStmt(Else);
}
EmitBranch(ContBlock);
@@ -357,20 +369,17 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
}
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
- // Emit the header for the loop, insert it, which will create an uncond br to
- // it.
- llvm::BasicBlock *LoopHeader = createBasicBlock("while.cond");
- EmitBlock(LoopHeader);
-
- // Create an exit block for when the condition fails, create a block for the
- // body of the loop.
- llvm::BasicBlock *ExitBlock = createBasicBlock("while.end");
- llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
- llvm::BasicBlock *CleanupBlock = 0;
- llvm::BasicBlock *EffectiveExitBlock = ExitBlock;
+ // Emit the header for the loop, which will also become
+ // the continue target.
+ JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
+ EmitBlock(LoopHeader.Block);
+
+ // Create an exit block for when the condition fails, which will
+ // also become the break target.
+ JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
// Store the blocks to use for break and continue.
- BreakContinueStack.push_back(BreakContinue(ExitBlock, LoopHeader));
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
// C++ [stmt.while]p2:
// When the condition of a while statement is a declaration, the
@@ -379,18 +388,10 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
// [...]
// The object created in a condition is destroyed and created
// with each iteration of the loop.
- CleanupScope ConditionScope(*this);
+ RunCleanupsScope ConditionScope(*this);
- if (S.getConditionVariable()) {
+ if (S.getConditionVariable())
EmitLocalBlockVarDecl(*S.getConditionVariable());
-
- // If this condition variable requires cleanups, create a basic
- // block to handle those cleanups.
- if (ConditionScope.requiresCleanups()) {
- CleanupBlock = createBasicBlock("while.cleanup");
- EffectiveExitBlock = CleanupBlock;
- }
- }
// Evaluate the conditional in the while header. C99 6.8.5.1: The
// evaluation of the controlling expression takes place before each
@@ -405,61 +406,63 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
EmitBoolCondBranch = false;
// As long as the condition is true, go to the loop body.
- if (EmitBoolCondBranch)
- Builder.CreateCondBr(BoolCondVal, LoopBody, EffectiveExitBlock);
+ llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
+ if (EmitBoolCondBranch) {
+ llvm::BasicBlock *ExitBlock = LoopExit.Block;
+ if (ConditionScope.requiresCleanups())
+ ExitBlock = createBasicBlock("while.exit");
+
+ Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.Block) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+ }
- // Emit the loop body.
+ // Emit the loop body. We have to emit this in a cleanup scope
+ // because it might be a singleton DeclStmt.
{
- CleanupScope BodyScope(*this);
+ RunCleanupsScope BodyScope(*this);
EmitBlock(LoopBody);
EmitStmt(S.getBody());
}
BreakContinueStack.pop_back();
- if (CleanupBlock) {
- // If we have a cleanup block, jump there to perform cleanups
- // before looping.
- EmitBranch(CleanupBlock);
+ // Immediately force cleanup.
+ ConditionScope.ForceCleanup();
- // Emit the cleanup block, performing cleanups for the condition
- // and then jumping to either the loop header or the exit block.
- EmitBlock(CleanupBlock);
- ConditionScope.ForceCleanup();
- Builder.CreateCondBr(BoolCondVal, LoopHeader, ExitBlock);
- } else {
- // Cycle to the condition.
- EmitBranch(LoopHeader);
- }
+ // Branch to the loop header again.
+ EmitBranch(LoopHeader.Block);
// Emit the exit block.
- EmitBlock(ExitBlock, true);
-
+ EmitBlock(LoopExit.Block, true);
// The LoopHeader typically is just a branch if we skipped emitting
// a branch, try to erase it.
- if (!EmitBoolCondBranch && !CleanupBlock)
- SimplifyForwardingBlocks(LoopHeader);
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopHeader.Block);
}
void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
- // Emit the body for the loop, insert it, which will create an uncond br to
- // it.
- llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
- llvm::BasicBlock *AfterDo = createBasicBlock("do.end");
- EmitBlock(LoopBody);
-
- llvm::BasicBlock *DoCond = createBasicBlock("do.cond");
+ JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
+ JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
// Store the blocks to use for break and continue.
- BreakContinueStack.push_back(BreakContinue(AfterDo, DoCond));
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
- // Emit the body of the loop into the block.
- EmitStmt(S.getBody());
+ // Emit the body of the loop.
+ llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
+ EmitBlock(LoopBody);
+ {
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getBody());
+ }
BreakContinueStack.pop_back();
- EmitBlock(DoCond);
+ EmitBlock(LoopCond.Block);
// C99 6.8.5.2: "The evaluation of the controlling expression takes place
// after each execution of the loop body."
@@ -478,47 +481,49 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
// As long as the condition is true, iterate the loop.
if (EmitBoolCondBranch)
- Builder.CreateCondBr(BoolCondVal, LoopBody, AfterDo);
+ Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.Block);
// Emit the exit block.
- EmitBlock(AfterDo);
+ EmitBlock(LoopExit.Block);
// The DoCond block typically is just a branch if we skipped
// emitting a branch, try to erase it.
if (!EmitBoolCondBranch)
- SimplifyForwardingBlocks(DoCond);
+ SimplifyForwardingBlocks(LoopCond.Block);
}
void CodeGenFunction::EmitForStmt(const ForStmt &S) {
- CleanupScope ForScope(*this);
+ JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
+
+ RunCleanupsScope ForScope(*this);
// Evaluate the first part before the loop.
if (S.getInit())
EmitStmt(S.getInit());
// Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
- llvm::BasicBlock *IncBlock = 0;
- llvm::BasicBlock *CondCleanup = 0;
- llvm::BasicBlock *EffectiveExitBlock = AfterFor;
+ // If there's an increment, the continue scope will be overwritten
+ // later.
+ JumpDest Continue = getJumpDestInCurrentScope("for.cond");
+ llvm::BasicBlock *CondBlock = Continue.Block;
EmitBlock(CondBlock);
// Create a cleanup scope for the condition variable cleanups.
- CleanupScope ConditionScope(*this);
+ RunCleanupsScope ConditionScope(*this);
llvm::Value *BoolCondVal = 0;
if (S.getCond()) {
// If the for statement has a condition scope, emit the local variable
// declaration.
+ llvm::BasicBlock *ExitBlock = LoopExit.Block;
if (S.getConditionVariable()) {
EmitLocalBlockVarDecl(*S.getConditionVariable());
-
- if (ConditionScope.requiresCleanups()) {
- CondCleanup = createBasicBlock("for.cond.cleanup");
- EffectiveExitBlock = CondCleanup;
- }
}
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ if (ForScope.requiresCleanups())
+ ExitBlock = createBasicBlock("for.cond.cleanup");
// As long as the condition is true, iterate the loop.
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
@@ -526,7 +531,12 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// C99 6.8.5p2/p4: The first substatement is executed if the expression
// compares unequal to 0. The condition must be a scalar type.
BoolCondVal = EvaluateExprAsBool(S.getCond());
- Builder.CreateCondBr(BoolCondVal, ForBody, EffectiveExitBlock);
+ Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.Block) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
EmitBlock(ForBody);
} else {
@@ -535,17 +545,15 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
}
// If the for loop doesn't have an increment we can just use the
- // condition as the continue block.
- llvm::BasicBlock *ContinueBlock;
+ // condition as the continue block. Otherwise we'll need to create
+ // a block for it (in the current scope, i.e. in the scope of the
+ // condition), and that block will become our continue block.
if (S.getInc())
- ContinueBlock = IncBlock = createBasicBlock("for.inc");
- else
- ContinueBlock = CondBlock;
+ Continue = getJumpDestInCurrentScope("for.inc");
// Store the blocks to use for break and continue.
- BreakContinueStack.push_back(BreakContinue(AfterFor, ContinueBlock));
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
- // If the condition is true, execute the body of the for stmt.
CGDebugInfo *DI = getDebugInfo();
if (DI) {
DI->setLocation(S.getSourceRange().getBegin());
@@ -555,37 +563,30 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
{
// Create a separate cleanup scope for the body, in case it is not
// a compound statement.
- CleanupScope BodyScope(*this);
+ RunCleanupsScope BodyScope(*this);
EmitStmt(S.getBody());
}
// If there is an increment, emit it next.
if (S.getInc()) {
- EmitBlock(IncBlock);
+ EmitBlock(Continue.Block);
EmitStmt(S.getInc());
}
BreakContinueStack.pop_back();
-
- // Finally, branch back up to the condition for the next iteration.
- if (CondCleanup) {
- // Branch to the cleanup block.
- EmitBranch(CondCleanup);
-
- // Emit the cleanup block, which branches back to the loop body or
- // outside of the for statement once it is done.
- EmitBlock(CondCleanup);
- ConditionScope.ForceCleanup();
- Builder.CreateCondBr(BoolCondVal, CondBlock, AfterFor);
- } else
- EmitBranch(CondBlock);
+
+ ConditionScope.ForceCleanup();
+ EmitBranch(CondBlock);
+
+ ForScope.ForceCleanup();
+
if (DI) {
DI->setLocation(S.getSourceRange().getEnd());
DI->EmitRegionEnd(CurFn, Builder);
}
// Emit the fall-through block.
- EmitBlock(AfterFor, true);
+ EmitBlock(LoopExit.Block, true);
}
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
@@ -631,7 +632,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (FnRetTy->isReferenceType()) {
// If this function returns a reference, take the address of the expression
// rather than the value.
- RValue Result = EmitReferenceBindingToExpr(RV, false);
+ RValue Result = EmitReferenceBindingToExpr(RV, /*InitializedDecl=*/0);
Builder.CreateStore(Result.getScalarVal(), ReturnValue);
} else if (!hasAggregateLLVMType(RV->getType())) {
Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
@@ -666,7 +667,7 @@ void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
if (HaveInsertPoint())
EmitStopPoint(&S);
- llvm::BasicBlock *Block = BreakContinueStack.back().BreakBlock;
+ JumpDest Block = BreakContinueStack.back().BreakBlock;
EmitBranchThroughCleanup(Block);
}
@@ -679,7 +680,7 @@ void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
if (HaveInsertPoint())
EmitStopPoint(&S);
- llvm::BasicBlock *Block = BreakContinueStack.back().ContinueBlock;
+ JumpDest Block = BreakContinueStack.back().ContinueBlock;
EmitBranchThroughCleanup(Block);
}
@@ -788,7 +789,9 @@ void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
}
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
- CleanupScope ConditionScope(*this);
+ JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
+
+ RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
EmitLocalBlockVarDecl(*S.getConditionVariable());
@@ -803,7 +806,6 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// statement. We also need to create a default block now so that
// explicit case ranges tests can have a place to jump to on
// failure.
- llvm::BasicBlock *NextBlock = createBasicBlock("sw.epilog");
llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
CaseRangeBlock = DefaultBlock;
@@ -813,12 +815,11 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// All break statements jump to NextBlock. If BreakContinueStack is non-empty
// then reuse the last ContinueBlock.
- llvm::BasicBlock *ContinueBlock = 0;
+ JumpDest OuterContinue;
if (!BreakContinueStack.empty())
- ContinueBlock = BreakContinueStack.back().ContinueBlock;
+ OuterContinue = BreakContinueStack.back().ContinueBlock;
- // Ensure any vlas created between there and here, are undone
- BreakContinueStack.push_back(BreakContinue(NextBlock, ContinueBlock));
+ BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
// Emit switch body.
EmitStmt(S.getBody());
@@ -829,15 +830,22 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// been chained on top.
SwitchInsn->setSuccessor(0, CaseRangeBlock);
- // If a default was never emitted then reroute any jumps to it and
- // discard.
+ // If a default was never emitted:
if (!DefaultBlock->getParent()) {
- DefaultBlock->replaceAllUsesWith(NextBlock);
- delete DefaultBlock;
+ // If we have cleanups, emit the default block so that there's a
+ // place to jump through the cleanups from.
+ if (ConditionScope.requiresCleanups()) {
+ EmitBlock(DefaultBlock);
+
+ // Otherwise, just forward the default block to the switch end.
+ } else {
+ DefaultBlock->replaceAllUsesWith(SwitchExit.Block);
+ delete DefaultBlock;
+ }
}
// Emit continuation.
- EmitBlock(NextBlock, true);
+ EmitBlock(SwitchExit.Block, true);
SwitchInsn = SavedSwitchInsn;
CaseRangeBlock = SavedCRBlock;
@@ -1066,8 +1074,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
getContext().getTypeSize(InputTy)) {
// Use ptrtoint as appropriate so that we can do our extension.
if (isa<llvm::PointerType>(Arg->getType()))
- Arg = Builder.CreatePtrToInt(Arg,
- llvm::IntegerType::get(VMContext, LLVMPointerWidth));
+ Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
const llvm::Type *OutputTy = ConvertType(OutputType);
if (isa<llvm::IntegerType>(OutputTy))
Arg = Builder.CreateZExt(Arg, OutputTy);
@@ -1132,7 +1139,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// call.
unsigned LocID = S.getAsmString()->getLocStart().getRawEncoding();
llvm::Value *LocIDC =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LocID);
+ llvm::ConstantInt::get(Int32Ty, LocID);
Result->setMetadata("srcloc", llvm::MDNode::get(VMContext, &LocIDC, 1));
// Extract all of the register value results from the asm.
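The thread running through all of these CGStmt changes is JumpDest: break, continue, and goto targets now carry the cleanup depth they live at, so EmitBranchThroughCleanup can run exactly the cleanups sitting between the branch point and the target. A toy model of that bookkeeping, assuming a simple stack of named cleanups (everything here is a sketch, not CodeGen's real data structures):

    #include <iostream>
    #include <string>
    #include <vector>

    // Toy cleanup stack; depth 0 is the outermost scope.
    std::vector<std::string> CleanupStack;

    struct JumpDest {
      std::string Block;  // target label
      size_t ScopeDepth;  // cleanup depth the target lives at
    };

    // Branching to an outer destination runs every cleanup pushed since
    // the destination's scope, innermost first, then branches.
    void emitBranchThroughCleanup(const JumpDest &Dest) {
      for (size_t D = CleanupStack.size(); D > Dest.ScopeDepth; --D)
        std::cout << "run cleanup: " << CleanupStack[D - 1] << "\n";
      std::cout << "br " << Dest.Block << "\n";
    }

    int main() {
      JumpDest LoopExit{"while.end", CleanupStack.size()};
      CleanupStack.push_back("destroy condition variable");
      CleanupStack.push_back("destroy body temporary");
      emitBranchThroughCleanup(LoopExit); // a 'break' inside the loop body
    }

A forward goto is the one case where the depth isn't known yet, which is why getJumpDestForLabel leaves ScopeDepth invalid and EmitLabel later resolves the branch fixups.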
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
index a8f0467..fd7c616 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp
@@ -15,14 +15,38 @@
using namespace clang;
using namespace CodeGen;
-void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
- llvm::Value *Ptr) {
- assert((LiveTemporaries.empty() ||
- LiveTemporaries.back().ThisPtr != Ptr ||
- ConditionalBranchLevel) &&
- "Pushed the same temporary twice; AST is likely wrong");
- llvm::BasicBlock *DtorBlock = createBasicBlock("temp.dtor");
+static void EmitTemporaryCleanup(CodeGenFunction &CGF,
+ const CXXTemporary *Temporary,
+ llvm::Value *Addr,
+ llvm::Value *CondPtr) {
+ llvm::BasicBlock *CondEnd = 0;
+
+ // If this is a conditional temporary, we need to check the condition
+ // boolean and only call the destructor if it's true.
+ if (CondPtr) {
+ llvm::BasicBlock *CondBlock = CGF.createBasicBlock("temp.cond-dtor.call");
+ CondEnd = CGF.createBasicBlock("temp.cond-dtor.cont");
+
+ llvm::Value *Cond = CGF.Builder.CreateLoad(CondPtr);
+ CGF.Builder.CreateCondBr(Cond, CondBlock, CondEnd);
+ CGF.EmitBlock(CondBlock);
+ }
+
+ CGF.EmitCXXDestructorCall(Temporary->getDestructor(),
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ Addr);
+
+ if (CondPtr) {
+ // Reset the condition to false.
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
+ CondPtr);
+ CGF.EmitBlock(CondEnd);
+ }
+}
+/// Emits all the code to cause the given temporary to be cleaned up.
+void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
+ llvm::Value *Ptr) {
llvm::AllocaInst *CondPtr = 0;
// Check if temporaries need to be conditional. If so, we'll create a
@@ -38,82 +62,13 @@ void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext), CondPtr);
}
- LiveTemporaries.push_back(CXXLiveTemporaryInfo(Temporary, Ptr, DtorBlock,
- CondPtr));
-
- PushCleanupBlock(DtorBlock);
+ CleanupBlock Cleanup(*this, NormalCleanup);
+ EmitTemporaryCleanup(*this, Temporary, Ptr, CondPtr);
if (Exceptions) {
- const CXXLiveTemporaryInfo& Info = LiveTemporaries.back();
- llvm::BasicBlock *CondEnd = 0;
-
- EHCleanupBlock Cleanup(*this);
-
- // If this is a conditional temporary, we need to check the condition
- // boolean and only call the destructor if it's true.
- if (Info.CondPtr) {
- llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
- CondEnd = createBasicBlock("cond.dtor.end");
-
- llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
- Builder.CreateCondBr(Cond, CondBlock, CondEnd);
- EmitBlock(CondBlock);
- }
-
- EmitCXXDestructorCall(Info.Temporary->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false,
- Info.ThisPtr);
-
- if (CondEnd) {
- // Reset the condition to false.
- Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr);
- EmitBlock(CondEnd);
- }
- }
-}
-
-void CodeGenFunction::PopCXXTemporary() {
- const CXXLiveTemporaryInfo& Info = LiveTemporaries.back();
-
- CleanupBlockInfo CleanupInfo = PopCleanupBlock();
- assert(CleanupInfo.CleanupBlock == Info.DtorBlock &&
- "Cleanup block mismatch!");
- assert(!CleanupInfo.SwitchBlock &&
- "Should not have a switch block for temporary cleanup!");
- assert(!CleanupInfo.EndBlock &&
- "Should not have an end block for temporary cleanup!");
-
- llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
- if (CurBB && !CurBB->getTerminator() &&
- Info.DtorBlock->getNumUses() == 0) {
- CurBB->getInstList().splice(CurBB->end(), Info.DtorBlock->getInstList());
- delete Info.DtorBlock;
- } else
- EmitBlock(Info.DtorBlock);
-
- llvm::BasicBlock *CondEnd = 0;
-
- // If this is a conditional temporary, we need to check the condition
- // boolean and only call the destructor if it's true.
- if (Info.CondPtr) {
- llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
- CondEnd = createBasicBlock("cond.dtor.end");
-
- llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
- Builder.CreateCondBr(Cond, CondBlock, CondEnd);
- EmitBlock(CondBlock);
- }
-
- EmitCXXDestructorCall(Info.Temporary->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false, Info.ThisPtr);
-
- if (CondEnd) {
- // Reset the condition to false.
- Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr);
- EmitBlock(CondEnd);
+ Cleanup.beginEHCleanup();
+ EmitTemporaryCleanup(*this, Temporary, Ptr, CondPtr);
}
-
- LiveTemporaries.pop_back();
}
RValue
@@ -121,40 +76,23 @@ CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
llvm::Value *AggLoc,
bool IsAggLocVolatile,
bool IsInitializer) {
- // Keep track of the current cleanup stack depth.
- size_t CleanupStackDepth = CleanupEntries.size();
- (void) CleanupStackDepth;
-
RValue RV;
-
{
- CXXTemporariesCleanupScope Scope(*this);
+ RunCleanupsScope Scope(*this);
RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
/*IgnoreResult=*/false, IsInitializer);
}
- assert(CleanupEntries.size() == CleanupStackDepth &&
- "Cleanup size mismatch!");
-
return RV;
}
LValue CodeGenFunction::EmitCXXExprWithTemporariesLValue(
const CXXExprWithTemporaries *E) {
- // Keep track of the current cleanup stack depth.
- size_t CleanupStackDepth = CleanupEntries.size();
- (void) CleanupStackDepth;
-
- unsigned OldNumLiveTemporaries = LiveTemporaries.size();
-
- LValue LV = EmitLValue(E->getSubExpr());
-
- // Pop temporaries.
- while (LiveTemporaries.size() > OldNumLiveTemporaries)
- PopCXXTemporary();
-
- assert(CleanupEntries.size() == CleanupStackDepth &&
- "Cleanup size mismatch!");
+ LValue LV;
+ {
+ RunCleanupsScope Scope(*this);
+ LV = EmitLValue(E->getSubExpr());
+ }
return LV;
}
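The refactored CGTemporaries path emits one EmitTemporaryCleanup body for the normal cleanup and, when exceptions are enabled, a second copy for the EH cleanup. The conditional-temporary case guards the destructor call on a runtime flag and resets the flag after destruction. Sketching just the runtime behavior the emitted code has (hypothetical names, a plain flag in place of the CondPtr alloca):

    #include <iostream>

    struct Temporary { void destroy() { std::cout << "~Temporary()\n"; } };

    // Mirrors the emitted pattern: if the temporary is conditional, test
    // a flag before destroying and reset it afterwards so the cleanup is
    // safe to reach on paths where the temporary was never built.
    void temporaryCleanup(Temporary &T, bool *CondFlag) {
      if (CondFlag && !*CondFlag)
        return;               // temporary never constructed on this path
      T.destroy();
      if (CondFlag)
        *CondFlag = false;    // reset the condition to false
    }

    int main() {
      Temporary T;
      bool Constructed = false;
      // Condition was false, so the branch that builds T never ran:
      temporaryCleanup(T, &Constructed);  // prints nothing
      Constructed = true;                 // as if 'b && T()' took the branch
      temporaryCleanup(T, &Constructed);  // prints ~Temporary()
    }

This is the situation that arises for temporaries on one arm of ?: or the right side of && and ||, where only some executions actually construct the object.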
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
index 0f023e6..6abac26 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
@@ -87,112 +87,61 @@ private:
/// MostDerivedClassLayout - the AST record layout of the most derived class.
const ASTRecordLayout &MostDerivedClassLayout;
- /// BaseSubobjectMethodPairTy - Uniquely identifies a member function
+ /// MethodBaseOffsetPairTy - Uniquely identifies a member function
/// in a base subobject.
- typedef std::pair<BaseSubobject, const CXXMethodDecl *>
- BaseSubobjectMethodPairTy;
-
- typedef llvm::DenseMap<BaseSubobjectMethodPairTy,
+ typedef std::pair<const CXXMethodDecl *, uint64_t> MethodBaseOffsetPairTy;
+
+ typedef llvm::DenseMap<MethodBaseOffsetPairTy,
OverriderInfo> OverridersMapTy;
/// OverridersMap - The final overriders for all virtual member functions of
/// all the base subobjects of the most derived class.
OverridersMapTy OverridersMap;
- /// VisitedVirtualBases - A set of all the visited virtual bases, used to
- /// avoid visiting virtual bases more than once.
- llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+ /// SubobjectOffsetMapTy - A mapping from a base subobject (represented
+ /// as a record decl and a subobject number) to its offsets in the most
+ /// derived class as well as the layout class.
+ typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, unsigned>,
+ uint64_t> SubobjectOffsetMapTy;
- typedef llvm::DenseMap<BaseSubobjectMethodPairTy, BaseOffset>
- AdjustmentOffsetsMapTy;
-
- /// ReturnAdjustments - Holds return adjustments for all the overriders that
- /// need to perform return value adjustments.
- AdjustmentOffsetsMapTy ReturnAdjustments;
-
- // FIXME: We might be able to get away with making this a SmallSet.
- typedef llvm::SmallSetVector<uint64_t, 2> OffsetSetVectorTy;
-
- /// SubobjectOffsetsMapTy - This map is used for keeping track of all the
- /// base subobject offsets that a single class declaration might refer to.
- ///
- /// For example, in:
- ///
- /// struct A { virtual void f(); };
- /// struct B1 : A { };
- /// struct B2 : A { };
- /// struct C : B1, B2 { virtual void f(); };
- ///
- /// when we determine that C::f() overrides A::f(), we need to update the
- /// overriders map for both A-in-B1 and A-in-B2 and the subobject offsets map
- /// will have the subobject offsets for both A copies.
- typedef llvm::DenseMap<const CXXRecordDecl *, OffsetSetVectorTy>
- SubobjectOffsetsMapTy;
-
- /// ComputeFinalOverriders - Compute the final overriders for a given base
- /// subobject (and all its direct and indirect bases).
- void ComputeFinalOverriders(BaseSubobject Base,
- bool BaseSubobjectIsVisitedVBase,
- uint64_t OffsetInLayoutClass,
- SubobjectOffsetsMapTy &Offsets);
+ typedef llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCountMapTy;
- /// AddOverriders - Add the final overriders for this base subobject to the
- /// map of final overriders.
- void AddOverriders(BaseSubobject Base, uint64_t OffsetInLayoutClass,
- SubobjectOffsetsMapTy &Offsets);
+ /// ComputeBaseOffsets - Compute the offsets for all base subobjects of the
+ /// given base.
+ void ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+ uint64_t OffsetInLayoutClass,
+ SubobjectOffsetMapTy &SubobjectOffsets,
+ SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+ SubobjectCountMapTy &SubobjectCounts);
- /// PropagateOverrider - Propagate the NewMD overrider to all the functions
- /// that OldMD overrides. For example, if we have:
- ///
- /// struct A { virtual void f(); };
- /// struct B : A { virtual void f(); };
- /// struct C : B { virtual void f(); };
- ///
- /// and we want to override B::f with C::f, we also need to override A::f with
- /// C::f.
- void PropagateOverrider(const CXXMethodDecl *OldMD,
- BaseSubobject NewBase,
- uint64_t OverriderOffsetInLayoutClass,
- const CXXMethodDecl *NewMD,
- SubobjectOffsetsMapTy &Offsets);
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ /// dump - dump the final overriders for a base subobject, and all its direct
+ /// and indirect base subobjects.
+ void dump(llvm::raw_ostream &Out, BaseSubobject Base,
+ VisitedVirtualBasesSetTy& VisitedVirtualBases);
- static void MergeSubobjectOffsets(const SubobjectOffsetsMapTy &NewOffsets,
- SubobjectOffsetsMapTy &Offsets);
-
public:
FinalOverriders(const CXXRecordDecl *MostDerivedClass,
uint64_t MostDerivedClassOffset,
const CXXRecordDecl *LayoutClass);
/// getOverrider - Get the final overrider for the given method declaration in
- /// the given base subobject.
- OverriderInfo getOverrider(BaseSubobject Base,
- const CXXMethodDecl *MD) const {
- assert(OverridersMap.count(std::make_pair(Base, MD)) &&
+ /// the subobject with the given base offset.
+ OverriderInfo getOverrider(const CXXMethodDecl *MD,
+ uint64_t BaseOffset) const {
+ assert(OverridersMap.count(std::make_pair(MD, BaseOffset)) &&
"Did not find overrider!");
- return OverridersMap.lookup(std::make_pair(Base, MD));
+ return OverridersMap.lookup(std::make_pair(MD, BaseOffset));
}
- /// getReturnAdjustmentOffset - Get the return adjustment offset for the
- /// method decl in the given base subobject. Returns an empty base offset if
- /// no adjustment is needed.
- BaseOffset getReturnAdjustmentOffset(BaseSubobject Base,
- const CXXMethodDecl *MD) const {
- return ReturnAdjustments.lookup(std::make_pair(Base, MD));
- }
-
/// dump - dump the final overriders.
void dump() {
- assert(VisitedVirtualBases.empty() &&
- "Visited virtual bases aren't empty!");
- dump(llvm::errs(), BaseSubobject(MostDerivedClass, 0));
- VisitedVirtualBases.clear();
+ VisitedVirtualBasesSetTy VisitedVirtualBases;
+ dump(llvm::errs(), BaseSubobject(MostDerivedClass, 0), VisitedVirtualBases);
}
- /// dump - dump the final overriders for a base subobject, and all its direct
- /// and indirect base subobjects.
- void dump(llvm::raw_ostream &Out, BaseSubobject Base);
};
#define DUMP_OVERRIDERS 0
@@ -204,54 +153,57 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
MostDerivedClassOffset(MostDerivedClassOffset), LayoutClass(LayoutClass),
Context(MostDerivedClass->getASTContext()),
MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)) {
-
- // Compute the final overriders.
- SubobjectOffsetsMapTy Offsets;
- ComputeFinalOverriders(BaseSubobject(MostDerivedClass, 0),
- /*BaseSubobjectIsVisitedVBase=*/false,
- MostDerivedClassOffset, Offsets);
- VisitedVirtualBases.clear();
-#if DUMP_OVERRIDERS
- // And dump them (for now).
- dump();
-
- // Also dump the base offsets (for now).
- for (SubobjectOffsetsMapTy::const_iterator I = Offsets.begin(),
- E = Offsets.end(); I != E; ++I) {
- const OffsetSetVectorTy& OffsetSetVector = I->second;
+ // Compute base offsets.
+ SubobjectOffsetMapTy SubobjectOffsets;
+ SubobjectOffsetMapTy SubobjectLayoutClassOffsets;
+ SubobjectCountMapTy SubobjectCounts;
+ ComputeBaseOffsets(BaseSubobject(MostDerivedClass, 0), /*IsVirtual=*/false,
+ MostDerivedClassOffset, SubobjectOffsets,
+ SubobjectLayoutClassOffsets, SubobjectCounts);
- llvm::errs() << "Base offsets for ";
- llvm::errs() << I->first->getQualifiedNameAsString() << '\n';
+ // Get the final overriders.
+ CXXFinalOverriderMap FinalOverriders;
+ MostDerivedClass->getFinalOverriders(FinalOverriders);
- for (unsigned I = 0, E = OffsetSetVector.size(); I != E; ++I)
- llvm::errs() << " " << I << " - " << OffsetSetVector[I] / 8 << '\n';
+ for (CXXFinalOverriderMap::const_iterator I = FinalOverriders.begin(),
+ E = FinalOverriders.end(); I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ const OverridingMethods& Methods = I->second;
+
+ for (OverridingMethods::const_iterator I = Methods.begin(),
+ E = Methods.end(); I != E; ++I) {
+ unsigned SubobjectNumber = I->first;
+ assert(SubobjectOffsets.count(std::make_pair(MD->getParent(),
+ SubobjectNumber)) &&
+ "Did not find subobject offset!");
+
+ uint64_t BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(),
+ SubobjectNumber)];
+
+ assert(I->second.size() == 1 && "Final overrider is not unique!");
+ const UniqueVirtualMethod &Method = I->second.front();
+
+ const CXXRecordDecl *OverriderRD = Method.Method->getParent();
+ assert(SubobjectLayoutClassOffsets.count(
+ std::make_pair(OverriderRD, Method.Subobject))
+ && "Did not find subobject offset!");
+ uint64_t OverriderOffset =
+ SubobjectLayoutClassOffsets[std::make_pair(OverriderRD,
+ Method.Subobject)];
+
+ OverriderInfo& Overrider = OverridersMap[std::make_pair(MD, BaseOffset)];
+ assert(!Overrider.Method && "Overrider should not exist yet!");
+
+ Overrider.Offset = OverriderOffset;
+ Overrider.Method = Method.Method;
+ }
}
-#endif
-}
-
-void FinalOverriders::AddOverriders(BaseSubobject Base,
- uint64_t OffsetInLayoutClass,
- SubobjectOffsetsMapTy &Offsets) {
- const CXXRecordDecl *RD = Base.getBase();
-
- for (CXXRecordDecl::method_iterator I = RD->method_begin(),
- E = RD->method_end(); I != E; ++I) {
- const CXXMethodDecl *MD = *I;
-
- if (!MD->isVirtual())
- continue;
- // First, propagate the overrider.
- PropagateOverrider(MD, Base, OffsetInLayoutClass, MD, Offsets);
-
- // Add the overrider as the final overrider of itself.
- OverriderInfo& Overrider = OverridersMap[std::make_pair(Base, MD)];
- assert(!Overrider.Method && "Overrider should not exist yet!");
-
- Overrider.Offset = OffsetInLayoutClass;
- Overrider.Method = MD;
- }
+#if DUMP_OVERRIDERS
+ // And dump them (for now).
+ dump();
+#endif
}
static BaseOffset ComputeBaseOffset(ASTContext &Context,
@@ -365,153 +317,64 @@ ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
return ComputeBaseOffset(Context, BaseRD, DerivedRD);
}
-void FinalOverriders::PropagateOverrider(const CXXMethodDecl *OldMD,
- BaseSubobject NewBase,
- uint64_t OverriderOffsetInLayoutClass,
- const CXXMethodDecl *NewMD,
- SubobjectOffsetsMapTy &Offsets) {
- for (CXXMethodDecl::method_iterator I = OldMD->begin_overridden_methods(),
- E = OldMD->end_overridden_methods(); I != E; ++I) {
- const CXXMethodDecl *OverriddenMD = *I;
- const CXXRecordDecl *OverriddenRD = OverriddenMD->getParent();
-
- // We want to override OverriddenMD in all subobjects, for example:
- //
- /// struct A { virtual void f(); };
- /// struct B1 : A { };
- /// struct B2 : A { };
- /// struct C : B1, B2 { virtual void f(); };
- ///
- /// When overriding A::f with C::f we need to do so in both A subobjects.
- const OffsetSetVectorTy &OffsetVector = Offsets[OverriddenRD];
-
- // Go through all the subobjects.
- for (unsigned I = 0, E = OffsetVector.size(); I != E; ++I) {
- uint64_t Offset = OffsetVector[I];
-
- BaseSubobject OverriddenSubobject = BaseSubobject(OverriddenRD, Offset);
- BaseSubobjectMethodPairTy SubobjectAndMethod =
- std::make_pair(OverriddenSubobject, OverriddenMD);
-
- OverriderInfo &Overrider = OverridersMap[SubobjectAndMethod];
-
- assert(Overrider.Method && "Did not find existing overrider!");
-
- // Check if we need return adjustments or base adjustments.
- // (We don't want to do this for pure virtual member functions).
- if (!NewMD->isPure()) {
- // Get the return adjustment base offset.
- BaseOffset ReturnBaseOffset =
- ComputeReturnAdjustmentBaseOffset(Context, NewMD, OverriddenMD);
-
- if (!ReturnBaseOffset.isEmpty()) {
- // Store the return adjustment base offset.
- ReturnAdjustments[SubobjectAndMethod] = ReturnBaseOffset;
- }
- }
-
- // Set the new overrider.
- Overrider.Offset = OverriderOffsetInLayoutClass;
- Overrider.Method = NewMD;
-
- // And propagate it further.
- PropagateOverrider(OverriddenMD, NewBase, OverriderOffsetInLayoutClass,
- NewMD, Offsets);
- }
- }
-}
-
void
-FinalOverriders::MergeSubobjectOffsets(const SubobjectOffsetsMapTy &NewOffsets,
- SubobjectOffsetsMapTy &Offsets) {
- // Iterate over the new offsets.
- for (SubobjectOffsetsMapTy::const_iterator I = NewOffsets.begin(),
- E = NewOffsets.end(); I != E; ++I) {
- const CXXRecordDecl *NewRD = I->first;
- const OffsetSetVectorTy& NewOffsetVector = I->second;
-
- OffsetSetVectorTy &OffsetVector = Offsets[NewRD];
-
- // Merge the new offsets set vector into the old.
- OffsetVector.insert(NewOffsetVector.begin(), NewOffsetVector.end());
- }
-}
-
-void FinalOverriders::ComputeFinalOverriders(BaseSubobject Base,
- bool BaseSubobjectIsVisitedVBase,
- uint64_t OffsetInLayoutClass,
- SubobjectOffsetsMapTy &Offsets) {
+FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+ uint64_t OffsetInLayoutClass,
+ SubobjectOffsetMapTy &SubobjectOffsets,
+ SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+ SubobjectCountMapTy &SubobjectCounts) {
const CXXRecordDecl *RD = Base.getBase();
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- SubobjectOffsetsMapTy NewOffsets;
+ unsigned SubobjectNumber = 0;
+ if (!IsVirtual)
+ SubobjectNumber = ++SubobjectCounts[RD];
+
+ // Set up the subobject to offset mapping.
+ assert(!SubobjectOffsets.count(std::make_pair(RD, SubobjectNumber))
+ && "Subobject offset already exists!");
+ assert(!SubobjectLayoutClassOffsets.count(std::make_pair(RD, SubobjectNumber))
+ && "Subobject offset already exists!");
+
+ SubobjectOffsets[std::make_pair(RD, SubobjectNumber)] =
+ Base.getBaseOffset();
+ SubobjectLayoutClassOffsets[std::make_pair(RD, SubobjectNumber)] =
+ OffsetInLayoutClass;
+ // Traverse our bases.
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- // Ignore bases that don't have any virtual member functions.
- if (!BaseDecl->isPolymorphic())
- continue;
-
- bool IsVisitedVirtualBase = BaseSubobjectIsVisitedVBase;
+
uint64_t BaseOffset;
uint64_t BaseOffsetInLayoutClass;
if (I->isVirtual()) {
- if (!VisitedVirtualBases.insert(BaseDecl))
- IsVisitedVirtualBase = true;
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
-
+ // Check if we've visited this virtual base before.
+ if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0)))
+ continue;
+
const ASTRecordLayout &LayoutClassLayout =
Context.getASTRecordLayout(LayoutClass);
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
BaseOffsetInLayoutClass =
LayoutClassLayout.getVBaseClassOffset(BaseDecl);
} else {
- BaseOffset = Layout.getBaseClassOffset(BaseDecl) + Base.getBaseOffset();
- BaseOffsetInLayoutClass = Layout.getBaseClassOffset(BaseDecl) +
- OffsetInLayoutClass;
- }
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ uint64_t Offset = Layout.getBaseClassOffset(BaseDecl);
- // Compute the final overriders for this base.
- // We always want to compute the final overriders, even if the base is a
- // visited virtual base. Consider:
- //
- // struct A {
- // virtual void f();
- // virtual void g();
- // };
- //
- // struct B : virtual A {
- // void f();
- // };
- //
- // struct C : virtual A {
- // void g ();
- // };
- //
- // struct D : B, C { };
- //
- // Here, we still want to compute the overriders for A as a base of C,
- // because otherwise we'll miss that C::g overrides A::f.
- ComputeFinalOverriders(BaseSubobject(BaseDecl, BaseOffset),
- IsVisitedVirtualBase, BaseOffsetInLayoutClass,
- NewOffsets);
- }
-
- /// Now add the overriders for this particular subobject.
- /// (We don't want to do this more than once for a virtual base).
- if (!BaseSubobjectIsVisitedVBase)
- AddOverriders(Base, OffsetInLayoutClass, NewOffsets);
-
- // And merge the newly discovered subobject offsets.
- MergeSubobjectOffsets(NewOffsets, Offsets);
-
- /// Finally, add the offset for our own subobject.
- Offsets[RD].insert(Base.getBaseOffset());
+ BaseOffset = Base.getBaseOffset() + Offset;
+ BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset;
+ }
+
+ ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset), I->isVirtual(),
+ BaseOffsetInLayoutClass, SubobjectOffsets,
+ SubobjectLayoutClassOffsets, SubobjectCounts);
+ }
}
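
The numbering above is easiest to see on a small hierarchy (a hypothetical
example, not part of this patch): non-virtual subobjects of a class are
numbered 1, 2, ... per class, while a virtual base is always subobject 0,
which is what the (BaseDecl, 0) lookup uses to skip an already-visited
virtual base.

    struct A { virtual void f(); };
    struct B : A { };          // within D, this A is keyed (A, 1)
    struct C : A { };          // within D, this A is keyed (A, 2)
    struct D : B, C { };       // D itself is (D, 1)
    struct E : virtual A { };  // within E, the virtual A is keyed (A, 0)
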
-void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) {
+void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base,
+ VisitedVirtualBasesSetTy &VisitedVirtualBases) {
const CXXRecordDecl *RD = Base.getBase();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
@@ -537,7 +400,7 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) {
Base.getBaseOffset();
}
- dump(Out, BaseSubobject(BaseDecl, BaseOffset));
+ dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases);
}
Out << "Final overriders for (" << RD->getQualifiedNameAsString() << ", ";
@@ -551,17 +414,17 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) {
if (!MD->isVirtual())
continue;
- OverriderInfo Overrider = getOverrider(Base, MD);
+ OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset());
Out << " " << MD->getQualifiedNameAsString() << " - (";
Out << Overrider.Method->getQualifiedNameAsString();
Out << ", " << ", " << Overrider.Offset / 8 << ')';
- AdjustmentOffsetsMapTy::const_iterator AI =
- ReturnAdjustments.find(std::make_pair(Base, MD));
- if (AI != ReturnAdjustments.end()) {
- const BaseOffset &Offset = AI->second;
+ BaseOffset Offset;
+ if (!Overrider.Method->isPure())
+ Offset = ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
+ if (!Offset.isEmpty()) {
Out << " [ret-adj: ";
if (Offset.VirtualBase)
Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, ";
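
Return adjustments like the "vbase" case printed here arise from covariant
return types. A hypothetical trigger (not part of this patch):

    struct Base { virtual Base *clone(); };
    struct Derived : virtual Base {
      // Covariant override: callers through Base's vtable expect Base*,
      // so the Derived* result must be adjusted through the virtual base.
      Derived *clone();
    };
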
@@ -1013,7 +876,7 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
if (Overriders) {
// Get the final overrider.
FinalOverriders::OverriderInfo Overrider =
- Overriders->getOverrider(Base, MD);
+ Overriders->getOverrider(MD, Base.getBaseOffset());
/// The vcall offset is the offset from the virtual base to the object
/// where the function was overridden.
@@ -1390,8 +1253,7 @@ void VTableBuilder::ComputeThisAdjustments() {
// Get the final overrider for this method.
FinalOverriders::OverriderInfo Overrider =
- Overriders.getOverrider(BaseSubobject(MD->getParent(),
- MethodInfo.BaseOffset), MD);
+ Overriders.getOverrider(MD, MethodInfo.BaseOffset);
// Check if we need an adjustment at all.
if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) {
@@ -1763,7 +1625,7 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
// Get the final overrider.
FinalOverriders::OverriderInfo Overrider =
- Overriders.getOverrider(Base, MD);
+ Overriders.getOverrider(MD, Base.getBaseOffset());
// Check if this virtual member function overrides a method in a primary
// base. If this is the case, and the return type doesn't require adjustment
@@ -1828,8 +1690,12 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
}
// Check if this overrider needs a return adjustment.
- BaseOffset ReturnAdjustmentOffset =
- Overriders.getReturnAdjustmentOffset(Base, MD);
+ // We don't want to do this for pure virtual member functions.
+ BaseOffset ReturnAdjustmentOffset;
+ if (!OverriderMD->isPure()) {
+ ReturnAdjustmentOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD);
+ }
ReturnAdjustment ReturnAdjustment =
ComputeReturnAdjustment(ReturnAdjustmentOffset);
@@ -2775,7 +2641,7 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD)
const CXXRecordDecl *RD = MD->getParent();
// Compute VTable related info for this class.
- ComputeVTableRelatedInformation(RD);
+ ComputeVTableRelatedInformation(RD, false);
ThunksMapTy::const_iterator I = Thunks.find(MD);
if (I == Thunks.end()) {
@@ -2788,24 +2654,30 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD)
EmitThunk(GD, ThunkInfoVector[I]);
}
-void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD) {
- uint64_t *&LayoutData = VTableLayoutMap[RD];
+void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
+ bool RequireVTable) {
+ VTableLayoutData &Entry = VTableLayoutMap[RD];
+
+ // We may need to generate a definition for this vtable.
+ if (RequireVTable && !Entry.getInt()) {
+ if (!isKeyFunctionInAnotherTU(CGM.getContext(), RD) &&
+ RD->getTemplateSpecializationKind()
+ != TSK_ExplicitInstantiationDeclaration)
+ CGM.DeferredVTables.push_back(RD);
+
+ Entry.setInt(true);
+ }
// Check if we've computed this information before.
- if (LayoutData)
+ if (Entry.getPointer())
return;
- // We may need to generate a definition for this vtable.
- if (!isKeyFunctionInAnotherTU(CGM.getContext(), RD) &&
- RD->getTemplateSpecializationKind()
- != TSK_ExplicitInstantiationDeclaration)
- CGM.DeferredVTables.push_back(RD);
-
VTableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
// Add the VTable layout.
uint64_t NumVTableComponents = Builder.getNumVTableComponents();
- LayoutData = new uint64_t[NumVTableComponents + 1];
+ uint64_t *LayoutData = new uint64_t[NumVTableComponents + 1];
+ Entry.setPointer(LayoutData);
// Store the number of components.
LayoutData[0] = NumVTableComponents;
@@ -3020,7 +2892,7 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
CGM.getMangleContext().mangleCXXVTable(RD, OutName);
llvm::StringRef Name = OutName.str();
- ComputeVTableRelatedInformation(RD);
+ ComputeVTableRelatedInformation(RD, true);
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
@@ -3054,6 +2926,9 @@ CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
// Set the correct linkage.
VTable->setLinkage(Linkage);
+
+ // Set the right visibility.
+ CGM.setGlobalVisibility(VTable, RD);
}
llvm::GlobalVariable *
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
index e55377f..abcafd6 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
@@ -207,8 +207,12 @@ class CodeGenVTables {
/// Thunks - Contains all thunks that a given method decl will need.
ThunksMapTy Thunks;
-
- typedef llvm::DenseMap<const CXXRecordDecl *, uint64_t *> VTableLayoutMapTy;
+
+ // The layout entry and a bool indicating whether we've actually emitted
+ // the vtable.
+ typedef llvm::PointerIntPair<uint64_t *, 1, bool> VTableLayoutData;
+ typedef llvm::DenseMap<const CXXRecordDecl *, VTableLayoutData>
+ VTableLayoutMapTy;
/// VTableLayoutMap - Stores the vtable layout for all record decls.
/// The layout is stored as an array of 64-bit integers, where the first
@@ -237,13 +241,13 @@ class CodeGenVTables {
uint64_t getNumVTableComponents(const CXXRecordDecl *RD) const {
assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!");
- return VTableLayoutMap.lookup(RD)[0];
+ return VTableLayoutMap.lookup(RD).getPointer()[0];
}
const uint64_t *getVTableComponentsData(const CXXRecordDecl *RD) const {
assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!");
- uint64_t *Components = VTableLayoutMap.lookup(RD);
+ uint64_t *Components = VTableLayoutMap.lookup(RD).getPointer();
return &Components[1];
}
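
The VTableLayoutData change above leans on llvm::PointerIntPair, which packs
the bool into the low (alignment) bits of the pointer so the map entry stays
a single word. A minimal sketch of the idiom, assuming the LLVM ADT headers:

    #include "llvm/ADT/PointerIntPair.h"
    #include <cstdint>

    typedef llvm::PointerIntPair<uint64_t *, 1, bool> LayoutAndFlag;

    void demo(uint64_t *Layout) {
      LayoutAndFlag Entry;       // starts out as (null, false)
      Entry.setInt(true);        // mark "vtable required" before a layout exists
      Entry.setPointer(Layout);  // attach the layout once it is computed
      bool Required = Entry.getInt();
      uint64_t *Data = Entry.getPointer();
      (void)Required; (void)Data;
    }
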
@@ -275,7 +279,8 @@ class CodeGenVTables {
/// ComputeVTableRelatedInformation - Compute and store all vtable related
/// information (vtable layout, vbase offset offsets, thunks etc) for the
/// given record decl.
- void ComputeVTableRelatedInformation(const CXXRecordDecl *RD);
+ void ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
+ bool VTableRequired);
/// CreateVTableInitializer - Create a vtable initializer for the given record
/// decl.
@@ -296,7 +301,7 @@ public:
const CXXRecordDecl *RD) {
assert (RD->isDynamicClass() && "Non dynamic classes have no key.");
const CXXMethodDecl *KeyFunction = Context.getKeyFunction(RD);
- return KeyFunction && !KeyFunction->getBody();
+ return KeyFunction && !KeyFunction->hasBody();
}
/// needsVTTParameter - Return whether the given global decl needs a VTT
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt b/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt
index a226400..b5a2329 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt
@@ -1,6 +1,7 @@
set(LLVM_NO_RTTI 1)
add_clang_library(clangCodeGen
+ BackendUtil.cpp
CGBlocks.cpp
CGBuiltin.cpp
CGCall.cpp
@@ -25,13 +26,16 @@ add_clang_library(clangCodeGen
CGTemporaries.cpp
CGVTables.cpp
CGVTT.cpp
+ CodeGenAction.cpp
CodeGenFunction.cpp
CodeGenModule.cpp
CodeGenTypes.cpp
ItaniumCXXABI.cpp
Mangle.cpp
+ MicrosoftCXXABI.cpp
ModuleBuilder.cpp
TargetInfo.cpp
)
-add_dependencies(clangCodeGen ClangStmtNodes)
+add_dependencies(clangCodeGen ClangAttrClasses ClangAttrList ClangDeclNodes
+ ClangStmtNodes)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
new file mode 100644
index 0000000..51c55a1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
@@ -0,0 +1,348 @@
+//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/IRReader.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/Timer.h"
+using namespace clang;
+using namespace llvm;
+
+namespace {
+ class BackendConsumer : public ASTConsumer {
+ Diagnostic &Diags;
+ BackendAction Action;
+ const CodeGenOptions &CodeGenOpts;
+ const TargetOptions &TargetOpts;
+ llvm::raw_ostream *AsmOutStream;
+ ASTContext *Context;
+
+ Timer LLVMIRGeneration;
+
+ llvm::OwningPtr<CodeGenerator> Gen;
+
+ llvm::OwningPtr<llvm::Module> TheModule;
+
+ public:
+ BackendConsumer(BackendAction action, Diagnostic &_Diags,
+ const CodeGenOptions &compopts,
+ const TargetOptions &targetopts, bool TimePasses,
+ const std::string &infile, llvm::raw_ostream *OS,
+ LLVMContext &C) :
+ Diags(_Diags),
+ Action(action),
+ CodeGenOpts(compopts),
+ TargetOpts(targetopts),
+ AsmOutStream(OS),
+ LLVMIRGeneration("LLVM IR Generation Time"),
+ Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)) {
+ llvm::TimePassesIsEnabled = TimePasses;
+ }
+
+ llvm::Module *takeModule() { return TheModule.take(); }
+
+ virtual void Initialize(ASTContext &Ctx) {
+ Context = &Ctx;
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->Initialize(Ctx);
+
+ TheModule.reset(Gen->GetModule());
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTopLevelDecl(D);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &C) {
+ {
+ PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTranslationUnit(C);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ // Silently ignore if we weren't initialized for some reason.
+ if (!TheModule)
+ return;
+
+ // Make sure IR generation is happy with the module. This is released by
+ // the module provider.
+ Module *M = Gen->ReleaseModule();
+ if (!M) {
+ // The module has been released by IR gen on failures; do not double
+ // free.
+ TheModule.take();
+ return;
+ }
+
+ assert(TheModule.get() == M &&
+ "Unexpected module change during IR generation");
+
+ // Install an inline asm handler so that diagnostics get printed through
+ // our diagnostics hooks.
+ LLVMContext &Ctx = TheModule->getContext();
+ void *OldHandler = Ctx.getInlineAsmDiagnosticHandler();
+ void *OldContext = Ctx.getInlineAsmDiagnosticContext();
+ Ctx.setInlineAsmDiagnosticHandler((void*)(intptr_t)InlineAsmDiagHandler,
+ this);
+
+ EmitBackendOutput(Diags, CodeGenOpts, TargetOpts,
+ TheModule.get(), Action, AsmOutStream);
+
+ Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
+ }
+
+ virtual void HandleTagDeclDefinition(TagDecl *D) {
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+ Gen->HandleTagDeclDefinition(D);
+ }
+
+ virtual void CompleteTentativeDefinition(VarDecl *D) {
+ Gen->CompleteTentativeDefinition(D);
+ }
+
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
+ Gen->HandleVTable(RD, DefinitionRequired);
+ }
+
+ static void InlineAsmDiagHandler(const llvm::SMDiagnostic &SM,void *Context,
+ unsigned LocCookie) {
+ SourceLocation Loc = SourceLocation::getFromRawEncoding(LocCookie);
+ ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc);
+ }
+
+ void InlineAsmDiagHandler2(const llvm::SMDiagnostic &,
+ SourceLocation LocCookie);
+ };
+}
+
+/// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr
+/// buffer into a valid FullSourceLoc.
+static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D,
+ SourceManager &CSM) {
+ // Get both the clang and llvm source managers. The location is relative to
+ // a memory buffer that the LLVM Source Manager is handling; we need to add
+ // a copy to the Clang source manager.
+ const llvm::SourceMgr &LSM = *D.getSourceMgr();
+
+ // We need to copy the underlying LLVM memory buffer because llvm::SourceMgr
+ // already owns its buffer and clang::SourceManager wants to own one too.
+ const MemoryBuffer *LBuf =
+ LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
+
+ // Create the copy and transfer ownership to clang::SourceManager.
+ llvm::MemoryBuffer *CBuf =
+ llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(),
+ LBuf->getBufferIdentifier());
+ FileID FID = CSM.createFileIDForMemBuffer(CBuf);
+
+ // Translate the offset into the file.
+ unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart();
+ SourceLocation NewLoc =
+ CSM.getLocForStartOfFile(FID).getFileLocWithOffset(Offset);
+ return FullSourceLoc(NewLoc, CSM);
+}
+
+
+/// InlineAsmDiagHandler2 - This function is invoked when the backend hits an
+/// error parsing inline asm. The SMDiagnostic indicates the error relative to
+/// the temporary memory buffer that the inline asm parser has set up.
+void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
+ SourceLocation LocCookie) {
+ // There are a couple of different kinds of errors we could get here. First,
+ // we re-format the SMDiagnostic in terms of a clang diagnostic.
+
+ // Strip "error: " off the start of the message string.
+ llvm::StringRef Message = D.getMessage();
+ if (Message.startswith("error: "))
+ Message = Message.substr(7);
+
+ // If the SMDiagnostic has an inline asm source location, translate it.
+ FullSourceLoc Loc;
+ if (D.getLoc() != SMLoc())
+ Loc = ConvertBackendLocation(D, Context->getSourceManager());
+
+
+ // If this problem has clang-level source location information, report the
+ // issue as being an error in the source with a note showing the instantiated
+ // code.
+ if (LocCookie.isValid()) {
+ Diags.Report(FullSourceLoc(LocCookie, Context->getSourceManager()),
+ diag::err_fe_inline_asm).AddString(Message);
+
+ if (D.getLoc().isValid())
+ Diags.Report(Loc, diag::note_fe_inline_asm_here);
+ return;
+ }
+
+ // Otherwise, report the backend error as occurring in the generated .s file.
+ // If Loc is invalid, we still need to report the error, it just gets no
+ // location info.
+ Diags.Report(Loc, diag::err_fe_inline_asm).AddString(Message);
+}
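
A hypothetical input that reaches this handler (x86, integrated assembler):
the bad mnemonic is only rejected when the backend parses the asm string, and
the resulting SMDiagnostic is mapped back to the source line via the cookie.

    void f() {
      __asm__("bogus_mnemonic %eax");  // a backend-time error, not a Sema error
    }
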
+
+//
+
+CodeGenAction::CodeGenAction(unsigned _Act) : Act(_Act) {}
+
+CodeGenAction::~CodeGenAction() {}
+
+bool CodeGenAction::hasIRSupport() const { return true; }
+
+void CodeGenAction::EndSourceFileAction() {
+ // If the consumer creation failed, do nothing.
+ if (!getCompilerInstance().hasASTConsumer())
+ return;
+
+ // Steal the module from the consumer.
+ BackendConsumer *Consumer = static_cast<BackendConsumer*>(
+ &getCompilerInstance().getASTConsumer());
+
+ TheModule.reset(Consumer->takeModule());
+}
+
+llvm::Module *CodeGenAction::takeModule() {
+ return TheModule.take();
+}
+
+static raw_ostream *GetOutputStream(CompilerInstance &CI,
+ llvm::StringRef InFile,
+ BackendAction Action) {
+ switch (Action) {
+ case Backend_EmitAssembly:
+ return CI.createDefaultOutputFile(false, InFile, "s");
+ case Backend_EmitLL:
+ return CI.createDefaultOutputFile(false, InFile, "ll");
+ case Backend_EmitBC:
+ return CI.createDefaultOutputFile(true, InFile, "bc");
+ case Backend_EmitNothing:
+ return 0;
+ case Backend_EmitMCNull:
+ case Backend_EmitObj:
+ return CI.createDefaultOutputFile(true, InFile, "o");
+ }
+
+ assert(0 && "Invalid action!");
+ return 0;
+}
+
+ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ llvm::OwningPtr<llvm::raw_ostream> OS(GetOutputStream(CI, InFile, BA));
+ if (BA != Backend_EmitNothing && !OS)
+ return 0;
+
+ return new BackendConsumer(BA, CI.getDiagnostics(),
+ CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
+ CI.getLLVMContext());
+}
+
+void CodeGenAction::ExecuteAction() {
+ // If this is an IR file, we have to treat it specially.
+ if (getCurrentFileKind() == IK_LLVM_IR) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ CompilerInstance &CI = getCompilerInstance();
+ raw_ostream *OS = GetOutputStream(CI, getCurrentFile(), BA);
+ if (BA != Backend_EmitNothing && !OS)
+ return;
+
+ bool Invalid;
+ SourceManager &SM = CI.getSourceManager();
+ const llvm::MemoryBuffer *MainFile = SM.getBuffer(SM.getMainFileID(),
+ &Invalid);
+ if (Invalid)
+ return;
+
+ // FIXME: This is stupid, IRReader shouldn't take ownership.
+ llvm::MemoryBuffer *MainFileCopy =
+ llvm::MemoryBuffer::getMemBufferCopy(MainFile->getBuffer(),
+ getCurrentFile().c_str());
+
+ llvm::SMDiagnostic Err;
+ TheModule.reset(ParseIR(MainFileCopy, Err, CI.getLLVMContext()));
+ if (!TheModule) {
+ // Translate from the diagnostic info to the SourceManager location.
+ SourceLocation Loc = SM.getLocation(
+ SM.getFileEntryForID(SM.getMainFileID()), Err.getLineNo(),
+ Err.getColumnNo() + 1);
+
+ // Get a custom diagnostic for the error. We strip off a leading
+ // diagnostic code if there is one.
+ llvm::StringRef Msg = Err.getMessage();
+ if (Msg.startswith("error: "))
+ Msg = Msg.substr(7);
+ unsigned DiagID = CI.getDiagnostics().getCustomDiagID(Diagnostic::Error,
+ Msg);
+
+ CI.getDiagnostics().Report(FullSourceLoc(Loc, SM), DiagID);
+ return;
+ }
+
+ EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(),
+ CI.getTargetOpts(), TheModule.get(),
+ BA, OS);
+ return;
+ }
+
+ // Otherwise follow the normal AST path.
+ this->ASTFrontendAction::ExecuteAction();
+}
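
Outside the frontend, this IR path reduces to the ParseIR call above. A
minimal sketch against the APIs as of this import (ParseIR takes ownership of
the buffer, hence the FIXME):

    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include "llvm/Support/IRReader.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/SourceMgr.h"

    llvm::Module *loadIR(const char *Path, llvm::LLVMContext &Ctx,
                         llvm::SMDiagnostic &Err) {
      llvm::MemoryBuffer *Buf = llvm::MemoryBuffer::getFile(Path);
      if (!Buf)
        return 0;
      return llvm::ParseIR(Buf, Err, Ctx);  // owns Buf from here on
    }
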
+
+//
+
+EmitAssemblyAction::EmitAssemblyAction()
+ : CodeGenAction(Backend_EmitAssembly) {}
+
+EmitBCAction::EmitBCAction() : CodeGenAction(Backend_EmitBC) {}
+
+EmitLLVMAction::EmitLLVMAction() : CodeGenAction(Backend_EmitLL) {}
+
+EmitLLVMOnlyAction::EmitLLVMOnlyAction() : CodeGenAction(Backend_EmitNothing) {}
+
+EmitCodeGenOnlyAction::EmitCodeGenOnlyAction() : CodeGenAction(Backend_EmitMCNull) {}
+
+EmitObjAction::EmitObjAction() : CodeGenAction(Backend_EmitObj) {}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
index 73de0fd..eb6c436 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -14,13 +14,16 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
+#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
@@ -28,13 +31,20 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
: BlockFunction(cgm, *this, Builder), CGM(cgm),
Target(CGM.getContext().Target),
Builder(cgm.getModule().getContext()),
- DebugInfo(0), IndirectBranch(0),
+ ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
+ DidCallStackSave(false), UnreachableBlock(0),
CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
- ConditionalBranchLevel(0), TerminateHandler(0), TrapBB(0),
- UniqueAggrDestructorCount(0) {
- LLVMIntTy = ConvertType(getContext().IntTy);
+ ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
+ TrapBB(0) {
+
+ // Get some frequently used types.
LLVMPointerWidth = Target.getPointerWidth(0);
+ llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
+ IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
+ Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
+ Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+
Exceptions = getContext().getLangOptions().Exceptions;
CatchUndefined = getContext().getLangOptions().CatchUndefined;
CGM.getMangleContext().startNewFunction();
@@ -45,14 +55,6 @@ ASTContext &CodeGenFunction::getContext() const {
}
-llvm::BasicBlock *CodeGenFunction::getBasicBlockForLabel(const LabelStmt *S) {
- llvm::BasicBlock *&BB = LabelMap[S];
- if (BB) return BB;
-
- // Create, but don't insert, the new block.
- return BB = createBasicBlock(S->getName());
-}
-
llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
llvm::Value *Res = LocalDeclMap[VD];
assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
@@ -87,25 +89,26 @@ void CodeGenFunction::EmitReturnBlock() {
// We have a valid insert point, reuse it if it is empty or there are no
// explicit jumps to the return block.
- if (CurBB->empty() || ReturnBlock->use_empty()) {
- ReturnBlock->replaceAllUsesWith(CurBB);
- delete ReturnBlock;
+ if (CurBB->empty() || ReturnBlock.Block->use_empty()) {
+ ReturnBlock.Block->replaceAllUsesWith(CurBB);
+ delete ReturnBlock.Block;
} else
- EmitBlock(ReturnBlock);
+ EmitBlock(ReturnBlock.Block);
return;
}
// Otherwise, if the return block is the target of a single direct
// branch then we can just put the code in that block instead. This
// cleans up functions which started with a unified return block.
- if (ReturnBlock->hasOneUse()) {
+ if (ReturnBlock.Block->hasOneUse()) {
llvm::BranchInst *BI =
- dyn_cast<llvm::BranchInst>(*ReturnBlock->use_begin());
- if (BI && BI->isUnconditional() && BI->getSuccessor(0) == ReturnBlock) {
+ dyn_cast<llvm::BranchInst>(*ReturnBlock.Block->use_begin());
+ if (BI && BI->isUnconditional() &&
+ BI->getSuccessor(0) == ReturnBlock.Block) {
// Reset insertion point and delete the branch.
Builder.SetInsertPoint(BI->getParent());
BI->eraseFromParent();
- delete ReturnBlock;
+ delete ReturnBlock.Block;
return;
}
}
@@ -114,29 +117,37 @@ void CodeGenFunction::EmitReturnBlock() {
// unless it has uses. However, we still need a place to put the debug
// region.end for now.
- EmitBlock(ReturnBlock);
+ EmitBlock(ReturnBlock.Block);
+}
+
+static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
+ if (!BB) return;
+ if (!BB->use_empty())
+ return CGF.CurFn->getBasicBlockList().push_back(BB);
+ delete BB;
}
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
assert(BreakContinueStack.empty() &&
"mismatched push/pop in break/continue stack!");
- assert(BlockScopes.empty() &&
- "did not remove all blocks from block scope map!");
- assert(CleanupEntries.empty() &&
- "mismatched push/pop in cleanup stack!");
// Emit function epilog (to return).
EmitReturnBlock();
+ EmitFunctionInstrumentation("__cyg_profile_func_exit");
+
// Emit debug descriptor for function end.
if (CGDebugInfo *DI = getDebugInfo()) {
DI->setLocation(EndLoc);
DI->EmitRegionEnd(CurFn, Builder);
}
- EmitFunctionEpilog(*CurFnInfo, ReturnValue);
+ EmitFunctionEpilog(*CurFnInfo);
EmitEndEHSpec(CurCodeDecl);
+ assert(EHStack.empty() &&
+ "did not remove all scopes from cleanup stack!");
+
// If someone did an indirect goto, emit the indirect goto block at the end of
// the function.
if (IndirectBranch) {
@@ -158,6 +169,53 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
PN->eraseFromParent();
}
}
+
+ EmitIfUsed(*this, TerminateLandingPad);
+ EmitIfUsed(*this, TerminateHandler);
+ EmitIfUsed(*this, UnreachableBlock);
+
+ if (CGM.getCodeGenOpts().EmitDeclMetadata)
+ EmitDeclMetadata();
+}
+
+/// ShouldInstrumentFunction - Return true if the current function should be
+/// instrumented with __cyg_profile_func_* calls.
+bool CodeGenFunction::ShouldInstrumentFunction() {
+ if (!CGM.getCodeGenOpts().InstrumentFunctions)
+ return false;
+ if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
+ return false;
+ return true;
+}
+
+/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
+/// instrumentation function with the current function and the call site, if
+/// function instrumentation is enabled.
+void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
+ if (!ShouldInstrumentFunction())
+ return;
+
+ const llvm::PointerType *PointerTy;
+ const llvm::FunctionType *FunctionTy;
+ std::vector<const llvm::Type*> ProfileFuncArgs;
+
+ // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
+ PointerTy = llvm::Type::getInt8PtrTy(VMContext);
+ ProfileFuncArgs.push_back(PointerTy);
+ ProfileFuncArgs.push_back(PointerTy);
+ FunctionTy = llvm::FunctionType::get(
+ llvm::Type::getVoidTy(VMContext),
+ ProfileFuncArgs, false);
+
+ llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
+ llvm::CallInst *CallSite = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
+ llvm::ConstantInt::get(Int32Ty, 0),
+ "callsite");
+
+ Builder.CreateCall2(F,
+ llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
+ CallSite);
}
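
The calls emitted above target the usual -finstrument-functions hooks, which
user code supplies. A minimal sketch (the hooks themselves must be exempt
from instrumentation):

    #include <cstdio>

    extern "C" void __cyg_profile_func_enter(void *Fn, void *CallSite)
        __attribute__((no_instrument_function));
    extern "C" void __cyg_profile_func_exit(void *Fn, void *CallSite)
        __attribute__((no_instrument_function));

    extern "C" void __cyg_profile_func_enter(void *Fn, void *CallSite) {
      std::fprintf(stderr, "enter %p (called from %p)\n", Fn, CallSite);
    }
    extern "C" void __cyg_profile_func_exit(void *Fn, void *CallSite) {
      std::fprintf(stderr, "exit  %p (called from %p)\n", Fn, CallSite);
    }
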
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
@@ -187,14 +245,12 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// Create a marker to make it easy to insert allocas into the entryblock
// later. Don't create this with the builder, because we don't want it
// folded.
- llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext));
- AllocaInsertPt = new llvm::BitCastInst(Undef,
- llvm::Type::getInt32Ty(VMContext), "",
- EntryBB);
+ llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
+ AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
if (Builder.isNamePreserving())
AllocaInsertPt->setName("allocapt");
- ReturnBlock = createBasicBlock("return");
+ ReturnBlock = getJumpDestInCurrentScope("return");
Builder.SetInsertPoint(EntryBB);
@@ -209,6 +265,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
}
+ EmitFunctionInstrumentation("__cyg_profile_func_enter");
+
// FIXME: Leaked.
// CC info is ignored, hopefully?
CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
@@ -513,15 +571,11 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
return;
// FIXME: Handle variable sized types.
- const llvm::Type *IntPtr = llvm::IntegerType::get(VMContext,
- LLVMPointerWidth);
-
- Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtr), DestPtr,
+ Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
// TypeInfo.first describes size in bits.
- llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- TypeInfo.second/8),
+ llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
+ llvm::ConstantInt::get(Int32Ty, TypeInfo.second/8),
llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
0));
}
@@ -531,7 +585,7 @@ llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
if (IndirectBranch == 0)
GetIndirectGotoBlock();
- llvm::BasicBlock *BB = getBasicBlockForLabel(L);
+ llvm::BasicBlock *BB = getJumpDestForLabel(L).Block;
// Make sure the indirect branch includes all of the address-taken blocks.
IndirectBranch->addDestination(BB);
@@ -603,233 +657,574 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
}
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
- if (CGM.getContext().getBuiltinVaListType()->isArrayType()) {
+ if (CGM.getContext().getBuiltinVaListType()->isArrayType())
return EmitScalarExpr(E);
- }
return EmitLValue(E).getAddress();
}
-void CodeGenFunction::PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock,
- llvm::BasicBlock *CleanupExitBlock,
- llvm::BasicBlock *PreviousInvokeDest,
- bool EHOnly) {
- CleanupEntries.push_back(CleanupEntry(CleanupEntryBlock, CleanupExitBlock,
- PreviousInvokeDest, EHOnly));
+/// Pops cleanup blocks until the given savepoint is reached.
+void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
+ assert(Old.isValid());
+
+ EHScopeStack::iterator E = EHStack.find(Old);
+ while (EHStack.begin() != E)
+ PopCleanupBlock();
+}
+
+/// Destroys a cleanup if it was unused.
+static void DestroyCleanup(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry,
+ llvm::BasicBlock *Exit) {
+ assert(Entry->use_empty() && "destroying cleanup with uses!");
+ assert(Exit->getTerminator() == 0 &&
+ "exit has terminator but entry has no predecessors!");
+
+ // This doesn't always remove the entire cleanup, but it's much
+ // safer as long as we don't know what blocks belong to the cleanup.
+ // A *much* better approach if we care about this inefficiency would
+ // be to lazily emit the cleanup.
+
+ // If the exit block is distinct from the entry, give it a branch to
+ // an unreachable destination. This preserves the well-formedness
+ // of the IR.
+ if (Entry != Exit)
+ llvm::BranchInst::Create(CGF.getUnreachableBlock(), Exit);
+
+ assert(!Entry->getParent() && "cleanup entry already positioned?");
+ // We can't just delete the entry; we have to kill any references to
+ // its instructions in other blocks.
+ for (llvm::BasicBlock::iterator I = Entry->begin(), E = Entry->end();
+ I != E; ++I)
+ if (!I->use_empty())
+ I->replaceAllUsesWith(llvm::UndefValue::get(I->getType()));
+ delete Entry;
}
-void CodeGenFunction::EmitCleanupBlocks(size_t OldCleanupStackSize) {
- assert(CleanupEntries.size() >= OldCleanupStackSize &&
- "Cleanup stack mismatch!");
+/// Creates a switch instruction to thread branches out of the given
+/// block (which is the exit block of a cleanup).
+static void CreateCleanupSwitch(CodeGenFunction &CGF,
+ llvm::BasicBlock *Block) {
+ if (Block->getTerminator()) {
+ assert(isa<llvm::SwitchInst>(Block->getTerminator()) &&
+ "cleanup block already has a terminator, but it isn't a switch");
+ return;
+ }
- while (CleanupEntries.size() > OldCleanupStackSize)
- EmitCleanupBlock();
+ llvm::Value *DestCodePtr
+ = CGF.CreateTempAlloca(CGF.Builder.getInt32Ty(), "cleanup.dst");
+ CGBuilderTy Builder(Block);
+ llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
+
+ // Create a switch instruction to determine where to jump next.
+ Builder.CreateSwitch(DestCode, CGF.getUnreachableBlock());
}
-CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() {
- CleanupEntry &CE = CleanupEntries.back();
+/// Attempts to reduce a cleanup's entry block to a fallthrough. This
+/// is basically llvm::MergeBlockIntoPredecessor, except
+/// simplified/optimized for the tighter constraints on cleanup
+/// blocks.
+static void SimplifyCleanupEntry(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry) {
+ llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
+ if (!Pred) return;
- llvm::BasicBlock *CleanupEntryBlock = CE.CleanupEntryBlock;
+ llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
+ if (!Br || Br->isConditional()) return;
+ assert(Br->getSuccessor(0) == Entry);
- std::vector<llvm::BasicBlock *> Blocks;
- std::swap(Blocks, CE.Blocks);
+ // If we were previously inserting at the end of the cleanup entry
+ // block, we'll need to continue inserting at the end of the
+ // predecessor.
+ bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
+ assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
- std::vector<llvm::BranchInst *> BranchFixups;
- std::swap(BranchFixups, CE.BranchFixups);
+ // Kill the branch.
+ Br->eraseFromParent();
- bool EHOnly = CE.EHOnly;
+ // Merge the blocks.
+ Pred->getInstList().splice(Pred->end(), Entry->getInstList());
- setInvokeDest(CE.PreviousInvokeDest);
+ // Kill the entry block.
+ Entry->eraseFromParent();
- CleanupEntries.pop_back();
+ if (WasInsertBlock)
+ CGF.Builder.SetInsertPoint(Pred);
+}
- // Check if any branch fixups pointed to the scope we just popped. If so,
- // we can remove them.
- for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
- llvm::BasicBlock *Dest = BranchFixups[i]->getSuccessor(0);
- BlockScopeMap::iterator I = BlockScopes.find(Dest);
+/// Attempts to reduce a cleanup's exit switch to an unconditional
+/// branch.
+static void SimplifyCleanupExit(llvm::BasicBlock *Exit) {
+ llvm::TerminatorInst *Terminator = Exit->getTerminator();
+ assert(Terminator && "completed cleanup exit has no terminator");
- if (I == BlockScopes.end())
- continue;
+ llvm::SwitchInst *Switch = dyn_cast<llvm::SwitchInst>(Terminator);
+ if (!Switch) return;
+ if (Switch->getNumCases() != 2) return; // default + 1
- assert(I->second <= CleanupEntries.size() && "Invalid branch fixup!");
+ llvm::LoadInst *Cond = cast<llvm::LoadInst>(Switch->getCondition());
+ llvm::AllocaInst *CondVar = cast<llvm::AllocaInst>(Cond->getPointerOperand());
- if (I->second == CleanupEntries.size()) {
- // We don't need to do this branch fixup.
- BranchFixups[i] = BranchFixups.back();
- BranchFixups.pop_back();
- i--;
- e--;
- continue;
- }
- }
+ // Replace the switch instruction with an unconditional branch.
+ llvm::BasicBlock *Dest = Switch->getSuccessor(1); // default is 0
+ Switch->eraseFromParent();
+ llvm::BranchInst::Create(Dest, Exit);
- llvm::BasicBlock *SwitchBlock = CE.CleanupExitBlock;
- llvm::BasicBlock *EndBlock = 0;
- if (!BranchFixups.empty()) {
- if (!SwitchBlock)
- SwitchBlock = createBasicBlock("cleanup.switch");
- EndBlock = createBasicBlock("cleanup.end");
+ // Delete all uses of the condition variable.
+ Cond->eraseFromParent();
+ while (!CondVar->use_empty())
+ cast<llvm::StoreInst>(*CondVar->use_begin())->eraseFromParent();
- llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+ // Delete the condition variable itself.
+ CondVar->eraseFromParent();
+}
- Builder.SetInsertPoint(SwitchBlock);
+/// Threads a branch fixup through a cleanup block.
+static void ThreadFixupThroughCleanup(CodeGenFunction &CGF,
+ BranchFixup &Fixup,
+ llvm::BasicBlock *Entry,
+ llvm::BasicBlock *Exit) {
+ if (!Exit->getTerminator())
+ CreateCleanupSwitch(CGF, Exit);
- llvm::Value *DestCodePtr
- = CreateTempAlloca(llvm::Type::getInt32Ty(VMContext),
- "cleanup.dst");
- llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
+ // Find the switch and its destination index alloca.
+ llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Exit->getTerminator());
+ llvm::Value *DestCodePtr =
+ cast<llvm::LoadInst>(Switch->getCondition())->getPointerOperand();
- // Create a switch instruction to determine where to jump next.
- llvm::SwitchInst *SI = Builder.CreateSwitch(DestCode, EndBlock,
- BranchFixups.size());
+ // Compute the index of the new case we're adding to the switch.
+ unsigned Index = Switch->getNumCases();
- // Restore the current basic block (if any)
- if (CurBB) {
- Builder.SetInsertPoint(CurBB);
+ const llvm::IntegerType *i32 = llvm::Type::getInt32Ty(CGF.getLLVMContext());
+ llvm::ConstantInt *IndexV = llvm::ConstantInt::get(i32, Index);
- // If we had a current basic block, we also need to emit an instruction
- // to initialize the cleanup destination.
- Builder.CreateStore(llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)),
- DestCodePtr);
- } else
- Builder.ClearInsertionPoint();
-
- for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
- llvm::BranchInst *BI = BranchFixups[i];
- llvm::BasicBlock *Dest = BI->getSuccessor(0);
-
- // Fixup the branch instruction to point to the cleanup block.
- BI->setSuccessor(0, CleanupEntryBlock);
-
- if (CleanupEntries.empty()) {
- llvm::ConstantInt *ID;
-
- // Check if we already have a destination for this block.
- if (Dest == SI->getDefaultDest())
- ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
- else {
- ID = SI->findCaseDest(Dest);
- if (!ID) {
- // No code found, get a new unique one by using the number of
- // switch successors.
- ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- SI->getNumSuccessors());
- SI->addCase(ID, Dest);
- }
- }
+ // Set the index in the origin block.
+ new llvm::StoreInst(IndexV, DestCodePtr, Fixup.Origin);
- // Store the jump destination before the branch instruction.
- new llvm::StoreInst(ID, DestCodePtr, BI);
- } else {
- // We need to jump through another cleanup block. Create a pad block
- // with a branch instruction that jumps to the final destination and add
- // it as a branch fixup to the current cleanup scope.
+ // Add a case to the switch.
+ Switch->addCase(IndexV, Fixup.Destination);
- // Create the pad block.
- llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn);
+ // Change the last branch to point to the cleanup entry block.
+ Fixup.LatestBranch->setSuccessor(Fixup.LatestBranchIndex, Entry);
- // Create a unique case ID.
- llvm::ConstantInt *ID
- = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- SI->getNumSuccessors());
+ // And finally, update the fixup.
+ Fixup.LatestBranch = Switch;
+ Fixup.LatestBranchIndex = Index;
+}
- // Store the jump destination before the branch instruction.
- new llvm::StoreInst(ID, DestCodePtr, BI);
+/// Try to simplify both the entry and exit edges of a cleanup.
+static void SimplifyCleanupEdges(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry,
+ llvm::BasicBlock *Exit) {
- // Add it as the destination.
- SI->addCase(ID, CleanupPad);
+ // Given their current implementations, it's important to run these
+ // in this order: SimplifyCleanupEntry will delete Entry if it can
+ // be merged into its predecessor, which will then break
+ // SimplifyCleanupExit if (as is common) Entry == Exit.
- // Create the branch to the final destination.
- llvm::BranchInst *BI = llvm::BranchInst::Create(Dest);
- CleanupPad->getInstList().push_back(BI);
+ SimplifyCleanupExit(Exit);
+ SimplifyCleanupEntry(CGF, Entry);
+}
- // And add it as a branch fixup.
- CleanupEntries.back().BranchFixups.push_back(BI);
- }
- }
+static void EmitLazyCleanup(CodeGenFunction &CGF,
+ EHScopeStack::LazyCleanup *Fn,
+ bool ForEH) {
+ if (ForEH) CGF.EHStack.pushTerminate();
+ Fn->Emit(CGF, ForEH);
+ if (ForEH) CGF.EHStack.popTerminate();
+ assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
+}
+
+static void SplitAndEmitLazyCleanup(CodeGenFunction &CGF,
+ EHScopeStack::LazyCleanup *Fn,
+ bool ForEH,
+ llvm::BasicBlock *Entry) {
+ assert(Entry && "no entry block for cleanup");
+
+ // Remove the switch and load from the end of the entry block.
+ llvm::Instruction *Switch = &Entry->getInstList().back();
+ Entry->getInstList().remove(Switch);
+ assert(isa<llvm::SwitchInst>(Switch));
+ llvm::Instruction *Load = &Entry->getInstList().back();
+ Entry->getInstList().remove(Load);
+ assert(isa<llvm::LoadInst>(Load));
+
+ assert(Entry->getInstList().empty() &&
+ "lazy cleanup block not empty after removing load/switch pair?");
+
+ // Emit the actual cleanup at the end of the entry block.
+ CGF.Builder.SetInsertPoint(Entry);
+ EmitLazyCleanup(CGF, Fn, ForEH);
+
+ // Put the load and switch at the end of the exit block.
+ llvm::BasicBlock *Exit = CGF.Builder.GetInsertBlock();
+ Exit->getInstList().push_back(Load);
+ Exit->getInstList().push_back(Switch);
+
+ // Clean up the edges if possible.
+ SimplifyCleanupEdges(CGF, Entry, Exit);
+
+ CGF.Builder.ClearInsertionPoint();
+}
+
+static void PopLazyCleanupBlock(CodeGenFunction &CGF) {
+ assert(isa<EHLazyCleanupScope>(*CGF.EHStack.begin()) && "top not a cleanup!");
+ EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*CGF.EHStack.begin());
+ assert(Scope.getFixupDepth() <= CGF.EHStack.getNumBranchFixups());
+
+ // Check whether we need an EH cleanup. This is only true if we've
+ // generated a lazy EH cleanup block.
+ llvm::BasicBlock *EHEntry = Scope.getEHBlock();
+ bool RequiresEHCleanup = (EHEntry != 0);
+
+ // Check the three conditions which might require a normal cleanup:
+
+ // - whether there are branch fix-ups through this cleanup
+ unsigned FixupDepth = Scope.getFixupDepth();
+ bool HasFixups = CGF.EHStack.getNumBranchFixups() != FixupDepth;
+
+ // - whether control has already been threaded through this cleanup
+ llvm::BasicBlock *NormalEntry = Scope.getNormalBlock();
+ bool HasExistingBranches = (NormalEntry != 0);
+
+ // - whether there's a fallthrough
+ llvm::BasicBlock *FallthroughSource = CGF.Builder.GetInsertBlock();
+ bool HasFallthrough = (FallthroughSource != 0);
+
+ bool RequiresNormalCleanup = false;
+ if (Scope.isNormalCleanup() &&
+ (HasFixups || HasExistingBranches || HasFallthrough)) {
+ RequiresNormalCleanup = true;
}
- // Remove all blocks from the block scope map.
- for (size_t i = 0, e = Blocks.size(); i != e; ++i) {
- assert(BlockScopes.count(Blocks[i]) &&
- "Did not find block in scope map!");
+ // If we don't need the cleanup at all, we're done.
+ if (!RequiresNormalCleanup && !RequiresEHCleanup) {
+ CGF.EHStack.popCleanup();
+ assert(CGF.EHStack.getNumBranchFixups() == 0 ||
+ CGF.EHStack.hasNormalCleanups());
+ return;
+ }
+
+ // Copy the cleanup emission data out. Note that SmallVector
+ // guarantees maximal alignment for its buffer regardless of its
+ // type parameter.
+ llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
+ CleanupBuffer.reserve(Scope.getCleanupSize());
+ memcpy(CleanupBuffer.data(),
+ Scope.getCleanupBuffer(), Scope.getCleanupSize());
+ CleanupBuffer.set_size(Scope.getCleanupSize());
+ EHScopeStack::LazyCleanup *Fn =
+ reinterpret_cast<EHScopeStack::LazyCleanup*>(CleanupBuffer.data());
+
+ // We're done with the scope; pop it off so we can emit the cleanups.
+ CGF.EHStack.popCleanup();
+
+ if (RequiresNormalCleanup) {
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (HasFallthrough && !HasFixups && !HasExistingBranches) {
+ EmitLazyCleanup(CGF, Fn, /*ForEH*/ false);
+
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+ } else {
+ // Force the entry block to exist.
+ if (!HasExistingBranches) {
+ NormalEntry = CGF.createBasicBlock("cleanup");
+ CreateCleanupSwitch(CGF, NormalEntry);
+ }
- BlockScopes.erase(Blocks[i]);
+ CGF.EmitBlock(NormalEntry);
+
+ // Thread the fallthrough edge through the (momentarily trivial)
+ // cleanup.
+ llvm::BasicBlock *FallthroughDestination = 0;
+ if (HasFallthrough) {
+ assert(isa<llvm::BranchInst>(FallthroughSource->getTerminator()));
+ FallthroughDestination = CGF.createBasicBlock("cleanup.cont");
+
+ BranchFixup Fix;
+ Fix.Destination = FallthroughDestination;
+ Fix.LatestBranch = FallthroughSource->getTerminator();
+ Fix.LatestBranchIndex = 0;
+ Fix.Origin = Fix.LatestBranch;
+
+ // Restore fixup invariant. EmitBlock added a branch to the
+ // cleanup which we need to redirect to the destination.
+ cast<llvm::BranchInst>(Fix.LatestBranch)
+ ->setSuccessor(0, Fix.Destination);
+
+ ThreadFixupThroughCleanup(CGF, Fix, NormalEntry, NormalEntry);
+ }
+
+ // Thread any "real" fixups we need to thread.
+ for (unsigned I = FixupDepth, E = CGF.EHStack.getNumBranchFixups();
+ I != E; ++I)
+ if (CGF.EHStack.getBranchFixup(I).Destination)
+ ThreadFixupThroughCleanup(CGF, CGF.EHStack.getBranchFixup(I),
+ NormalEntry, NormalEntry);
+
+ SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ false, NormalEntry);
+
+ if (HasFallthrough)
+ CGF.EmitBlock(FallthroughDestination);
+ }
}
- return CleanupBlockInfo(CleanupEntryBlock, SwitchBlock, EndBlock, EHOnly);
+ // Emit the EH cleanup if required.
+ if (RequiresEHCleanup) {
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(EHEntry);
+ SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ true, EHEntry);
+ CGF.Builder.restoreIP(SavedIP);
+ }
}
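
The save/clear/restore dance above is the standard way to emit a block out of
line without disturbing the main insertion point. In generic IRBuilder terms
(a sketch, not part of the patch; header path as of this import):

    #include "llvm/Support/IRBuilder.h"

    void emitOutOfLine(llvm::IRBuilder<> &B, llvm::BasicBlock *BB) {
      llvm::IRBuilderBase::InsertPoint IP = B.saveAndClearIP();
      B.SetInsertPoint(BB);
      // ... emit the out-of-line code here ...
      B.restoreIP(IP);  // resume exactly where we left off
    }
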
-void CodeGenFunction::EmitCleanupBlock() {
- CleanupBlockInfo Info = PopCleanupBlock();
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
+void CodeGenFunction::PopCleanupBlock() {
+ assert(!EHStack.empty() && "cleanup stack is empty!");
+ if (isa<EHLazyCleanupScope>(*EHStack.begin()))
+ return PopLazyCleanupBlock(*this);
+
+ assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+ assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+
+ // Handle the EH cleanup if (1) there is one and (2) it's different
+ // from the normal cleanup.
+ if (Scope.isEHCleanup() &&
+ Scope.getEHEntry() != Scope.getNormalEntry()) {
+ llvm::BasicBlock *EHEntry = Scope.getEHEntry();
+ llvm::BasicBlock *EHExit = Scope.getEHExit();
+
+ if (EHEntry->use_empty()) {
+ DestroyCleanup(*this, EHEntry, EHExit);
+ } else {
+ // TODO: this isn't really the ideal location to put this EH
+ // cleanup, but lazy emission is a better solution than trying
+ // to pick a better spot.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ EmitBlock(EHEntry);
+ Builder.restoreIP(SavedIP);
+
+ SimplifyCleanupEdges(*this, EHEntry, EHExit);
+ }
+ }
+
+ // If we only have an EH cleanup, we don't really need to do much
+ // here. Branch fixups just naturally drop down to the enclosing
+ // cleanup scope.
+ if (!Scope.isNormalCleanup()) {
+ EHStack.popCleanup();
+ assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups());
+ return;
+ }
- if (Info.EHOnly) {
- // FIXME: Add this to the exceptional edge
- if (Info.CleanupBlock->getNumUses() == 0)
- delete Info.CleanupBlock;
+ // Check whether the scope has any fixups that need to be threaded.
+ unsigned FixupDepth = Scope.getFixupDepth();
+ bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
+
+ // Grab the entry and exit blocks.
+ llvm::BasicBlock *Entry = Scope.getNormalEntry();
+ llvm::BasicBlock *Exit = Scope.getNormalExit();
+
+ // Check whether anything's been threaded through the cleanup already.
+ assert((Exit->getTerminator() == 0) == Entry->use_empty() &&
+ "cleanup entry/exit mismatch");
+ bool HasExistingBranches = !Entry->use_empty();
+
+ // Check whether we need to emit a "fallthrough" branch through the
+ // cleanup for the current insertion point.
+ llvm::BasicBlock *FallThrough = Builder.GetInsertBlock();
+ if (FallThrough && FallThrough->getTerminator())
+ FallThrough = 0;
+
+ // If *nothing* is using the cleanup, kill it.
+ if (!FallThrough && !HasFixups && !HasExistingBranches) {
+ EHStack.popCleanup();
+ DestroyCleanup(*this, Entry, Exit);
return;
}
- // Scrub debug location info.
- for (llvm::BasicBlock::iterator LBI = Info.CleanupBlock->begin(),
- LBE = Info.CleanupBlock->end(); LBI != LBE; ++LBI)
- Builder.SetInstDebugLocation(LBI);
+ // Otherwise, add the block to the function.
+ EmitBlock(Entry);
- llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
- if (CurBB && !CurBB->getTerminator() &&
- Info.CleanupBlock->getNumUses() == 0) {
- CurBB->getInstList().splice(CurBB->end(), Info.CleanupBlock->getInstList());
- delete Info.CleanupBlock;
- } else
- EmitBlock(Info.CleanupBlock);
+ if (FallThrough)
+ Builder.SetInsertPoint(Exit);
+ else
+ Builder.ClearInsertionPoint();
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
-}
+ // Fast case: if we don't have to add any fixups, and either
+ // we don't have a fallthrough or the cleanup wasn't previously
+ // used, then the setup above is sufficient.
+ if (!HasFixups) {
+ if (!FallThrough) {
+ assert(HasExistingBranches && "no reason for cleanup but didn't kill before");
+ EHStack.popCleanup();
+ SimplifyCleanupEdges(*this, Entry, Exit);
+ return;
+ } else if (!HasExistingBranches) {
+ assert(FallThrough && "no reason for cleanup but didn't kill before");
+ // We can't simplify the exit edge in this case because we're
+ // already inserting at the end of the exit block.
+ EHStack.popCleanup();
+ SimplifyCleanupEntry(*this, Entry);
+ return;
+ }
+ }
+
+ // Otherwise we're going to have to thread things through the cleanup.
+ llvm::SmallVector<BranchFixup*, 8> Fixups;
-void CodeGenFunction::AddBranchFixup(llvm::BranchInst *BI) {
- assert(!CleanupEntries.empty() &&
- "Trying to add branch fixup without cleanup block!");
+ // Synthesize a fixup for the current insertion point.
+ BranchFixup Cur;
+ if (FallThrough) {
+ Cur.Destination = createBasicBlock("cleanup.cont");
+ Cur.LatestBranch = FallThrough->getTerminator();
+ Cur.LatestBranchIndex = 0;
+ Cur.Origin = Cur.LatestBranch;
- // FIXME: We could be more clever here and check if there's already a branch
- // fixup for this destination and recycle it.
- CleanupEntries.back().BranchFixups.push_back(BI);
+ // Restore fixup invariant. EmitBlock added a branch to the cleanup
+ // which we need to redirect to the destination.
+ cast<llvm::BranchInst>(Cur.LatestBranch)->setSuccessor(0, Cur.Destination);
+
+ Fixups.push_back(&Cur);
+ } else {
+ Cur.Destination = 0;
+ }
+
+ // Collect any "real" fixups we need to thread.
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I != E; ++I)
+ if (EHStack.getBranchFixup(I).Destination)
+ Fixups.push_back(&EHStack.getBranchFixup(I));
+
+ assert(!Fixups.empty() && "no fixups, invariants broken!");
+
+ // If there's only a single fixup to thread through, do so with
+ // unconditional branches. This only happens if there's a single
+ // branch and no fallthrough.
+ if (Fixups.size() == 1 && !HasExistingBranches) {
+ Fixups[0]->LatestBranch->setSuccessor(Fixups[0]->LatestBranchIndex, Entry);
+ llvm::BranchInst *Br =
+ llvm::BranchInst::Create(Fixups[0]->Destination, Exit);
+ Fixups[0]->LatestBranch = Br;
+ Fixups[0]->LatestBranchIndex = 0;
+
+ // Otherwise, force a switch statement and thread everything through
+ // the switch.
+ } else {
+ CreateCleanupSwitch(*this, Exit);
+ for (unsigned I = 0, E = Fixups.size(); I != E; ++I)
+ ThreadFixupThroughCleanup(*this, *Fixups[I], Entry, Exit);
+ }
+
+ // Emit the fallthrough destination block if necessary.
+ if (Cur.Destination)
+ EmitBlock(Cur.Destination);
+
+ // We're finally done with the cleanup.
+ EHStack.popCleanup();
}
-void CodeGenFunction::EmitBranchThroughCleanup(llvm::BasicBlock *Dest) {
+void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
if (!HaveInsertPoint())
return;
- llvm::BranchInst* BI = Builder.CreateBr(Dest);
-
- Builder.ClearInsertionPoint();
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);
- // The stack is empty, no need to do any cleanup.
- if (CleanupEntries.empty())
+ // If we're not in a cleanup scope, we don't need to worry about
+ // fixups.
+ if (!EHStack.hasNormalCleanups()) {
+ Builder.ClearInsertionPoint();
return;
+ }
- if (!Dest->getParent()) {
- // We are trying to branch to a block that hasn't been inserted yet.
- AddBranchFixup(BI);
+ // Initialize a fixup.
+ BranchFixup Fixup;
+ Fixup.Destination = Dest.Block;
+ Fixup.Origin = BI;
+ Fixup.LatestBranch = BI;
+ Fixup.LatestBranchIndex = 0;
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope.
+ if (!Dest.ScopeDepth.isValid()) {
+ EHStack.addBranchFixup() = Fixup;
+ Builder.ClearInsertionPoint();
return;
}
- BlockScopeMap::iterator I = BlockScopes.find(Dest);
- if (I == BlockScopes.end()) {
- // We are trying to jump to a block that is outside of any cleanup scope.
- AddBranchFixup(BI);
- return;
+ for (EHScopeStack::iterator I = EHStack.begin(),
+ E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
+ if (isa<EHCleanupScope>(*I)) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
+ if (Scope.isNormalCleanup())
+ ThreadFixupThroughCleanup(*this, Fixup, Scope.getNormalEntry(),
+ Scope.getNormalExit());
+ } else if (isa<EHLazyCleanupScope>(*I)) {
+ EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
+ if (Scope.isNormalCleanup()) {
+ llvm::BasicBlock *Block = Scope.getNormalBlock();
+ if (!Block) {
+ Block = createBasicBlock("cleanup");
+ Scope.setNormalBlock(Block);
+ }
+ ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
+ }
+ }
}
+
+ Builder.ClearInsertionPoint();
+}
- assert(I->second < CleanupEntries.size() &&
- "Trying to branch into cleanup region");
+void CodeGenFunction::EmitBranchThroughEHCleanup(JumpDest Dest) {
+ if (!HaveInsertPoint())
+ return;
+
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);
- if (I->second == CleanupEntries.size() - 1) {
- // We have a branch to a block in the same scope.
+ // If we're not in a cleanup scope, we don't need to worry about
+ // fixups.
+ if (!EHStack.hasEHCleanups()) {
+ Builder.ClearInsertionPoint();
return;
}
- AddBranchFixup(BI);
+ // Initialize a fixup.
+ BranchFixup Fixup;
+ Fixup.Destination = Dest.Block;
+ Fixup.Origin = BI;
+ Fixup.LatestBranch = BI;
+ Fixup.LatestBranchIndex = 0;
+
+ // We should never get invalid scope depths for these: invalid scope
+ // depths only arise for as-yet-unemitted labels, and we can't do an
+ // EH-unwind to one of those.
+ assert(Dest.ScopeDepth.isValid() && "invalid scope depth on EH dest?");
+
+ for (EHScopeStack::iterator I = EHStack.begin(),
+ E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
+ if (isa<EHCleanupScope>(*I)) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
+ if (Scope.isEHCleanup())
+ ThreadFixupThroughCleanup(*this, Fixup, Scope.getEHEntry(),
+ Scope.getEHExit());
+ } else if (isa<EHLazyCleanupScope>(*I)) {
+ EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
+ if (Scope.isEHCleanup()) {
+ llvm::BasicBlock *Block = Scope.getEHBlock();
+ if (!Block) {
+ Block = createBasicBlock("eh.cleanup");
+ Scope.setEHBlock(Block);
+ }
+ ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
+ }
+ }
+ }
+
+ Builder.ClearInsertionPoint();
}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
index ece275e..5ee3db0 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
@@ -37,6 +37,7 @@ namespace llvm {
class SwitchInst;
class Twine;
class Value;
+ class CallSite;
}
namespace clang {
@@ -69,12 +70,317 @@ namespace CodeGen {
class CGRecordLayout;
class CGBlockInfo;
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The origin of the branch. Any switch-index stores required by
+ /// cleanup threading are added before this instruction.
+ llvm::Instruction *Origin;
+
+ /// The destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ llvm::BasicBlock *Destination;
+
+ /// The last branch of the fixup. It is an invariant that
+ /// LatestBranch->getSuccessor(LatestBranchIndex) == Destination.
+ ///
+ /// The branch is always either a BranchInst or a SwitchInst.
+ llvm::TerminatorInst *LatestBranch;
+ unsigned LatestBranchIndex;
+};
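
A hypothetical function whose goto records exactly such a fixup: the label has
not been emitted when the branch is, and the branch exits a scope with a
non-trivial cleanup.

    struct Guard { ~Guard() {} };  // user-provided dtor: non-trivial cleanup

    void g(int N) {
      for (int I = 0; I != N; ++I) {
        Guard G;
        if (I == 3)
          goto Done;  // 'Done' is not emitted yet: the branch is emitted
                      // optimistically and a BranchFixup is recorded, then
                      // threaded through G's cleanup when the scope pops
        (void)G;
      }
    Done:
      return;
    }
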
+
+enum CleanupKind { NormalAndEHCleanup, EHCleanup, NormalCleanup };
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+public:
+ /// A saved depth on the scope stack. This is necessary because
+ /// pushing scopes onto the stack invalidates iterators.
+ class stable_iterator {
+ friend class EHScopeStack;
+
+ /// Offset from StartOfData to EndOfBuffer.
+ ptrdiff_t Size;
+
+ stable_iterator(ptrdiff_t Size) : Size(Size) {}
+
+ public:
+ static stable_iterator invalid() { return stable_iterator(-1); }
+ stable_iterator() : Size(-1) {}
+
+ bool isValid() const { return Size >= 0; }
+
+ friend bool operator==(stable_iterator A, stable_iterator B) {
+ return A.Size == B.Size;
+ }
+ friend bool operator!=(stable_iterator A, stable_iterator B) {
+ return A.Size != B.Size;
+ }
+ };
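
    A sketch of the intended usage pattern (illustrative only; CGF is a
    CodeGenFunction as elsewhere in this patch): save a stable depth before
    emitting code that may push cleanups, then pop back to it afterwards
    with PopCleanupBlocks, declared later in this header.

      EHScopeStack::stable_iterator Depth = CGF.EHStack.stable_begin();
      // ... emit a sub-expression; it may push any number of cleanups ...
      CGF.PopCleanupBlocks(Depth);  // pops everything pushed since the save
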
+
+ /// A lazy cleanup. Subclasses must be POD-like: cleanups will
+ /// not be destructed, and they will be allocated on the cleanup
+ /// stack and freely copied and moved around.
+ ///
+ /// LazyCleanup implementations should generally be declared in an
+ /// anonymous namespace.
+ class LazyCleanup {
+ public:
+ // Anchor the construction vtable. We use the destructor because
+ // gcc gives an obnoxious warning if there are virtual methods
+ // with an accessible non-virtual destructor. Unfortunately,
+ // declaring this destructor makes it non-trivial, but there
+ // doesn't seem to be any other way around this warning.
+ //
+ // This destructor will never be called.
+ virtual ~LazyCleanup();
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+ ///
+ /// \param IsForEHCleanup true if this is for an EH cleanup, false
+ /// if for a normal cleanup.
+ virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
+ };
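
    As an illustration of the contract above, a minimal POD-like cleanup
    that calls a given function on a given address might look as follows,
    declared in an anonymous namespace as recommended. The names are
    hypothetical, not part of this patch; it would be pushed through the
    pushLazyCleanup overloads declared below.

      namespace {
        struct CallFnCleanup : EHScopeStack::LazyCleanup {
          llvm::Value *Fn, *Addr;  // plain pointers only: never destructed
          CallFnCleanup(llvm::Value *Fn, llvm::Value *Addr)
            : Fn(Fn), Addr(Addr) {}
          void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
            CGF.Builder.CreateCall(Fn, Addr);
          }
        };
      }
      // at the point the cleanup is entered:
      // CGF.EHStack.pushLazyCleanup<CallFnCleanup>(NormalAndEHCleanup, Fn, Addr);
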
+
+private:
+ // The implementation for this class is in CGException.h and
+ // CGException.cpp; the definition is here because it's used as a
+ // member of CodeGenFunction.
+
+ /// The start of the scope-stack buffer, i.e. the allocated pointer
+ /// for the buffer. All of these pointers are either simultaneously
+ /// null or simultaneously valid.
+ char *StartOfBuffer;
+
+ /// The end of the buffer.
+ char *EndOfBuffer;
+
+ /// The first valid entry in the buffer.
+ char *StartOfData;
+
+ /// The innermost normal cleanup on the stack.
+ stable_iterator InnermostNormalCleanup;
+
+ /// The innermost EH cleanup on the stack.
+ stable_iterator InnermostEHCleanup;
+
+ /// The number of catches on the stack.
+ unsigned CatchDepth;
+
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ llvm::SmallVector<BranchFixup, 8> BranchFixups;
+
+ char *allocate(size_t Size);
+
+ void popNullFixups();
+
+ void *pushLazyCleanup(CleanupKind K, size_t DataSize);
+
+public:
+ EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
+ InnermostNormalCleanup(stable_end()),
+ InnermostEHCleanup(stable_end()),
+ CatchDepth(0) {}
+ ~EHScopeStack() { delete[] StartOfBuffer; }
+
+ // Variadic templates would make this not terrible.
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T>
+ void pushLazyCleanup(CleanupKind Kind) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T();
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0>
+ void pushLazyCleanup(CleanupKind Kind, A0 a0) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T(a0);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1>
+ void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T(a0, a1);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2>
+ void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T(a0, a1, a2);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2, class A3>
+ void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
+ void *Buffer = pushLazyCleanup(Kind, sizeof(T));
+ LazyCleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
+ (void) Obj;
+ }
+
+ /// Push a cleanup on the stack.
+ void pushCleanup(llvm::BasicBlock *NormalEntry,
+ llvm::BasicBlock *NormalExit,
+ llvm::BasicBlock *EHEntry,
+ llvm::BasicBlock *EHExit);
+
+ /// Pops a cleanup scope off the stack. This should only be called
+ /// by CodeGenFunction::PopCleanupBlock.
+ void popCleanup();
+
+ /// Push a set of catch handlers on the stack. The catch is
+ /// uninitialized and will need to have the given number of handlers
+ /// set on it.
+ class EHCatchScope *pushCatch(unsigned NumHandlers);
+
+ /// Pops a catch scope off the stack.
+ void popCatch();
+
+ /// Push an exception filter on the stack.
+ class EHFilterScope *pushFilter(unsigned NumFilters);
+
+ /// Pops an exception filter off the stack.
+ void popFilter();
+
+ /// Push a terminate handler on the stack.
+ void pushTerminate();
+
+ /// Pops a terminate handler off the stack.
+ void popTerminate();
+
+ /// Determines whether the exception-scopes stack is empty.
+ bool empty() const { return StartOfData == EndOfBuffer; }
+
+ bool requiresLandingPad() const {
+ return (CatchDepth || hasEHCleanups());
+ }
+
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return InnermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return InnermostNormalCleanup;
+ }
+
+ /// Determines whether there are any EH cleanups on the stack.
+ bool hasEHCleanups() const {
+ return InnermostEHCleanup != stable_end();
+ }
+
+ /// Returns the innermost EH cleanup on the stack, or stable_end()
+ /// if there are no EH cleanups.
+ stable_iterator getInnermostEHCleanup() const {
+ return InnermostEHCleanup;
+ }
+
+ /// An unstable reference to a scope-stack depth. Invalidated by
+ /// pushes but not pops.
+ class iterator;
+
+ /// Returns an iterator pointing to the innermost EH scope.
+ iterator begin() const;
+
+ /// Returns an iterator pointing to the outermost EH scope.
+ iterator end() const;
+
+ /// Create a stable reference to the top of the EH stack. The
+ /// returned reference is valid until that scope is popped off the
+ /// stack.
+ stable_iterator stable_begin() const {
+ return stable_iterator(EndOfBuffer - StartOfData);
+ }
+
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() {
+ return stable_iterator(0);
+ }
+
+ /// Translates an iterator into a stable_iterator.
+ stable_iterator stabilize(iterator it) const;
+
+ /// Finds the nearest cleanup enclosing the given iterator.
+ /// Returns stable_iterator::invalid() if there are no such cleanups.
+ stable_iterator getEnclosingEHCleanup(iterator it) const;
+
+ /// Turn a stable reference to a scope depth into an unstable pointer
+ /// to the EH stack.
+ iterator find(stable_iterator save) const;
+
+ /// Removes the cleanup pointed to by the given stable_iterator.
+ void removeCleanup(stable_iterator save);
+
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ BranchFixups.push_back(BranchFixup());
+ return BranchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return BranchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned I) {
+ assert(I < getNumBranchFixups());
+ return BranchFixups[I];
+ }
+
+ /// Mark any branch fixups leading to the given block as resolved.
+ void resolveBranchFixups(llvm::BasicBlock *Dest);
+};
+
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public BlockFunction {
CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
public:
+ /// A jump destination is a pair of a basic block and a cleanup
+ /// depth. They are used to implement direct jumps across cleanup
+ /// scopes, e.g. goto, break, continue, and return.
+ struct JumpDest {
+ JumpDest() : Block(0), ScopeDepth() {}
+ JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth)
+ : Block(Block), ScopeDepth(Depth) {}
+
+ llvm::BasicBlock *Block;
+ EHScopeStack::stable_iterator ScopeDepth;
+ };
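
    Illustrative usage (names hypothetical): a loop emitter captures its
    exit as a JumpDest so that a later 'break' can be threaded through
    whatever cleanups were pushed in between; see getJumpDestInCurrentScope
    and EmitBranchThroughCleanup, declared later in this class.

      JumpDest LoopExit = CGF.getJumpDestInCurrentScope("for.end");
      // ... push cleanups, emit the loop body ...
      CGF.EmitBranchThroughCleanup(LoopExit);  // threads through the cleanups
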
+
CodeGenModule &CGM; // Per-module state.
const TargetInfo &Target;
@@ -94,7 +400,8 @@ public:
GlobalDecl CurGD;
/// ReturnBlock - Unified return block.
- llvm::BasicBlock *ReturnBlock;
+ JumpDest ReturnBlock;
+
/// ReturnValue - The temporary alloca to hold the return value. This is null
/// iff the function has no return value.
llvm::Value *ReturnValue;
@@ -103,7 +410,8 @@ public:
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
- const llvm::Type *LLVMIntTy;
+ // intptr_t, i32, i64
+ const llvm::IntegerType *IntPtrTy, *Int32Ty, *Int64Ty;
uint32_t LLVMPointerWidth;
bool Exceptions;
@@ -112,141 +420,97 @@ public:
/// \brief A mapping from NRVO variables to the flags used to indicate
/// when the NRVO has been applied to this variable.
llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
-
-public:
- /// ObjCEHValueStack - Stack of Objective-C exception values, used for
- /// rethrows.
- llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
- /// PushCleanupBlock - Push a new cleanup entry on the stack and set the
- /// passed in block as the cleanup block.
- void PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock,
- llvm::BasicBlock *CleanupExitBlock,
- llvm::BasicBlock *PreviousInvokeDest,
- bool EHOnly = false);
- void PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock) {
- PushCleanupBlock(CleanupEntryBlock, 0, getInvokeDest(), false);
- }
-
- /// CleanupBlockInfo - A struct representing a popped cleanup block.
- struct CleanupBlockInfo {
- /// CleanupEntryBlock - the cleanup entry block
- llvm::BasicBlock *CleanupBlock;
+ EHScopeStack EHStack;
- /// SwitchBlock - the block (if any) containing the switch instruction used
- /// for jumping to the final destination.
- llvm::BasicBlock *SwitchBlock;
+ /// The exception slot. All landing pads write the current
+ /// exception pointer into this alloca.
+ llvm::Value *ExceptionSlot;
- /// EndBlock - the default destination for the switch instruction.
- llvm::BasicBlock *EndBlock;
+ /// Emits a landing pad for the current EH stack.
+ llvm::BasicBlock *EmitLandingPad();
- /// EHOnly - True iff this cleanup should only be performed on the
- /// exceptional edge.
- bool EHOnly;
+ llvm::BasicBlock *getInvokeDestImpl();
- CleanupBlockInfo(llvm::BasicBlock *cb, llvm::BasicBlock *sb,
- llvm::BasicBlock *eb, bool ehonly = false)
- : CleanupBlock(cb), SwitchBlock(sb), EndBlock(eb), EHOnly(ehonly) {}
- };
+public:
+ /// ObjCEHValueStack - Stack of Objective-C exception values, used for
+ /// rethrows.
+ llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
- /// EHCleanupBlock - RAII object that will create a cleanup block for the
- /// exceptional edge and set the insert point to that block. When destroyed,
- /// it creates the cleanup edge and sets the insert point to the previous
- /// block.
- class EHCleanupBlock {
- CodeGenFunction& CGF;
- llvm::BasicBlock *PreviousInsertionBlock;
- llvm::BasicBlock *CleanupHandler;
- llvm::BasicBlock *PreviousInvokeDest;
- public:
- EHCleanupBlock(CodeGenFunction &cgf)
- : CGF(cgf),
- PreviousInsertionBlock(CGF.Builder.GetInsertBlock()),
- CleanupHandler(CGF.createBasicBlock("ehcleanup", CGF.CurFn)),
- PreviousInvokeDest(CGF.getInvokeDest()) {
- llvm::BasicBlock *TerminateHandler = CGF.getTerminateHandler();
- CGF.Builder.SetInsertPoint(CleanupHandler);
- CGF.setInvokeDest(TerminateHandler);
- }
- ~EHCleanupBlock();
+ // A struct holding information about a finally block's IR
+ // generation. For now, doesn't actually hold anything.
+ struct FinallyInfo {
};
- /// PopCleanupBlock - Will pop the cleanup entry on the stack, process all
- /// branch fixups and return a block info struct with the switch block and end
- /// block. This will also reset the invoke handler to the previous value
- /// from when the cleanup block was created.
- CleanupBlockInfo PopCleanupBlock();
-
- /// DelayedCleanupBlock - RAII object that will create a cleanup block and set
- /// the insert point to that block. When destructed, it sets the insert point
- /// to the previous block and pushes a new cleanup entry on the stack.
- class DelayedCleanupBlock {
- CodeGenFunction& CGF;
- llvm::BasicBlock *CurBB;
- llvm::BasicBlock *CleanupEntryBB;
- llvm::BasicBlock *CleanupExitBB;
- llvm::BasicBlock *CurInvokeDest;
- bool EHOnly;
+ FinallyInfo EnterFinallyBlock(const Stmt *Stmt,
+ llvm::Constant *BeginCatchFn,
+ llvm::Constant *EndCatchFn,
+ llvm::Constant *RethrowFn);
+ void ExitFinallyBlock(FinallyInfo &FinallyInfo);
+
+ /// PushDestructorCleanup - Push a cleanup to call the
+ /// complete-object destructor of an object of the given type at the
+ /// given address. Does nothing if T is not a C++ class type with a
+ /// non-trivial destructor.
+ void PushDestructorCleanup(QualType T, llvm::Value *Addr);
+
+ /// PopCleanupBlock - Will pop the cleanup entry on the stack and
+ /// process all branch fixups.
+ void PopCleanupBlock();
+
+ /// CleanupBlock - RAII object that will create a cleanup block and
+ /// set the insert point to that block. When destructed, it sets the
+ /// insert point to the previous block and pushes a new cleanup
+ /// entry on the stack.
+ class CleanupBlock {
+ CodeGenFunction &CGF;
+ CGBuilderTy::InsertPoint SavedIP;
+ llvm::BasicBlock *NormalCleanupEntryBB;
+ llvm::BasicBlock *NormalCleanupExitBB;
+ llvm::BasicBlock *EHCleanupEntryBB;
public:
- DelayedCleanupBlock(CodeGenFunction &cgf, bool ehonly = false)
- : CGF(cgf), CurBB(CGF.Builder.GetInsertBlock()),
- CleanupEntryBB(CGF.createBasicBlock("cleanup")),
- CleanupExitBB(0),
- CurInvokeDest(CGF.getInvokeDest()),
- EHOnly(ehonly) {
- CGF.Builder.SetInsertPoint(CleanupEntryBB);
- }
+ CleanupBlock(CodeGenFunction &CGF, CleanupKind Kind);
- llvm::BasicBlock *getCleanupExitBlock() {
- if (!CleanupExitBB)
- CleanupExitBB = CGF.createBasicBlock("cleanup.exit");
- return CleanupExitBB;
- }
+ /// If we're currently writing a normal cleanup, tie that off and
+ /// start writing an EH cleanup.
+ void beginEHCleanup();
- ~DelayedCleanupBlock() {
- CGF.PushCleanupBlock(CleanupEntryBB, CleanupExitBB, CurInvokeDest,
- EHOnly);
- // FIXME: This is silly, move this into the builder.
- if (CurBB)
- CGF.Builder.SetInsertPoint(CurBB);
- else
- CGF.Builder.ClearInsertionPoint();
- }
+ ~CleanupBlock();
};
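
    A sketch of the intended RAII usage (DtorFn and Addr are placeholders):
    the constructor redirects the builder into a fresh cleanup block, the
    caller emits the cleanup code, and the destructor restores the saved
    insert point and pushes the finished blocks onto the EH stack.

      {
        CleanupBlock Cleanup(CGF, NormalAndEHCleanup);
        CGF.Builder.CreateCall(DtorFn, Addr);  // emitted into the cleanup block
      } // ~CleanupBlock pushes the entry and restores the insert point
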
- /// \brief Enters a new scope for capturing cleanups, all of which will be
- /// executed once the scope is exited.
- class CleanupScope {
+ /// \brief Enters a new scope for capturing cleanups, all of which
+ /// will be executed once the scope is exited.
+ class RunCleanupsScope {
CodeGenFunction& CGF;
- size_t CleanupStackDepth;
+ EHScopeStack::stable_iterator CleanupStackDepth;
bool OldDidCallStackSave;
bool PerformCleanup;
- CleanupScope(const CleanupScope &); // DO NOT IMPLEMENT
- CleanupScope &operator=(const CleanupScope &); // DO NOT IMPLEMENT
+ RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
+ RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
public:
/// \brief Enter a new cleanup scope.
- explicit CleanupScope(CodeGenFunction &CGF)
+ explicit RunCleanupsScope(CodeGenFunction &CGF)
: CGF(CGF), PerformCleanup(true)
{
- CleanupStackDepth = CGF.CleanupEntries.size();
+ CleanupStackDepth = CGF.EHStack.stable_begin();
OldDidCallStackSave = CGF.DidCallStackSave;
}
/// \brief Exit this cleanup scope, emitting any accumulated
/// cleanups.
- ~CleanupScope() {
+ ~RunCleanupsScope() {
if (PerformCleanup) {
CGF.DidCallStackSave = OldDidCallStackSave;
- CGF.EmitCleanupBlocks(CleanupStackDepth);
+ CGF.PopCleanupBlocks(CleanupStackDepth);
}
}
/// \brief Determine whether this scope requires any cleanups.
bool requiresCleanups() const {
- return CGF.CleanupEntries.size() > CleanupStackDepth;
+ return CGF.EHStack.stable_begin() != CleanupStackDepth;
}
/// \brief Force the emission of cleanups now, instead of waiting
@@ -254,42 +518,39 @@ public:
void ForceCleanup() {
assert(PerformCleanup && "Already forced cleanup");
CGF.DidCallStackSave = OldDidCallStackSave;
- CGF.EmitCleanupBlocks(CleanupStackDepth);
+ CGF.PopCleanupBlocks(CleanupStackDepth);
PerformCleanup = false;
}
};
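
    Illustrative usage: everything pushed onto the EH stack while the scope
    is alive is popped (and emitted) when it dies, mirroring the lifetime of
    a source-level compound statement.

      {
        RunCleanupsScope Scope(CGF);
        // ... emit declarations; each may push cleanups onto CGF.EHStack ...
      } // ~RunCleanupsScope calls PopCleanupBlocks back to the saved depth
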
- /// CXXTemporariesCleanupScope - Enters a new scope for catching live
- /// temporaries, all of which will be popped once the scope is exited.
- class CXXTemporariesCleanupScope {
- CodeGenFunction &CGF;
- size_t NumLiveTemporaries;
-
- // DO NOT IMPLEMENT
- CXXTemporariesCleanupScope(const CXXTemporariesCleanupScope &);
- CXXTemporariesCleanupScope &operator=(const CXXTemporariesCleanupScope &);
-
- public:
- explicit CXXTemporariesCleanupScope(CodeGenFunction &CGF)
- : CGF(CGF), NumLiveTemporaries(CGF.LiveTemporaries.size()) { }
-
- ~CXXTemporariesCleanupScope() {
- while (CGF.LiveTemporaries.size() > NumLiveTemporaries)
- CGF.PopCXXTemporary();
- }
- };
+ /// PopCleanupBlocks - Takes the old cleanup stack size and emits
+ /// the cleanup blocks that have been added.
+ void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
- /// EmitCleanupBlocks - Takes the old cleanup stack size and emits the cleanup
- /// blocks that have been added.
- void EmitCleanupBlocks(size_t OldCleanupStackSize);
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) const {
+ return JumpDest(Target, EHStack.stable_begin());
+ }
- /// EmitBranchThroughCleanup - Emit a branch from the current insert block
- /// through the cleanup handling code (if any) and then on to \arg Dest.
- ///
- /// FIXME: Maybe this should really be in EmitBranch? Don't we always want
- /// this behavior for branches?
- void EmitBranchThroughCleanup(llvm::BasicBlock *Dest);
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(const char *Name = 0) {
+ return JumpDest(createBasicBlock(Name), EHStack.stable_begin());
+ }
+
+ /// EmitBranchThroughCleanup - Emit a branch from the current insert
+ /// block through the normal cleanup handling code (if any) and then
+ /// on to \arg Dest.
+ void EmitBranchThroughCleanup(JumpDest Dest);
+
+ /// EmitBranchThroughEHCleanup - Emit a branch from the current
+ /// insert block through the EH cleanup handling code (if any) and
+ /// then on to \arg Dest.
+ void EmitBranchThroughEHCleanup(JumpDest Dest);
/// BeginConditionalBranch - Should be called before a conditional part of an
/// expression is emitted. For example, before the RHS of the expression below
@@ -326,16 +587,16 @@ private:
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
/// LabelMap - This keeps track of the LLVM basic block for each C label.
- llvm::DenseMap<const LabelStmt*, llvm::BasicBlock*> LabelMap;
+ llvm::DenseMap<const LabelStmt*, JumpDest> LabelMap;
// BreakContinueStack - This keeps track of where break and continue
// statements should jump to.
struct BreakContinue {
- BreakContinue(llvm::BasicBlock *bb, llvm::BasicBlock *cb)
- : BreakBlock(bb), ContinueBlock(cb) {}
+ BreakContinue(JumpDest Break, JumpDest Continue)
+ : BreakBlock(Break), ContinueBlock(Continue) {}
- llvm::BasicBlock *BreakBlock;
- llvm::BasicBlock *ContinueBlock;
+ JumpDest BreakBlock;
+ JumpDest ContinueBlock;
};
llvm::SmallVector<BreakContinue, 8> BreakContinueStack;
@@ -363,44 +624,9 @@ private:
/// calling llvm.stacksave for multiple VLAs in the same scope.
bool DidCallStackSave;
- struct CleanupEntry {
- /// CleanupEntryBlock - The block of code that does the actual cleanup.
- llvm::BasicBlock *CleanupEntryBlock;
-
- /// CleanupExitBlock - The cleanup exit block.
- llvm::BasicBlock *CleanupExitBlock;
-
- /// Blocks - Basic blocks that were emitted in the current cleanup scope.
- std::vector<llvm::BasicBlock *> Blocks;
-
- /// BranchFixups - Branch instructions to basic blocks that haven't been
- /// inserted into the current function yet.
- std::vector<llvm::BranchInst *> BranchFixups;
-
- /// PreviousInvokeDest - The invoke handler from the start of the cleanup
- /// region.
- llvm::BasicBlock *PreviousInvokeDest;
-
- /// EHOnly - Perform this only on the exceptional edge, not the main edge.
- bool EHOnly;
-
- explicit CleanupEntry(llvm::BasicBlock *CleanupEntryBlock,
- llvm::BasicBlock *CleanupExitBlock,
- llvm::BasicBlock *PreviousInvokeDest,
- bool ehonly)
- : CleanupEntryBlock(CleanupEntryBlock),
- CleanupExitBlock(CleanupExitBlock),
- PreviousInvokeDest(PreviousInvokeDest),
- EHOnly(ehonly) {}
- };
-
- /// CleanupEntries - Stack of cleanup entries.
- llvm::SmallVector<CleanupEntry, 8> CleanupEntries;
-
- typedef llvm::DenseMap<llvm::BasicBlock*, size_t> BlockScopeMap;
-
- /// BlockScopes - Map of which "cleanup scope" scope basic blocks have.
- BlockScopeMap BlockScopes;
+ /// A block containing a single 'unreachable' instruction. Created
+ /// lazily by getUnreachableBlock().
+ llvm::BasicBlock *UnreachableBlock;
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
@@ -413,31 +639,6 @@ private:
ImplicitParamDecl *CXXVTTDecl;
llvm::Value *CXXVTTValue;
- /// CXXLiveTemporaryInfo - Holds information about a live C++ temporary.
- struct CXXLiveTemporaryInfo {
- /// Temporary - The live temporary.
- const CXXTemporary *Temporary;
-
- /// ThisPtr - The pointer to the temporary.
- llvm::Value *ThisPtr;
-
- /// DtorBlock - The destructor block.
- llvm::BasicBlock *DtorBlock;
-
- /// CondPtr - If this is a conditional temporary, this is the pointer to the
- /// condition variable that states whether the destructor should be called
- /// or not.
- llvm::Value *CondPtr;
-
- CXXLiveTemporaryInfo(const CXXTemporary *temporary,
- llvm::Value *thisptr, llvm::BasicBlock *dtorblock,
- llvm::Value *condptr)
- : Temporary(temporary), ThisPtr(thisptr), DtorBlock(dtorblock),
- CondPtr(condptr) { }
- };
-
- llvm::SmallVector<CXXLiveTemporaryInfo, 4> LiveTemporaries;
-
/// ConditionalBranchLevel - Contains the nesting level of the current
/// conditional branch. This is used so that we know if a temporary should be
/// destroyed conditionally.
@@ -453,18 +654,32 @@ private:
/// number that holds the value.
unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
+ llvm::BasicBlock *TerminateLandingPad;
llvm::BasicBlock *TerminateHandler;
llvm::BasicBlock *TrapBB;
- int UniqueAggrDestructorCount;
public:
CodeGenFunction(CodeGenModule &cgm);
ASTContext &getContext() const;
CGDebugInfo *getDebugInfo() { return DebugInfo; }
- llvm::BasicBlock *getInvokeDest() { return InvokeDest; }
- void setInvokeDest(llvm::BasicBlock *B) { InvokeDest = B; }
+ /// Returns a pointer to the function's exception object slot, which
+ /// is assigned in every landing pad.
+ llvm::Value *getExceptionSlot();
+
+ llvm::BasicBlock *getUnreachableBlock() {
+ if (!UnreachableBlock) {
+ UnreachableBlock = createBasicBlock("unreachable");
+ new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
+ }
+ return UnreachableBlock;
+ }
+
+ llvm::BasicBlock *getInvokeDest() {
+ if (!EHStack.requiresLandingPad()) return 0;
+ return getInvokeDestImpl();
+ }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
@@ -501,7 +716,8 @@ public:
const llvm::StructType *,
std::vector<HelperInfo> *);
- llvm::Function *GenerateBlockFunction(const BlockExpr *BExpr,
+ llvm::Function *GenerateBlockFunction(GlobalDecl GD,
+ const BlockExpr *BExpr,
CGBlockInfo &Info,
const Decl *OuterFuncDecl,
llvm::DenseMap<const Decl*, llvm::Value*> ldm);
@@ -567,6 +783,15 @@ public:
void EmitDtorEpilogue(const CXXDestructorDecl *Dtor,
CXXDtorType Type);
+ /// ShouldInstrumentFunction - Return true if the current function should be
+ /// instrumented with __cyg_profile_func_* calls.
+ bool ShouldInstrumentFunction();
+
+ /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
+ /// instrumentation function with the current function and the call site, if
+ /// function instrumentation is enabled.
+ void EmitFunctionInstrumentation(const char *Fn);
+
/// EmitFunctionProlog - Emit the target specific LLVM code to load the
/// arguments for the given function. This is also responsible for naming the
/// LLVM function arguments.
@@ -576,7 +801,7 @@ public:
/// EmitFunctionEpilog - Emit the target specific LLVM code to return the
/// given temporary.
- void EmitFunctionEpilog(const CGFunctionInfo &FI, llvm::Value *ReturnValue);
+ void EmitFunctionEpilog(const CGFunctionInfo &FI);
/// EmitStartEHSpec - Emit the start of the exception spec.
void EmitStartEHSpec(const Decl *D);
@@ -584,7 +809,12 @@ public:
/// EmitEndEHSpec - Emit the end of the exception spec.
void EmitEndEHSpec(const Decl *D);
- /// getTerminateHandler - Return a handler that just calls terminate.
+ /// getTerminateLandingPad - Return a landing pad that just calls terminate.
+ llvm::BasicBlock *getTerminateLandingPad();
+
+ /// getTerminateHandler - Return a handler (not a landing pad, just
+ /// a catch handler) that just calls terminate. This is used when
+ /// a terminate scope encloses a try.
llvm::BasicBlock *getTerminateHandler();
const llvm::Type *ConvertTypeForMem(QualType T);
@@ -617,7 +847,7 @@ public:
/// getBasicBlockForLabel - Return the LLVM basicblock that the specified
/// label maps to.
- llvm::BasicBlock *getBasicBlockForLabel(const LabelStmt *S);
+ JumpDest getJumpDestForLabel(const LabelStmt *S);
/// SimplifyForwardingBlocks - If the given basic block is only a branch to
/// another basic block, simplify it. This assumes that no other code could
@@ -688,11 +918,11 @@ public:
/// value needs to be stored into an alloca (for example, to avoid explicit
/// PHI construction), but the type is the IR type, not the type appropriate
/// for storing in memory.
- llvm::Value *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp");
+ llvm::AllocaInst *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp");
/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignment.
- llvm::Value *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
+ llvm::AllocaInst *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
@@ -835,15 +1065,17 @@ public:
llvm::Value *NumElements,
llvm::Value *This);
- llvm::Constant *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
- const ArrayType *Array,
- llvm::Value *This);
+ llvm::Function *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This);
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
bool ForVirtualBase, llvm::Value *This);
+
+ void EmitNewArrayInitializer(const CXXNewExpr *E, llvm::Value *NewPtr,
+ llvm::Value *NumElements);
- void PushCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
- void PopCXXTemporary();
+ void EmitCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
@@ -874,10 +1106,13 @@ public:
/// This function can be called with a null (unreachable) insert point.
void EmitBlockVarDecl(const VarDecl &D);
+ typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
+ llvm::Value *Address);
+
/// EmitLocalBlockVarDecl - Emit a local block variable declaration.
///
/// This function can be called with a null (unreachable) insert point.
- void EmitLocalBlockVarDecl(const VarDecl &D);
+ void EmitLocalBlockVarDecl(const VarDecl &D, SpecialInitFn *SpecialInit = 0);
void EmitStaticBlockVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage);
@@ -938,13 +1173,8 @@ public:
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
llvm::Constant *getUnwindResumeOrRethrowFn();
- struct CXXTryStmtInfo {
- llvm::BasicBlock *SavedLandingPad;
- llvm::BasicBlock *HandlerBlock;
- llvm::BasicBlock *FinallyBlock;
- };
- CXXTryStmtInfo EnterCXXTryStmt(const CXXTryStmt &S);
- void ExitCXXTryStmt(const CXXTryStmt &S, CXXTryStmtInfo Info);
+ void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
+ void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
void EmitCXXTryStmt(const CXXTryStmt &S);
@@ -1050,7 +1280,7 @@ public:
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
LValue EmitConditionalOperatorLValue(const ConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
- LValue EmitNullInitializationLValue(const CXXZeroInitValueExpr *E);
+ LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
@@ -1088,6 +1318,7 @@ public:
LValue EmitObjCSuperExprLValue(const ObjCSuperExpr *E);
LValue EmitStmtExprLValue(const StmtExpr *E);
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
+ LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
//===--------------------------------------------------------------------===//
// Scalar Expression Emission
@@ -1114,6 +1345,11 @@ public:
RValue EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue = ReturnValueSlot());
+ llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
+ llvm::Value * const *ArgBegin,
+ llvm::Value * const *ArgEnd,
+ const llvm::Twine &Name = "");
+
llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
const llvm::Type *Ty);
llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
@@ -1146,6 +1382,14 @@ public:
llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitNeonCall(llvm::Function *F,
+ llvm::SmallVectorImpl<llvm::Value*> &O,
+ const char *name, bool splat = false,
+ unsigned shift = 0, bool rightshift = false);
+ llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
+ llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty,
+ bool negateForRightShift);
+
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -1164,7 +1408,8 @@ public:
/// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
/// expression. Will emit a temporary variable if E is not an LValue.
- RValue EmitReferenceBindingToExpr(const Expr* E, bool IsInitializer = false);
+ RValue EmitReferenceBindingToExpr(const Expr* E,
+ const NamedDecl *InitializedDecl);
//===--------------------------------------------------------------------===//
// Expression Emission
@@ -1260,7 +1505,7 @@ public:
/// GenerateCXXGlobalDtorFunc - Generates code for destroying global
/// variables.
void GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
- const std::vector<std::pair<llvm::Constant*,
+ const std::vector<std::pair<llvm::WeakVH,
llvm::Constant*> > &DtorsAndObjects);
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D);
@@ -1308,7 +1553,6 @@ public:
RValue EmitDelegateCallArg(const VarDecl *Param);
private:
-
void EmitReturnOfRValue(RValue RV, QualType Ty);
/// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
@@ -1331,13 +1575,6 @@ private:
const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr, std::string &ConstraintStr);
- /// EmitCleanupBlock - emits a single cleanup block.
- void EmitCleanupBlock();
-
- /// AddBranchFixup - adds a branch instruction to the list of fixups for the
- /// current cleanup scope.
- void AddBranchFixup(llvm::BranchInst *BI);
-
/// EmitCallArgs - Emit call arguments for a function.
/// The CallArgTypeInfo parameter is used for iterating over the known
/// argument types of the function being called.
@@ -1381,6 +1618,8 @@ private:
const TargetCodeGenInfo &getTargetHooks() const {
return CGM.getTargetCodeGenInfo();
}
+
+ void EmitDeclMetadata();
};
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
index 103024c..bf606a6 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
@@ -18,11 +18,12 @@
#include "CGObjCRuntime.h"
#include "Mangle.h"
#include "TargetInfo.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/Diagnostic.h"
@@ -86,8 +87,10 @@ void CodeGenModule::createObjCRuntime() {
}
void CodeGenModule::createCXXABI() {
- // For now, just create an Itanium ABI.
- ABI = CreateItaniumCXXABI(*this);
+ if (Context.Target.getCXXABI() == "microsoft")
+ ABI = CreateMicrosoftCXXABI(*this);
+ else
+ ABI = CreateItaniumCXXABI(*this);
}
void CodeGenModule::Release() {
@@ -101,6 +104,9 @@ void CodeGenModule::Release() {
EmitCtorList(GlobalDtors, "llvm.global_dtors");
EmitAnnotations();
EmitLLVMUsed();
+
+ if (getCodeGenOpts().EmitDeclMetadata)
+ EmitDeclMetadata();
}
bool CodeGenModule::isTargetDarwin() const {
@@ -149,7 +155,38 @@ CodeGenModule::getDeclVisibilityMode(const Decl *D) const {
return LangOptions::Protected;
}
}
+
+ if (getLangOptions().CPlusPlus) {
+ // Entities subject to an explicit instantiation declaration get default
+ // visibility.
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ if (Function->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return LangOptions::Default;
+ } else if (const ClassTemplateSpecializationDecl *ClassSpec
+ = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ if (ClassSpec->getSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return LangOptions::Default;
+ } else if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
+ if (Record->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return LangOptions::Default;
+ } else if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ if (Var->isStaticDataMember() &&
+ (Var->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration))
+ return LangOptions::Default;
+ }
+ // If -fvisibility-inlines-hidden was provided, then inline C++ member
+ // functions get "hidden" visibility by default.
+ if (getLangOptions().InlineVisibilityHidden)
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ if (Method->isInlined())
+ return LangOptions::Hidden;
+ }
+
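
    A behavioral sketch of the -fvisibility-inlines-hidden case added above
    (illustrative C++ input, not part of this patch):

      struct S {
        void f() {}  // implicitly inline member: gets "hidden" visibility
        void g();    // defined out of line elsewhere: default rules apply
      };
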
// This decl should have the same visibility as its parent.
if (const DeclContext *DC = D->getDeclContext())
return getDeclVisibilityMode(cast<Decl>(DC));
@@ -176,32 +213,44 @@ void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
}
}
-void CodeGenModule::getMangledName(MangleBuffer &Buffer, GlobalDecl GD) {
+llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
- if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
- return getMangledCXXCtorName(Buffer, D, GD.getCtorType());
- if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
- return getMangledCXXDtorName(Buffer, D, GD.getDtorType());
-
- return getMangledName(Buffer, ND);
-}
+ llvm::StringRef &Str = MangledDeclNames[GD.getCanonicalDecl()];
+ if (!Str.empty())
+ return Str;
-/// \brief Retrieves the mangled name for the given declaration.
-///
-/// If the given declaration requires a mangled name, returns an
-/// const char* containing the mangled name. Otherwise, returns
-/// the unmangled name.
-///
-void CodeGenModule::getMangledName(MangleBuffer &Buffer,
- const NamedDecl *ND) {
if (!getMangleContext().shouldMangleDeclName(ND)) {
- assert(ND->getIdentifier() && "Attempt to mangle unnamed decl.");
- Buffer.setString(ND->getNameAsCString());
- return;
+ IdentifierInfo *II = ND->getIdentifier();
+ assert(II && "Attempt to mangle unnamed decl.");
+
+ Str = II->getName();
+ return Str;
}
+
+ llvm::SmallString<256> Buffer;
+ if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
+ getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Buffer);
+ else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
+ getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Buffer);
+ else if (const BlockDecl *BD = dyn_cast<BlockDecl>(ND))
+ getMangleContext().mangleBlock(GD, BD, Buffer);
+ else
+ getMangleContext().mangleName(ND, Buffer);
+
+ // Allocate space for the mangled name.
+ size_t Length = Buffer.size();
+ char *Name = MangledNamesAllocator.Allocate<char>(Length);
+ std::copy(Buffer.begin(), Buffer.end(), Name);
+
+ Str = llvm::StringRef(Name, Length);
+
+ return Str;
+}
- getMangleContext().mangleName(ND, Buffer.getBuffer());
+void CodeGenModule::getMangledName(GlobalDecl GD, MangleBuffer &Buffer,
+ const BlockDecl *BD) {
+ getMangleContext().mangleBlock(GD, BD, Buffer.getBuffer());
}
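
    Since the returned bytes live in MangledNamesAllocator for the lifetime
    of the module, callers can hold the StringRef without copying; a sketch
    of the lookup pattern used elsewhere in this patch:

      llvm::StringRef Name = CGM.getMangledName(GD);  // cached after first call
      llvm::GlobalValue *GV = CGM.GetGlobalValue(Name);
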
llvm::GlobalValue *CodeGenModule::GetGlobalValue(llvm::StringRef Name) {
@@ -333,35 +382,39 @@ llvm::GlobalValue::LinkageTypes
CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
GVALinkage Linkage = GetLinkageForFunction(getContext(), D, Features);
- if (Linkage == GVA_Internal) {
+ if (Linkage == GVA_Internal)
return llvm::Function::InternalLinkage;
- } else if (D->hasAttr<DLLExportAttr>()) {
+
+ if (D->hasAttr<DLLExportAttr>())
return llvm::Function::DLLExportLinkage;
- } else if (D->hasAttr<WeakAttr>()) {
+
+ if (D->hasAttr<WeakAttr>())
return llvm::Function::WeakAnyLinkage;
- } else if (Linkage == GVA_C99Inline) {
- // In C99 mode, 'inline' functions are guaranteed to have a strong
- // definition somewhere else, so we can use available_externally linkage.
+
+ // In C99 mode, 'inline' functions are guaranteed to have a strong
+ // definition somewhere else, so we can use available_externally linkage.
+ if (Linkage == GVA_C99Inline)
return llvm::Function::AvailableExternallyLinkage;
- } else if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation) {
- // In C++, the compiler has to emit a definition in every translation unit
- // that references the function. We should use linkonce_odr because
- // a) if all references in this translation unit are optimized away, we
- // don't need to codegen it. b) if the function persists, it needs to be
- // merged with other definitions. c) C++ has the ODR, so we know the
- // definition is dependable.
+
+ // In C++, the compiler has to emit a definition in every translation unit
+ // that references the function. We should use linkonce_odr because
+ // a) if all references in this translation unit are optimized away, we
+ // don't need to codegen it. b) if the function persists, it needs to be
+ // merged with other definitions. c) C++ has the ODR, so we know the
+ // definition is dependable.
+ if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
return llvm::Function::LinkOnceODRLinkage;
- } else if (Linkage == GVA_ExplicitTemplateInstantiation) {
- // An explicit instantiation of a template has weak linkage, since
- // explicit instantiations can occur in multiple translation units
- // and must all be equivalent. However, we are not allowed to
- // throw away these explicit instantiations.
+
+ // An explicit instantiation of a template has weak linkage, since
+ // explicit instantiations can occur in multiple translation units
+ // and must all be equivalent. However, we are not allowed to
+ // throw away these explicit instantiations.
+ if (Linkage == GVA_ExplicitTemplateInstantiation)
return llvm::Function::WeakODRLinkage;
- } else {
- assert(Linkage == GVA_StrongExternal);
- // Otherwise, we have strong external linkage.
- return llvm::Function::ExternalLinkage;
- }
+
+ // Otherwise, we have strong external linkage.
+ assert(Linkage == GVA_StrongExternal);
+ return llvm::Function::ExternalLinkage;
}
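
    A concrete example of the GVA_C99Inline case handled above (illustrative
    C99 input, not part of this patch): an 'inline' definition without
    'extern' promises that some other translation unit provides the external
    definition, so this body may be emitted available_externally and
    discarded after inlining.

      // foo.c, compiled as C99
      inline int twice(int x) { return x + x; }  // GVA_C99Inline
      // another TU must supply the external definition of twice()
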
@@ -521,8 +574,7 @@ void CodeGenModule::EmitDeferred() {
// ignore these cases.
//
// TODO: That said, looking this up multiple times is very wasteful.
- MangleBuffer Name;
- getMangledName(Name, D);
+ llvm::StringRef Name = getMangledName(D);
llvm::GlobalValue *CGRef = GetGlobalValue(Name);
assert(CGRef && "Deferred decl wasn't referenced?");
@@ -586,6 +638,47 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
return llvm::ConstantStruct::get(VMContext, Fields, 4, false);
}
+static CodeGenModule::GVALinkage
+GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) {
+ // If this is a static data member, compute the kind of template
+ // specialization. Otherwise, this variable is not part of a
+ // template.
+ TemplateSpecializationKind TSK = TSK_Undeclared;
+ if (VD->isStaticDataMember())
+ TSK = VD->getTemplateSpecializationKind();
+
+ Linkage L = VD->getLinkage();
+ if (L == ExternalLinkage && Context.getLangOptions().CPlusPlus &&
+ VD->getType()->getLinkage() == UniqueExternalLinkage)
+ L = UniqueExternalLinkage;
+
+ switch (L) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return CodeGenModule::GVA_Internal;
+
+ case ExternalLinkage:
+ switch (TSK) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ return CodeGenModule::GVA_StrongExternal;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ llvm_unreachable("Variable should not be instantiated");
+ // Fall through to treat this like any other instantiation.
+
+ case TSK_ExplicitInstantiationDefinition:
+ return CodeGenModule::GVA_ExplicitTemplateInstantiation;
+
+ case TSK_ImplicitInstantiation:
+ return CodeGenModule::GVA_TemplateInstantiation;
+ }
+ }
+
+ return CodeGenModule::GVA_StrongExternal;
+}
+
bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
// Never defer when EmitAllDecls is specified or the decl has
// attribute used.
@@ -634,24 +727,10 @@ bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
}
}
- // Static data may be deferred, but out-of-line static data members
- // cannot be.
- Linkage L = VD->getLinkage();
- if (L == ExternalLinkage && getContext().getLangOptions().CPlusPlus &&
- VD->getType()->getLinkage() == UniqueExternalLinkage)
- L = UniqueExternalLinkage;
-
- switch (L) {
- case NoLinkage:
- case InternalLinkage:
- case UniqueExternalLinkage:
- // Initializer has side effects?
- if (VD->getInit() && VD->getInit()->HasSideEffects(Context))
- return false;
- return !(VD->isStaticDataMember() && VD->isOutOfLine());
-
- case ExternalLinkage:
- break;
+ GVALinkage L = GetLinkageForVariable(getContext(), VD);
+ if (L == GVA_Internal || L == GVA_TemplateInstantiation) {
+ if (!(VD->getInit() && VD->getInit()->HasSideEffects(Context)))
+ return true;
}
return false;
@@ -716,8 +795,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// If the value has already been used, add it directly to the
// DeferredDeclsToEmit list.
- MangleBuffer MangledName;
- getMangledName(MangledName, GD);
+ llvm::StringRef MangledName = getMangledName(GD);
if (GetGlobalValue(MangledName))
DeferredDeclsToEmit.push_back(GD);
else {
@@ -735,18 +813,27 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
Context.getSourceManager(),
"Generating code for declaration");
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
- if (Method->isVirtual())
- getVTables().EmitThunks(GD);
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ // At -O0, don't generate IR for functions with available_externally
+ // linkage.
+ if (CodeGenOpts.OptimizationLevel == 0 &&
+ getFunctionLinkage(Function)
+ == llvm::Function::AvailableExternallyLinkage)
+ return;
+
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isVirtual())
+ getVTables().EmitThunks(GD);
- if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
- return EmitCXXConstructor(CD, GD.getCtorType());
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Method))
+ return EmitCXXConstructor(CD, GD.getCtorType());
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
- return EmitCXXDestructor(DD, GD.getDtorType());
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(Method))
+ return EmitCXXDestructor(DD, GD.getDtorType());
+ }
- if (isa<FunctionDecl>(D))
return EmitGlobalFunctionDefinition(GD);
+ }
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return EmitGlobalVarDefinition(VD);
@@ -797,6 +884,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
std::vector<const llvm::Type*>(), false);
IsIncompleteFunction = true;
}
+
llvm::Function *F = llvm::Function::Create(FTy,
llvm::Function::ExternalLinkage,
MangledName, &getModule());
@@ -857,8 +945,8 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
// If there was no specific requested type, just convert it now.
if (!Ty)
Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
- MangleBuffer MangledName;
- getMangledName(MangledName, GD);
+
+ llvm::StringRef MangledName = getMangledName(GD);
return GetOrCreateLLVMFunction(MangledName, Ty, GD);
}
@@ -961,8 +1049,7 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
const llvm::PointerType *PTy =
llvm::PointerType::get(Ty, ASTTy.getAddressSpace());
- MangleBuffer MangledName;
- getMangledName(MangledName, D);
+ llvm::StringRef MangledName = getMangledName(D);
return GetOrCreateLLVMGlobal(MangledName, PTy, D);
}
@@ -981,8 +1068,7 @@ void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
// If we have not seen a reference to this variable yet, place it
// into the deferred declarations table to be emitted if needed
// later.
- MangleBuffer MangledName;
- getMangledName(MangledName, D);
+ llvm::StringRef MangledName = getMangledName(D);
if (!GetGlobalValue(MangledName)) {
DeferredDecls[MangledName] = D;
return;
@@ -1008,7 +1094,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
// If this class has a key function, use that to determine the linkage of
// the vtable.
const FunctionDecl *Def = 0;
- if (KeyFunction->getBody(Def))
+ if (KeyFunction->hasBody(Def))
KeyFunction = cast<CXXMethodDecl>(Def);
switch (KeyFunction->getTemplateSpecializationKind()) {
@@ -1049,47 +1135,6 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
return llvm::GlobalVariable::WeakODRLinkage;
}
-static CodeGenModule::GVALinkage
-GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) {
- // If this is a static data member, compute the kind of template
- // specialization. Otherwise, this variable is not part of a
- // template.
- TemplateSpecializationKind TSK = TSK_Undeclared;
- if (VD->isStaticDataMember())
- TSK = VD->getTemplateSpecializationKind();
-
- Linkage L = VD->getLinkage();
- if (L == ExternalLinkage && Context.getLangOptions().CPlusPlus &&
- VD->getType()->getLinkage() == UniqueExternalLinkage)
- L = UniqueExternalLinkage;
-
- switch (L) {
- case NoLinkage:
- case InternalLinkage:
- case UniqueExternalLinkage:
- return CodeGenModule::GVA_Internal;
-
- case ExternalLinkage:
- switch (TSK) {
- case TSK_Undeclared:
- case TSK_ExplicitSpecialization:
- return CodeGenModule::GVA_StrongExternal;
-
- case TSK_ExplicitInstantiationDeclaration:
- llvm_unreachable("Variable should not be instantiated");
- // Fall through to treat this like any other instantiation.
-
- case TSK_ExplicitInstantiationDefinition:
- return CodeGenModule::GVA_ExplicitTemplateInstantiation;
-
- case TSK_ImplicitInstantiation:
- return CodeGenModule::GVA_TemplateInstantiation;
- }
- }
-
- return CodeGenModule::GVA_StrongExternal;
-}
-
CharUnits CodeGenModule::GetTargetTypeStoreSize(const llvm::Type *Ty) const {
return CharUnits::fromQuantity(
TheTargetData.getTypeStoreSizeInBits(Ty) / Context.getCharWidth());
@@ -1367,8 +1412,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
const AliasAttr *AA = D->getAttr<AliasAttr>();
assert(AA && "Not an alias?");
- MangleBuffer MangledName;
- getMangledName(MangledName, GD);
+ llvm::StringRef MangledName = getMangledName(GD);
// If there is a definition in the module, then it wins over the alias.
// This is dubious, but allow it to be safe. Just ignore the alias.
@@ -1409,7 +1453,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
Entry->getType()));
Entry->eraseFromParent();
} else {
- GA->setName(MangledName.getString());
+ GA->setName(MangledName);
}
// Set attributes which are particular to an alias; this is a
@@ -1418,7 +1462,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
if (D->hasAttr<DLLExportAttr>()) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// The dllexport attribute is ignored for undefined symbols.
- if (FD->getBody())
+ if (FD->hasBody())
GA->setLinkage(llvm::Function::DLLExportLinkage);
} else {
GA->setLinkage(llvm::Function::DLLExportLinkage);
@@ -2004,3 +2048,73 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
assert(isa<TypeDecl>(D) && "Unsupported decl kind");
}
}
+
+/// Turns the given pointer into a constant.
+static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
+ const void *Ptr) {
+ uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
+ const llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
+ return llvm::ConstantInt::get(i64, PtrInt);
+}
+
+static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
+ llvm::NamedMDNode *&GlobalMetadata,
+ GlobalDecl D,
+ llvm::GlobalValue *Addr) {
+ if (!GlobalMetadata)
+ GlobalMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");
+
+ // TODO: should we report variant information for ctors/dtors?
+ llvm::Value *Ops[] = {
+ Addr,
+ GetPointerConstant(CGM.getLLVMContext(), D.getDecl())
+ };
+ GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops, 2));
+}
+
+/// Emits metadata nodes associating all the global values in the
+/// current module with the Decls they came from. This is useful for
+/// projects using IR gen as a subroutine.
+///
+/// Since there's currently no way to associate an MDNode directly
+/// with an llvm::GlobalValue, we create a global named metadata
+/// with the name 'clang.global.decl.ptrs'.
+void CodeGenModule::EmitDeclMetadata() {
+ llvm::NamedMDNode *GlobalMetadata = 0;
+
+ // Walk the mangled-name map (MangledDeclNames) and emit one node per global.
+ for (llvm::DenseMap<GlobalDecl,llvm::StringRef>::iterator
+ I = MangledDeclNames.begin(), E = MangledDeclNames.end();
+ I != E; ++I) {
+ llvm::GlobalValue *Addr = getModule().getNamedValue(I->second);
+ EmitGlobalDeclMetadata(*this, GlobalMetadata, I->first, Addr);
+ }
+}
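
    A minimal sketch, assuming the LLVM 2.7-era C++ API used in this tree,
    of how a client embedding IR gen might walk the map emitted above; M is
    the llvm::Module produced by IR gen, and the names are illustrative.

      if (llvm::NamedMDNode *MD =
            M.getNamedMetadata("clang.global.decl.ptrs")) {
        for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i) {
          llvm::MDNode *Pair = MD->getOperand(i);
          llvm::GlobalValue *GV =
            cast<llvm::GlobalValue>(Pair->getOperand(0));
          uintptr_t DeclAddr = (uintptr_t)
            cast<llvm::ConstantInt>(Pair->getOperand(1))->getZExtValue();
          // DeclAddr is the in-process clang Decl address
          // (see GetPointerConstant above).
          (void) GV; (void) DeclAddr;
        }
      }
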
+
+/// Emits metadata nodes for all the local variables in the current
+/// function.
+void CodeGenFunction::EmitDeclMetadata() {
+ if (LocalDeclMap.empty()) return;
+
+ llvm::LLVMContext &Context = getLLVMContext();
+
+ // Find the unique metadata ID for this name.
+ unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");
+
+ llvm::NamedMDNode *GlobalMetadata = 0;
+
+ for (llvm::DenseMap<const Decl*, llvm::Value*>::iterator
+ I = LocalDeclMap.begin(), E = LocalDeclMap.end(); I != E; ++I) {
+ const Decl *D = I->first;
+ llvm::Value *Addr = I->second;
+
+ if (llvm::AllocaInst *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
+ llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
+ Alloca->setMetadata(DeclPtrKind, llvm::MDNode::get(Context, &DAddr, 1));
+ } else if (llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
+ GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
+ EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
index 319744c4..27f15fc 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
@@ -75,6 +75,25 @@ namespace CodeGen {
class CGObjCRuntime;
class MangleBuffer;
+ struct OrderGlobalInits {
+ unsigned int priority;
+ unsigned int lex_order;
+ OrderGlobalInits(unsigned int p, unsigned int l)
+ : priority(p), lex_order(l) {}
+
+ bool operator==(const OrderGlobalInits &RHS) const {
+ return priority == RHS.priority &&
+ lex_order == RHS.lex_order;
+ }
+
+ bool operator<(const OrderGlobalInits &RHS) const {
+ if (priority < RHS.priority)
+ return true;
+
+ return priority == RHS.priority && lex_order < RHS.lex_order;
+ }
+ };
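
    The comparator yields the ordering one would expect from init_priority:
    lower priority values run first, with lexical order breaking ties. For
    instance:

      OrderGlobalInits A(101, 0), B(101, 1), C(65535, 0);
      assert(A < B && B < C && A == A);
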
+
/// CodeGenModule - This class organizes the cross-function state that is used
/// while generating LLVM code.
class CodeGenModule : public BlockModule {
@@ -130,6 +149,10 @@ class CodeGenModule : public BlockModule {
/// priorities to be emitted when the translation unit is complete.
CtorList GlobalDtors;
+ /// MangledDeclNames - A map of canonical GlobalDecls to their mangled names.
+ llvm::DenseMap<GlobalDecl, llvm::StringRef> MangledDeclNames;
+ llvm::BumpPtrAllocator MangledNamesAllocator;
+
std::vector<llvm::Constant*> Annotations;
llvm::StringMap<llvm::Constant*> CFConstantStringMap;
@@ -139,10 +162,16 @@ class CodeGenModule : public BlockModule {
/// CXXGlobalInits - Global variables with initializers that need to run
/// before main.
std::vector<llvm::Constant*> CXXGlobalInits;
+
+ /// PrioritizedCXXGlobalInits - Global variables with initializers whose
+ /// order of initialization is set by the init_priority attribute.
+ llvm::SmallVector<std::pair<OrderGlobalInits, llvm::Function*>, 8>
+ PrioritizedCXXGlobalInits;
/// CXXGlobalDtors - Global destructor functions and arguments that need to
/// run on termination.
- std::vector<std::pair<llvm::Constant*,llvm::Constant*> > CXXGlobalDtors;
+ std::vector<std::pair<llvm::WeakVH,llvm::Constant*> > CXXGlobalDtors;
/// CFConstantStringClassRef - Cached reference to the class for constant
/// strings. This value has type int * but is actually an Obj-C class pointer.
@@ -315,6 +344,10 @@ public:
llvm::GlobalValue *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type);
+ // GetCXXMemberFunctionPointerValue - Given a method declaration, return the
+ // integer used in a member function pointer to refer to that value.
+ llvm::Constant *GetCXXMemberFunctionPointerValue(const CXXMethodDecl *MD);
+
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
@@ -346,7 +379,9 @@ public:
/// AddCXXDtorEntry - Add a destructor and object to add to the C++ global
/// destructor function.
- void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object);
+ void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object) {
+ CXXGlobalDtors.push_back(std::make_pair(DtorFn, Object));
+ }
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
@@ -409,9 +444,13 @@ public:
/// which only apply to a function definition.
void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
- /// ReturnTypeUsesSret - Return true iff the given type uses 'sret' when used
+ /// ReturnTypeUsesSRet - Return true iff the given type uses 'sret' when used
/// as a return type.
- bool ReturnTypeUsesSret(const CGFunctionInfo &FI);
+ bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
+
+ /// ReturnTypeUsesFPRet - Return true iff the given type uses 'fpret' when used
+ /// as a return type.
+ bool ReturnTypeUsesFPRet(QualType ResultType);
/// ConstructAttributeList - Get the LLVM attributes and calling convention to
/// use for a particular function type.
@@ -427,15 +466,8 @@ public:
AttributeListType &PAL,
unsigned &CallingConv);
- void getMangledName(MangleBuffer &Buffer, GlobalDecl D);
- void getMangledName(MangleBuffer &Buffer, const NamedDecl *ND);
- void getMangledName(MangleBuffer &Buffer, const BlockDecl *BD);
- void getMangledCXXCtorName(MangleBuffer &Buffer,
- const CXXConstructorDecl *D,
- CXXCtorType Type);
- void getMangledCXXDtorName(MangleBuffer &Buffer,
- const CXXDestructorDecl *D,
- CXXDtorType Type);
+ llvm::StringRef getMangledName(GlobalDecl GD);
+ void getMangledName(GlobalDecl GD, MangleBuffer &Buffer, const BlockDecl *BD);
void EmitTentativeDefinition(const VarDecl *D);
@@ -566,6 +598,8 @@ private:
/// references to global which may otherwise be optimized out.
void EmitLLVMUsed(void);
+ void EmitDeclMetadata();
+
/// MayDeferGeneration - Determine if the given decl can be emitted
/// lazily; this is only relevant for definitions. The given decl
/// must be either a function or var decl.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
index a46dc72..d469b90 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -42,11 +42,13 @@ CodeGenTypes::~CodeGenTypes() {
delete &*I++;
}
-/// ConvertType - Convert the specified type to its LLVM form.
-const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
- llvm::PATypeHolder Result = ConvertTypeRecursive(T);
-
- // Any pointers that were converted defered evaluation of their pointee type,
+/// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
+/// pointers that are referenced but have not been converted yet. This is used
+/// to handle cyclic structures properly.
+void CodeGenTypes::HandleLateResolvedPointers() {
+ assert(!PointersToResolve.empty() && "No pointers to resolve!");
+
+ // Any pointers that were converted deferred evaluation of their pointee type,
// creating an opaque type instead. This is in order to avoid problems with
// circular types. Loop through all these deferred pointees, if any, and
// resolve them now.
@@ -59,7 +61,21 @@ const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
P.second->refineAbstractTypeTo(NT);
}
+}
+
+/// ConvertType - Convert the specified type to its LLVM form.
+const llvm::Type *CodeGenTypes::ConvertType(QualType T, bool IsRecursive) {
+ const llvm::Type *Result = ConvertTypeRecursive(T);
+
+ // If this is a top-level call to ConvertType and sub-conversions caused
+ // pointers to get lazily built as opaque types, resolve the pointers, which
+ // might cause Result to be merged away.
+ if (!IsRecursive && !PointersToResolve.empty()) {
+ llvm::PATypeHolder ResultHandle = Result;
+ HandleLateResolvedPointers();
+ Result = ResultHandle;
+ }
return Result;
}
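The deferred-pointee machinery exists for self-referential types; a minimal example of the cycle it breaks:

    // Converting Node needs Node* before Node itself is finished, so the
    // pointee is first built as an opaque type and later refined by
    // HandleLateResolvedPointers().
    struct Node {
      struct Node *next;
      int value;
    };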
@@ -80,21 +96,12 @@ const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
return ResultType;
}
-const llvm::Type *CodeGenTypes::ConvertTypeForMemRecursive(QualType T) {
- const llvm::Type *ResultType = ConvertTypeRecursive(T);
- if (ResultType->isIntegerTy(1))
- return llvm::IntegerType::get(getLLVMContext(),
- (unsigned)Context.getTypeSize(T));
- // FIXME: Should assert that the llvm type and AST type has the same size.
- return ResultType;
-}
-
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
-const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
- const llvm::Type *R = ConvertType(T);
+const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool IsRecursive){
+ const llvm::Type *R = ConvertType(T, IsRecursive);
// If this is a non-bool type, don't map it.
if (!R->isIntegerTy(1))
@@ -108,7 +115,7 @@ const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
// Code to verify a given function type is complete, i.e. the return type
// and all of the argument types are complete.
-static const TagType *VerifyFuncTypeComplete(const Type* T) {
+const TagType *CodeGenTypes::VerifyFuncTypeComplete(const Type* T) {
const FunctionType *FT = cast<FunctionType>(T);
if (const TagType* TT = FT->getResultType()->getAs<TagType>())
if (!TT->getDecl()->isDefinition())
@@ -201,7 +208,7 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
case BuiltinType::ObjCSel:
// LLVM void type can only be used as the result of a function call. Just
// map to the same as char.
- return llvm::IntegerType::get(getLLVMContext(), 8);
+ return llvm::Type::getInt8Ty(getLLVMContext());
case BuiltinType::Bool:
// Note that we always return bool as i1 for use as a scalar type.
@@ -233,7 +240,7 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
case BuiltinType::NullPtr: {
// Model std::nullptr_t as i8*
- const llvm::Type *Ty = llvm::IntegerType::get(getLLVMContext(), 8);
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
return llvm::PointerType::getUnqual(Ty);
}
@@ -284,7 +291,8 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
assert(A.getIndexTypeCVRQualifiers() == 0 &&
"FIXME: We only handle trivial array types so far!");
// int X[] -> [0 x int]
- return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()), 0);
+ return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()),
+ 0);
}
case Type::ConstantArray: {
const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
@@ -299,8 +307,12 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
}
case Type::FunctionNoProto:
case Type::FunctionProto: {
- // First, check whether we can build the full function type.
- if (const TagType* TT = VerifyFuncTypeComplete(&Ty)) {
+ // First, check whether we can build the full function type. If the
+ // function type depends on an incomplete type (e.g. a struct or enum), we
+ // cannot lower the function type. Instead, turn it into an Opaque pointer
+ // and have UpdateCompletedType revisit the function type when/if the opaque
+ // argument type is defined.
+ if (const TagType *TT = VerifyFuncTypeComplete(&Ty)) {
// This function's type depends on an incomplete tag type; make sure
// we have an opaque type corresponding to the tag type.
ConvertTagDeclType(TT->getDecl());
@@ -309,17 +321,25 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
FunctionTypes.insert(std::make_pair(&Ty, ResultType));
return ResultType;
}
+
// The function type can be built; call the appropriate routines to
// build it.
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty))
- return GetFunctionType(getFunctionInfo(
- CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT,0))),
- FPT->isVariadic());
-
- const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
- return GetFunctionType(getFunctionInfo(
- CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT,0))),
- true);
+ const CGFunctionInfo *FI;
+ bool isVariadic;
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty)) {
+ FI = &getFunctionInfo(
+ CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)),
+ true /*Recursive*/);
+ isVariadic = FPT->isVariadic();
+ } else {
+ const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
+ FI = &getFunctionInfo(
+ CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)),
+ true /*Recursive*/);
+ isVariadic = true;
+ }
+
+ return GetFunctionType(*FI, isVariadic, true);
}
case Type::ObjCObject:
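A concrete instance of the incomplete-type case handled above, assuming only standard C semantics:

    struct S;            // incomplete tag type
    struct S getS(void); // function type cannot be lowered yet; it becomes an
                         // opaque type until S is defined, then
                         // UpdateCompletedType revisits it.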
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
index fc28c3a..c7f48e6 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
@@ -94,6 +94,12 @@ private:
/// is available only for ConvertType(). ConvertType() is the preferred
/// interface to convert type T into a llvm::Type.
const llvm::Type *ConvertNewType(QualType T);
+
+ /// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
+ /// pointers that are referenced but have not been converted yet. This is
+ /// used to handle cyclic structures properly.
+ void HandleLateResolvedPointers();
+
public:
CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD,
const ABIInfo &Info);
@@ -106,22 +112,29 @@ public:
llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
/// ConvertType - Convert type T into a llvm::Type.
- const llvm::Type *ConvertType(QualType T);
+ const llvm::Type *ConvertType(QualType T, bool IsRecursive = false);
const llvm::Type *ConvertTypeRecursive(QualType T);
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
- const llvm::Type *ConvertTypeForMem(QualType T);
- const llvm::Type *ConvertTypeForMemRecursive(QualType T);
+ const llvm::Type *ConvertTypeForMem(QualType T, bool IsRecursive = false);
+ const llvm::Type *ConvertTypeForMemRecursive(QualType T) {
+ return ConvertTypeForMem(T, true);
+ }
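The _Bool case the surrounding comment describes, as a short C sketch (LLVM types in comments; the in-memory width is target-dependent):

    _Bool b;       // scalar representation: i1
    _Bool arr[4];  // memory representation: typically i8 per element, [4 x i8]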
/// GetFunctionType - Get the LLVM function type for \arg Info.
const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
- bool IsVariadic);
+ bool IsVariadic,
+ bool IsRecursive = false);
const llvm::FunctionType *GetFunctionType(GlobalDecl GD);
+ /// VerifyFuncTypeComplete - Utility to check whether a function type can
+ /// be converted to an LLVM type (i.e. doesn't depend on an incomplete tag
+ /// type).
+ static const TagType *VerifyFuncTypeComplete(const Type* T);
/// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
/// given a CXXMethodDecl. If the method to has an incomplete return type,
@@ -150,8 +163,11 @@ public:
return getFunctionInfo(Ty->getResultType(), Args,
Ty->getExtInfo());
}
- const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty);
- const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty);
+
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty,
+ bool IsRecursive = false);
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty,
+ bool IsRecursive = false);
// getFunctionInfo - Get the function info for a member function.
const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD,
@@ -172,7 +188,8 @@ public:
/// \param ArgTys - must all actually be canonical as params
const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
- const FunctionType::ExtInfo &Info);
+ const FunctionType::ExtInfo &Info,
+ bool IsRecursive = false);
/// \brief Compute a new LLVM record layout object for the given record.
CGRecordLayout *ComputeRecordLayout(const RecordDecl *D);
@@ -185,7 +202,8 @@ public: // These are internal details of CGT that shouldn't be used externally.
/// GetExpandedTypes - Expand the type \arg Ty into the LLVM
/// argument types it would be passed as on the provided vector \arg
/// ArgTys. See ABIArgInfo::Expand.
- void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys);
+ void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys,
+ bool IsRecursive);
/// ContainsPointerToDataMember - Return whether the given type contains a
/// pointer to a data member.
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h b/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
index b8a98d7..26dea40 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h
@@ -36,7 +36,7 @@ class GlobalDecl {
Value.setPointer(D);
}
-
+
public:
GlobalDecl() {}
@@ -50,6 +50,14 @@ public:
GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type)
: Value(D, Type) {}
+ GlobalDecl getCanonicalDecl() const {
+ GlobalDecl CanonGD;
+ CanonGD.Value.setPointer(Value.getPointer()->getCanonicalDecl());
+ CanonGD.Value.setInt(Value.getInt());
+
+ return CanonGD;
+ }
+
const Decl *getDecl() const { return Value.getPointer(); }
CXXCtorType getCtorType() const {
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Makefile b/contrib/llvm/tools/clang/lib/CodeGen/Makefile
index 3cea6bb..4b93524 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/Makefile
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Makefile
@@ -12,14 +12,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangCodeGen
BUILD_ARCHIVE = 1
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-ifdef CLANG_VENDOR
-CPP.Flags += -DCLANG_VENDOR='"$(CLANG_VENDOR) "'
-endif
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp
index 6c2a648..30ee541 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp
@@ -40,7 +40,7 @@ MiscNameMangler::MiscNameMangler(MangleContext &C,
llvm::SmallVectorImpl<char> &Res)
: Context(C), Out(Res) { }
-void MiscNameMangler::mangleBlock(const BlockDecl *BD) {
+void MiscNameMangler::mangleBlock(GlobalDecl GD, const BlockDecl *BD) {
// Mangle the context of the block.
// FIXME: We currently mimic GCC's mangling scheme, which leaves much to be
// desired. Come up with a better mangling scheme.
@@ -55,6 +55,16 @@ void MiscNameMangler::mangleBlock(const BlockDecl *BD) {
const NamedDecl *ND = cast<NamedDecl>(DC);
if (IdentifierInfo *II = ND->getIdentifier())
Out << II->getName();
+ else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND)) {
+ llvm::SmallString<64> Buffer;
+ Context.mangleCXXDtor(D, GD.getDtorType(), Buffer);
+ Out << Buffer;
+ }
+ else if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND)) {
+ llvm::SmallString<64> Buffer;
+ Context.mangleCXXCtor(D, GD.getCtorType(), Buffer);
+ Out << Buffer;
+ }
else {
// FIXME: We were doing a mangleUnqualifiedName() before, but that's
// a private member of a class that will soon itself be private to the
@@ -125,19 +135,24 @@ class CXXNameMangler {
const CXXMethodDecl *Structor;
unsigned StructorType;
+ /// SeqID - The next substitution sequence number.
+ unsigned SeqID;
+
llvm::DenseMap<uintptr_t, unsigned> Substitutions;
ASTContext &getASTContext() const { return Context.getASTContext(); }
public:
CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
- : Context(C), Out(Res), Structor(0), StructorType(0) { }
+ : Context(C), Out(Res), Structor(0), StructorType(0), SeqID(0) { }
CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
const CXXConstructorDecl *D, CXXCtorType Type)
- : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type) { }
+ : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type),
+ SeqID(0) { }
CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
const CXXDestructorDecl *D, CXXDtorType Type)
- : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type) { }
+ : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type),
+ SeqID(0) { }
#if MANGLE_CHECKER
~CXXNameMangler() {
@@ -154,7 +169,9 @@ public:
void mangle(const NamedDecl *D, llvm::StringRef Prefix = "_Z");
void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
+ void mangleNumber(const llvm::APSInt &I);
void mangleNumber(int64_t Number);
+ void mangleFloat(const llvm::APFloat &F);
void mangleFunctionEncoding(const FunctionDecl *FD);
void mangleName(const NamedDecl *ND);
void mangleType(QualType T);
@@ -215,6 +232,7 @@ private:
#include "clang/AST/TypeNodes.def"
void mangleType(const TagType*);
+ void mangleType(TemplateName);
void mangleBareFunctionType(const FunctionType *T,
bool MangleReturnType);
@@ -279,7 +297,7 @@ bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
if (!FD) {
const DeclContext *DC = D->getDeclContext();
// Check for extern variable declared locally.
- if (isa<FunctionDecl>(DC) && D->hasLinkage())
+ if (DC->isFunctionOrMethod() && D->hasLinkage())
while (!DC->isNamespace() && !DC->isTranslationUnit())
DC = DC->getParent();
if (DC->isTranslationUnit() && D->getLinkage() != InternalLinkage)
@@ -357,12 +375,6 @@ void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
mangleBareFunctionType(FT, MangleReturnType);
}
-/// isStd - Return whether a given namespace is the 'std' namespace.
-static bool isStd(const NamespaceDecl *NS) {
- const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
- return II && II->isStr("std");
-}
-
static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
while (isa<LinkageSpecDecl>(DC)) {
DC = DC->getParent();
@@ -371,15 +383,21 @@ static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
return DC;
}
+/// isStd - Return whether a given namespace is the 'std' namespace.
+static bool isStd(const NamespaceDecl *NS) {
+ if (!IgnoreLinkageSpecDecls(NS->getParent())->isTranslationUnit())
+ return false;
+
+ const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
+ return II && II->isStr("std");
+}
+
// isStdNamespace - Return whether a given decl context is a toplevel 'std'
// namespace.
static bool isStdNamespace(const DeclContext *DC) {
if (!DC->isNamespace())
return false;
- if (!IgnoreLinkageSpecDecls(DC->getParent())->isTranslationUnit())
- return false;
-
return isStd(cast<NamespaceDecl>(DC));
}
@@ -511,6 +529,21 @@ void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
addSubstitution(Template);
}
+void CXXNameMangler::mangleFloat(const llvm::APFloat &F) {
+ // TODO: avoid this copy with careful stream management.
+ llvm::SmallString<20> Buffer;
+ F.bitcastToAPInt().toString(Buffer, 16, false);
+ Out.write(Buffer.data(), Buffer.size());
+}
+
+void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
+ if (Value.isSigned() && Value.isNegative()) {
+ Out << 'n';
+ Value.abs().print(Out, true);
+ } else
+ Value.print(Out, Value.isSigned());
+}
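Per the <number> production, negative values gain an 'n' prefix before their absolute value (hypothetical inputs):

    // mangleNumber(42)  emits "42"
    // mangleNumber(-42) emits "n42"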
+
void CXXNameMangler::mangleNumber(int64_t Number) {
// <number> ::= [n] <non-negative decimal integer>
if (Number < 0) {
@@ -593,6 +626,28 @@ void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *Qualifier,
mangleUnqualifiedName(0, Name, KnownArity);
}
+static const FieldDecl *FindFirstNamedDataMember(const RecordDecl *RD) {
+ assert(RD->isAnonymousStructOrUnion() &&
+ "Expected anonymous struct or union!");
+
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I) {
+ const FieldDecl *FD = *I;
+
+ if (FD->getIdentifier())
+ return FD;
+
+ if (const RecordType *RT = FD->getType()->getAs<RecordType>()) {
+ if (const FieldDecl *NamedDataMember =
+ FindFirstNamedDataMember(RT->getDecl()))
+ return NamedDataMember;
+ }
+ }
+
+ // We didn't find a named data member.
+ return 0;
+}
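A sketch of the walk on a namespace-scope anonymous union; the helper also recurses into unnamed record-typed members:

    // The union's mangled "name" is that of its first named field, 'x'.
    static union { int x; int y; };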
+
void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
DeclarationName Name,
unsigned KnownArity) {
@@ -625,6 +680,28 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
}
}
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ // We must have an anonymous union or struct declaration.
+ const RecordDecl *RD =
+ cast<RecordDecl>(VD->getType()->getAs<RecordType>()->getDecl());
+
+ // Itanium C++ ABI 5.1.2:
+ //
+ // For the purposes of mangling, the name of an anonymous union is
+ // considered to be the name of the first named data member found by a
+ // pre-order, depth-first, declaration-order walk of the data members of
+ // the anonymous union. If there is no such data member (i.e., if all of
+ // the data members in the union are unnamed), then there is no way for
+ // a program to refer to the anonymous union, and there is therefore no
+ // need to mangle its name.
+ const FieldDecl *FD = FindFirstNamedDataMember(RD);
+ assert(FD && "Didn't find a named data member!");
+ assert(FD->getIdentifier() && "Data member name isn't an identifier!");
+
+ mangleSourceName(FD->getIdentifier());
+ break;
+ }
+
// We must have an anonymous struct.
const TagDecl *TD = cast<TagDecl>(ND);
if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) {
@@ -808,7 +885,7 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
if (const BlockDecl *Block = dyn_cast<BlockDecl>(DC)) {
manglePrefix(DC->getParent(), NoFunction);
llvm::SmallString<64> Name;
- Context.mangleBlock(Block, Name);
+ Context.mangleBlock(GlobalDecl(), Block, Name);
Out << Name.size() << Name;
return;
}
@@ -880,6 +957,53 @@ void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) {
addSubstitution(ND);
}
+/// Mangles a template name under the production <type>. Required for
+/// template template arguments.
+/// <type> ::= <class-enum-type>
+/// ::= <template-param>
+/// ::= <substitution>
+void CXXNameMangler::mangleType(TemplateName TN) {
+ if (mangleSubstitution(TN))
+ return;
+
+ TemplateDecl *TD = 0;
+
+ switch (TN.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ TD = TN.getAsQualifiedTemplateName()->getTemplateDecl();
+ goto HaveDecl;
+
+ case TemplateName::Template:
+ TD = TN.getAsTemplateDecl();
+ goto HaveDecl;
+
+ HaveDecl:
+ if (isa<TemplateTemplateParmDecl>(TD))
+ mangleTemplateParameter(cast<TemplateTemplateParmDecl>(TD)->getIndex());
+ else
+ mangleName(TD);
+ break;
+
+ case TemplateName::OverloadedTemplate:
+ llvm_unreachable("can't mangle an overloaded template name as a <type>");
+ break;
+
+ case TemplateName::DependentTemplate: {
+ const DependentTemplateName *Dependent = TN.getAsDependentTemplateName();
+ assert(Dependent->isIdentifier());
+
+ // <class-enum-type> ::= <name>
+ // <name> ::= <nested-name>
+ mangleUnresolvedScope(Dependent->getQualifier());
+ mangleSourceName(Dependent->getIdentifier());
+ break;
+ }
+
+ }
+
+ addSubstitution(TN);
+}
+
void
CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
switch (OO) {
@@ -1001,6 +1125,18 @@ void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
if (Quals.hasConst())
Out << 'K';
+ if (Quals.hasAddressSpace()) {
+ // Extension:
+ //
+ // <type> ::= U <address-space-number>
+ //
+ // where <address-space-number> is a source name consisting of 'AS'
+ // followed by the address space <number>.
+ llvm::SmallString<64> ASString;
+ ASString = "AS" + llvm::utostr_32(Quals.getAddressSpace());
+ Out << 'U' << ASString.size() << ASString;
+ }
+
// FIXME: For now, just drop all extension qualifiers on the floor.
}
@@ -1138,7 +1274,8 @@ void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
if (MangleReturnType)
mangleType(Proto->getResultType());
- if (Proto->getNumArgs() == 0) {
+ if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ // <builtin-type> ::= v # void
Out << 'v';
return;
}
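The effect of the added !isVariadic() check, assuming standard Itanium manglings:

    void f();    // _Z1fv : an empty parameter list mangles as 'v'
    void g(...); // _Z1gz : variadic, so no 'v'; the ellipsis mangles as 'z'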
@@ -1204,6 +1341,22 @@ void CXXNameMangler::mangleType(const MemberPointerType *T) {
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
mangleQualifiers(Qualifiers::fromCVRMask(FPT->getTypeQuals()));
mangleType(FPT);
+
+ // Itanium C++ ABI 5.1.8:
+ //
+ // The type of a non-static member function is considered to be different,
+ // for the purposes of substitution, from the type of a namespace-scope or
+ // static member function whose type appears similar. The types of two
+ // non-static member functions are considered to be different, for the
+ // purposes of substitution, if the functions are members of different
+ // classes. In other words, for the purposes of substitution, the class of
+ // which the function is a member is considered part of the type of
+ // function.
+
+ // We increment the SeqID here to emulate adding an entry to the
+ // substitution table. We can't actually add it because we don't want this
+ // particular function type to be substituted.
+ ++SeqID;
} else
mangleType(PointeeType);
}
@@ -1213,8 +1366,6 @@ void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
mangleTemplateParameter(T->getIndex());
}
-// FIXME: <type> ::= <template-template-param> <template-args>
-
// <type> ::= P <type> # pointer-to
void CXXNameMangler::mangleType(const PointerType *T) {
Out << 'P';
@@ -1244,12 +1395,20 @@ void CXXNameMangler::mangleType(const ComplexType *T) {
}
// GNU extension: vector types
-// <type> ::= <vector-type>
-// <vector-type> ::= Dv <positive dimension number> _ <element type>
-// ::= Dv [<dimension expression>] _ <element type>
+// <type> ::= <vector-type>
+// <vector-type> ::= Dv <positive dimension number> _
+// <extended element type>
+// ::= Dv [<dimension expression>] _ <element type>
+// <extended element type> ::= <element type>
+// ::= p # AltiVec vector pixel
void CXXNameMangler::mangleType(const VectorType *T) {
Out << "Dv" << T->getNumElements() << '_';
- mangleType(T->getElementType());
+ if (T->getAltiVecSpecific() == VectorType::Pixel)
+ Out << 'p';
+ else if (T->getAltiVecSpecific() == VectorType::Bool)
+ Out << 'b';
+ else
+ mangleType(T->getElementType());
}
void CXXNameMangler::mangleType(const ExtVectorType *T) {
mangleType(static_cast<const VectorType*>(T));
@@ -1303,23 +1462,25 @@ void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
void CXXNameMangler::mangleType(const DependentNameType *T) {
// Typename types are always nested
Out << 'N';
- if (T->getIdentifier()) {
- mangleUnresolvedScope(T->getQualifier());
- mangleSourceName(T->getIdentifier());
- } else {
- const TemplateSpecializationType *TST = T->getTemplateId();
- if (!mangleSubstitution(QualType(TST, 0))) {
- mangleTemplatePrefix(TST->getTemplateName());
-
- // FIXME: GCC does not appear to mangle the template arguments when
- // the template in question is a dependent template name. Should we
- // emulate that badness?
- mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
- TST->getNumArgs());
- addSubstitution(QualType(TST, 0));
- }
- }
-
+ mangleUnresolvedScope(T->getQualifier());
+ mangleSourceName(T->getIdentifier());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) {
+ // Dependently-scoped template types are always nested
+ Out << 'N';
+
+ // TODO: avoid making this TemplateName.
+ TemplateName Prefix =
+ getASTContext().getDependentTemplateName(T->getQualifier(),
+ T->getIdentifier());
+ mangleTemplatePrefix(Prefix);
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(Prefix, T->getArgs(), T->getNumArgs());
Out << 'E';
}
@@ -1369,9 +1530,7 @@ void CXXNameMangler::mangleIntegerLiteral(QualType T,
// Boolean values are encoded as 0/1.
Out << (Value.getBoolValue() ? '1' : '0');
} else {
- if (Value.isNegative())
- Out << 'n';
- Value.abs().print(Out, false);
+ mangleNumber(Value);
}
Out << 'E';
@@ -1435,10 +1594,44 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
#define STMT(Type, Base) \
case Expr::Type##Class:
#include "clang/AST/StmtNodes.inc"
+ // fallthrough
+
+ // These all can only appear in local or variable-initialization
+ // contexts and so should never appear in a mangling.
+ case Expr::AddrLabelExprClass:
+ case Expr::BlockDeclRefExprClass:
+ case Expr::CXXThisExprClass:
+ case Expr::DesignatedInitExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::InitListExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::CXXScalarValueInitExprClass:
llvm_unreachable("unexpected statement kind");
break;
- default: {
+ // FIXME: invent manglings for all these.
+ case Expr::BlockExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::ChooseExprClass:
+ case Expr::CompoundLiteralExprClass:
+ case Expr::ExtVectorElementExprClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::ObjCImplicitSetterGetterRefExprClass:
+ case Expr::ObjCIsaExprClass:
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::ObjCMessageExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCSuperExprClass:
+ case Expr::OffsetOfExprClass:
+ case Expr::PredefinedExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::StmtExprClass:
+ case Expr::TypesCompatibleExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ case Expr::VAArgExprClass: {
// As bad as this diagnostic is, it's better than crashing.
Diagnostic &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
@@ -1450,6 +1643,11 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::CXXDefaultArgExprClass:
+ mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr());
+ break;
+
+ case Expr::CXXMemberCallExprClass: // fallthrough
case Expr::CallExprClass: {
const CallExpr *CE = cast<CallExpr>(E);
Out << "cl";
@@ -1460,6 +1658,26 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::CXXNewExprClass: {
+ // Proposal from David Vandervoorde, 2010.06.30
+ const CXXNewExpr *New = cast<CXXNewExpr>(E);
+ if (New->isGlobalNew()) Out << "gs";
+ Out << (New->isArray() ? "na" : "nw");
+ for (CXXNewExpr::const_arg_iterator I = New->placement_arg_begin(),
+ E = New->placement_arg_end(); I != E; ++I)
+ mangleExpression(*I);
+ Out << '_';
+ mangleType(New->getAllocatedType());
+ if (New->hasInitializer()) {
+ Out << "pi";
+ for (CXXNewExpr::const_arg_iterator I = New->constructor_arg_begin(),
+ E = New->constructor_arg_end(); I != E; ++I)
+ mangleExpression(*I);
+ }
+ Out << 'E';
+ break;
+ }
+
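Walking the grammar above for a simple hypothetical operand: the pieces are nw, '_', the allocated type, pi with the constructor arguments, and a closing E.

    // new int(1)  ->  nw _ i pi Li1E E   =>  "nw_ipiLi1EE"
    // '::new' would prefix "gs"; array new emits "na" instead of "nw".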
case Expr::MemberExprClass: {
const MemberExpr *ME = cast<MemberExpr>(E);
mangleMemberExpr(ME->getBase(), ME->isArrow(),
@@ -1533,6 +1751,43 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::CXXThrowExprClass: {
+ const CXXThrowExpr *TE = cast<CXXThrowExpr>(E);
+
+ // Proposal from David Vandervoorde, 2010.06.30
+ if (TE->getSubExpr()) {
+ Out << "tw";
+ mangleExpression(TE->getSubExpr());
+ } else {
+ Out << "tr";
+ }
+ break;
+ }
+
+ case Expr::CXXTypeidExprClass: {
+ const CXXTypeidExpr *TIE = cast<CXXTypeidExpr>(E);
+
+ // Proposal from David Vandervoorde, 2010.06.30
+ if (TIE->isTypeOperand()) {
+ Out << "ti";
+ mangleType(TIE->getTypeOperand());
+ } else {
+ Out << "te";
+ mangleExpression(TIE->getExprOperand());
+ }
+ break;
+ }
+
+ case Expr::CXXDeleteExprClass: {
+ const CXXDeleteExpr *DE = cast<CXXDeleteExpr>(E);
+
+ // Proposal from David Vandervoorde, 2010.06.30
+ if (DE->isGlobalDelete()) Out << "gs";
+ Out << (DE->isArrayForm() ? "da" : "dl");
+ mangleExpression(DE->getArgument());
+ break;
+ }
+
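Summarizing the three operator encodings added here, with hypothetical operands:

    // throw x;      -> tw <expr x>        throw;         -> tr
    // typeid(int)   -> ti i               typeid(expr)   -> te <expr>
    // delete p;     -> dl <expr p>        ::delete[] p;  -> gs da <expr p>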
case Expr::UnaryOperatorClass: {
const UnaryOperator *UO = cast<UnaryOperator>(E);
mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
@@ -1541,6 +1796,18 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
}
+ case Expr::ArraySubscriptExprClass: {
+ const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(E);
+
+ // Array subscript is treated as a syntactically weird form of
+ // binary operator.
+ Out << "ix";
+ mangleExpression(AE->getLHS());
+ mangleExpression(AE->getRHS());
+ break;
+ }
+
+ case Expr::CompoundAssignOperatorClass: // fallthrough
case Expr::BinaryOperatorClass: {
const BinaryOperator *BO = cast<BinaryOperator>(E);
mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()),
@@ -1657,12 +1924,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
const FloatingLiteral *FL = cast<FloatingLiteral>(E);
Out << 'L';
mangleType(FL->getType());
-
- // TODO: avoid this copy with careful stream management.
- llvm::SmallString<20> Buffer;
- FL->getValue().bitcastToAPInt().toString(Buffer, 16, false);
- Out.write(Buffer.data(), Buffer.size());
-
+ mangleFloat(FL->getValue());
Out << 'E';
break;
}
@@ -1680,16 +1942,62 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
Out << 'E';
break;
- case Expr::IntegerLiteralClass:
- mangleIntegerLiteral(E->getType(),
- llvm::APSInt(cast<IntegerLiteral>(E)->getValue()));
+ case Expr::IntegerLiteralClass: {
+ llvm::APSInt Value(cast<IntegerLiteral>(E)->getValue());
+ if (E->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleIntegerLiteral(E->getType(), Value);
break;
+ }
+ case Expr::ImaginaryLiteralClass: {
+ const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E);
+ // Mangle as if a complex literal.
+ // Proposal from David Vandervoorde, 2010.06.30.
+ Out << 'L';
+ mangleType(E->getType());
+ if (const FloatingLiteral *Imag =
+ dyn_cast<FloatingLiteral>(IE->getSubExpr())) {
+ // Mangle a floating-point zero of the appropriate type.
+ mangleFloat(llvm::APFloat(Imag->getValue().getSemantics()));
+ Out << '_';
+ mangleFloat(Imag->getValue());
+ } else {
+ Out << '0' << '_';
+ llvm::APSInt Value(cast<IntegerLiteral>(IE->getSubExpr())->getValue());
+ if (IE->getSubExpr()->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleNumber(Value);
+ }
+ Out << 'E';
+ break;
}
-}
-// FIXME: <type> ::= G <type> # imaginary (C 2000)
-// FIXME: <type> ::= U <source-name> <type> # vendor extended type qualifier
+ case Expr::StringLiteralClass: {
+ // Proposal from David Vandervoorde, 2010.06.30.
+ // I've sent a comment off asking whether this needs to also
+ // represent the length of the string.
+ Out << 'L';
+ const ConstantArrayType *T = cast<ConstantArrayType>(E->getType());
+ QualType CharTy = T->getElementType().getUnqualifiedType();
+ mangleType(CharTy);
+ Out << 'E';
+ break;
+ }
+
+ case Expr::GNUNullExprClass:
+ // FIXME: should this really be mangled the same as nullptr?
+ // fallthrough
+
+ case Expr::CXXNullPtrLiteralExprClass: {
+ // Proposal from David Vandervoorde, 2010.06.30, as
+ // modified by ABI list discussion.
+ Out << "LDnE";
+ break;
+ }
+
+ }
+}
void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
// <ctor-dtor-name> ::= C1 # complete object constructor
@@ -1774,9 +2082,8 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
mangleType(A.getAsType());
break;
case TemplateArgument::Template:
- assert(A.getAsTemplate().getAsTemplateDecl() &&
- "Can't get dependent template names here");
- mangleName(A.getAsTemplate().getAsTemplateDecl());
+ // This is mangled as <type>.
+ mangleType(A.getAsTemplate());
break;
case TemplateArgument::Expression:
Out << 'X';
@@ -1882,7 +2189,7 @@ bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
while (SeqID) {
assert(BufferPtr > Buffer && "Buffer overflow!");
- unsigned char c = static_cast<unsigned char>(SeqID) % 36;
+ char c = static_cast<char>(SeqID % 36);
*--BufferPtr = (c < 10 ? '0' + c : 'A' + c - 10);
SeqID /= 36;
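For reference, the substitution tokens this produces follow the Itanium scheme: the first candidate is S_, and later ones append the base-36 SeqID.

    // 1st substitution -> S_   2nd -> S0_   11th -> S9_   12th -> SA_
    // 38th -> S10_  (digits 0-9, then A-Z)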
@@ -2049,10 +2356,8 @@ void CXXNameMangler::addSubstitution(TemplateName Template) {
}
void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
- unsigned SeqID = Substitutions.size();
-
assert(!Substitutions.count(Ptr) && "Substitution already exists!");
- Substitutions[Ptr] = SeqID;
+ Substitutions[Ptr] = SeqID++;
}
//
@@ -2092,10 +2397,10 @@ void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
Mangler.mangle(D);
}
-void MangleContext::mangleBlock(const BlockDecl *BD,
+void MangleContext::mangleBlock(GlobalDecl GD, const BlockDecl *BD,
llvm::SmallVectorImpl<char> &Res) {
MiscNameMangler Mangler(*this, Res);
- Mangler.mangleBlock(BD);
+ Mangler.mangleBlock(GD, BD);
}
void MangleContext::mangleThunk(const CXXMethodDecl *MD,
@@ -2155,6 +2460,15 @@ void MangleContext::mangleGuardVariable(const VarDecl *D,
Mangler.mangleName(D);
}
+void MangleContext::mangleReferenceTemporary(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &Res) {
+ // We match the GCC mangling here.
+ // <special-name> ::= GR <object name>
+ CXXNameMangler Mangler(*this, Res);
+ Mangler.getStream() << "_ZGR";
+ Mangler.mangleName(D);
+}
+
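Matching GCC here means, for a hypothetical namespace-scope reference:

    // const int &ref = 42;
    // the lifetime-extended temporary mangles as: _ZGR3ref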
void MangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
llvm::SmallVectorImpl<char> &Res) {
// <special-name> ::= TV <type> # virtual table
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h
index f1c5358..139f6c0 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h
@@ -19,6 +19,7 @@
#define LLVM_CLANG_CODEGEN_MANGLE_H
#include "CGCXX.h"
+#include "GlobalDecl.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
@@ -84,6 +85,8 @@ public:
Diagnostic &Diags)
: Context(Context), Diags(Diags) { }
+ virtual ~MangleContext() { }
+
ASTContext &getASTContext() const { return Context; }
Diagnostic &getDiags() const { return Diags; }
@@ -108,7 +111,7 @@ public:
/// @name Mangler Entry Points
/// @{
- bool shouldMangleDeclName(const NamedDecl *D);
+ virtual bool shouldMangleDeclName(const NamedDecl *D);
virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
virtual void mangleThunk(const CXXMethodDecl *MD,
const ThunkInfo &Thunk,
@@ -118,6 +121,8 @@ public:
llvm::SmallVectorImpl<char> &);
virtual void mangleGuardVariable(const VarDecl *D,
llvm::SmallVectorImpl<char> &);
+ virtual void mangleReferenceTemporary(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &);
virtual void mangleCXXVTable(const CXXRecordDecl *RD,
llvm::SmallVectorImpl<char> &);
virtual void mangleCXXVTT(const CXXRecordDecl *RD,
@@ -131,7 +136,8 @@ public:
llvm::SmallVectorImpl<char> &);
virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
llvm::SmallVectorImpl<char> &);
- void mangleBlock(const BlockDecl *BD, llvm::SmallVectorImpl<char> &);
+ void mangleBlock(GlobalDecl GD,
+ const BlockDecl *BD, llvm::SmallVectorImpl<char> &);
void mangleInitDiscriminator() {
Discriminator = 0;
@@ -161,7 +167,7 @@ public:
llvm::raw_svector_ostream &getStream() { return Out; }
- void mangleBlock(const BlockDecl *BD);
+ void mangleBlock(GlobalDecl GD, const BlockDecl *BD);
void mangleObjCMethodName(const ObjCMethodDecl *MD);
};
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
new file mode 100644
index 0000000..da0fdb6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -0,0 +1,1191 @@
+//===--- MicrosoftCXXABI.cpp - Emit LLVM Code from ASTs for a Module ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ code generation targeting the Microsoft Visual C++ ABI.
+// The class in this file generates structures that follow the Microsoft
+// Visual C++ ABI, which is actually not very well documented at all outside
+// of Microsoft.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CodeGenModule.h"
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "CGVTables.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftCXXNameMangler {
+ MangleContext &Context;
+ llvm::raw_svector_ostream Out;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ MicrosoftCXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
+ : Context(C), Out(Res) { }
+
+ llvm::raw_svector_ostream &getStream() { return Out; }
+
+ void mangle(const NamedDecl *D, llvm::StringRef Prefix = "?");
+ void mangleName(const NamedDecl *ND);
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleVariableEncoding(const VarDecl *VD);
+ void mangleNumber(int64_t Number);
+ void mangleType(QualType T);
+
+private:
+ void mangleUnqualifiedName(const NamedDecl *ND) {
+ mangleUnqualifiedName(ND, ND->getDeclName());
+ }
+ void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
+ void mangleSourceName(const IdentifierInfo *II);
+ void manglePostfix(const DeclContext *DC, bool NoFunction=false);
+ void mangleOperatorName(OverloadedOperatorKind OO);
+ void mangleQualifiers(Qualifiers Quals, bool IsMember);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+
+ // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+
+ void mangleType(const TagType*);
+ void mangleType(const FunctionType *T, const FunctionDecl *D,
+ bool IsStructor, bool IsInstMethod);
+ void mangleType(const ArrayType *T, bool IsGlobal);
+ void mangleExtraDimensions(QualType T);
+ void mangleFunctionClass(const FunctionDecl *FD);
+ void mangleCallingConvention(const FunctionType *T);
+ void mangleThrowSpecification(const FunctionProtoType *T);
+
+};
+
+/// MicrosoftMangleContext - Overrides the default MangleContext for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftMangleContext : public MangleContext {
+public:
+ MicrosoftMangleContext(ASTContext &Context,
+ Diagnostic &Diags) : MangleContext(Context, Diags) { }
+ virtual bool shouldMangleDeclName(const NamedDecl *D);
+ virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
+ virtual void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleGuardVariable(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ llvm::SmallVectorImpl<char> &);
+};
+
+class MicrosoftCXXABI : public CXXABI {
+ MicrosoftMangleContext MangleCtx;
+public:
+ MicrosoftCXXABI(CodeGenModule &CGM)
+ : MangleCtx(CGM.getContext(), CGM.getDiags()) {}
+
+ MicrosoftMangleContext &getMangleContext() {
+ return MangleCtx;
+ }
+};
+
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+ D = D->getCanonicalDecl();
+ for (const DeclContext *DC = D->getDeclContext();
+ !DC->isTranslationUnit(); DC = DC->getParent()) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+ }
+
+ return false;
+}
+
+bool MicrosoftMangleContext::shouldMangleDeclName(const NamedDecl *D) {
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
+ return false;
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (D->hasAttr<AsmLabelAttr>())
+ return true;
+
+ // Clang's "overloadable" attribute extension to C/C++ implies name mangling
+ // (always) as does passing a C++ member function and a function
+ // whose name is not a simple identifier.
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
+ !FD->getDeclName().isIdentifier()))
+ return true;
+
+ // Otherwise, no mangling is done outside C++ mode.
+ if (!getASTContext().getLangOptions().CPlusPlus)
+ return false;
+
+ // Variables at global scope with internal linkage are not mangled.
+ if (!FD) {
+ const DeclContext *DC = D->getDeclContext();
+ if (DC->isTranslationUnit() && D->getLinkage() == InternalLinkage)
+ return false;
+ }
+
+ // C functions and "main" are not mangled.
+ if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
+ return false;
+
+ return true;
+}
+
+void MicrosoftCXXNameMangler::mangle(const NamedDecl *D,
+ llvm::StringRef Prefix) {
+ // MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
+ // Therefore it's really important that we don't decorate the
+ // name with leading underscores or leading/trailing at signs. So, emit an
+ // asm marker at the start so we get the name right.
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+ Out << ALA->getLabel();
+ return;
+ }
+
+ // <mangled-name> ::= ? <name> <type-encoding>
+ Out << Prefix;
+ mangleName(D);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ mangleFunctionEncoding(FD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleVariableEncoding(VD);
+ // TODO: Fields? Can MSVC even mangle them?
+}
+
+void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <type-encoding> ::= <function-class> <function-type>
+
+ // Don't mangle in the type if this isn't a decl we should typically mangle.
+ if (!Context.shouldMangleDeclName(FD))
+ return;
+
+ // We should never ever see a FunctionNoProtoType at this point.
+ // We don't even know how to mangle their types anyway :).
+ const FunctionProtoType *FT = cast<FunctionProtoType>(FD->getType());
+
+ bool InStructor = false, InInstMethod = false;
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD) {
+ if (MD->isInstance())
+ InInstMethod = true;
+ if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD))
+ InStructor = true;
+ }
+
+ // First, the function class.
+ mangleFunctionClass(FD);
+
+ mangleType(FT, FD, InStructor, InInstMethod);
+}
+
+void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
+ // <type-encoding> ::= <storage-class> <variable-type>
+ // <storage-class> ::= 0 # private static member
+ // ::= 1 # protected static member
+ // ::= 2 # public static member
+ // ::= 3 # global
+ // ::= 4 # static local
+
+ // The first character in the encoding (after the name) is the storage class.
+ if (VD->isStaticDataMember()) {
+ // If it's a static member, it also encodes the access level.
+ switch (VD->getAccess()) {
+ default:
+ case AS_private: Out << '0'; break;
+ case AS_protected: Out << '1'; break;
+ case AS_public: Out << '2'; break;
+ }
+ }
+ else if (!VD->isStaticLocal())
+ Out << '3';
+ else
+ Out << '4';
+ // Now mangle the type.
+ // <variable-type> ::= <type> <cvr-qualifiers>
+ // ::= <type> A # pointers, references, arrays
+ // Pointers and references are odd. The type of 'int * const foo;' gets
+ // mangled as 'QAHA' instead of 'PAHB', for example.
+ QualType Ty = VD->getType();
+ if (Ty->isPointerType() || Ty->isReferenceType()) {
+ mangleType(Ty);
+ Out << 'A';
+ } else if (Ty->isArrayType()) {
+ // Global arrays are funny, too.
+ mangleType(static_cast<ArrayType *>(Ty.getTypePtr()), true);
+ Out << 'A';
+ } else {
+ mangleType(Ty.getLocalUnqualifiedType());
+ mangleQualifiers(Ty.getLocalQualifiers(), false);
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
+ const DeclContext *DC = ND->getDeclContext();
+
+ // Always start with the unqualified name.
+ mangleUnqualifiedName(ND);
+
+ // If this is an extern variable declared locally, the relevant DeclContext
+ // is that of the containing namespace, or the translation unit.
+ if (isa<FunctionDecl>(DC) && ND->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = DC->getParent();
+
+ manglePostfix(DC);
+
+ // Terminate the whole name with an '@'.
+ Out << '@';
+}
+
+void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
+ // <number> ::= [?] <decimal digit> # <= 9
+ // ::= [?] <hex digit>+ @ # > 9; A = 0, B = 1, etc...
+ if (Number < 0) {
+ Out << '?';
+ Number = -Number;
+ }
+ if (Number >= 1 && Number <= 10) {
+ Out << Number-1;
+ } else {
+ // We have to build up the encoding in reverse order, so it will come
+ // out right when we write it out.
+ char Encoding[16];
+ char *EndPtr = Encoding+sizeof(Encoding);
+ char *CurPtr = EndPtr;
+ while (Number) {
+ *--CurPtr = 'A' + (Number % 16);
+ Number /= 16;
+ }
+ Out.write(CurPtr, EndPtr-CurPtr);
+ Out << '@';
+ }
+}
+
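The branches of this encoding on sample values (derived from the code above):

    // mangleNumber(3)  -> "2"    values 1..10 encode as the digit value-1
    // mangleNumber(11) -> "L@"   hex digits map 'A'=0 .. 'P'=15, '@' terminates
    // mangleNumber(-3) -> "?2"   negatives take a '?' prefix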
+void
+MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+ DeclarationName Name) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier: {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ mangleSourceName(II);
+ break;
+ }
+
+ // Otherwise, an anonymous entity. We must have a declaration.
+ assert(ND && "mangling empty name without declaration");
+
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (NS->isAnonymousNamespace()) {
+ Out << "?A";
+ break;
+ }
+ }
+
+ // We must have an anonymous struct.
+ const TagDecl *TD = cast<TagDecl>(ND);
+ if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) {
+ assert(TD->getDeclContext() == D->getDeclContext() &&
+ "Typedef should not be in another decl context!");
+ assert(D->getDeclName().getAsIdentifierInfo() &&
+ "Typedef was not named!");
+ mangleSourceName(D->getDeclName().getAsIdentifierInfo());
+ break;
+ }
+
+ // When VC encounters an anonymous type with no tag and no typedef,
+ // it literally emits '<unnamed-tag>'.
+ Out << "<unnamed-tag>";
+ break;
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ assert(false && "Can't mangle Objective-C selector names here!");
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ assert(false && "Can't mangle constructors yet!");
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ assert(false && "Can't mangle destructors yet!");
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= ?B # (cast)
+ // The target type is encoded as the return type.
+ Out << "?B";
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ mangleOperatorName(Name.getCXXOverloadedOperator());
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ // FIXME: Was this added in VS2010? Does MS even know how to mangle this?
+ assert(false && "Don't know how to mangle literal operators yet!");
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ assert(false && "Can't mangle a using directive name!");
+ break;
+ }
+}
+
+void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC,
+ bool NoFunction) {
+ // <postfix> ::= <unqualified-name> [<postfix>]
+ // ::= <template-postfix> <template-args> [<postfix>]
+ // ::= <template-param>
+ // ::= <substitution> [<postfix>]
+
+ if (!DC) return;
+
+ while (isa<LinkageSpecDecl>(DC))
+ DC = DC->getParent();
+
+ if (DC->isTranslationUnit())
+ return;
+
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+ llvm::SmallString<64> Name;
+ Context.mangleBlock(GlobalDecl(), BD, Name);
+ Out << Name << '@';
+ return manglePostfix(DC->getParent(), NoFunction);
+ }
+
+ if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
+ return;
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(Method);
+ else {
+ mangleUnqualifiedName(cast<NamedDecl>(DC));
+ manglePostfix(DC->getParent(), NoFunction);
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO) {
+ switch (OO) {
+ // ?0 # constructor
+ // ?1 # destructor
+ // <operator-name> ::= ?2 # new
+ case OO_New: Out << "?2"; break;
+ // <operator-name> ::= ?3 # delete
+ case OO_Delete: Out << "?3"; break;
+ // <operator-name> ::= ?4 # =
+ case OO_Equal: Out << "?4"; break;
+ // <operator-name> ::= ?5 # >>
+ case OO_GreaterGreater: Out << "?5"; break;
+ // <operator-name> ::= ?6 # <<
+ case OO_LessLess: Out << "?6"; break;
+ // <operator-name> ::= ?7 # !
+ case OO_Exclaim: Out << "?7"; break;
+ // <operator-name> ::= ?8 # ==
+ case OO_EqualEqual: Out << "?8"; break;
+ // <operator-name> ::= ?9 # !=
+ case OO_ExclaimEqual: Out << "?9"; break;
+ // <operator-name> ::= ?A # []
+ case OO_Subscript: Out << "?A"; break;
+ // ?B # conversion
+ // <operator-name> ::= ?C # ->
+ case OO_Arrow: Out << "?C"; break;
+ // <operator-name> ::= ?D # *
+ case OO_Star: Out << "?D"; break;
+ // <operator-name> ::= ?E # ++
+ case OO_PlusPlus: Out << "?E"; break;
+ // <operator-name> ::= ?F # --
+ case OO_MinusMinus: Out << "?F"; break;
+ // <operator-name> ::= ?G # -
+ case OO_Minus: Out << "?G"; break;
+ // <operator-name> ::= ?H # +
+ case OO_Plus: Out << "?H"; break;
+ // <operator-name> ::= ?I # &
+ case OO_Amp: Out << "?I"; break;
+ // <operator-name> ::= ?J # ->*
+ case OO_ArrowStar: Out << "?J"; break;
+ // <operator-name> ::= ?K # /
+ case OO_Slash: Out << "?K"; break;
+ // <operator-name> ::= ?L # %
+ case OO_Percent: Out << "?L"; break;
+ // <operator-name> ::= ?M # <
+ case OO_Less: Out << "?M"; break;
+ // <operator-name> ::= ?N # <=
+ case OO_LessEqual: Out << "?N"; break;
+ // <operator-name> ::= ?O # >
+ case OO_Greater: Out << "?O"; break;
+ // <operator-name> ::= ?P # >=
+ case OO_GreaterEqual: Out << "?P"; break;
+ // <operator-name> ::= ?Q # ,
+ case OO_Comma: Out << "?Q"; break;
+ // <operator-name> ::= ?R # ()
+ case OO_Call: Out << "?R"; break;
+ // <operator-name> ::= ?S # ~
+ case OO_Tilde: Out << "?S"; break;
+ // <operator-name> ::= ?T # ^
+ case OO_Caret: Out << "?T"; break;
+ // <operator-name> ::= ?U # |
+ case OO_Pipe: Out << "?U"; break;
+ // <operator-name> ::= ?V # &&
+ case OO_AmpAmp: Out << "?V"; break;
+ // <operator-name> ::= ?W # ||
+ case OO_PipePipe: Out << "?W"; break;
+ // <operator-name> ::= ?X # *=
+ case OO_StarEqual: Out << "?X"; break;
+ // <operator-name> ::= ?Y # +=
+ case OO_PlusEqual: Out << "?Y"; break;
+ // <operator-name> ::= ?Z # -=
+ case OO_MinusEqual: Out << "?Z"; break;
+ // <operator-name> ::= ?_0 # /=
+ case OO_SlashEqual: Out << "?_0"; break;
+ // <operator-name> ::= ?_1 # %=
+ case OO_PercentEqual: Out << "?_1"; break;
+ // <operator-name> ::= ?_2 # >>=
+ case OO_GreaterGreaterEqual: Out << "?_2"; break;
+ // <operator-name> ::= ?_3 # <<=
+ case OO_LessLessEqual: Out << "?_3"; break;
+ // <operator-name> ::= ?_4 # &=
+ case OO_AmpEqual: Out << "?_4"; break;
+ // <operator-name> ::= ?_5 # |=
+ case OO_PipeEqual: Out << "?_5"; break;
+ // <operator-name> ::= ?_6 # ^=
+ case OO_CaretEqual: Out << "?_6"; break;
+ // ?_7 # vftable
+ // ?_8 # vbtable
+ // ?_9 # vcall
+ // ?_A # typeof
+ // ?_B # local static guard
+ // ?_C # string
+ // ?_D # vbase destructor
+ // ?_E # vector deleting destructor
+ // ?_F # default constructor closure
+ // ?_G # scalar deleting destructor
+ // ?_H # vector constructor iterator
+ // ?_I # vector destructor iterator
+ // ?_J # vector vbase constructor iterator
+ // ?_K # virtual displacement map
+ // ?_L # eh vector constructor iterator
+ // ?_M # eh vector destructor iterator
+ // ?_N # eh vector vbase constructor iterator
+ // ?_O # copy constructor closure
+ // ?_P<name> # udt returning <name>
+ // ?_Q # <unknown>
+ // ?_R0 # RTTI Type Descriptor
+ // ?_R1 # RTTI Base Class Descriptor at (a,b,c,d)
+ // ?_R2 # RTTI Base Class Array
+ // ?_R3 # RTTI Class Hierarchy Descriptor
+ // ?_R4 # RTTI Complete Object Locator
+ // ?_S # local vftable
+ // ?_T # local vftable constructor closure
+ // <operator-name> ::= ?_U # new[]
+ case OO_Array_New: Out << "?_U"; break;
+ // <operator-name> ::= ?_V # delete[]
+ case OO_Array_Delete: Out << "?_V"; break;
+
+ case OO_Conditional:
+ assert(false && "Don't know how to mangle ?:");
+ break;
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ assert(false && "Not an overloaded operator");
+ break;
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source name> ::= <identifier> @
+ Out << II->getName() << '@';
+}
+
+void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ llvm::SmallString<64> Buffer;
+ MiscNameMangler(Context, Buffer).mangleObjCMethodName(MD);
+ Out << Buffer;
+}
+
+void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
+ bool IsMember) {
+ // <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers>
+ // 'E' means __ptr64 (32-bit only); 'F' means __unaligned (32/64-bit only);
+ // 'I' means __restrict (32/64-bit).
+ // Note that the MSVC __restrict keyword isn't the same as the C99 restrict
+ // keyword!
+ // <base-cvr-qualifiers> ::= A # near
+ // ::= B # near const
+ // ::= C # near volatile
+ // ::= D # near const volatile
+ // ::= E # far (16-bit)
+ // ::= F # far const (16-bit)
+ // ::= G # far volatile (16-bit)
+ // ::= H # far const volatile (16-bit)
+ // ::= I # huge (16-bit)
+ // ::= J # huge const (16-bit)
+ // ::= K # huge volatile (16-bit)
+ // ::= L # huge const volatile (16-bit)
+ // ::= M <basis> # based
+ // ::= N <basis> # based const
+ // ::= O <basis> # based volatile
+ // ::= P <basis> # based const volatile
+ // ::= Q # near member
+ // ::= R # near const member
+ // ::= S # near volatile member
+ // ::= T # near const volatile member
+ // ::= U # far member (16-bit)
+ // ::= V # far const member (16-bit)
+ // ::= W # far volatile member (16-bit)
+ // ::= X # far const volatile member (16-bit)
+ // ::= Y # huge member (16-bit)
+ // ::= Z # huge const member (16-bit)
+ // ::= 0 # huge volatile member (16-bit)
+ // ::= 1 # huge const volatile member (16-bit)
+ // ::= 2 <basis> # based member
+ // ::= 3 <basis> # based const member
+ // ::= 4 <basis> # based volatile member
+ // ::= 5 <basis> # based const volatile member
+ // ::= 6 # near function (pointers only)
+ // ::= 7 # far function (pointers only)
+ // ::= 8 # near method (pointers only)
+ // ::= 9 # far method (pointers only)
+ // ::= _A <basis> # based function (pointers only)
+ // ::= _B <basis> # based function (far?) (pointers only)
+ // ::= _C <basis> # based method (pointers only)
+ // ::= _D <basis> # based method (far?) (pointers only)
+ // ::= _E # block (Clang)
+ // <basis> ::= 0 # __based(void)
+ // ::= 1 # __based(segment)?
+ // ::= 2 <name> # __based(name)
+ // ::= 3 # ?
+ // ::= 4 # ?
+ // ::= 5 # not really based
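+  // e.g. a non-member "const volatile" type mangles as 'D', while the same
+  // qualifiers on a class member mangle as 'T'.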
+ if (!IsMember) {
+ if (!Quals.hasVolatile()) {
+ if (!Quals.hasConst())
+ Out << 'A';
+ else
+ Out << 'B';
+ } else {
+ if (!Quals.hasConst())
+ Out << 'C';
+ else
+ Out << 'D';
+ }
+ } else {
+ if (!Quals.hasVolatile()) {
+ if (!Quals.hasConst())
+ Out << 'Q';
+ else
+ Out << 'R';
+ } else {
+ if (!Quals.hasConst())
+ Out << 'S';
+ else
+ Out << 'T';
+ }
+ }
+
+ // FIXME: For now, just drop all extension qualifiers on the floor.
+}
+
+void MicrosoftCXXNameMangler::mangleType(QualType T) {
+ // Only operate on the canonical type!
+ T = getASTContext().getCanonicalType(T);
+
+ Qualifiers Quals = T.getLocalQualifiers();
+ if (Quals) {
+ // We have to mangle these now, while we still have enough information.
+ // <pointer-cvr-qualifiers> ::= P # pointer
+ // ::= Q # const pointer
+ // ::= R # volatile pointer
+ // ::= S # const volatile pointer
+ if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ if (!Quals.hasVolatile())
+ Out << 'Q';
+ else {
+ if (!Quals.hasConst())
+ Out << 'R';
+ else
+ Out << 'S';
+ }
+ } else
+ // Just emit qualifiers like normal.
+      // NB: When we mangle a pointer/reference type and the pointee
+      // type has no qualifiers, the absence of qualifiers is mangled
+      // there (as 'A').
+ mangleQualifiers(Quals, false);
+ } else if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ Out << 'P';
+ }
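+  // e.g. "int *const" is a const-qualified pointer to an unqualified int,
+  // so the code above plus the pointer case below produce "QAH".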
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+  case Type::CLASS: \
+    llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+    return;
+#define TYPE(CLASS, PARENT) \
+  case Type::CLASS: \
+    mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
+    break;
+#include "clang/AST/TypeNodes.def"
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) {
+ // <type> ::= <builtin-type>
+ // <builtin-type> ::= X # void
+ // ::= C # signed char
+ // ::= D # char
+ // ::= E # unsigned char
+ // ::= F # short
+ // ::= G # unsigned short (or wchar_t if it's not a builtin)
+ // ::= H # int
+ // ::= I # unsigned int
+ // ::= J # long
+ // ::= K # unsigned long
+ // L # <none>
+ // ::= M # float
+ // ::= N # double
+ // ::= O # long double (__float80 is mangled differently)
+ // ::= _D # __int8 (yup, it's a distinct type in MSVC)
+ // ::= _E # unsigned __int8
+ // ::= _F # __int16
+ // ::= _G # unsigned __int16
+ // ::= _H # __int32
+ // ::= _I # unsigned __int32
+ // ::= _J # long long, __int64
+ // ::= _K # unsigned long long, __int64
+ // ::= _L # __int128
+ // ::= _M # unsigned __int128
+ // ::= _N # bool
+ // _O # <array in parameter>
+ // ::= _T # __float80 (Intel)
+ // ::= _W # wchar_t
+ // ::= _Z # __float80 (Digital Mars)
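+  // e.g. plain "int" comes out as 'H' and "long long" as "_J".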
+ switch (T->getKind()) {
+ case BuiltinType::Void: Out << 'X'; break;
+ case BuiltinType::SChar: Out << 'C'; break;
+ case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'D'; break;
+ case BuiltinType::UChar: Out << 'E'; break;
+ case BuiltinType::Short: Out << 'F'; break;
+ case BuiltinType::UShort: Out << 'G'; break;
+ case BuiltinType::Int: Out << 'H'; break;
+ case BuiltinType::UInt: Out << 'I'; break;
+ case BuiltinType::Long: Out << 'J'; break;
+ case BuiltinType::ULong: Out << 'K'; break;
+ case BuiltinType::Float: Out << 'M'; break;
+ case BuiltinType::Double: Out << 'N'; break;
+ // TODO: Determine size and mangle accordingly
+ case BuiltinType::LongDouble: Out << 'O'; break;
+ // TODO: __int8 and friends
+ case BuiltinType::LongLong: Out << "_J"; break;
+ case BuiltinType::ULongLong: Out << "_K"; break;
+ case BuiltinType::Int128: Out << "_L"; break;
+ case BuiltinType::UInt128: Out << "_M"; break;
+ case BuiltinType::Bool: Out << "_N"; break;
+ case BuiltinType::WChar: Out << "_W"; break;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ assert(false &&
+ "Overloaded and dependent types shouldn't get to name mangling");
+ break;
+ case BuiltinType::UndeducedAuto:
+ assert(0 && "Should not see undeduced auto here");
+ break;
+ case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break;
+ case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break;
+ case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break;
+
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::NullPtr:
+ assert(false && "Don't know how to mangle this type");
+ break;
+ }
+}
+
+// <type> ::= <function-type>
+void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T) {
+ // Structors only appear in decls, so at this point we know it's not a
+ // structor type.
+  // mangleType(MemberPointerType) calls the four-argument mangleType()
+  // overload below directly when the pointee is a function type.
+ mangleType(T, NULL, false, false);
+}
+void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+  llvm_unreachable("Can't mangle K&R-style (unprototyped) function types");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const FunctionType *T,
+ const FunctionDecl *D,
+ bool IsStructor,
+ bool IsInstMethod) {
+ // <function-type> ::= <this-cvr-qualifiers> <calling-convention>
+ // <return-type> <argument-list> <throw-spec>
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+
+ // If this is a C++ instance method, mangle the CVR qualifiers for the
+ // this pointer.
+ if (IsInstMethod)
+ mangleQualifiers(Qualifiers::fromCVRMask(Proto->getTypeQuals()), false);
+
+ mangleCallingConvention(T);
+
+ // <return-type> ::= <type>
+ // ::= @ # structors (they have no declared return type)
+ if (IsStructor)
+ Out << '@';
+ else
+ mangleType(Proto->getResultType());
+
+ // <argument-list> ::= X # void
+ // ::= <type>+ @
+ // ::= <type>* Z # varargs
+ if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ Out << 'X';
+ } else {
+ if (D) {
+ // If we got a decl, use the "types-as-written" to make sure arrays
+ // get mangled right.
+ for (FunctionDecl::param_const_iterator Parm = D->param_begin(),
+ ParmEnd = D->param_end();
+ Parm != ParmEnd; ++Parm)
+ mangleType((*Parm)->getTypeSourceInfo()->getType());
+ } else {
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ mangleType(*Arg);
+ }
+ // <builtin-type> ::= Z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'Z';
+ else
+ Out << '@';
+ }
+
+ mangleThrowSpecification(Proto);
+}
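+// Worked example: for a global "void f(int, double)" the routine above emits
+// "AXHN@Z" ('A' __cdecl, 'X' void return, "HN" arguments, '@' list end, 'Z'
+// throw spec); with the name and 'Y' function class this gives "?f@@YAXHN@Z".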
+
+void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
+ // <function-class> ::= A # private: near
+ // ::= B # private: far
+ // ::= C # private: static near
+ // ::= D # private: static far
+ // ::= E # private: virtual near
+ // ::= F # private: virtual far
+ // ::= G # private: thunk near
+ // ::= H # private: thunk far
+ // ::= I # protected: near
+ // ::= J # protected: far
+ // ::= K # protected: static near
+ // ::= L # protected: static far
+ // ::= M # protected: virtual near
+ // ::= N # protected: virtual far
+ // ::= O # protected: thunk near
+ // ::= P # protected: thunk far
+ // ::= Q # public: near
+ // ::= R # public: far
+ // ::= S # public: static near
+ // ::= T # public: static far
+ // ::= U # public: virtual near
+ // ::= V # public: virtual far
+ // ::= W # public: thunk near
+ // ::= X # public: thunk far
+ // ::= Y # global near
+ // ::= Z # global far
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ switch (MD->getAccess()) {
+ default:
+ case AS_private:
+ if (MD->isStatic())
+ Out << 'C';
+ else if (MD->isVirtual())
+ Out << 'E';
+ else
+ Out << 'A';
+ break;
+ case AS_protected:
+ if (MD->isStatic())
+ Out << 'K';
+ else if (MD->isVirtual())
+ Out << 'M';
+ else
+ Out << 'I';
+ break;
+ case AS_public:
+ if (MD->isStatic())
+ Out << 'S';
+ else if (MD->isVirtual())
+ Out << 'U';
+ else
+ Out << 'Q';
+ }
+ } else
+ Out << 'Y';
+}
+void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
+ // <calling-convention> ::= A # __cdecl
+ // ::= B # __export __cdecl
+ // ::= C # __pascal
+ // ::= D # __export __pascal
+ // ::= E # __thiscall
+ // ::= F # __export __thiscall
+ // ::= G # __stdcall
+ // ::= H # __export __stdcall
+ // ::= I # __fastcall
+ // ::= J # __export __fastcall
+ // The 'export' calling conventions are from a bygone era
+ // (*cough*Win16*cough*) when functions were declared for export with
+ // that keyword. (It didn't actually export them, it just made them so
+ // that they could be in a DLL and somebody from another module could call
+ // them.)
+ switch (T->getCallConv()) {
+ case CC_Default:
+ case CC_C: Out << 'A'; break;
+ case CC_X86ThisCall: Out << 'E'; break;
+ case CC_X86StdCall: Out << 'G'; break;
+ case CC_X86FastCall: Out << 'I'; break;
+ }
+}
+void MicrosoftCXXNameMangler::mangleThrowSpecification(
+ const FunctionProtoType *FT) {
+ // <throw-spec> ::= Z # throw(...) (default)
+ // ::= @ # throw() or __declspec/__attribute__((nothrow))
+ // ::= <type>+
+ // NOTE: Since the Microsoft compiler ignores throw specifications, they are
+ // all actually mangled as 'Z'. (They're ignored because their associated
+ // functionality isn't implemented, and probably never will be.)
+ Out << 'Z';
+}
+
+void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T) {
+ assert(false && "Don't know how to mangle UnresolvedUsingTypes yet!");
+}
+
+// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type>
+// <union-type> ::= T <name>
+// <struct-type> ::= U <name>
+// <class-type> ::= V <name>
+// <enum-type> ::= W <size> <name>
+void MicrosoftCXXNameMangler::mangleType(const EnumType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void MicrosoftCXXNameMangler::mangleType(const RecordType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void MicrosoftCXXNameMangler::mangleType(const TagType *T) {
+ switch (T->getDecl()->getTagKind()) {
+ case TTK_Union:
+ Out << 'T';
+ break;
+ case TTK_Struct:
+ Out << 'U';
+ break;
+ case TTK_Class:
+ Out << 'V';
+ break;
+ case TTK_Enum:
+ Out << 'W';
+ Out << getASTContext().getTypeSizeInChars(
+ cast<EnumDecl>(T->getDecl())->getIntegerType()).getQuantity();
+ break;
+ }
+ mangleName(T->getDecl());
+}
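+// e.g. "struct Foo" mangles as "UFoo@@", and an enum with a 4-byte underlying
+// type mangles as "W4Bar@@".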
+
+// <type> ::= <array-type>
+// <array-type> ::= P <cvr-qualifiers> [Y <dimension-count> <dimension>+]
+// <element-type> # as global
+// ::= Q <cvr-qualifiers> [Y <dimension-count> <dimension>+]
+// <element-type> # as param
+// It's supposed to be the other way around, but for some strange reason, it
+// isn't. Today this behavior is retained for the sole purpose of backwards
+// compatibility.
+void MicrosoftCXXNameMangler::mangleType(const ArrayType *T, bool IsGlobal) {
+ // This isn't a recursive mangling, so now we have to do it all in this
+ // one call.
+ if (IsGlobal)
+ Out << 'P';
+ else
+ Out << 'Q';
+ mangleExtraDimensions(T->getElementType());
+}
+void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) {
+ llvm::SmallVector<llvm::APInt, 3> Dimensions;
+ for (;;) {
+ if (ElementTy->isConstantArrayType()) {
+ const ConstantArrayType *CAT =
+ static_cast<const ConstantArrayType *>(ElementTy.getTypePtr());
+ Dimensions.push_back(CAT->getSize());
+ ElementTy = CAT->getElementType();
+ } else if (ElementTy->isVariableArrayType()) {
+ assert(false && "Don't know how to mangle VLAs!");
+ } else if (ElementTy->isDependentSizedArrayType()) {
+ // The dependent expression has to be folded into a constant (TODO).
+ assert(false && "Don't know how to mangle dependent-sized arrays!");
+    } else if (ElementTy->isIncompleteArrayType()) {
+      // An incomplete array contributes no dimension; step through to its
+      // element type. (A bare 'continue' here would loop forever, since
+      // nothing ever changes ElementTy.)
+      ElementTy = static_cast<const IncompleteArrayType *>(
+                    ElementTy.getTypePtr())->getElementType();
+    } else break;
+ }
+ mangleQualifiers(ElementTy.getQualifiers(), false);
+ // If there are any additional dimensions, mangle them now.
+ if (Dimensions.size() > 0) {
+ Out << 'Y';
+ // <dimension-count> ::= <number> # number of extra dimensions
+ mangleNumber(Dimensions.size());
+ for (unsigned Dim = 0; Dim < Dimensions.size(); ++Dim) {
+ mangleNumber(Dimensions[Dim].getLimitedValue());
+ }
+ }
+ mangleType(ElementTy.getLocalUnqualifiedType());
+}
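+// e.g. a global "int a[2]" should produce "PAY01H": 'P' as global, 'A' for no
+// qualifiers, 'Y' plus the dimension count and extent, then 'H' for int
+// (assuming mangleNumber follows the usual MS encoding, writing n as n-1 for
+// small values).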
+
+// <type> ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
+// <class name> <type>
+void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T) {
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ Out << '8';
+ mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleType(FPT, NULL, false, true);
+ } else {
+ mangleQualifiers(PointeeType.getQualifiers(), true);
+ mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleType(PointeeType.getLocalUnqualifiedType());
+ }
+}
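+// e.g. the pointer to data member "int Foo::*" comes out as "PQFoo@@H": 'P'
+// from the pointer prefix, 'Q' near member, the class name, then 'H'.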
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ assert(false && "Don't know how to mangle TemplateTypeParmTypes yet!");
+}
+
+// <type> ::= <pointer-type>
+// <pointer-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
+void MicrosoftCXXNameMangler::mangleType(const PointerType *T) {
+ QualType PointeeTy = T->getPointeeType();
+ if (PointeeTy->isArrayType()) {
+ // Pointers to arrays are mangled like arrays.
+ mangleExtraDimensions(T->getPointeeType());
+ } else if (PointeeTy->isFunctionType()) {
+ // Function pointers are special.
+ Out << '6';
+ mangleType(static_cast<const FunctionType *>(PointeeTy.getTypePtr()),
+ NULL, false, false);
+ } else {
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy);
+ }
+}
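+// e.g. "const char *" mangles as "PBD": 'P' pointer, 'B' const pointee, 'D'
+// char.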
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+ // Object pointers never have qualifiers.
+ Out << 'A';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= <reference-type>
+// <reference-type> ::= A <cvr-qualifiers> <type>
+void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T) {
+ Out << 'A';
+ QualType PointeeTy = T->getPointeeType();
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy);
+}
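+// e.g. "int &" mangles as "AAH": 'A' reference, 'A' unqualified referent, 'H'
+// int.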
+
+void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T) {
+ assert(false && "Don't know how to mangle RValueReferenceTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ComplexType *T) {
+ assert(false && "Don't know how to mangle ComplexTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const VectorType *T) {
+ assert(false && "Don't know how to mangle VectorTypes yet!");
+}
+void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T) {
+ assert(false && "Don't know how to mangle ExtVectorTypes yet!");
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
+ assert(false && "Don't know how to mangle DependentSizedExtVectorTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ // ObjC interfaces have structs underlying them.
+ Out << 'U';
+ mangleName(T->getDecl());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T) {
+ // We don't allow overloading by different protocol qualification,
+ // so mangling them isn't necessary.
+ mangleType(T->getBaseType());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T) {
+ Out << "_E";
+ mangleType(T->getPointeeType());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *T) {
+ assert(false && "Don't know how to mangle InjectedClassNameTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+ assert(false && "Don't know how to mangle TemplateSpecializationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T) {
+ assert(false && "Don't know how to mangle DependentNameTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(
+ const DependentTemplateSpecializationType *T) {
+ assert(false &&
+ "Don't know how to mangle DependentTemplateSpecializationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T) {
+ assert(false && "Don't know how to mangle TypeOfTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T) {
+ assert(false && "Don't know how to mangle TypeOfExprTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T) {
+ assert(false && "Don't know how to mangle DecltypeTypes yet!");
+}
+
+void MicrosoftMangleContext::mangleName(const NamedDecl *D,
+ llvm::SmallVectorImpl<char> &Name) {
+ assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
+ assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+ "Invalid mangleName() call on 'structor decl!");
+
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ getASTContext().getSourceManager(),
+ "Mangling declaration");
+
+ MicrosoftCXXNameMangler Mangler(*this, Name);
+ return Mangler.mangle(D);
+}
+void MicrosoftMangleContext::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle thunks!");
+}
+void MicrosoftMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const ThisAdjustment &,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle destructor thunks!");
+}
+void MicrosoftMangleContext::mangleGuardVariable(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle guard variables!");
+}
+void MicrosoftMangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle virtual tables!");
+}
+void MicrosoftMangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &) {
+ llvm_unreachable("The MS C++ ABI does not have virtual table tables!");
+}
+void MicrosoftMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD,
+ int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::SmallVectorImpl<char> &) {
+ llvm_unreachable("The MS C++ ABI does not have constructor vtables!");
+}
+void MicrosoftMangleContext::mangleCXXRTTI(QualType T,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle RTTI!");
+}
+void MicrosoftMangleContext::mangleCXXRTTIName(QualType T,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle RTTI names!");
+}
+void MicrosoftMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle constructors!");
+}
+void MicrosoftMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ llvm::SmallVectorImpl<char> &) {
+ assert(false && "Can't yet mangle destructors!");
+}
+
+CXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
+ return new MicrosoftCXXABI(CGM);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
index 9905ca6..6d9d277 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -13,7 +13,7 @@
#include "clang/CodeGen/ModuleBuilder.h"
#include "CodeGenModule.h"
-#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
index b29d3cb..c65f203 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
@@ -17,6 +17,7 @@
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/Type.h"
+#include "llvm/Target/TargetData.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"
@@ -280,7 +281,9 @@ class DefaultABIInfo : public ABIInfo {
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
@@ -316,6 +319,10 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+//===----------------------------------------------------------------------===//
+// X86-32 ABI Implementation
+//===----------------------------------------------------------------------===//
+
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
ASTContext &Context;
@@ -343,7 +350,9 @@ public:
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
@@ -599,8 +608,7 @@ llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
uint64_t Offset =
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
@@ -657,9 +665,17 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
return false;
}
+//===----------------------------------------------------------------------===//
+// X86-64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+
namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
+ ASTContext &Context;
+ const llvm::TargetData &TD;
+
enum Class {
Integer = 0,
SSE,
@@ -680,7 +696,7 @@ class X86_64ABIInfo : public ABIInfo {
/// always be either NoClass or the result of a previous merge
/// call. In addition, this should never be Memory (the caller
/// should just return Memory for the aggregate).
- Class merge(Class Accum, Class Field) const;
+ static Class merge(Class Accum, Class Field);
/// classify - Determine the x86_64 register classes in which the
/// given type T should be passed.
@@ -703,8 +719,7 @@ class X86_64ABIInfo : public ABIInfo {
///
/// If the \arg Lo class is ComplexX87, then the \arg Hi class will
/// also be ComplexX87.
- void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
- Class &Lo, Class &Hi) const;
+ void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;
/// getCoerceResult - Given a source type \arg Ty and an LLVM type
/// to coerce to, choose the best way to pass Ty in the same place
@@ -716,30 +731,33 @@ class X86_64ABIInfo : public ABIInfo {
/// type. This makes this code more explicit, and it makes it clearer that we
/// are also doing this for correctness in the case of passing scalar types.
ABIArgInfo getCoerceResult(QualType Ty,
- const llvm::Type *CoerceTo,
- ASTContext &Context) const;
+ const llvm::Type *CoerceTo) const;
/// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
/// result such that the argument will be returned in memory.
- ABIArgInfo getIndirectReturnResult(QualType Ty, ASTContext &Context) const;
+ ABIArgInfo getIndirectReturnResult(QualType Ty) const;
/// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty, ASTContext &Context) const;
+ ABIArgInfo getIndirectResult(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context,
llvm::LLVMContext &VMContext) const;
ABIArgInfo classifyArgumentType(QualType Ty,
- ASTContext &Context,
llvm::LLVMContext &VMContext,
unsigned &neededInt,
- unsigned &neededSSE) const;
+ unsigned &neededSSE,
+ const llvm::Type *PrefType) const;
public:
+ X86_64ABIInfo(ASTContext &Ctx, const llvm::TargetData &td)
+ : Context(Ctx), TD(td) {}
+
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
@@ -747,7 +765,8 @@ public:
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_64TargetCodeGenInfo():TargetCodeGenInfo(new X86_64ABIInfo()) {}
+ X86_64TargetCodeGenInfo(ASTContext &Ctx, const llvm::TargetData &TD)
+ : TargetCodeGenInfo(new X86_64ABIInfo(Ctx, TD)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
return 7;
@@ -771,8 +790,7 @@ public:
}
-X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
- Class Field) const {
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
// AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
// classified recursively so that always two fields are
// considered. The resulting class is calculated according to
@@ -800,22 +818,19 @@ X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
"Invalid accumulated classification during merge.");
if (Accum == Field || Field == NoClass)
return Accum;
- else if (Field == Memory)
+ if (Field == Memory)
return Memory;
- else if (Accum == NoClass)
+ if (Accum == NoClass)
return Field;
- else if (Accum == Integer || Field == Integer)
+ if (Accum == Integer || Field == Integer)
return Integer;
- else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
- Accum == X87 || Accum == X87Up)
+ if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+ Accum == X87 || Accum == X87Up)
return Memory;
- else
- return SSE;
+ return SSE;
}
-void X86_64ABIInfo::classify(QualType Ty,
- ASTContext &Context,
- uint64_t OffsetBase,
+void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Class &Lo, Class &Hi) const {
// FIXME: This code can be simplified by introducing a simple value class for
// Class pairs with appropriate constructor methods for the various
@@ -848,17 +863,29 @@ void X86_64ABIInfo::classify(QualType Ty,
}
// FIXME: _Decimal32 and _Decimal64 are SSE.
// FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
- } else if (const EnumType *ET = Ty->getAs<EnumType>()) {
+ return;
+ }
+
+ if (const EnumType *ET = Ty->getAs<EnumType>()) {
// Classify the underlying integer type.
- classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
- } else if (Ty->hasPointerRepresentation()) {
+ classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
+ return;
+ }
+
+ if (Ty->hasPointerRepresentation()) {
Current = Integer;
- } else if (Ty->isMemberPointerType()) {
+ return;
+ }
+
+ if (Ty->isMemberPointerType()) {
if (Ty->isMemberFunctionPointerType())
Lo = Hi = Integer;
else
Current = Integer;
- } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ return;
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
uint64_t Size = Context.getTypeSize(VT);
if (Size == 32) {
// gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
@@ -890,11 +917,14 @@ void X86_64ABIInfo::classify(QualType Ty,
Lo = SSE;
Hi = SSEUp;
}
- } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ return;
+ }
+
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
QualType ET = Context.getCanonicalType(CT->getElementType());
uint64_t Size = Context.getTypeSize(Ty);
- if (ET->isIntegralType()) {
+ if (ET->isIntegralOrEnumerationType()) {
if (Size <= 64)
Current = Integer;
else if (Size <= 128)
@@ -912,7 +942,11 @@ void X86_64ABIInfo::classify(QualType Ty,
uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
if (Hi == NoClass && EB_Real != EB_Imag)
Hi = Lo;
- } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+
+ return;
+ }
+
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
// Arrays are treated like structures.
uint64_t Size = Context.getTypeSize(Ty);
@@ -936,7 +970,7 @@ void X86_64ABIInfo::classify(QualType Ty,
uint64_t ArraySize = AT->getSize().getZExtValue();
for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
Class FieldLo, FieldHi;
- classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
+ classify(AT->getElementType(), Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
if (Lo == Memory || Hi == Memory)
@@ -947,7 +981,10 @@ void X86_64ABIInfo::classify(QualType Ty,
if (Hi == Memory)
Lo = Memory;
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ return;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
uint64_t Size = Context.getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
@@ -988,7 +1025,7 @@ void X86_64ABIInfo::classify(QualType Ty,
// initialized to class NO_CLASS.
Class FieldLo, FieldHi;
uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
- classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+ classify(i->getType(), Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
if (Lo == Memory || Hi == Memory)
@@ -1047,7 +1084,7 @@ void X86_64ABIInfo::classify(QualType Ty,
FieldHi = EB_Hi ? Integer : NoClass;
}
} else
- classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+ classify(i->getType(), Offset, FieldLo, FieldHi);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
if (Lo == Memory || Hi == Memory)
@@ -1074,9 +1111,8 @@ void X86_64ABIInfo::classify(QualType Ty,
}
ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
- const llvm::Type *CoerceTo,
- ASTContext &Context) const {
- if (CoerceTo == llvm::Type::getInt64Ty(CoerceTo->getContext())) {
+ const llvm::Type *CoerceTo) const {
+ if (CoerceTo->isIntegerTy(64) || isa<llvm::PointerType>(CoerceTo)) {
// Integer and pointer types will end up in a general purpose
// register.
@@ -1084,10 +1120,21 @@ ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- if (Ty->isIntegralType() || Ty->hasPointerRepresentation())
+ if (Ty->isIntegralOrEnumerationType() || Ty->hasPointerRepresentation())
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- } else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) {
+
+    // If this is an 8/16/32-bit structure that is passed as an int64, then it
+    // will be passed in the low 8/16/32 bits of a 64-bit GPR, which is the
+    // same as how an i8/i16/i32 is passed. Coerce to an i8/i16/i32 instead of
+    // an i64.
+ switch (Context.getTypeSizeInChars(Ty).getQuantity()) {
+ default: break;
+ case 1: CoerceTo = llvm::Type::getInt8Ty(CoerceTo->getContext()); break;
+ case 2: CoerceTo = llvm::Type::getInt16Ty(CoerceTo->getContext()); break;
+ case 4: CoerceTo = llvm::Type::getInt32Ty(CoerceTo->getContext()); break;
+ }
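+    // e.g. "struct S { short s; };" is 2 bytes, so it is coerced to i16 here
+    // rather than being widened to a full i64.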
+
+ } else if (CoerceTo->isDoubleTy()) {
assert(Ty.isCanonical() && "should always have a canonical type here");
assert(!Ty.hasQualifiers() && "should never have a qualified type here");
@@ -1095,13 +1142,17 @@ ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
if (Ty == Context.FloatTy || Ty == Context.DoubleTy)
return ABIArgInfo::getDirect();
+ // If this is a 32-bit structure that is passed as a double, then it will be
+  // passed in the low 32 bits of the XMM register, which is the same as how a
+ // float is passed. Coerce to a float instead of a double.
+ if (Context.getTypeSizeInChars(Ty).getQuantity() == 4)
+ CoerceTo = llvm::Type::getFloatTy(CoerceTo->getContext());
}
return ABIArgInfo::getCoerce(CoerceTo);
}
-ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty,
- ASTContext &Context) const {
+ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
// If this is a scalar LLVM value then assume LLVM will pass it in the right
// place naturally.
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
@@ -1116,8 +1167,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty,
return ABIArgInfo::getIndirect(0);
}
-ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
- ASTContext &Context) const {
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
// If this is a scalar LLVM value then assume LLVM will pass it in the right
// place naturally.
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
@@ -1141,13 +1191,12 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
return ABIArgInfo::getIndirect(0);
}
-ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ABIArgInfo X86_64ABIInfo::
+classifyReturnType(QualType RetTy, llvm::LLVMContext &VMContext) const {
// AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
// classification algorithm.
X86_64ABIInfo::Class Lo, Hi;
- classify(RetTy, Context, 0, Lo, Hi);
+ classify(RetTy, 0, Lo, Hi);
// Check some invariants.
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
@@ -1166,7 +1215,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
// AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
// hidden argument.
case Memory:
- return getIndirectReturnResult(RetTy, Context);
+ return getIndirectReturnResult(RetTy);
// AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
// available register of the sequence %rax, %rdx is used.
@@ -1236,15 +1285,40 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
break;
}
- return getCoerceResult(RetTy, ResType, Context);
+ return getCoerceResult(RetTy, ResType);
}
-ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
+static const llvm::Type *Get8ByteTypeAtOffset(const llvm::Type *PrefType,
+ unsigned Offset,
+ const llvm::TargetData &TD) {
+ if (PrefType == 0) return 0;
+
+  // Pointers are always 8 bytes wide at offset 0.
+ if (Offset == 0 && isa<llvm::PointerType>(PrefType))
+ return PrefType;
+
+ // TODO: 1/2/4/8 byte integers are also interesting, but we have to know that
+ // the "hole" is not used in the containing struct (just undef padding).
+ const llvm::StructType *STy = dyn_cast<llvm::StructType>(PrefType);
+ if (STy == 0) return 0;
+
+ // If this is a struct, recurse into the field at the specified offset.
+ const llvm::StructLayout *SL = TD.getStructLayout(STy);
+ if (Offset >= SL->getSizeInBytes()) return 0;
+
+ unsigned FieldIdx = SL->getElementContainingOffset(Offset);
+ Offset -= SL->getElementOffset(FieldIdx);
+
+ return Get8ByteTypeAtOffset(STy->getElementType(FieldIdx), Offset, TD);
+}
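+// e.g. given the IR struct type {i32, i8*}, offset 8 lands in the i8* field,
+// so the high eightbyte can be passed as a pointer instead of a generic i64.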
+
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
llvm::LLVMContext &VMContext,
unsigned &neededInt,
- unsigned &neededSSE) const {
+ unsigned &neededSSE,
+ const llvm::Type *PrefType)const{
X86_64ABIInfo::Class Lo, Hi;
- classify(Ty, Context, 0, Lo, Hi);
+ classify(Ty, 0, Lo, Hi);
// Check some invariants.
// FIXME: Enforce these by construction.
@@ -1267,7 +1341,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
// COMPLEX_X87, it is passed in memory.
case X87:
case ComplexX87:
- return getIndirectResult(Ty, Context);
+ return getIndirectResult(Ty);
case SSEUp:
case X87Up:
@@ -1277,8 +1351,16 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
// available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
// and %r9 is used.
case Integer:
- ++neededInt;
+ // It is always safe to classify this as an i64 argument.
ResType = llvm::Type::getInt64Ty(VMContext);
+ ++neededInt;
+
+ // If we can choose a better 8-byte type based on the preferred type, and if
+ // that type is still passed in a GPR, use it.
+ if (const llvm::Type *PrefTypeLo = Get8ByteTypeAtOffset(PrefType, 0, TD))
+ if (isa<llvm::IntegerType>(PrefTypeLo) ||
+ isa<llvm::PointerType>(PrefTypeLo))
+ ResType = PrefTypeLo;
break;
// AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
@@ -1301,11 +1383,22 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
break;
case NoClass: break;
- case Integer:
- ResType = llvm::StructType::get(VMContext, ResType,
- llvm::Type::getInt64Ty(VMContext), NULL);
+
+ case Integer: {
+ // It is always safe to classify this as an i64 argument.
+ const llvm::Type *HiType = llvm::Type::getInt64Ty(VMContext);
++neededInt;
+
+ // If we can choose a better 8-byte type based on the preferred type, and if
+ // that type is still passed in a GPR, use it.
+ if (const llvm::Type *PrefTypeHi = Get8ByteTypeAtOffset(PrefType, 8, TD))
+ if (isa<llvm::IntegerType>(PrefTypeHi) ||
+ isa<llvm::PointerType>(PrefTypeHi))
+ HiType = PrefTypeHi;
+
+ ResType = llvm::StructType::get(VMContext, ResType, HiType, NULL);
break;
+ }
// X87Up generally doesn't occur here (long double is passed in
// memory), except in situations involving unions.
@@ -1325,13 +1418,14 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
break;
}
- return getCoerceResult(Ty, ResType, Context);
+ return getCoerceResult(Ty, ResType);
}
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
- Context, VMContext);
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), VMContext);
// Keep track of the number of assigned registers.
unsigned freeIntRegs = 6, freeSSERegs = 8;
@@ -1345,9 +1439,17 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
// get assigned (in left-to-right order) for passing as follows...
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it) {
+ // If the client specified a preferred IR type to use, pass it down to
+ // classifyArgumentType.
+ const llvm::Type *PrefType = 0;
+ if (NumPrefTypes) {
+ PrefType = *PrefTypes++;
+ --NumPrefTypes;
+ }
+
unsigned neededInt, neededSSE;
- it->info = classifyArgumentType(it->type, Context, VMContext,
- neededInt, neededSSE);
+ it->info = classifyArgumentType(it->type, VMContext,
+ neededInt, neededSSE, PrefType);
// AMD64-ABI 3.2.3p3: If there are no registers available for any
// eightbyte of an argument, the whole argument is passed on the
@@ -1357,7 +1459,7 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
freeIntRegs -= neededInt;
freeSSERegs -= neededSSE;
} else {
- it->info = getIndirectResult(it->type, Context);
+ it->info = getIndirectResult(it->type);
}
}
}
@@ -1380,12 +1482,11 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
// overflow_arg_area = (overflow_arg_area + 15) & ~15;
llvm::Value *Offset =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15);
+ llvm::ConstantInt::get(CGF.Int32Ty, 15);
overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
- llvm::Type::getInt64Ty(CGF.getLLVMContext()));
- llvm::Value *Mask = llvm::ConstantInt::get(
- llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL);
+ CGF.Int64Ty);
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL);
overflow_arg_area =
CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
overflow_arg_area->getType(),
@@ -1405,8 +1506,7 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
llvm::Value *Offset =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
- (SizeInBytes + 7) & ~7);
+ llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
"overflow_arg_area.next");
CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
@@ -1418,8 +1518,6 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::LLVMContext &VMContext = CGF.getLLVMContext();
- const llvm::Type *i32Ty = llvm::Type::getInt32Ty(VMContext);
- const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
@@ -1431,8 +1529,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
unsigned neededInt, neededSSE;
Ty = CGF.getContext().getCanonicalType(Ty);
- ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
- neededInt, neededSSE);
+ ABIArgInfo AI = classifyArgumentType(Ty, VMContext, neededInt, neededSSE, 0);
// AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
// in the registers. If not go to step 7.
@@ -1456,21 +1553,16 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
if (neededInt) {
gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
- InRegs =
- CGF.Builder.CreateICmpULE(gp_offset,
- llvm::ConstantInt::get(i32Ty,
- 48 - neededInt * 8),
- "fits_in_gp");
+ InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
+ InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
}
if (neededSSE) {
fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
llvm::Value *FitsInFP =
- CGF.Builder.CreateICmpULE(fp_offset,
- llvm::ConstantInt::get(i32Ty,
- 176 - neededSSE * 16),
- "fits_in_fp");
+ llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
+ FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
}
@@ -1525,45 +1617,42 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
RegAddr = CGF.Builder.CreateBitCast(RegAddr,
llvm::PointerType::getUnqual(LTy));
+ } else if (neededSSE == 1) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
} else {
- if (neededSSE == 1) {
- RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- RegAddr = CGF.Builder.CreateBitCast(RegAddr,
- llvm::PointerType::getUnqual(LTy));
- } else {
- assert(neededSSE == 2 && "Invalid number of needed registers!");
- // SSE registers are spaced 16 bytes apart in the register save
- // area, we need to collect the two eightbytes together.
- llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
- llvm::Value *RegAddrHi =
- CGF.Builder.CreateGEP(RegAddrLo,
- llvm::ConstantInt::get(i32Ty, 16));
- const llvm::Type *DblPtrTy =
- llvm::PointerType::getUnqual(DoubleTy);
- const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
- DoubleTy, NULL);
- llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
- DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
- DblPtrTy));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
- RegAddr = CGF.Builder.CreateBitCast(Tmp,
- llvm::PointerType::getUnqual(LTy));
- }
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+ // SSE registers are spaced 16 bytes apart in the register save
+ // area, we need to collect the two eightbytes together.
+ llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
+ const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
+ const llvm::Type *DblPtrTy =
+ llvm::PointerType::getUnqual(DoubleTy);
+ const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
+ DoubleTy, NULL);
+ llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
}
// AMD64-ABI 3.5.7p5: Step 5. Set:
// l->gp_offset = l->gp_offset + num_gp * 8
// l->fp_offset = l->fp_offset + num_fp * 16.
if (neededInt) {
- llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededInt * 8);
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
gp_offset_p);
}
if (neededSSE) {
- llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededSSE * 16);
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
fp_offset_p);
}
@@ -1582,11 +1671,14 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
ResAddr->reserveOperandSpace(2);
ResAddr->addIncoming(RegAddr, InRegBlock);
ResAddr->addIncoming(MemAddr, InMemBlock);
-
return ResAddr;
}
+
+
+//===----------------------------------------------------------------------===//
// PIC16 ABI Implementation
+//===----------------------------------------------------------------------===//
namespace {
@@ -1600,7 +1692,9 @@ class PIC16ABIInfo : public ABIInfo {
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
@@ -1636,7 +1730,7 @@ ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
}
llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+ CodeGenFunction &CGF) const {
const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
@@ -1719,7 +1813,9 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
}
+//===----------------------------------------------------------------------===//
// ARM ABI Implementation
+//===----------------------------------------------------------------------===//
namespace {
@@ -1749,7 +1845,9 @@ private:
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
@@ -1768,7 +1866,9 @@ public:
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
@@ -1776,14 +1876,23 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
it->info = classifyArgumentType(it->type, Context, VMContext);
}
- // ARM always overrides the calling convention.
+ const llvm::Triple &Triple(Context.Target.getTriple());
+ llvm::CallingConv::ID DefaultCC;
+ if (Triple.getEnvironmentName() == "gnueabi" ||
+ Triple.getEnvironmentName() == "eabi")
+ DefaultCC = llvm::CallingConv::ARM_AAPCS;
+ else
+ DefaultCC = llvm::CallingConv::ARM_APCS;
+
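+  // With the default known, an explicit override is only emitted when the
+  // desired convention differs from it (e.g. AAPCS needs no override on a
+  // "gnueabi" triple).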
switch (getABIKind()) {
case APCS:
- FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
+ if (DefaultCC != llvm::CallingConv::ARM_APCS)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
break;
case AAPCS:
- FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
+ if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
break;
case AAPCS_VFP:
@@ -1808,6 +1917,11 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
if (isEmptyRecord(Context, Ty, true))
return ABIArgInfo::getIgnore();
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
// FIXME: This is kind of nasty... but there isn't much choice because the ARM
// backend doesn't support byval.
// FIXME: This doesn't handle alignment > 64 bits.
@@ -1927,6 +2041,11 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
// Are we following APCS?
if (getABIKind() == APCS) {
if (isEmptyRecord(Context, RetTy, false))
@@ -1976,7 +2095,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
}
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
+ CodeGenFunction &CGF) const {
// FIXME: Need to handle alignment
const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
@@ -1992,8 +2111,7 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
uint64_t Offset =
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
llvm::Value *NextAddr =
- Builder.CreateGEP(Addr, llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
@@ -2017,7 +2135,9 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
}
}
+//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
+//===----------------------------------------------------------------------===//
namespace {
@@ -2031,7 +2151,9 @@ class SystemZABIInfo : public ABIInfo {
llvm::LLVMContext &VMContext) const;
virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ llvm::LLVMContext &VMContext,
+ const llvm::Type *const *PrefTypes,
+ unsigned NumPrefTypes) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
Context, VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
@@ -2101,7 +2223,9 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
}
}
+//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
+//===----------------------------------------------------------------------===//
namespace {
@@ -2138,8 +2262,11 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
}
}
+//===----------------------------------------------------------------------===//
// MIPS ABI Implementation. This works for both little-endian and
// big-endian variants.
+//===----------------------------------------------------------------------===//
+
namespace {
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
public:
@@ -2195,10 +2322,10 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
// For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
// free it.
- const llvm::Triple &Triple(getContext().Target.getTriple());
+ const llvm::Triple &Triple = getContext().Target.getTriple();
switch (Triple.getArch()) {
default:
- return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo);
+ return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo());
case llvm::Triple::mips:
case llvm::Triple::mipsel:
@@ -2247,6 +2374,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
}
case llvm::Triple::x86_64:
- return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo());
+ return *(TheTargetCodeGenInfo =
+ new X86_64TargetCodeGenInfo(Context, TheTargetData));
}
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Action.cpp b/contrib/llvm/tools/clang/lib/Driver/Action.cpp
index b9a3306..f34971b 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Action.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Action.cpp
@@ -30,6 +30,7 @@ const char *Action::getClassName(ActionClass AC) {
case AssembleJobClass: return "assembler";
case LinkJobClass: return "linker";
case LipoJobClass: return "lipo";
+ case DsymutilJobClass: return "dsymutil";
}
assert(0 && "invalid class");
@@ -79,3 +80,7 @@ LinkJobAction::LinkJobAction(ActionList &Inputs, types::ID Type)
LipoJobAction::LipoJobAction(ActionList &Inputs, types::ID Type)
: JobAction(LipoJobClass, Inputs, Type) {
}
+
+DsymutilJobAction::DsymutilJobAction(ActionList &Inputs, types::ID Type)
+ : JobAction(DsymutilJobClass, Inputs, Type) {
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Arg.cpp b/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
index 7e61a1d..83d0d26 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
@@ -10,47 +10,59 @@
#include "clang/Driver/Arg.h"
#include "clang/Driver/ArgList.h"
#include "clang/Driver/Option.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang::driver;
-Arg::Arg(ArgClass _Kind, const Option *_Opt, unsigned _Index,
- const Arg *_BaseArg)
- : Kind(_Kind), Opt(_Opt), BaseArg(_BaseArg), Index(_Index), Claimed(false) {
+Arg::Arg(const Option *_Opt, unsigned _Index, const Arg *_BaseArg)
+ : Opt(_Opt), BaseArg(_BaseArg), Index(_Index),
+ Claimed(false), OwnsValues(false) {
}
-Arg::~Arg() { }
+Arg::Arg(const Option *_Opt, unsigned _Index,
+ const char *Value0, const Arg *_BaseArg)
+ : Opt(_Opt), BaseArg(_BaseArg), Index(_Index),
+ Claimed(false), OwnsValues(false) {
+ Values.push_back(Value0);
+}
+
+Arg::Arg(const Option *_Opt, unsigned _Index,
+ const char *Value0, const char *Value1, const Arg *_BaseArg)
+ : Opt(_Opt), BaseArg(_BaseArg), Index(_Index),
+ Claimed(false), OwnsValues(false) {
+ Values.push_back(Value0);
+ Values.push_back(Value1);
+}
+
+Arg::~Arg() {
+ if (OwnsValues) {
+ for (unsigned i = 0, e = Values.size(); i != e; ++i)
+ delete[] Values[i];
+ }
+}
void Arg::dump() const {
llvm::errs() << "<";
- switch (Kind) {
- default:
- assert(0 && "Invalid kind");
-#define P(N) case N: llvm::errs() << #N; break
- P(FlagClass);
- P(PositionalClass);
- P(JoinedClass);
- P(SeparateClass);
- P(CommaJoinedClass);
- P(JoinedAndSeparateClass);
-#undef P
- }
llvm::errs() << " Opt:";
Opt->dump();
llvm::errs() << " Index:" << Index;
- if (isa<CommaJoinedArg>(this) || isa<SeparateArg>(this))
- llvm::errs() << " NumValues:" << getNumValues();
+ llvm::errs() << " Values: [";
+ for (unsigned i = 0, e = Values.size(); i != e; ++i) {
+ if (i) llvm::errs() << ", ";
+ llvm::errs() << "'" << Values[i] << "'";
+ }
- llvm::errs() << ">\n";
+ llvm::errs() << "]>\n";
}
std::string Arg::getAsString(const ArgList &Args) const {
- std::string Res;
- llvm::raw_string_ostream OS(Res);
+ llvm::SmallString<256> Res;
+ llvm::raw_svector_ostream OS(Res);
ArgStringList ASL;
render(Args, ASL);
@@ -74,117 +86,36 @@ void Arg::renderAsInput(const ArgList &Args, ArgStringList &Output) const {
Output.push_back(getValue(Args, i));
}
-FlagArg::FlagArg(const Option *Opt, unsigned Index, const Arg *BaseArg)
- : Arg(FlagClass, Opt, Index, BaseArg) {
-}
-
-void FlagArg::render(const ArgList &Args, ArgStringList &Output) const {
- Output.push_back(Args.getArgString(getIndex()));
-}
-
-const char *FlagArg::getValue(const ArgList &Args, unsigned N) const {
- assert(0 && "Invalid index.");
- return 0;
-}
-
-PositionalArg::PositionalArg(const Option *Opt, unsigned Index,
- const Arg *BaseArg)
- : Arg(PositionalClass, Opt, Index, BaseArg) {
-}
-
-void PositionalArg::render(const ArgList &Args, ArgStringList &Output) const {
- Output.push_back(Args.getArgString(getIndex()));
-}
-
-const char *PositionalArg::getValue(const ArgList &Args, unsigned N) const {
- assert(N < getNumValues() && "Invalid index.");
- return Args.getArgString(getIndex());
-}
-
-JoinedArg::JoinedArg(const Option *Opt, unsigned Index, const Arg *BaseArg)
- : Arg(JoinedClass, Opt, Index, BaseArg) {
-}
-
-void JoinedArg::render(const ArgList &Args, ArgStringList &Output) const {
- if (getOption().hasForceSeparateRender()) {
- Output.push_back(getOption().getName());
- Output.push_back(getValue(Args, 0));
- } else {
- Output.push_back(Args.getArgString(getIndex()));
- }
-}
-
-const char *JoinedArg::getValue(const ArgList &Args, unsigned N) const {
- assert(N < getNumValues() && "Invalid index.");
- // FIXME: Avoid strlen.
- return Args.getArgString(getIndex()) + strlen(getOption().getName());
-}
-
-CommaJoinedArg::CommaJoinedArg(const Option *Opt, unsigned Index,
- const char *Str, const Arg *BaseArg)
- : Arg(CommaJoinedClass, Opt, Index, BaseArg) {
- const char *Prev = Str;
- for (;; ++Str) {
- char c = *Str;
-
- if (!c) {
- if (Prev != Str)
- Values.push_back(std::string(Prev, Str));
- break;
- } else if (c == ',') {
- if (Prev != Str)
- Values.push_back(std::string(Prev, Str));
- Prev = Str + 1;
+void Arg::render(const ArgList &Args, ArgStringList &Output) const {
+ switch (getOption().getRenderStyle()) {
+ case Option::RenderValuesStyle:
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i)
+ Output.push_back(getValue(Args, i));
+ break;
+
+ case Option::RenderCommaJoinedStyle: {
+ llvm::SmallString<256> Res;
+ llvm::raw_svector_ostream OS(Res);
+ OS << getOption().getName();
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i) {
+ if (i) OS << ',';
+ OS << getValue(Args, i);
}
+ Output.push_back(Args.MakeArgString(OS.str()));
+ break;
}
-}
-
-void CommaJoinedArg::render(const ArgList &Args, ArgStringList &Output) const {
- Output.push_back(Args.getArgString(getIndex()));
-}
-
-const char *CommaJoinedArg::getValue(const ArgList &Args, unsigned N) const {
- assert(N < getNumValues() && "Invalid index.");
- return Values[N].c_str();
-}
-
-SeparateArg::SeparateArg(const Option *Opt, unsigned Index, unsigned _NumValues,
- const Arg *BaseArg)
- : Arg(SeparateClass, Opt, Index, BaseArg), NumValues(_NumValues) {
-}
+
+ case Option::RenderJoinedStyle:
+ Output.push_back(Args.GetOrMakeJoinedArgString(
+ getIndex(), getOption().getName(), getValue(Args, 0)));
+ for (unsigned i = 1, e = getNumValues(); i != e; ++i)
+ Output.push_back(getValue(Args, i));
+ break;
-void SeparateArg::render(const ArgList &Args, ArgStringList &Output) const {
- if (getOption().hasForceJoinedRender()) {
- assert(getNumValues() == 1 && "Cannot force joined render with > 1 args.");
- Output.push_back(Args.MakeArgString(llvm::StringRef(getOption().getName()) +
- getValue(Args, 0)));
- } else {
- Output.push_back(Args.getArgString(getIndex()));
- for (unsigned i = 0; i < NumValues; ++i)
+ case Option::RenderSeparateStyle:
+ Output.push_back(getOption().getName());
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i)
Output.push_back(getValue(Args, i));
+ break;
}
}
-
-const char *SeparateArg::getValue(const ArgList &Args, unsigned N) const {
- assert(N < getNumValues() && "Invalid index.");
- return Args.getArgString(getIndex() + 1 + N);
-}
-
-JoinedAndSeparateArg::JoinedAndSeparateArg(const Option *Opt, unsigned Index,
- const Arg *BaseArg)
- : Arg(JoinedAndSeparateClass, Opt, Index, BaseArg) {
-}
-
-void JoinedAndSeparateArg::render(const ArgList &Args,
- ArgStringList &Output) const {
- Output.push_back(Args.getArgString(getIndex()));
- Output.push_back(Args.getArgString(getIndex() + 1));
-}
-
-const char *JoinedAndSeparateArg::getValue(const ArgList &Args,
- unsigned N) const {
- assert(N < getNumValues() && "Invalid index.");
- if (N == 0)
- return Args.getArgString(getIndex()) + strlen(getOption().getName());
- return Args.getArgString(getIndex() + 1);
-}
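
The Arg.cpp rewrite above retires the FlagArg/PositionalArg/JoinedArg/SeparateArg/CommaJoinedArg/JoinedAndSeparateArg hierarchy: values are now stored on a single Arg, and rendering is driven by a per-Option style. A simplified standalone sketch of that dispatch, with plain std::string standing in for the driver's interned arg strings (assumed names, not the patch's signatures):

    #include <string>
    #include <vector>

    enum RenderStyle { RenderValues, RenderJoined, RenderSeparate };

    struct OptSketch { std::string Name; RenderStyle Style; };

    // Render one parsed argument back into command-line strings.
    std::vector<std::string> render(const OptSketch &O,
                                    const std::vector<std::string> &Vals) {
      std::vector<std::string> Out;
      switch (O.Style) {
      case RenderValues:               // inputs: just the values themselves
        Out = Vals;
        break;
      case RenderJoined:               // -DFOO: option fused with first value
        Out.push_back(O.Name + (Vals.empty() ? "" : Vals[0]));
        break;
      case RenderSeparate:             // -o foo: option, then each value
        Out.push_back(O.Name);
        Out.insert(Out.end(), Vals.begin(), Vals.end());
        break;
      }
      return Out;
    }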
diff --git a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
index 3d07431..9101523 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
@@ -36,7 +36,7 @@ void arg_iterator::SkipToNextArg() {
//
-ArgList::ArgList(arglist_type &_Args) : Args(_Args) {
+ArgList::ArgList() {
}
ArgList::~ArgList() {
@@ -62,12 +62,14 @@ Arg *ArgList::getLastArg(OptSpecifier Id) const {
}
Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1) const {
- Arg *Res, *A0 = getLastArgNoClaim(Id0), *A1 = getLastArgNoClaim(Id1);
-
- if (A0 && A1)
- Res = A0->getIndex() > A1->getIndex() ? A0 : A1;
- else
- Res = A0 ? A0 : A1;
+ Arg *Res = 0;
+ for (const_reverse_iterator it = rbegin(), ie = rend(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id0) ||
+ (*it)->getOption().matches(Id1)) {
+ Res = *it;
+ break;
+ }
+ }
if (Res)
Res->claim();
@@ -78,24 +80,32 @@ Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1) const {
Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
OptSpecifier Id2) const {
Arg *Res = 0;
- Arg *A0 = getLastArgNoClaim(Id0);
- Arg *A1 = getLastArgNoClaim(Id1);
- Arg *A2 = getLastArgNoClaim(Id2);
-
- int A0Idx = A0 ? (int) A0->getIndex() : -1;
- int A1Idx = A1 ? (int) A1->getIndex() : -1;
- int A2Idx = A2 ? (int) A2->getIndex() : -1;
-
- if (A0Idx > A1Idx) {
- if (A0Idx > A2Idx)
- Res = A0;
- else if (A2Idx != -1)
- Res = A2;
- } else {
- if (A1Idx > A2Idx)
- Res = A1;
- else if (A2Idx != -1)
- Res = A2;
+ for (const_reverse_iterator it = rbegin(), ie = rend(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id0) ||
+ (*it)->getOption().matches(Id1) ||
+ (*it)->getOption().matches(Id2)) {
+ Res = *it;
+ break;
+ }
+ }
+
+ if (Res)
+ Res->claim();
+
+ return Res;
+}
+
+Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
+ OptSpecifier Id2, OptSpecifier Id3) const {
+ Arg *Res = 0;
+ for (const_reverse_iterator it = rbegin(), ie = rend(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id0) ||
+ (*it)->getOption().matches(Id1) ||
+ (*it)->getOption().matches(Id2) ||
+ (*it)->getOption().matches(Id3)) {
+ Res = *it;
+ break;
+ }
}
if (Res)
@@ -147,8 +157,8 @@ void ArgList::AddAllArgs(ArgStringList &Output, OptSpecifier Id0,
OptSpecifier Id1, OptSpecifier Id2) const {
for (arg_iterator it = filtered_begin(Id0, Id1, Id2),
ie = filtered_end(); it != ie; ++it) {
- it->claim();
- it->render(*this, Output);
+ (*it)->claim();
+ (*it)->render(*this, Output);
}
}
@@ -156,9 +166,9 @@ void ArgList::AddAllArgValues(ArgStringList &Output, OptSpecifier Id0,
OptSpecifier Id1, OptSpecifier Id2) const {
for (arg_iterator it = filtered_begin(Id0, Id1, Id2),
ie = filtered_end(); it != ie; ++it) {
- it->claim();
- for (unsigned i = 0, e = it->getNumValues(); i != e; ++i)
- Output.push_back(it->getValue(*this, i));
+ (*it)->claim();
+ for (unsigned i = 0, e = (*it)->getNumValues(); i != e; ++i)
+ Output.push_back((*it)->getValue(*this, i));
}
}
@@ -167,14 +177,14 @@ void ArgList::AddAllArgsTranslated(ArgStringList &Output, OptSpecifier Id0,
bool Joined) const {
for (arg_iterator it = filtered_begin(Id0),
ie = filtered_end(); it != ie; ++it) {
- it->claim();
+ (*it)->claim();
if (Joined) {
Output.push_back(MakeArgString(llvm::StringRef(Translation) +
- it->getValue(*this, 0)));
+ (*it)->getValue(*this, 0)));
} else {
Output.push_back(Translation);
- Output.push_back(it->getValue(*this, 0));
+ Output.push_back((*it)->getValue(*this, 0));
}
}
}
@@ -182,7 +192,7 @@ void ArgList::AddAllArgsTranslated(ArgStringList &Output, OptSpecifier Id0,
void ArgList::ClaimAllArgs(OptSpecifier Id0) const {
for (arg_iterator it = filtered_begin(Id0),
ie = filtered_end(); it != ie; ++it)
- it->claim();
+ (*it)->claim();
}
const char *ArgList::MakeArgString(const llvm::Twine &T) const {
@@ -191,10 +201,21 @@ const char *ArgList::MakeArgString(const llvm::Twine &T) const {
return MakeArgString(Str.str());
}
+const char *ArgList::GetOrMakeJoinedArgString(unsigned Index,
+ llvm::StringRef LHS,
+ llvm::StringRef RHS) const {
+ llvm::StringRef Cur = getArgString(Index);
+ if (Cur.size() == LHS.size() + RHS.size() &&
+ Cur.startswith(LHS) && Cur.endswith(RHS))
+ return Cur.data();
+
+ return MakeArgString(LHS + RHS);
+}
+
//
InputArgList::InputArgList(const char **ArgBegin, const char **ArgEnd)
- : ArgList(ActualArgs), NumInputArgStrings(ArgEnd - ArgBegin) {
+ : NumInputArgStrings(ArgEnd - ArgBegin) {
ArgStrings.append(ArgBegin, ArgEnd);
}
@@ -229,9 +250,8 @@ const char *InputArgList::MakeArgString(llvm::StringRef Str) const {
//
-DerivedArgList::DerivedArgList(InputArgList &_BaseArgs, bool _OnlyProxy)
- : ArgList(_OnlyProxy ? _BaseArgs.getArgs() : ActualArgs),
- BaseArgs(_BaseArgs), OnlyProxy(_OnlyProxy) {
+DerivedArgList::DerivedArgList(const InputArgList &_BaseArgs)
+ : BaseArgs(_BaseArgs) {
}
DerivedArgList::~DerivedArgList() {
@@ -246,30 +266,33 @@ const char *DerivedArgList::MakeArgString(llvm::StringRef Str) const {
}
Arg *DerivedArgList::MakeFlagArg(const Arg *BaseArg, const Option *Opt) const {
- Arg *A = new FlagArg(Opt, BaseArgs.MakeIndex(Opt->getName()), BaseArg);
+ Arg *A = new Arg(Opt, BaseArgs.MakeIndex(Opt->getName()), BaseArg);
SynthesizedArgs.push_back(A);
return A;
}
Arg *DerivedArgList::MakePositionalArg(const Arg *BaseArg, const Option *Opt,
llvm::StringRef Value) const {
- Arg *A = new PositionalArg(Opt, BaseArgs.MakeIndex(Value), BaseArg);
+ unsigned Index = BaseArgs.MakeIndex(Value);
+ Arg *A = new Arg(Opt, Index, BaseArgs.getArgString(Index), BaseArg);
SynthesizedArgs.push_back(A);
return A;
}
Arg *DerivedArgList::MakeSeparateArg(const Arg *BaseArg, const Option *Opt,
llvm::StringRef Value) const {
- Arg *A = new SeparateArg(Opt, BaseArgs.MakeIndex(Opt->getName(), Value), 1,
- BaseArg);
+ unsigned Index = BaseArgs.MakeIndex(Opt->getName(), Value);
+ Arg *A = new Arg(Opt, Index, BaseArgs.getArgString(Index + 1), BaseArg);
SynthesizedArgs.push_back(A);
return A;
}
Arg *DerivedArgList::MakeJoinedArg(const Arg *BaseArg, const Option *Opt,
llvm::StringRef Value) const {
- Arg *A = new JoinedArg(Opt, BaseArgs.MakeIndex(Opt->getName() + Value.str()),
- BaseArg);
+ unsigned Index = BaseArgs.MakeIndex(Opt->getName() + Value.str());
+ Arg *A = new Arg(Opt, Index,
+ BaseArgs.getArgString(Index) + strlen(Opt->getName()),
+ BaseArg);
SynthesizedArgs.push_back(A);
return A;
}
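
The new getLastArg overloads in ArgList.cpp all share one idea: walk the parsed arguments in reverse and claim the first match, rather than looking each option up separately and comparing indices. Under simplified types (option matching reduced to string equality, which the real OptSpecifier machinery generalizes):

    #include <string>
    #include <vector>

    struct ArgSketch { std::string Opt; bool Claimed; };

    ArgSketch *getLastArg(std::vector<ArgSketch*> &Args,
                          const std::string &Id0, const std::string &Id1) {
      for (std::vector<ArgSketch*>::reverse_iterator it = Args.rbegin(),
             ie = Args.rend(); it != ie; ++it) {
        if ((*it)->Opt == Id0 || (*it)->Opt == Id1) {
          (*it)->Claimed = true;   // claiming suppresses "unused" warnings
          return *it;
        }
      }
      return 0;
    }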
diff --git a/contrib/llvm/tools/clang/lib/Driver/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Driver/CMakeLists.txt
index 5af754d..00d076b 100644
--- a/contrib/llvm/tools/clang/lib/Driver/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/lib/Driver/CMakeLists.txt
@@ -21,5 +21,5 @@ add_clang_library(clangDriver
Types.cpp
)
-add_dependencies(clangDriver ClangDiagnosticDriver ClangDriverOptions
- ClangCC1Options ClangCC1AsOptions)
+add_dependencies(clangDriver ClangAttrList ClangDiagnosticDriver
+ ClangDriverOptions ClangCC1Options ClangCC1AsOptions)
diff --git a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
index 227f79a..282e9fe 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
@@ -22,20 +22,22 @@
#include <errno.h>
using namespace clang::driver;
-Compilation::Compilation(const Driver &D,
- const ToolChain &_DefaultToolChain,
- InputArgList *_Args)
- : TheDriver(D), DefaultToolChain(_DefaultToolChain), Args(_Args) {
+Compilation::Compilation(const Driver &D, const ToolChain &_DefaultToolChain,
+ InputArgList *_Args, DerivedArgList *_TranslatedArgs)
+ : TheDriver(D), DefaultToolChain(_DefaultToolChain), Args(_Args),
+ TranslatedArgs(_TranslatedArgs) {
}
Compilation::~Compilation() {
+ delete TranslatedArgs;
delete Args;
// Free any derived arg lists.
for (llvm::DenseMap<std::pair<const ToolChain*, const char*>,
DerivedArgList*>::iterator it = TCArgs.begin(),
ie = TCArgs.end(); it != ie; ++it)
- delete it->second;
+ if (it->second != TranslatedArgs)
+ delete it->second;
// Free the actions, if built.
for (ActionList::iterator it = Actions.begin(), ie = Actions.end();
@@ -49,8 +51,11 @@ const DerivedArgList &Compilation::getArgsForToolChain(const ToolChain *TC,
TC = &DefaultToolChain;
DerivedArgList *&Entry = TCArgs[std::make_pair(TC, BoundArch)];
- if (!Entry)
- Entry = TC->TranslateArgs(*Args, BoundArch);
+ if (!Entry) {
+ Entry = TC->TranslateArgs(*TranslatedArgs, BoundArch);
+ if (!Entry)
+ Entry = TranslatedArgs;
+ }
return *Entry;
}
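
The destructor change in Compilation.cpp has a subtle point: TCArgs may now map some tool chains to the shared TranslatedArgs list, so that entry must be skipped when freeing the map or it would be deleted twice. The ownership rule in miniature (std::map standing in for llvm::DenseMap, empty types as placeholders):

    #include <map>
    #include <string>

    struct ArgListSketch { /* ... */ };

    struct CompilationSketch {
      ArgListSketch *TranslatedArgs;
      std::map<std::string, ArgListSketch*> TCArgs; // may alias TranslatedArgs

      ~CompilationSketch() {
        for (std::map<std::string, ArgListSketch*>::iterator
               it = TCArgs.begin(), ie = TCArgs.end(); it != ie; ++it)
          if (it->second != TranslatedArgs)  // skip the shared list
            delete it->second;
        delete TranslatedArgs;               // freed exactly once, here
      }
    };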
diff --git a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
index da83803..2fc0a53 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
@@ -39,9 +39,6 @@
using namespace clang::driver;
using namespace clang;
-// Used to set values for "production" clang, for releases.
-// #define USE_PRODUCTION_CLANG
-
Driver::Driver(llvm::StringRef _Name, llvm::StringRef _Dir,
llvm::StringRef _DefaultHostTriple,
llvm::StringRef _DefaultImageName,
@@ -78,6 +75,11 @@ Driver::Driver(llvm::StringRef _Name, llvm::StringRef _Dir,
P.appendComponent("clang");
P.appendComponent(CLANG_VERSION_STRING);
ResourceDir = P.str();
+
+ // Save the original clang executable path.
+ P = Dir;
+ P.appendComponent(Name);
+ ClangExecutable = P.str();
}
Driver::~Driver() {
@@ -110,6 +112,57 @@ InputArgList *Driver::ParseArgStrings(const char **ArgBegin,
return Args;
}
+DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
+ DerivedArgList *DAL = new DerivedArgList(Args);
+
+ for (ArgList::const_iterator it = Args.begin(),
+ ie = Args.end(); it != ie; ++it) {
+ const Arg *A = *it;
+
+ // Unfortunately, we have to parse some forwarding options (-Xassembler,
+ // -Xlinker, -Xpreprocessor) because we either integrate their functionality
+ // (assembler and preprocessor), or bypass a previous driver ('collect2').
+
+  // Rewrite linker options to replace --no-demangle with a custom internal
+  // option.
+ if ((A->getOption().matches(options::OPT_Wl_COMMA) ||
+ A->getOption().matches(options::OPT_Xlinker)) &&
+ A->containsValue("--no-demangle")) {
+ // Add the rewritten no-demangle argument.
+ DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_Xlinker__no_demangle));
+
+ // Add the remaining values as Xlinker arguments.
+ for (unsigned i = 0, e = A->getNumValues(); i != e; ++i)
+ if (llvm::StringRef(A->getValue(Args, i)) != "--no-demangle")
+ DAL->AddSeparateArg(A, Opts->getOption(options::OPT_Xlinker),
+ A->getValue(Args, i));
+
+ continue;
+ }
+
+  // Rewrite preprocessor options to replace -Wp,-MD,FOO, which is used by
+  // some build systems. We don't try to be complete here because we don't
+  // care to encourage this usage model.
+ if (A->getOption().matches(options::OPT_Wp_COMMA) &&
+ A->getNumValues() == 2 &&
+ (A->getValue(Args, 0) == llvm::StringRef("-MD") ||
+ A->getValue(Args, 0) == llvm::StringRef("-MMD"))) {
+ // Rewrite to -MD/-MMD along with -MF.
+ if (A->getValue(Args, 0) == llvm::StringRef("-MD"))
+ DAL->AddFlagArg(A, Opts->getOption(options::OPT_MD));
+ else
+ DAL->AddFlagArg(A, Opts->getOption(options::OPT_MMD));
+ DAL->AddSeparateArg(A, Opts->getOption(options::OPT_MF),
+ A->getValue(Args, 1));
+ continue;
+ }
+
+ DAL->append(*it);
+ }
+
+ return DAL;
+}
+
Compilation *Driver::BuildCompilation(int argc, const char **argv) {
llvm::PrettyStackTraceString CrashInfo("Compilation construction");
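
TranslateInputArgs above rewrites -Wp,-MD,FOO (and the -MMD spelling) into the driver's own -MD/-MMD plus -MF FOO. Just that value rewrite, as a standalone sketch (function and container choices are ours):

    #include <string>
    #include <vector>

    std::vector<std::string> rewriteWp(const std::vector<std::string> &Vals) {
      std::vector<std::string> Out;
      if (Vals.size() == 2 && (Vals[0] == "-MD" || Vals[0] == "-MMD")) {
        Out.push_back(Vals[0]);   // keep the dependency flag itself
        Out.push_back("-MF");     // route the output file through -MF
        Out.push_back(Vals[1]);
      }
      return Out;
    }
    // e.g. {"-MD", "deps.d"} becomes {"-MD", "-MF", "deps.d"}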
@@ -179,12 +232,16 @@ Compilation *Driver::BuildCompilation(int argc, const char **argv) {
Host = GetHostInfo(HostTriple);
+ // Perform the default argument translations.
+ DerivedArgList *TranslatedArgs = TranslateInputArgs(*Args);
+
// The compilation takes ownership of Args.
- Compilation *C = new Compilation(*this, *Host->CreateToolChain(*Args), Args);
+ Compilation *C = new Compilation(*this, *Host->CreateToolChain(*Args), Args,
+ TranslatedArgs);
// FIXME: This behavior shouldn't be here.
if (CCCPrintOptions) {
- PrintOptions(C->getArgs());
+ PrintOptions(C->getInputArgs());
return C;
}
@@ -274,8 +331,6 @@ void Driver::PrintOptions(const ArgList &Args) const {
}
}
-// FIXME: Move -ccc options to real options in the .td file (or eliminate), and
-// then move to using OptTable::PrintHelp.
void Driver::PrintHelp(bool ShowHidden) const {
getOpts().PrintHelp(llvm::outs(), Name.c_str(), DriverTitle.c_str(),
ShowHidden);
@@ -303,14 +358,14 @@ static void PrintDiagnosticCategories(llvm::raw_ostream &OS) {
}
bool Driver::HandleImmediateArgs(const Compilation &C) {
- // The order these options are handled in in gcc is all over the place, but we
+ // The order these options are handled in gcc is all over the place, but we
// don't expect inconsistencies w.r.t. that to matter in practice.
if (C.getArgs().hasArg(options::OPT_dumpversion)) {
llvm::outs() << CLANG_VERSION_STRING "\n";
return false;
}
-
+
if (C.getArgs().hasArg(options::OPT__print_diagnostic_categories)) {
PrintDiagnosticCategories(llvm::outs());
return false;
@@ -457,6 +512,19 @@ void Driver::PrintActions(const Compilation &C) const {
PrintActions1(C, *it, Ids);
}
+/// \brief Check whether the given input tree contains any compilation (or
+/// assembly) actions.
+static bool ContainsCompileAction(const Action *A) {
+ if (isa<CompileJobAction>(A) || isa<AssembleJobAction>(A))
+ return true;
+
+ for (Action::const_iterator it = A->begin(), ie = A->end(); it != ie; ++it)
+ if (ContainsCompileAction(*it))
+ return true;
+
+ return false;
+}
+
void Driver::BuildUniversalActions(const ArgList &Args,
ActionList &Actions) const {
llvm::PrettyStackTraceString CrashInfo("Building universal build actions");
@@ -504,7 +572,8 @@ void Driver::BuildUniversalActions(const ArgList &Args,
ActionList SingleActions;
BuildActions(Args, SingleActions);
- // Add in arch binding and lipo (if necessary) for every top level action.
+ // Add in arch bindings for every top level action, as well as lipo and
+ // dsymutil steps if needed.
for (unsigned i = 0, e = SingleActions.size(); i != e; ++i) {
Action *Act = SingleActions[i];
@@ -531,6 +600,23 @@ void Driver::BuildUniversalActions(const ArgList &Args,
Actions.append(Inputs.begin(), Inputs.end());
else
Actions.push_back(new LipoJobAction(Inputs, Act->getType()));
+
+ // Add a 'dsymutil' step if necessary, when debug info is enabled and we
+ // have a compile input. We need to run 'dsymutil' ourselves in such cases
+  // because the debug info will refer to a temporary object file which
+  // will be removed at the end of the compilation process.
+ if (Act->getType() == types::TY_Image) {
+ Arg *A = Args.getLastArg(options::OPT_g_Group);
+ if (A && !A->getOption().matches(options::OPT_g0) &&
+ !A->getOption().matches(options::OPT_gstabs) &&
+ ContainsCompileAction(Actions.back())) {
+ ActionList Inputs;
+ Inputs.push_back(Actions.back());
+ Actions.pop_back();
+
+ Actions.push_back(new DsymutilJobAction(Inputs, types::TY_dSYM));
+ }
+ }
}
}
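
The dsymutil hunk above wraps the finished top-level action: pop it off the action list and push a dsymutil step that consumes it. In miniature (illustrative types; the real code also checks the -g group and ContainsCompileAction first):

    #include <vector>

    struct ActionSketch { std::vector<ActionSketch*> Inputs; };
    struct DsymutilSketch : ActionSketch {
      explicit DsymutilSketch(ActionSketch *Linked) {
        Inputs.push_back(Linked);
      }
    };

    void addDsymStep(std::vector<ActionSketch*> &Actions) {
      if (Actions.empty()) return;
      ActionSketch *Linked = Actions.back(); // the image we just linked
      Actions.pop_back();
      Actions.push_back(new DsymutilSketch(Linked));
    }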
@@ -783,7 +869,7 @@ Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
} else if (Args.hasArg(options::OPT_emit_llvm) ||
Args.hasArg(options::OPT_flto) || HasO4) {
types::ID Output =
- Args.hasArg(options::OPT_S) ? types::TY_LLVMAsm : types::TY_LLVMBC;
+ Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
return new CompileJobAction(Input, Output);
} else {
return new CompileJobAction(Input, types::TY_PP_Asm);
@@ -962,7 +1048,7 @@ void Driver::BuildJobsForAction(Compilation &C,
// just using Args was better?
const Arg &Input = IA->getInputArg();
Input.claim();
- if (isa<PositionalArg>(Input)) {
+ if (Input.getOption().matches(options::OPT_INPUT)) {
const char *Name = Input.getValue(C.getArgs());
Result = InputInfo(Name, A->getType(), Name);
} else
@@ -992,9 +1078,17 @@ void Driver::BuildJobsForAction(Compilation &C,
InputInfoList InputInfos;
for (ActionList::const_iterator it = Inputs->begin(), ie = Inputs->end();
it != ie; ++it) {
+      // Treat dsymutil sub-jobs as being at the top level too; they shouldn't
+      // get temporary output names.
+ //
+ // FIXME: Clean this up.
+ bool SubJobAtTopLevel = false;
+ if (AtTopLevel && isa<DsymutilJobAction>(A))
+ SubJobAtTopLevel = true;
+
InputInfo II;
BuildJobsForAction(C, *it, TC, BoundArch, TryToUsePipeInput,
- /*AtTopLevel*/false, LinkingOutput, II);
+ SubJobAtTopLevel, LinkingOutput, II);
InputInfos.push_back(II);
}
@@ -1023,6 +1117,11 @@ void Driver::BuildJobsForAction(Compilation &C,
// Always use the first input as the base input.
const char *BaseInput = InputInfos[0].getBaseInput();
+ // ... except dsymutil actions, which use their actual input as the base
+ // input.
+ if (JA->getType() == types::TY_dSYM)
+ BaseInput = InputInfos[0].getFilename();
+
// Determine the place to write output to (nothing, pipe, or filename) and
// where to put the new job.
if (JA->getType() == types::TY_Nothing) {
@@ -1065,7 +1164,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
bool AtTopLevel) const {
llvm::PrettyStackTraceString CrashInfo("Computing output path");
// Output to a user requested destination?
- if (AtTopLevel) {
+ if (AtTopLevel && !isa<DsymutilJobAction>(JA)) {
if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
return C.addResultFile(FinalOutput->getValue(C.getArgs()));
}
@@ -1191,7 +1290,7 @@ const HostInfo *Driver::GetHostInfo(const char *TripleStr) const {
// TCE is an osless target
if (Triple.getArchName() == "tce")
- return createTCEHostInfo(*this, Triple);
+ return createTCEHostInfo(*this, Triple);
switch (Triple.getOS()) {
case llvm::Triple::AuroraUX:
@@ -1204,6 +1303,8 @@ const HostInfo *Driver::GetHostInfo(const char *TripleStr) const {
return createOpenBSDHostInfo(*this, Triple);
case llvm::Triple::FreeBSD:
return createFreeBSDHostInfo(*this, Triple);
+ case llvm::Triple::Minix:
+ return createMinixHostInfo(*this, Triple);
case llvm::Triple::Linux:
return createLinuxHostInfo(*this, Triple);
default:
@@ -1236,8 +1337,8 @@ bool Driver::ShouldUseClangCompiler(const Compilation &C, const JobAction &JA,
// Always use clang for precompiling, AST generation, and rewriting,
// regardless of archs.
- if (isa<PrecompileJobAction>(JA) || JA.getType() == types::TY_AST ||
- JA.getType() == types::TY_RewrittenObjC)
+ if (isa<PrecompileJobAction>(JA) ||
+ types::isOnlyAcceptedByClang(JA.getType()))
return true;
// Finally, don't use clang if this isn't one of the user specified archs to
diff --git a/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp b/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp
index d9e2e37..0636d9e 100644
--- a/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp
@@ -425,6 +425,58 @@ ToolChain *FreeBSDHostInfo::CreateToolChain(const ArgList &Args,
return TC;
}
+// Minix Host Info
+
+/// MinixHostInfo - Minix host information implementation.
+class MinixHostInfo : public HostInfo {
+ /// Cache of tool chains we have created.
+ mutable llvm::StringMap<ToolChain*> ToolChains;
+
+public:
+ MinixHostInfo(const Driver &D, const llvm::Triple& Triple)
+ : HostInfo(D, Triple) {}
+ ~MinixHostInfo();
+
+ virtual bool useDriverDriver() const;
+
+ virtual types::ID lookupTypeForExtension(const char *Ext) const {
+ return types::lookupTypeForExtension(Ext);
+ }
+
+ virtual ToolChain *CreateToolChain(const ArgList &Args,
+ const char *ArchName) const;
+};
+
+MinixHostInfo::~MinixHostInfo() {
+ for (llvm::StringMap<ToolChain*>::iterator
+ it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it){
+ delete it->second;
+ }
+}
+
+bool MinixHostInfo::useDriverDriver() const {
+ return false;
+}
+
+ToolChain *MinixHostInfo::CreateToolChain(const ArgList &Args,
+ const char *ArchName) const {
+ assert(!ArchName &&
+ "Unexpected arch name on platform without driver driver support.");
+
+ std::string Arch = getArchName();
+ ArchName = Arch.c_str();
+
+ ToolChain *&TC = ToolChains[ArchName];
+ if (!TC) {
+ llvm::Triple TCTriple(getTriple());
+ TCTriple.setArchName(ArchName);
+
+ TC = new toolchains::Minix(*this, TCTriple);
+ }
+
+ return TC;
+}
+
// DragonFly Host Info
/// DragonFlyHostInfo - DragonFly host information implementation.
@@ -566,6 +618,12 @@ clang::driver::createFreeBSDHostInfo(const Driver &D,
}
const HostInfo *
+clang::driver::createMinixHostInfo(const Driver &D,
+ const llvm::Triple& Triple) {
+ return new MinixHostInfo(D, Triple);
+}
+
+const HostInfo *
clang::driver::createDragonFlyHostInfo(const Driver &D,
const llvm::Triple& Triple) {
return new DragonFlyHostInfo(D, Triple);
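
MinixHostInfo follows the same shape as the other hosts in this file: a mutable per-arch cache of tool chains, freed in the destructor. A self-contained sketch of the pattern, with std::map in place of llvm::StringMap:

    #include <map>
    #include <string>

    struct ToolChainSketch { /* ... */ };

    class HostInfoSketch {
      mutable std::map<std::string, ToolChainSketch*> ToolChains;
    public:
      ToolChainSketch *get(const std::string &Arch) const {
        ToolChainSketch *&TC = ToolChains[Arch]; // insert-or-find in one step
        if (!TC)
          TC = new ToolChainSketch();
        return TC;
      }
      ~HostInfoSketch() {
        for (std::map<std::string, ToolChainSketch*>::iterator
               it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
          delete it->second;
      }
    };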
diff --git a/contrib/llvm/tools/clang/lib/Driver/Makefile b/contrib/llvm/tools/clang/lib/Driver/Makefile
index 371bda7..7bc340e 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Makefile
+++ b/contrib/llvm/tools/clang/lib/Driver/Makefile
@@ -7,10 +7,8 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangDriver
BUILD_ARCHIVE = 1
-CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp b/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
index de1e459..39530f2 100644
--- a/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
@@ -169,12 +169,12 @@ Option *OptTable::CreateOption(unsigned id) const {
if (info.Flags & RenderJoined) {
assert((info.Kind == Option::JoinedOrSeparateClass ||
info.Kind == Option::SeparateClass) && "Invalid option.");
- Opt->setForceJoinedRender(true);
+ Opt->setRenderStyle(Option::RenderJoinedStyle);
}
if (info.Flags & RenderSeparate) {
assert((info.Kind == Option::JoinedOrSeparateClass ||
info.Kind == Option::JoinedClass) && "Invalid option.");
- Opt->setForceSeparateRender(true);
+ Opt->setRenderStyle(Option::RenderSeparateStyle);
}
if (info.Flags & Unsupported)
Opt->setUnsupported(true);
@@ -182,13 +182,13 @@ Option *OptTable::CreateOption(unsigned id) const {
return Opt;
}
-Arg *OptTable::ParseOneArg(const InputArgList &Args, unsigned &Index) const {
+Arg *OptTable::ParseOneArg(const ArgList &Args, unsigned &Index) const {
unsigned Prev = Index;
const char *Str = Args.getArgString(Index);
// Anything that doesn't start with '-' is an input, as is '-' itself.
if (Str[0] != '-' || Str[1] == '\0')
- return new PositionalArg(TheInputOption, Index++);
+ return new Arg(TheInputOption, Index++, Str);
const Info *Start = OptionInfos + FirstSearchableIndex;
const Info *End = OptionInfos + getNumOptions();
@@ -221,7 +221,7 @@ Arg *OptTable::ParseOneArg(const InputArgList &Args, unsigned &Index) const {
return 0;
}
- return new PositionalArg(TheUnknownOption, Index++);
+ return new Arg(TheUnknownOption, Index++, Str);
}
InputArgList *OptTable::ParseArgs(const char **ArgBegin, const char **ArgEnd,
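
ParseOneArg's input test is unchanged by the Arg rework: anything that does not start with '-', plus the bare "-" itself, is an input rather than an option. As a one-liner, under our naming:

    static bool isInputArg(const char *Str) {
      return Str[0] != '-' || Str[1] == '\0';
    }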
@@ -267,7 +267,7 @@ static std::string getOptionHelpName(const OptTable &Opts, OptSpecifier Id) {
case Option::GroupClass: case Option::InputClass: case Option::UnknownClass:
assert(0 && "Invalid option with help text.");
- case Option::MultiArgClass: case Option::JoinedAndSeparateClass:
+ case Option::MultiArgClass:
assert(0 && "Cannot print metavar for this kind of option.");
case Option::FlagClass:
@@ -277,6 +277,7 @@ static std::string getOptionHelpName(const OptTable &Opts, OptSpecifier Id) {
Name += ' ';
// FALLTHROUGH
case Option::JoinedClass: case Option::CommaJoinedClass:
+ case Option::JoinedAndSeparateClass:
if (const char *MetaVarName = Opts.getOptionMetaVar(Id))
Name += MetaVarName;
else
diff --git a/contrib/llvm/tools/clang/lib/Driver/Option.cpp b/contrib/llvm/tools/clang/lib/Driver/Option.cpp
index 17d00f5..dd48af8 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Option.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Option.cpp
@@ -20,7 +20,6 @@ Option::Option(OptionClass _Kind, OptSpecifier _ID, const char *_Name,
const OptionGroup *_Group, const Option *_Alias)
: Kind(_Kind), ID(_ID.getID()), Name(_Name), Group(_Group), Alias(_Alias),
Unsupported(false), LinkerInput(false), NoOptAsInput(false),
- ForceSeparateRender(false), ForceJoinedRender(false),
DriverOption(false), NoArgumentUnused(false) {
// Multi-level aliases are not supported, and alias options cannot
@@ -28,6 +27,31 @@ Option::Option(OptionClass _Kind, OptSpecifier _ID, const char *_Name,
// inherent limitation.
assert((!Alias || (!Alias->Alias && !Group)) &&
"Multi-level aliases and aliases with groups are unsupported.");
+
+ // Initialize rendering options based on the class.
+ switch (Kind) {
+ case GroupClass:
+ case InputClass:
+ case UnknownClass:
+ RenderStyle = RenderValuesStyle;
+ break;
+
+ case JoinedClass:
+ case JoinedAndSeparateClass:
+ RenderStyle = RenderJoinedStyle;
+ break;
+
+ case CommaJoinedClass:
+ RenderStyle = RenderCommaJoinedStyle;
+ break;
+
+ case FlagClass:
+ case SeparateClass:
+ case MultiArgClass:
+ case JoinedOrSeparateClass:
+ RenderStyle = RenderSeparateStyle;
+ break;
+ }
}
Option::~Option() {
@@ -89,7 +113,7 @@ OptionGroup::OptionGroup(OptSpecifier ID, const char *Name,
: Option(Option::GroupClass, ID, Name, Group, 0) {
}
-Arg *OptionGroup::accept(const InputArgList &Args, unsigned &Index) const {
+Arg *OptionGroup::accept(const ArgList &Args, unsigned &Index) const {
assert(0 && "accept() should never be called on an OptionGroup");
return 0;
}
@@ -98,7 +122,7 @@ InputOption::InputOption(OptSpecifier ID)
: Option(Option::InputClass, ID, "<input>", 0, 0) {
}
-Arg *InputOption::accept(const InputArgList &Args, unsigned &Index) const {
+Arg *InputOption::accept(const ArgList &Args, unsigned &Index) const {
assert(0 && "accept() should never be called on an InputOption");
return 0;
}
@@ -107,7 +131,7 @@ UnknownOption::UnknownOption(OptSpecifier ID)
: Option(Option::UnknownClass, ID, "<unknown>", 0, 0) {
}
-Arg *UnknownOption::accept(const InputArgList &Args, unsigned &Index) const {
+Arg *UnknownOption::accept(const ArgList &Args, unsigned &Index) const {
assert(0 && "accept() should never be called on an UnknownOption");
return 0;
}
@@ -117,13 +141,13 @@ FlagOption::FlagOption(OptSpecifier ID, const char *Name,
: Option(Option::FlagClass, ID, Name, Group, Alias) {
}
-Arg *FlagOption::accept(const InputArgList &Args, unsigned &Index) const {
+Arg *FlagOption::accept(const ArgList &Args, unsigned &Index) const {
// Matches iff this is an exact match.
// FIXME: Avoid strlen.
if (strlen(getName()) != strlen(Args.getArgString(Index)))
return 0;
- return new FlagArg(this, Index++);
+ return new Arg(getUnaliasedOption(), Index++);
}
JoinedOption::JoinedOption(OptSpecifier ID, const char *Name,
@@ -131,9 +155,10 @@ JoinedOption::JoinedOption(OptSpecifier ID, const char *Name,
: Option(Option::JoinedClass, ID, Name, Group, Alias) {
}
-Arg *JoinedOption::accept(const InputArgList &Args, unsigned &Index) const {
+Arg *JoinedOption::accept(const ArgList &Args, unsigned &Index) const {
// Always matches.
- return new JoinedArg(this, Index++);
+ const char *Value = Args.getArgString(Index) + strlen(getName());
+ return new Arg(getUnaliasedOption(), Index++, Value);
}
CommaJoinedOption::CommaJoinedOption(OptSpecifier ID, const char *Name,
@@ -142,15 +167,34 @@ CommaJoinedOption::CommaJoinedOption(OptSpecifier ID, const char *Name,
: Option(Option::CommaJoinedClass, ID, Name, Group, Alias) {
}
-Arg *CommaJoinedOption::accept(const InputArgList &Args,
+Arg *CommaJoinedOption::accept(const ArgList &Args,
unsigned &Index) const {
- // Always matches. We count the commas now so we can answer
- // getNumValues easily.
+ // Always matches.
+ const char *Str = Args.getArgString(Index) + strlen(getName());
+ Arg *A = new Arg(getUnaliasedOption(), Index++);
+
+ // Parse out the comma separated values.
+ const char *Prev = Str;
+ for (;; ++Str) {
+ char c = *Str;
+
+ if (!c || c == ',') {
+ if (Prev != Str) {
+ char *Value = new char[Str - Prev + 1];
+ memcpy(Value, Prev, Str - Prev);
+ Value[Str - Prev] = '\0';
+ A->getValues().push_back(Value);
+ }
+
+ if (!c)
+ break;
+
+ Prev = Str + 1;
+ }
+ }
+ A->setOwnsValues(true);
- // Get the suffix string.
- // FIXME: Avoid strlen, and move to helper method?
- const char *Suffix = Args.getArgString(Index) + strlen(getName());
- return new CommaJoinedArg(this, Index++, Suffix);
+ return A;
}
SeparateOption::SeparateOption(OptSpecifier ID, const char *Name,
@@ -158,7 +202,7 @@ SeparateOption::SeparateOption(OptSpecifier ID, const char *Name,
: Option(Option::SeparateClass, ID, Name, Group, Alias) {
}
-Arg *SeparateOption::accept(const InputArgList &Args, unsigned &Index) const {
+Arg *SeparateOption::accept(const ArgList &Args, unsigned &Index) const {
// Matches iff this is an exact match.
// FIXME: Avoid strlen.
if (strlen(getName()) != strlen(Args.getArgString(Index)))
@@ -168,7 +212,7 @@ Arg *SeparateOption::accept(const InputArgList &Args, unsigned &Index) const {
if (Index > Args.getNumInputArgStrings())
return 0;
- return new SeparateArg(this, Index - 2, 1);
+ return new Arg(getUnaliasedOption(), Index - 2, Args.getArgString(Index - 1));
}
MultiArgOption::MultiArgOption(OptSpecifier ID, const char *Name,
@@ -178,7 +222,7 @@ MultiArgOption::MultiArgOption(OptSpecifier ID, const char *Name,
assert(NumArgs > 1 && "Invalid MultiArgOption!");
}
-Arg *MultiArgOption::accept(const InputArgList &Args, unsigned &Index) const {
+Arg *MultiArgOption::accept(const ArgList &Args, unsigned &Index) const {
// Matches iff this is an exact match.
// FIXME: Avoid strlen.
if (strlen(getName()) != strlen(Args.getArgString(Index)))
@@ -188,28 +232,35 @@ Arg *MultiArgOption::accept(const InputArgList &Args, unsigned &Index) const {
if (Index > Args.getNumInputArgStrings())
return 0;
- return new SeparateArg(this, Index - 1 - NumArgs, NumArgs);
+ Arg *A = new Arg(getUnaliasedOption(), Index - 1 - NumArgs,
+ Args.getArgString(Index - NumArgs));
+ for (unsigned i = 1; i != NumArgs; ++i)
+ A->getValues().push_back(Args.getArgString(Index - NumArgs + i));
+ return A;
}
-JoinedOrSeparateOption::JoinedOrSeparateOption(OptSpecifier ID, const char *Name,
+JoinedOrSeparateOption::JoinedOrSeparateOption(OptSpecifier ID,
+ const char *Name,
const OptionGroup *Group,
const Option *Alias)
: Option(Option::JoinedOrSeparateClass, ID, Name, Group, Alias) {
}
-Arg *JoinedOrSeparateOption::accept(const InputArgList &Args,
+Arg *JoinedOrSeparateOption::accept(const ArgList &Args,
unsigned &Index) const {
// If this is not an exact match, it is a joined arg.
// FIXME: Avoid strlen.
- if (strlen(getName()) != strlen(Args.getArgString(Index)))
- return new JoinedArg(this, Index++);
+ if (strlen(getName()) != strlen(Args.getArgString(Index))) {
+ const char *Value = Args.getArgString(Index) + strlen(getName());
+ return new Arg(this, Index++, Value);
+ }
// Otherwise it must be separate.
Index += 2;
if (Index > Args.getNumInputArgStrings())
return 0;
- return new SeparateArg(this, Index - 2, 1);
+ return new Arg(getUnaliasedOption(), Index - 2, Args.getArgString(Index - 1));
}
JoinedAndSeparateOption::JoinedAndSeparateOption(OptSpecifier ID,
@@ -219,7 +270,7 @@ JoinedAndSeparateOption::JoinedAndSeparateOption(OptSpecifier ID,
: Option(Option::JoinedAndSeparateClass, ID, Name, Group, Alias) {
}
-Arg *JoinedAndSeparateOption::accept(const InputArgList &Args,
+Arg *JoinedAndSeparateOption::accept(const ArgList &Args,
unsigned &Index) const {
// Always matches.
@@ -227,6 +278,7 @@ Arg *JoinedAndSeparateOption::accept(const InputArgList &Args,
if (Index > Args.getNumInputArgStrings())
return 0;
- return new JoinedAndSeparateArg(this, Index - 2);
+ return new Arg(getUnaliasedOption(), Index - 2,
+ Args.getArgString(Index-2)+strlen(getName()),
+ Args.getArgString(Index-1));
}
-
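
CommaJoinedOption::accept now splits the joined suffix itself and marks the Arg as owning the heap-allocated copies. The splitting loop, extracted into a standalone function for clarity (empty segments are dropped, so "a,,b," yields {"a", "b"}):

    #include <string>
    #include <vector>

    std::vector<std::string> splitCommaValues(const char *Str) {
      std::vector<std::string> Values;
      const char *Prev = Str;
      for (;; ++Str) {
        char c = *Str;
        if (!c || c == ',') {
          if (Prev != Str)                       // skip empty segments
            Values.push_back(std::string(Prev, Str));
          if (!c)
            break;
          Prev = Str + 1;                        // start of next segment
        }
      }
      return Values;
    }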
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
index 9b6264a..9fae67d 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
@@ -26,14 +26,11 @@ const Driver &ToolChain::getDriver() const {
return Host.getDriver();
}
-std::string ToolChain::GetFilePath(const Compilation &C,
- const char *Name) const {
+std::string ToolChain::GetFilePath(const char *Name) const {
return Host.getDriver().GetFilePath(Name, *this);
}
-std::string ToolChain::GetProgramPath(const Compilation &C,
- const char *Name,
- bool WantFile) const {
+std::string ToolChain::GetProgramPath(const char *Name, bool WantFile) const {
return Host.getDriver().GetProgramPath(Name, *this, WantFile);
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
index abb55b0..a78d153 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
@@ -225,6 +225,8 @@ Tool &Darwin::SelectTool(const Compilation &C, const JobAction &JA) const {
T = new tools::darwin::Link(*this); break;
case Action::LipoJobClass:
T = new tools::darwin::Lipo(*this); break;
+ case Action::DsymutilJobClass:
+ T = new tools::darwin::Dsymutil(*this); break;
}
}
@@ -323,6 +325,33 @@ DarwinClang::DarwinClang(const HostInfo &Host, const llvm::Triple& Triple,
void DarwinClang::AddLinkSearchPathArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
// The Clang toolchain uses explicit paths for internal libraries.
+
+ // Unfortunately, we still might depend on a few of the libraries that are
+ // only available in the gcc library directory (in particular
+ // libstdc++.dylib). For now, hardcode the path to the known install location.
+ llvm::sys::Path P(getDriver().Dir);
+ P.eraseComponent(); // .../usr/bin -> ../usr
+ P.appendComponent("lib");
+ P.appendComponent("gcc");
+ switch (getTriple().getArch()) {
+ default:
+ assert(0 && "Invalid Darwin arch!");
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ P.appendComponent("i686-apple-darwin10");
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ P.appendComponent("arm-apple-darwin10");
+ break;
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ P.appendComponent("powerpc-apple-darwin10");
+ break;
+ }
+ P.appendComponent("4.2.1");
+ if (P.exists())
+ CmdArgs.push_back(Args.MakeArgString("-L" + P.str()));
}
void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
@@ -386,9 +415,9 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
}
}
-DerivedArgList *Darwin::TranslateArgs(InputArgList &Args,
+DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
const char *BoundArch) const {
- DerivedArgList *DAL = new DerivedArgList(Args, false);
+ DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
const OptTable &Opts = getDriver().getOpts();
// FIXME: We really want to get out of the tool chain level argument
@@ -440,19 +469,10 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args,
iPhoneVersion = DAL->MakeJoinedArg(0, O, iPhoneOSTarget);
DAL->append(iPhoneVersion);
} else {
- // Otherwise, choose a default platform based on the tool chain.
- //
- // FIXME: Don't hardcode default here.
- if (getTriple().getArch() == llvm::Triple::arm ||
- getTriple().getArch() == llvm::Triple::thumb) {
- const Option *O = Opts.getOption(options::OPT_miphoneos_version_min_EQ);
- iPhoneVersion = DAL->MakeJoinedArg(0, O, "3.0");
- DAL->append(iPhoneVersion);
- } else {
- const Option *O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
- OSXVersion = DAL->MakeJoinedArg(0, O, MacosxVersionMin);
- DAL->append(OSXVersion);
- }
+ // Otherwise, assume we are targeting OS X.
+ const Option *O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
+ OSXVersion = DAL->MakeJoinedArg(0, O, MacosxVersionMin);
+ DAL->append(OSXVersion);
}
}
@@ -476,7 +496,8 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args,
}
setTarget(iPhoneVersion, Major, Minor, Micro);
- for (ArgList::iterator it = Args.begin(), ie = Args.end(); it != ie; ++it) {
+ for (ArgList::const_iterator it = Args.begin(),
+ ie = Args.end(); it != ie; ++it) {
Arg *A = *it;
if (A->getOption().matches(options::OPT_Xarch__)) {
@@ -484,9 +505,8 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args,
if (getArchName() != A->getValue(Args, 0))
continue;
- // FIXME: The arg is leaked here, and we should have a nicer
- // interface for this.
- unsigned Prev, Index = Prev = A->getIndex() + 1;
+ unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(Args, 1));
+ unsigned Prev = Index;
Arg *XarchArg = Opts.ParseOneArg(Args, Index);
// If the argument parsing failed or more than one argument was
@@ -506,6 +526,8 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args,
XarchArg->setBaseArg(A);
A = XarchArg;
+
+ DAL->AddSynthesizedArg(A);
}
// Sob. This is strictly gcc compatible for the time being. Apple
@@ -519,66 +541,61 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args,
case options::OPT_mkernel:
case options::OPT_fapple_kext:
DAL->append(A);
- DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_static)));
- DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_static)));
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_static));
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_static));
break;
case options::OPT_dependency_file:
- DAL->append(DAL->MakeSeparateArg(A, Opts.getOption(options::OPT_MF),
- A->getValue(Args)));
+ DAL->AddSeparateArg(A, Opts.getOption(options::OPT_MF),
+ A->getValue(Args));
break;
case options::OPT_gfull:
- DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_g_Flag)));
- DAL->append(DAL->MakeFlagArg(A,
- Opts.getOption(options::OPT_fno_eliminate_unused_debug_symbols)));
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_g_Flag));
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_fno_eliminate_unused_debug_symbols));
break;
case options::OPT_gused:
- DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_g_Flag)));
- DAL->append(DAL->MakeFlagArg(A,
- Opts.getOption(options::OPT_feliminate_unused_debug_symbols)));
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_g_Flag));
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_feliminate_unused_debug_symbols));
break;
case options::OPT_fterminated_vtables:
case options::OPT_findirect_virtual_calls:
- DAL->append(DAL->MakeFlagArg(A,
- Opts.getOption(options::OPT_fapple_kext)));
- DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_static)));
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_fapple_kext));
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_static));
break;
case options::OPT_shared:
- DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_dynamiclib)));
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_dynamiclib));
break;
case options::OPT_fconstant_cfstrings:
- DAL->append(DAL->MakeFlagArg(A,
- Opts.getOption(options::OPT_mconstant_cfstrings)));
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_mconstant_cfstrings));
break;
case options::OPT_fno_constant_cfstrings:
- DAL->append(DAL->MakeFlagArg(A,
- Opts.getOption(options::OPT_mno_constant_cfstrings)));
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_mno_constant_cfstrings));
break;
case options::OPT_Wnonportable_cfstrings:
- DAL->append(DAL->MakeFlagArg(A,
- Opts.getOption(options::OPT_mwarn_nonportable_cfstrings)));
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_mwarn_nonportable_cfstrings));
break;
case options::OPT_Wno_nonportable_cfstrings:
- DAL->append(DAL->MakeFlagArg(A,
- Opts.getOption(options::OPT_mno_warn_nonportable_cfstrings)));
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_mno_warn_nonportable_cfstrings));
break;
case options::OPT_fpascal_strings:
- DAL->append(DAL->MakeFlagArg(A,
- Opts.getOption(options::OPT_mpascal_strings)));
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_mpascal_strings));
break;
case options::OPT_fno_pascal_strings:
- DAL->append(DAL->MakeFlagArg(A,
- Opts.getOption(options::OPT_mno_pascal_strings)));
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_mno_pascal_strings));
break;
}
}
@@ -586,8 +603,7 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args,
if (getTriple().getArch() == llvm::Triple::x86 ||
getTriple().getArch() == llvm::Triple::x86_64)
if (!Args.hasArgNoClaim(options::OPT_mtune_EQ))
- DAL->append(DAL->MakeJoinedArg(0, Opts.getOption(options::OPT_mtune_EQ),
- "core2"));
+ DAL->AddJoinedArg(0, Opts.getOption(options::OPT_mtune_EQ), "core2");
// Add the arch options based on the particular spelling of -arch, to match
// how the driver driver works.
@@ -601,57 +617,57 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args,
if (Name == "ppc")
;
else if (Name == "ppc601")
- DAL->append(DAL->MakeJoinedArg(0, MCpu, "601"));
+ DAL->AddJoinedArg(0, MCpu, "601");
else if (Name == "ppc603")
- DAL->append(DAL->MakeJoinedArg(0, MCpu, "603"));
+ DAL->AddJoinedArg(0, MCpu, "603");
else if (Name == "ppc604")
- DAL->append(DAL->MakeJoinedArg(0, MCpu, "604"));
+ DAL->AddJoinedArg(0, MCpu, "604");
else if (Name == "ppc604e")
- DAL->append(DAL->MakeJoinedArg(0, MCpu, "604e"));
+ DAL->AddJoinedArg(0, MCpu, "604e");
else if (Name == "ppc750")
- DAL->append(DAL->MakeJoinedArg(0, MCpu, "750"));
+ DAL->AddJoinedArg(0, MCpu, "750");
else if (Name == "ppc7400")
- DAL->append(DAL->MakeJoinedArg(0, MCpu, "7400"));
+ DAL->AddJoinedArg(0, MCpu, "7400");
else if (Name == "ppc7450")
- DAL->append(DAL->MakeJoinedArg(0, MCpu, "7450"));
+ DAL->AddJoinedArg(0, MCpu, "7450");
else if (Name == "ppc970")
- DAL->append(DAL->MakeJoinedArg(0, MCpu, "970"));
+ DAL->AddJoinedArg(0, MCpu, "970");
else if (Name == "ppc64")
- DAL->append(DAL->MakeFlagArg(0, Opts.getOption(options::OPT_m64)));
+ DAL->AddFlagArg(0, Opts.getOption(options::OPT_m64));
else if (Name == "i386")
;
else if (Name == "i486")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "i486"));
+ DAL->AddJoinedArg(0, MArch, "i486");
else if (Name == "i586")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "i586"));
+ DAL->AddJoinedArg(0, MArch, "i586");
else if (Name == "i686")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "i686"));
+ DAL->AddJoinedArg(0, MArch, "i686");
else if (Name == "pentium")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "pentium"));
+ DAL->AddJoinedArg(0, MArch, "pentium");
else if (Name == "pentium2")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "pentium2"));
+ DAL->AddJoinedArg(0, MArch, "pentium2");
else if (Name == "pentpro")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "pentiumpro"));
+ DAL->AddJoinedArg(0, MArch, "pentiumpro");
else if (Name == "pentIIm3")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "pentium2"));
+ DAL->AddJoinedArg(0, MArch, "pentium2");
else if (Name == "x86_64")
- DAL->append(DAL->MakeFlagArg(0, Opts.getOption(options::OPT_m64)));
+ DAL->AddFlagArg(0, Opts.getOption(options::OPT_m64));
else if (Name == "arm")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "armv4t"));
+ DAL->AddJoinedArg(0, MArch, "armv4t");
else if (Name == "armv4t")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "armv4t"));
+ DAL->AddJoinedArg(0, MArch, "armv4t");
else if (Name == "armv5")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "armv5tej"));
+ DAL->AddJoinedArg(0, MArch, "armv5tej");
else if (Name == "xscale")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "xscale"));
+ DAL->AddJoinedArg(0, MArch, "xscale");
else if (Name == "armv6")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "armv6k"));
+ DAL->AddJoinedArg(0, MArch, "armv6k");
else if (Name == "armv7")
- DAL->append(DAL->MakeJoinedArg(0, MArch, "armv7a"));
+ DAL->AddJoinedArg(0, MArch, "armv7a");
else
llvm_unreachable("invalid Darwin arch");
@@ -740,6 +756,8 @@ Tool &Generic_GCC::SelectTool(const Compilation &C,
// driver is Darwin.
case Action::LipoJobClass:
T = new tools::darwin::Lipo(*this); break;
+ case Action::DsymutilJobClass:
+ T = new tools::darwin::Dsymutil(*this); break;
}
}
@@ -760,12 +778,6 @@ const char *Generic_GCC::GetForcedPicModel() const {
return 0;
}
-DerivedArgList *Generic_GCC::TranslateArgs(InputArgList &Args,
- const char *BoundArch) const {
- return new DerivedArgList(Args, true);
-}
-
-
/// TCEToolChain - A tool chain using the llvm bitcode tools to perform
/// all subcommands. See http://tce.cs.tut.fi for our peculiar target.
/// Currently does not support anything else but compilation.
@@ -820,11 +832,6 @@ Tool &TCEToolChain::SelectTool(const Compilation &C,
return *T;
}
-DerivedArgList *TCEToolChain::TranslateArgs(InputArgList &Args,
- const char *BoundArch) const {
- return new DerivedArgList(Args, true);
-}
-
/// OpenBSD - OpenBSD tool chain which can call as(1) and ld(1) directly.
OpenBSD::OpenBSD(const HostInfo &Host, const llvm::Triple& Triple)
@@ -859,6 +866,8 @@ Tool &OpenBSD::SelectTool(const Compilation &C, const JobAction &JA) const {
FreeBSD::FreeBSD(const HostInfo &Host, const llvm::Triple& Triple, bool Lib32)
: Generic_GCC(Host, Triple) {
+ getProgramPaths().push_back(getDriver().Dir + "/../libexec");
+ getProgramPaths().push_back("/usr/libexec");
if (Lib32) {
getFilePaths().push_back(getDriver().Dir + "/../lib32");
getFilePaths().push_back("/usr/lib32");
@@ -890,6 +899,38 @@ Tool &FreeBSD::SelectTool(const Compilation &C, const JobAction &JA) const {
return *T;
}
+/// Minix - Minix tool chain which can call as(1) and ld(1) directly.
+
+Minix::Minix(const HostInfo &Host, const llvm::Triple& Triple)
+ : Generic_GCC(Host, Triple) {
+ getFilePaths().push_back(getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+ getFilePaths().push_back("/usr/gnu/lib");
+ getFilePaths().push_back("/usr/gnu/lib/gcc/i686-pc-minix/4.4.3");
+}
+
+Tool &Minix::SelectTool(const Compilation &C, const JobAction &JA) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass:
+ T = new tools::minix::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::minix::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA);
+ }
+ }
+
+ return *T;
+}
+
/// AuroraUX - AuroraUX tool chain which can call as(1) and ld(1) directly.
AuroraUX::AuroraUX(const HostInfo &Host, const llvm::Triple& Triple)
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
index ad975bf..4bdd00f 100644
--- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
@@ -33,9 +33,6 @@ public:
Generic_GCC(const HostInfo &Host, const llvm::Triple& Triple);
~Generic_GCC();
- virtual DerivedArgList *TranslateArgs(InputArgList &Args,
- const char *BoundArch) const;
-
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
virtual bool IsUnwindTablesDefault() const;
@@ -147,7 +144,7 @@ public:
/// @name ToolChain Implementation
/// {
- virtual DerivedArgList *TranslateArgs(InputArgList &Args,
+ virtual DerivedArgList *TranslateArgs(const DerivedArgList &Args,
const char *BoundArch) const;
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
@@ -160,9 +157,13 @@ public:
return !isMacosxVersionLT(10, 6);
}
virtual bool IsIntegratedAssemblerDefault() const {
+#ifdef DISABLE_DEFAULT_INTEGRATED_ASSEMBLER
+ return false;
+#else
// Default integrated assembler to on for x86.
return (getTriple().getArch() == llvm::Triple::x86 ||
getTriple().getArch() == llvm::Triple::x86_64);
+#endif
}
virtual bool IsObjCNonFragileABIDefault() const {
// Non-fragile ABI is default for everything but i386.
@@ -270,6 +271,13 @@ public:
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
};
+class LLVM_LIBRARY_VISIBILITY Minix : public Generic_GCC {
+public:
+ Minix(const HostInfo &Host, const llvm::Triple& Triple);
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
+};
+
class LLVM_LIBRARY_VISIBILITY DragonFly : public Generic_GCC {
public:
DragonFly(const HostInfo &Host, const llvm::Triple& Triple);
@@ -290,8 +298,6 @@ public:
TCEToolChain(const HostInfo &Host, const llvm::Triple& Triple);
~TCEToolChain();
- virtual DerivedArgList *TranslateArgs(InputArgList &Args,
- const char *BoundArch) const;
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
bool IsMathErrnoDefault() const;
bool IsUnwindTablesDefault() const;
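
The DISABLE_DEFAULT_INTEGRATED_ASSEMBLER guard added to ToolChains.h is a build-time knob: defining it flips the Darwin integrated-assembler default without touching any call site. Reduced to its essence (the helper function is our framing, not the patch's):

    static bool integratedAsDefault(bool ArchIsX86) {
    #ifdef DISABLE_DEFAULT_INTEGRATED_ASSEMBLER
      return false;              // vendor build opts out entirely
    #else
      return ArchIsX86;          // otherwise default on for x86/x86-64
    #endif
    }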
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
index ce35552..c2cb1fb 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
@@ -157,18 +157,18 @@ void Clang::AddPreprocessingOptions(const Driver &D,
for (arg_iterator it = Args.filtered_begin(options::OPT_MT,
options::OPT_MQ),
ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ A->claim();
- it->claim();
-
- if (it->getOption().matches(options::OPT_MQ)) {
+ if (A->getOption().matches(options::OPT_MQ)) {
CmdArgs.push_back("-MT");
llvm::SmallString<128> Quoted;
- QuoteTarget(it->getValue(Args), Quoted);
+ QuoteTarget(A->getValue(Args), Quoted);
CmdArgs.push_back(Args.MakeArgString(Quoted));
// -MT flag - no change
} else {
- it->render(Args, CmdArgs);
+ A->render(Args, CmdArgs);
}
}
@@ -252,54 +252,59 @@ void Clang::AddPreprocessingOptions(const Driver &D,
/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
//
// FIXME: tblgen this.
-static const char *getARMTargetCPU(const ArgList &Args) {
+static const char *getARMTargetCPU(const ArgList &Args,
+ const llvm::Triple &Triple) {
// FIXME: Warn on inconsistent use of -mcpu and -march.
// If we have -mcpu=, use that.
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
return A->getValue(Args);
- // Otherwise, if we have -march= choose the base CPU for that arch.
+ llvm::StringRef MArch;
if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
- llvm::StringRef MArch = A->getValue(Args);
-
- if (MArch == "armv2" || MArch == "armv2a")
- return "arm2";
- if (MArch == "armv3")
- return "arm6";
- if (MArch == "armv3m")
- return "arm7m";
- if (MArch == "armv4" || MArch == "armv4t")
- return "arm7tdmi";
- if (MArch == "armv5" || MArch == "armv5t")
- return "arm10tdmi";
- if (MArch == "armv5e" || MArch == "armv5te")
- return "arm1026ejs";
- if (MArch == "armv5tej")
- return "arm926ej-s";
- if (MArch == "armv6" || MArch == "armv6k")
- return "arm1136jf-s";
- if (MArch == "armv6j")
- return "arm1136j-s";
- if (MArch == "armv6z" || MArch == "armv6zk")
- return "arm1176jzf-s";
- if (MArch == "armv6t2")
- return "arm1156t2-s";
- if (MArch == "armv7" || MArch == "armv7a" || MArch == "armv7-a")
- return "cortex-a8";
- if (MArch == "armv7r" || MArch == "armv7-r")
- return "cortex-r4";
- if (MArch == "armv7m" || MArch == "armv7-m")
- return "cortex-m3";
- if (MArch == "ep9312")
- return "ep9312";
- if (MArch == "iwmmxt")
- return "iwmmxt";
- if (MArch == "xscale")
- return "xscale";
- }
-
- // Otherwise return the most base CPU LLVM supports.
+ // Otherwise, if we have -march= choose the base CPU for that arch.
+ MArch = A->getValue(Args);
+ } else {
+ // Otherwise, use the Arch from the triple.
+ MArch = Triple.getArchName();
+ }
+
+ if (MArch == "armv2" || MArch == "armv2a")
+ return "arm2";
+ if (MArch == "armv3")
+ return "arm6";
+ if (MArch == "armv3m")
+ return "arm7m";
+ if (MArch == "armv4" || MArch == "armv4t")
+ return "arm7tdmi";
+ if (MArch == "armv5" || MArch == "armv5t")
+ return "arm10tdmi";
+ if (MArch == "armv5e" || MArch == "armv5te")
+ return "arm1026ejs";
+ if (MArch == "armv5tej")
+ return "arm926ej-s";
+ if (MArch == "armv6" || MArch == "armv6k")
+ return "arm1136jf-s";
+ if (MArch == "armv6j")
+ return "arm1136j-s";
+ if (MArch == "armv6z" || MArch == "armv6zk")
+ return "arm1176jzf-s";
+ if (MArch == "armv6t2")
+ return "arm1156t2-s";
+ if (MArch == "armv7" || MArch == "armv7a" || MArch == "armv7-a")
+ return "cortex-a8";
+ if (MArch == "armv7r" || MArch == "armv7-r")
+ return "cortex-r4";
+ if (MArch == "armv7m" || MArch == "armv7-m")
+ return "cortex-m3";
+ if (MArch == "ep9312")
+ return "ep9312";
+ if (MArch == "iwmmxt")
+ return "iwmmxt";
+ if (MArch == "xscale")
+ return "xscale";
+
+ // If all else fails, return the most basic CPU LLVM supports.
return "arm7tdmi";
}
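
The -march ladder above is a pure string-to-string table; pending the tblgen rewrite the FIXME asks for, it could equally be phrased with llvm::StringSwitch. A sketch of the same mapping (illustrative only, not the committed code):

    #include "llvm/ADT/StringSwitch.h"

    static const char *lookupARMCPUForMArch(llvm::StringRef MArch) {
      // Same table as the if-chain above, expressed as a StringSwitch.
      return llvm::StringSwitch<const char *>(MArch)
        .Cases("armv2", "armv2a", "arm2")
        .Case("armv3", "arm6")
        .Case("armv3m", "arm7m")
        .Cases("armv4", "armv4t", "arm7tdmi")
        .Cases("armv5", "armv5t", "arm10tdmi")
        .Cases("armv5e", "armv5te", "arm1026ejs")
        .Case("armv5tej", "arm926ej-s")
        .Cases("armv6", "armv6k", "arm1136jf-s")
        .Case("armv6j", "arm1136j-s")
        .Cases("armv6z", "armv6zk", "arm1176jzf-s")
        .Case("armv6t2", "arm1156t2-s")
        .Cases("armv7", "armv7a", "armv7-a", "cortex-a8")
        .Cases("armv7r", "armv7-r", "cortex-r4")
        .Cases("armv7m", "armv7-m", "cortex-m3")
        .Case("ep9312", "ep9312")
        .Case("iwmmxt", "iwmmxt")
        .Case("xscale", "xscale")
        .Default("arm7tdmi");  // most basic CPU LLVM supports
    }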
@@ -352,7 +357,8 @@ static std::string getLLVMTriple(const ToolChain &TC, const ArgList &Args) {
// Thumb2 is the default for V7 on Darwin.
//
// FIXME: Thumb should just be another -target-feature, not in the triple.
- llvm::StringRef Suffix = getLLVMArchSuffixForARM(getARMTargetCPU(Args));
+ llvm::StringRef Suffix =
+ getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
bool ThumbDefault =
(Suffix == "v7" && TC.getTriple().getOS() == llvm::Triple::Darwin);
std::string ArchName = "arm";
@@ -385,6 +391,7 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) {
void Clang::AddARMTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
const Driver &D = getToolChain().getDriver();
+ llvm::Triple Triple = getToolChain().getTriple();
// Select the ABI to use.
//
@@ -394,27 +401,20 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
ABIName = A->getValue(Args);
} else {
// Select the default based on the platform.
- switch (getToolChain().getTriple().getOS()) {
- // FIXME: Is this right for non-Darwin and non-Linux?
- default:
+ llvm::StringRef env = Triple.getEnvironmentName();
+ if (env == "gnueabi")
+ ABIName = "aapcs-linux";
+ else if (env == "eabi")
ABIName = "aapcs";
- break;
-
- case llvm::Triple::Darwin:
+ else
ABIName = "apcs-gnu";
- break;
-
- case llvm::Triple::Linux:
- ABIName = "aapcs-linux";
- break;
- }
}
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName);
// Set the CPU based on -march= and -mcpu=.
CmdArgs.push_back("-target-cpu");
- CmdArgs.push_back(getARMTargetCPU(Args));
+ CmdArgs.push_back(getARMTargetCPU(Args, Triple));
// Select the float ABI as determined by -msoft-float, -mhard-float, and
// -mfloat-abi=.
@@ -438,14 +438,14 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
// If unspecified, choose the default based on the platform.
if (FloatABI.empty()) {
- // FIXME: This is wrong for non-Darwin, we don't have a mechanism yet for
- // distinguishing things like linux-eabi vs linux-elf.
- switch (getToolChain().getTriple().getOS()) {
+ const llvm::Triple &Triple = getToolChain().getTriple();
+ switch (Triple.getOS()) {
case llvm::Triple::Darwin: {
// Darwin defaults to "softfp" for v6 and v7.
//
// FIXME: Factor out an ARM class so we can cache the arch somewhere.
- llvm::StringRef ArchName = getLLVMArchSuffixForARM(getARMTargetCPU(Args));
+ llvm::StringRef ArchName =
+ getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
if (ArchName.startswith("v6") || ArchName.startswith("v7"))
FloatABI = "softfp";
else
@@ -453,6 +453,15 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
break;
}
+ case llvm::Triple::Linux: {
+ llvm::StringRef Env = getToolChain().getTriple().getEnvironmentName();
+ if (Env == "gnueabi") {
+ FloatABI = "softfp";
+ break;
+ }
+ }
+ // fall through
+
default:
// Assume "soft", but warn the user we are guessing.
FloatABI = "soft";
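
Taken together, the default float ABI after this hunk resolves as follows: an explicit -mfloat-abi, -msoft-float, or -mhard-float wins; otherwise Darwin picks "softfp" for v6/v7 and "soft" below that, Linux picks "softfp" only for the gnueabi environment, and everything else falls through to a guessed "soft" plus a warning. A hypothetical condensation of that decision (a sketch, not the driver's actual structure):

    #include "llvm/ADT/Triple.h"

    static const char *defaultARMFloatABI(const llvm::Triple &Triple,
                                          llvm::StringRef ArchSuffix) {
      switch (Triple.getOS()) {
      case llvm::Triple::Darwin:
        // Darwin defaults to "softfp" for v6 and v7, "soft" otherwise.
        return (ArchSuffix.startswith("v6") || ArchSuffix.startswith("v7"))
                   ? "softfp" : "soft";
      case llvm::Triple::Linux:
        if (Triple.getEnvironmentName() == "gnueabi")
          return "softfp";
        // otherwise fall through to the guessed default
      default:
        return "soft";  // the driver warns that it is guessing here
      }
    }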
@@ -639,8 +648,8 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
for (arg_iterator it = Args.filtered_begin(options::OPT_m_x86_Features_Group),
ie = Args.filtered_end(); it != ie; ++it) {
- llvm::StringRef Name = it->getOption().getName();
- it->claim();
+ llvm::StringRef Name = (*it)->getOption().getName();
+ (*it)->claim();
// Skip over "-m".
assert(Name.startswith("-m") && "Invalid feature name.");
@@ -792,9 +801,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (JA.getType() == types::TY_Nothing) {
CmdArgs.push_back("-fsyntax-only");
- } else if (JA.getType() == types::TY_LLVMAsm) {
+ } else if (JA.getType() == types::TY_LLVM_IR ||
+ JA.getType() == types::TY_LTO_IR) {
CmdArgs.push_back("-emit-llvm");
- } else if (JA.getType() == types::TY_LLVMBC) {
+ } else if (JA.getType() == types::TY_LLVM_BC ||
+ JA.getType() == types::TY_LTO_BC) {
CmdArgs.push_back("-emit-llvm-bc");
} else if (JA.getType() == types::TY_PP_Asm) {
CmdArgs.push_back("-S");
@@ -988,6 +999,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
break;
}
+ // -mno-omit-leaf-frame-pointer is default.
+ if (Args.hasFlag(options::OPT_momit_leaf_frame_pointer,
+ options::OPT_mno_omit_leaf_frame_pointer, false))
+ CmdArgs.push_back("-momit-leaf-frame-pointer");
+
// -fno-math-errno is default.
if (Args.hasFlag(options::OPT_fmath_errno,
options::OPT_fno_math_errno,
@@ -1026,6 +1042,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_ffunction_sections);
Args.AddAllArgs(CmdArgs, options::OPT_fdata_sections);
+ Args.AddAllArgs(CmdArgs, options::OPT_finstrument_functions);
+
Args.AddLastArg(CmdArgs, options::OPT_nostdinc);
Args.AddLastArg(CmdArgs, options::OPT_nostdincxx);
Args.AddLastArg(CmdArgs, options::OPT_nobuiltininc);
@@ -1072,8 +1090,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
else
Std->render(Args, CmdArgs);
- if (Arg *A = Args.getLastArg(options::OPT_trigraphs))
- if (A->getIndex() > Std->getIndex())
+ if (Arg *A = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi,
+ options::OPT_trigraphs))
+ if (A != Std)
A->render(Args, CmdArgs);
} else {
// Honor -std-default.
@@ -1146,6 +1165,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue(Args));
}
+ Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden);
+
// -fhosted is default.
if (KernelOrKext || Args.hasFlag(options::OPT_ffreestanding,
options::OPT_fhosted,
@@ -1178,12 +1199,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- Args.AddLastArg(CmdArgs, options::OPT_fno_caret_diagnostics);
Args.AddLastArg(CmdArgs, options::OPT_fno_show_column);
Args.AddLastArg(CmdArgs, options::OPT_fobjc_sender_dependent_dispatch);
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_print_source_range_info);
Args.AddLastArg(CmdArgs, options::OPT_ftime_report);
Args.AddLastArg(CmdArgs, options::OPT_ftrapv);
+ Args.AddLastArg(CmdArgs, options::OPT_fwrapv);
Args.AddLastArg(CmdArgs, options::OPT_fwritable_strings);
Args.AddLastArg(CmdArgs, options::OPT_pthread);
@@ -1347,6 +1368,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
D.Diag(clang::diag::warn_drv_clang_unsupported)
<< Args.getLastArg(options::OPT_funsigned_bitfields)->getAsString(Args);
+ // -fcaret-diagnostics is default.
+ if (!Args.hasFlag(options::OPT_fcaret_diagnostics,
+ options::OPT_fno_caret_diagnostics, true))
+ CmdArgs.push_back("-fno-caret-diagnostics");
+
// -fdiagnostics-fixit-info is default, only pass non-default.
if (!Args.hasFlag(options::OPT_fdiagnostics_fixit_info,
options::OPT_fno_diagnostics_fixit_info))
@@ -1376,6 +1402,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_show_source_location))
CmdArgs.push_back("-fno-show-source-location");
+ if (!Args.hasFlag(options::OPT_fspell_checking,
+ options::OPT_fno_spell_checking))
+ CmdArgs.push_back("-fno-spell-checking");
+
+ if (Arg *A = Args.getLastArg(options::OPT_fshow_overloads_EQ))
+ A->render(Args, CmdArgs);
+
// -fdollars-in-identifiers default varies depending on platform and
// language; only pass if specified.
if (Arg *A = Args.getLastArg(options::OPT_fdollars_in_identifiers,
@@ -1420,14 +1453,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgValues(CmdArgs, options::OPT_Xclang);
for (arg_iterator it = Args.filtered_begin(options::OPT_mllvm),
ie = Args.filtered_end(); it != ie; ++it) {
- it->claim();
+ (*it)->claim();
// We translate this by hand to the -cc1 argument, since nightly test uses
// it and developers have been trained to spell it with -mllvm.
- if (llvm::StringRef(it->getValue(Args, 0)) == "-disable-llvm-optzns")
+ if (llvm::StringRef((*it)->getValue(Args, 0)) == "-disable-llvm-optzns")
CmdArgs.push_back("-disable-llvm-optzns");
else
- it->render(Args, CmdArgs);
+ (*it)->render(Args, CmdArgs);
}
if (Output.getType() == types::TY_Dependencies) {
@@ -1457,30 +1490,34 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_undef);
- const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "clang"));
+ std::string Exec = getToolChain().getDriver().getClangProgramPath();
// Optionally embed the -cc1 level arguments into the debug info, for build
// analysis.
if (getToolChain().UseDwarfDebugFlags()) {
+ ArgStringList OriginalArgs;
+ for (ArgList::const_iterator it = Args.begin(),
+ ie = Args.end(); it != ie; ++it)
+ (*it)->render(Args, OriginalArgs);
+
llvm::SmallString<256> Flags;
Flags += Exec;
- for (unsigned i = 0, e = CmdArgs.size(); i != e; ++i) {
+ for (unsigned i = 0, e = OriginalArgs.size(); i != e; ++i) {
Flags += " ";
- Flags += CmdArgs[i];
+ Flags += OriginalArgs[i];
}
CmdArgs.push_back("-dwarf-debug-flags");
CmdArgs.push_back(Args.MakeArgString(Flags.str()));
}
- Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ Dest.addCommand(new Command(JA, *this, Exec.c_str(), CmdArgs));
// Explicitly warn that these options are unsupported, even though
// we are allowing compilation to continue.
for (arg_iterator it = Args.filtered_begin(options::OPT_pg),
ie = Args.filtered_end(); it != ie; ++it) {
- it->claim();
- D.Diag(clang::diag::warn_drv_clang_unsupported) << it->getAsString(Args);
+ (*it)->claim();
+ D.Diag(clang::diag::warn_drv_clang_unsupported) << (*it)->getAsString(Args);
}
// Claim some arguments which clang supports automatically.
@@ -1530,7 +1567,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_mrelax_all,
options::OPT_mno_relax_all,
!IsOpt))
- CmdArgs.push_back("-mrelax-all");
+ CmdArgs.push_back("-relax-all");
// FIXME: Add -force_cpusubtype_ALL support, once we have it.
@@ -1552,9 +1589,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Input.getFilename());
}
- const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "clang"));
- Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ std::string Exec = getToolChain().getDriver().getClangProgramPath();
+ Dest.addCommand(new Command(JA, *this, Exec.c_str(), CmdArgs));
}
void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
@@ -1630,7 +1666,8 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &II = *it;
// Don't try to pass LLVM or AST inputs to a generic gcc.
- if (II.getType() == types::TY_LLVMBC)
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
D.Diag(clang::diag::err_drv_no_linker_llvm_support)
<< getToolChain().getTripleString();
else if (II.getType() == types::TY_AST)
@@ -1653,7 +1690,7 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
const char *GCCName = getToolChain().getDriver().CCCGenericGCCName.c_str();
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, GCCName));
+ Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
@@ -1672,7 +1709,8 @@ void gcc::Compile::RenderExtraToolArgs(const JobAction &JA,
const Driver &D = getToolChain().getDriver();
// If -flto, etc. are present then make sure not to force assembly output.
- if (JA.getType() == types::TY_LLVMBC)
+ if (JA.getType() == types::TY_LLVM_IR || JA.getType() == types::TY_LTO_IR ||
+ JA.getType() == types::TY_LLVM_BC || JA.getType() == types::TY_LTO_BC)
CmdArgs.push_back("-c");
else {
if (JA.getType() != types::TY_PP_Asm)
@@ -1845,10 +1883,10 @@ void darwin::CC1::AddCC1OptionsArgs(const ArgList &Args, ArgStringList &CmdArgs,
for (arg_iterator it = Args.filtered_begin(options::OPT_f_Group,
options::OPT_fsyntax_only),
ie = Args.filtered_end(); it != ie; ++it) {
- if (!it->getOption().matches(options::OPT_fbuiltin_strcat) &&
- !it->getOption().matches(options::OPT_fbuiltin_strcpy)) {
- it->claim();
- it->render(Args, CmdArgs);
+ if (!(*it)->getOption().matches(options::OPT_fbuiltin_strcat) &&
+ !(*it)->getOption().matches(options::OPT_fbuiltin_strcpy)) {
+ (*it)->claim();
+ (*it)->render(Args, CmdArgs);
}
}
} else
@@ -2059,7 +2097,7 @@ void darwin::Preprocess::ConstructJob(Compilation &C, const JobAction &JA,
const char *CC1Name = getCC1Name(Inputs[0].getType());
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, CC1Name));
+ Args.MakeArgString(getToolChain().GetProgramPath(CC1Name));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
@@ -2079,9 +2117,11 @@ void darwin::Compile::ConstructJob(Compilation &C, const JobAction &JA,
D.Diag(clang::diag::err_drv_argument_only_allowed_with)
<< A->getAsString(Args) << "-E";
- if (Output.getType() == types::TY_LLVMAsm)
+ if (JA.getType() == types::TY_LLVM_IR ||
+ JA.getType() == types::TY_LTO_IR)
CmdArgs.push_back("-emit-llvm");
- else if (Output.getType() == types::TY_LLVMBC)
+ else if (JA.getType() == types::TY_LLVM_BC ||
+ JA.getType() == types::TY_LTO_BC)
CmdArgs.push_back("-emit-llvm-bc");
else if (Output.getType() == types::TY_AST)
D.Diag(clang::diag::err_drv_no_ast_support)
@@ -2157,7 +2197,7 @@ void darwin::Compile::ConstructJob(Compilation &C, const JobAction &JA,
const char *CC1Name = getCC1Name(Inputs[0].getType());
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, CC1Name));
+ Args.MakeArgString(getToolChain().GetProgramPath(CC1Name));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
@@ -2212,30 +2252,10 @@ void darwin::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
// asm_final spec is empty.
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "as"));
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
-/// Helper routine for seeing if we should use dsymutil; this is a
-/// gcc compatible hack, we should remove it and use the input
-/// type information.
-static bool isSourceSuffix(const char *Str) {
- // match: 'C', 'CPP', 'c', 'cc', 'cp', 'c++', 'cpp', 'cxx', 'm',
- // 'mm'.
- return llvm::StringSwitch<bool>(Str)
- .Case("C", true)
- .Case("c", true)
- .Case("m", true)
- .Case("cc", true)
- .Case("cp", true)
- .Case("mm", true)
- .Case("CPP", true)
- .Case("c++", true)
- .Case("cpp", true)
- .Case("cxx", true)
- .Default(false);
-}
-
void darwin::DarwinTool::AddDarwinArch(const ArgList &Args,
ArgStringList &CmdArgs) const {
llvm::StringRef ArchName = getDarwinToolChain().getDarwinArchName(Args);
@@ -2335,8 +2355,15 @@ void darwin::Link::AddLinkArgs(const ArgList &Args,
Args.AddAllArgs(CmdArgs, options::OPT_multiply__defined);
Args.AddAllArgs(CmdArgs, options::OPT_multiply__defined__unused);
- if (Args.hasArg(options::OPT_fpie))
- CmdArgs.push_back("-pie");
+ if (const Arg *A = Args.getLastArg(options::OPT_fpie, options::OPT_fPIE,
+ options::OPT_fno_pie,
+ options::OPT_fno_PIE)) {
+ if (A->getOption().matches(options::OPT_fpie) ||
+ A->getOption().matches(options::OPT_fPIE))
+ CmdArgs.push_back("-pie");
+ else
+ CmdArgs.push_back("-no_pie");
+ }
Args.AddLastArg(CmdArgs, options::OPT_prebind);
Args.AddLastArg(CmdArgs, options::OPT_noprebind);
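
The rewritten PIE handling relies on Args.getLastArg across all four spellings, so the rightmost flag on the command line decides. For example (hypothetical invocations):

    // clang -fpie ...           -> ld is passed "-pie"
    // clang -fPIE -fno-pie ...  -> ld is passed "-no_pie" (the later flag wins)
    // clang ...                 -> neither "-pie" nor "-no_pie" is passed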
@@ -2484,7 +2511,7 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.hasArg(options::OPT_shared_libgcc) &&
getDarwinToolChain().isMacosxVersionLT(10, 5)) {
const char *Str =
- Args.MakeArgString(getToolChain().GetFilePath(C, "crt3.o"));
+ Args.MakeArgString(getToolChain().GetFilePath("crt3.o"));
CmdArgs.push_back(Str);
}
}
@@ -2544,40 +2571,8 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_F);
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "ld"));
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
-
- // Find the first non-empty base input (we want to ignore linker
- // inputs).
- const char *BaseInput = "";
- for (unsigned i = 0, e = Inputs.size(); i != e; ++i) {
- if (Inputs[i].getBaseInput()[0] != '\0') {
- BaseInput = Inputs[i].getBaseInput();
- break;
- }
- }
-
- // Run dsymutil if we are making an executable in a single step.
- //
- // FIXME: Currently we don't want to do this when we are part of a
- // universal build step, as this would end up creating stray temp
- // files.
- if (!LinkingOutput &&
- Args.getLastArg(options::OPT_g_Group) &&
- !Args.getLastArg(options::OPT_gstabs) &&
- !Args.getLastArg(options::OPT_g0)) {
- // FIXME: This is gross, but matches gcc. The test only considers
- // the suffix (not the -x type), and then only of the first
- // source input. Awesome.
- const char *Suffix = strrchr(BaseInput, '.');
- if (Suffix && isSourceSuffix(Suffix + 1)) {
- const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "dsymutil"));
- ArgStringList CmdArgs;
- CmdArgs.push_back(Output.getFilename());
- C.getJobs().addCommand(new Command(JA, *this, Exec, CmdArgs));
- }
- }
}
void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA,
@@ -2600,7 +2595,27 @@ void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(II.getFilename());
}
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "lipo"));
+ Args.MakeArgString(getToolChain().GetProgramPath("lipo"));
+ Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unable to handle multiple inputs.");
+ const InputInfo &Input = Inputs[0];
+ assert(Input.isFilename() && "Unexpected dsymutil input.");
+ CmdArgs.push_back(Input.getFilename());
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("dsymutil"));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
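
The new darwin::Dsymutil tool replaces the suffix-sniffing hack deleted above: instead of the link step re-running dsymutil based on the first input's file extension, debug-info extraction is now a first-class job that renders as dsymutil <input> -o <output>. Roughly, for a single-step debug build (hypothetical file names; the actual .dSYM path comes from the driver's output-name logic):

    // clang -g hello.c -o hello  =>  ld ... -o hello
    //                                 dsymutil hello -o <driver-chosen .dSYM output>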
@@ -2630,7 +2645,7 @@ void auroraux::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "gas"));
+ Args.MakeArgString(getToolChain().GetProgramPath("gas"));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
@@ -2675,13 +2690,18 @@ void auroraux::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crt1.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbegin.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crt1.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbegin.o")));
} else {
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crti.o")));
}
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtn.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtn.o")));
}
CmdArgs.push_back(Args.MakeArgString("-L/opt/gcc4/lib/gcc/"
@@ -2697,7 +2717,8 @@ void auroraux::Link::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &II = *it;
// Don't try to pass LLVM inputs to a generic gcc.
- if (II.getType() == types::TY_LLVMBC)
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
D.Diag(clang::diag::err_drv_no_linker_llvm_support)
<< getToolChain().getTripleString();
@@ -2725,13 +2746,12 @@ void auroraux::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared))
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtend.o")));
-// else
-// CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtendS.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtend.o")));
}
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "ld"));
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
@@ -2761,7 +2781,7 @@ void openbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "as"));
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
@@ -2805,10 +2825,13 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crt0.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbegin.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crt0.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbegin.o")));
} else {
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbeginS.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbeginS.o")));
}
}
@@ -2827,7 +2850,8 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &II = *it;
// Don't try to pass LLVM inputs to a generic gcc.
- if (II.getType() == types::TY_LLVMBC)
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
D.Diag(clang::diag::err_drv_no_linker_llvm_support)
<< getToolChain().getTripleString();
@@ -2855,13 +2879,15 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared))
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtend.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtend.o")));
else
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtendS.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtendS.o")));
}
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "ld"));
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
@@ -2903,7 +2929,7 @@ void freebsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "as"));
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
@@ -2947,12 +2973,17 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crt1.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbegin.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crt1.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbegin.o")));
} else {
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbeginS.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbeginS.o")));
}
}
@@ -2965,7 +2996,8 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &II = *it;
// Don't try to pass LLVM inputs to a generic gcc.
- if (II.getType() == types::TY_LLVMBC)
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
D.Diag(clang::diag::err_drv_no_linker_llvm_support)
<< getToolChain().getTripleString();
@@ -3011,14 +3043,120 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared))
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtend.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "crtend.o")));
else
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtendS.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtn.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "crtendS.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "crtn.o")));
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void minix::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ if (Output.isPipe())
+ CmdArgs.push_back("-");
+ else
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ if (II.isPipe())
+ CmdArgs.push_back("-");
+ else
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("gas"));
+ Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void minix::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ if (Output.isPipe()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back("-");
+ } else if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles))
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "/usr/gnu/lib/crtso.o")));
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ // Don't try to pass LLVM inputs to a generic gcc.
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
+ D.Diag(clang::diag::err_drv_no_linker_llvm_support)
+ << getToolChain().getTripleString();
+
+ if (II.isPipe())
+ CmdArgs.push_back("-");
+ else if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+ II.getInputArg().renderAsInput(Args, CmdArgs);
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX) {
+ CmdArgs.push_back("-lstdc++");
+ CmdArgs.push_back("-lm");
+ }
+
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-L/usr/gnu/lib");
+ // FIXME: fill in the correct search path for the final
+ // support libraries.
+ CmdArgs.push_back("-L/usr/gnu/lib/gcc/i686-pc-minix/4.4.3");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "/usr/gnu/lib/libend.a")));
}
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "ld"));
+ Args.MakeArgString(getToolChain().GetProgramPath("/usr/gnu/bin/gld"));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
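
Pulling the pieces above together, the Minix linker job hard-codes its GNU toolchain layout; a hypothetical link line it would construct for a default (non -nostdlib) C compile:

    // /usr/gnu/bin/gld -o a.out /usr/gnu/lib/crtso.o hello.o \
    //   -lc -lgcc -L/usr/gnu/lib -L/usr/gnu/lib/gcc/i686-pc-minix/4.4.3 \
    //   /usr/gnu/lib/libend.a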
@@ -3057,7 +3195,7 @@ void dragonfly::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
}
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "as"));
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
@@ -3100,12 +3238,17 @@ void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crt1.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbegin.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
} else {
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbeginS.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o")));
}
}
@@ -3118,7 +3261,8 @@ void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &II = *it;
// Don't try to pass LLVM inputs to a generic gcc.
- if (II.getType() == types::TY_LLVMBC)
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
D.Diag(clang::diag::err_drv_no_linker_llvm_support)
<< getToolChain().getTripleString();
@@ -3174,13 +3318,16 @@ void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nostartfiles)) {
if (!Args.hasArg(options::OPT_shared))
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtend.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtend.o")));
else
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtendS.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtn.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtendS.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtn.o")));
}
const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath(C, "ld"));
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.h b/contrib/llvm/tools/clang/lib/Driver/Tools.h
index d5e98dd..2a18103 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Tools.h
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.h
@@ -288,6 +288,23 @@ namespace darwin {
const ArgList &TCArgs,
const char *LinkingOutput) const;
};
+
+ class LLVM_LIBRARY_VISIBILITY Dsymutil : public DarwinTool {
+ public:
+ Dsymutil(const ToolChain &TC) : DarwinTool("darwin::Dsymutil",
+ "dsymutil", TC) {}
+
+ virtual bool acceptsPipedInput() const { return false; }
+ virtual bool canPipeOutput() const { return false; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
}
/// openbsd -- Directly call GNU Binutils assembler and linker
@@ -360,6 +377,41 @@ namespace freebsd {
};
} // end namespace freebsd
+ /// minix -- Directly call GNU Binutils assembler and linker
+namespace minix {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("minix::Assemble", "assembler",
+ TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return true; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("minix::Link", "linker", TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return true; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace minix
+
/// auroraux -- Directly call GNU Binutils assembler and linker
namespace auroraux {
class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
diff --git a/contrib/llvm/tools/clang/lib/Driver/Types.cpp b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
index 8857fb1..3c07cf2 100644
--- a/contrib/llvm/tools/clang/lib/Driver/Types.cpp
+++ b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
@@ -86,6 +86,20 @@ bool types::isAcceptedByClang(ID Id) {
case TY_CXXHeader: case TY_PP_CXXHeader:
case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
case TY_AST:
+ case TY_LLVM_IR: case TY_LLVM_BC:
+ return true;
+ }
+}
+
+bool types::isOnlyAcceptedByClang(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_AST:
+ case TY_LLVM_IR:
+ case TY_LLVM_BC:
+ case TY_RewrittenObjC:
return true;
}
}
@@ -132,15 +146,19 @@ types::ID types::lookupTypeForExtension(const char *Ext) {
.Case("ii", TY_PP_CXX)
.Case("mi", TY_PP_ObjC)
.Case("mm", TY_ObjCXX)
+ .Case("bc", TY_LLVM_BC)
.Case("cc", TY_CXX)
.Case("CC", TY_CXX)
.Case("cl", TY_CL)
.Case("cp", TY_CXX)
.Case("hh", TY_CXXHeader)
+ .Case("ll", TY_LLVM_IR)
.Case("hpp", TY_CXXHeader)
.Case("ads", TY_Ada)
.Case("adb", TY_Ada)
.Case("ast", TY_AST)
+ .Case("c++", TY_CXX)
+ .Case("C++", TY_CXX)
.Case("cxx", TY_CXX)
.Case("cpp", TY_CXX)
.Case("CPP", TY_CXX)
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
index 7b8ebf9..87b01d4 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
@@ -13,7 +13,6 @@
#include "clang/Frontend/ASTConsumers.h"
#include "clang/Frontend/DocumentXML.h"
-#include "clang/Frontend/PathDiagnosticClients.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
@@ -22,7 +21,6 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/PrettyPrinter.h"
-#include "clang/CodeGen/ModuleBuilder.h"
#include "llvm/Module.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
@@ -111,25 +109,14 @@ namespace {
}
void ASTViewer::HandleTopLevelSingleDecl(Decl *D) {
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- FD->print(llvm::errs());
-
- if (Stmt *Body = FD->getBody()) {
+ if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ D->print(llvm::errs());
+
+ if (Stmt *Body = D->getBody()) {
llvm::errs() << '\n';
Body->viewAST();
llvm::errs() << '\n';
}
- return;
- }
-
- if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
- MD->print(llvm::errs());
-
- if (MD->getBody()) {
- llvm::errs() << '\n';
- MD->getBody()->viewAST();
- llvm::errs() << '\n';
- }
}
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
index b0faf0a..e916e20 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
@@ -26,7 +26,8 @@ bool ASTMergeAction::BeginSourceFileAction(CompilerInstance &CI,
// FIXME: This is a hack. We need a better way to communicate the
// AST file, compiler instance, and file name than member variables
// of FrontendAction.
- AdaptedAction->setCurrentFile(getCurrentFile(), takeCurrentASTUnit());
+ AdaptedAction->setCurrentFile(getCurrentFile(), getCurrentFileKind(),
+ takeCurrentASTUnit());
AdaptedAction->setCompilerInstance(&CI);
return AdaptedAction->BeginSourceFileAction(CI, Filename);
}
@@ -95,8 +96,8 @@ bool ASTMergeAction::hasPCHSupport() const {
return AdaptedAction->hasPCHSupport();
}
-bool ASTMergeAction::hasASTSupport() const {
- return AdaptedAction->hasASTSupport();
+bool ASTMergeAction::hasASTFileSupport() const {
+ return AdaptedAction->hasASTFileSupport();
}
bool ASTMergeAction::hasCodeCompletionSupport() const {
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
index 4730bdc..88f0037 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
@@ -74,11 +74,13 @@ public:
return false;
}
- virtual bool ReadPredefinesBuffer(llvm::StringRef PCHPredef,
- FileID PCHBufferID,
+ virtual bool ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
llvm::StringRef OriginalFileName,
std::string &SuggestedPredefines) {
- Predefines = PCHPredef;
+ Predefines = Buffers[0].Data;
+ for (unsigned I = 1, N = Buffers.size(); I != N; ++I) {
+ Predefines += Buffers[I].Data;
+ }
return false;
}
@@ -219,6 +221,7 @@ ASTUnit *ASTUnit::LoadFromPCHFile(const std::string &Filename,
// FIXME: This is broken, we should store the TargetOptions in the PCH.
TargetOptions TargetOpts;
TargetOpts.ABI = "";
+ TargetOpts.CXXABI = "itanium";
TargetOpts.CPU = "";
TargetOpts.Features.clear();
TargetOpts.Triple = TargetTriple;
@@ -332,8 +335,10 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
assert(Clang.getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
- assert(Clang.getFrontendOpts().Inputs[0].first != FrontendOptions::IK_AST &&
+ assert(Clang.getFrontendOpts().Inputs[0].first != IK_AST &&
"FIXME: AST inputs not yet supported here!");
+ assert(Clang.getFrontendOpts().Inputs[0].first != IK_LLVM_IR &&
+ "IR inputs not support here!");
// Create the AST unit.
AST.reset(new ASTUnit(false));
@@ -354,12 +359,9 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
// Create the source manager.
Clang.setSourceManager(&AST->getSourceManager());
- // Create the preprocessor.
- Clang.createPreprocessor();
-
Act.reset(new TopLevelDeclTrackerAction(*AST));
if (!Act->BeginSourceFile(Clang, Clang.getFrontendOpts().Inputs[0].second,
- /*IsAST=*/false))
+ Clang.getFrontendOpts().Inputs[0].first))
goto error;
Act->Execute();
diff --git a/contrib/llvm/tools/clang/lib/Frontend/BoostConAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/BoostConAction.cpp
index ae150c6..4a12ff2 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/BoostConAction.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/BoostConAction.cpp
@@ -14,17 +14,17 @@
using namespace clang;
namespace {
- class BoostConASTConsumer : public ASTConsumer,
+ class BoostConASTConsumer : public ASTConsumer,
public RecursiveASTVisitor<BoostConASTConsumer> {
public:
/// HandleTranslationUnit - This method is called when the ASTs for entire
/// translation unit have been parsed.
virtual void HandleTranslationUnit(ASTContext &Ctx);
-
+
bool VisitCXXRecordDecl(CXXRecordDecl *D) {
std::cout << D->getNameAsString() << std::endl;
- return false;
- }
+ return true;
+ }
};
}
@@ -35,5 +35,5 @@ ASTConsumer *BoostConAction::CreateASTConsumer(CompilerInstance &CI,
void BoostConASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
fprintf(stderr, "Welcome to BoostCon!\n");
- Visit(Ctx.getTranslationUnitDecl());
+ TraverseDecl(Ctx.getTranslationUnitDecl());
}
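
The BoostCon fixes above hinge on RecursiveASTVisitor's contract: a Visit* callback returning false aborts the entire traversal, so the consumer must return true to keep walking, and the walk is driven by TraverseDecl rather than a plain Visit. A minimal consumer sketch under those assumptions (header location may vary by clang version):

    #include "clang/AST/RecursiveASTVisitor.h"
    #include <iostream>

    class RecordPrinter : public clang::RecursiveASTVisitor<RecordPrinter> {
    public:
      bool VisitCXXRecordDecl(clang::CXXRecordDecl *D) {
        std::cout << D->getNameAsString() << std::endl;
        return true;  // returning false here would stop the whole traversal
      }
    };
    // usage: RecordPrinter().TraverseDecl(Ctx.getTranslationUnitDecl());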
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Frontend/CMakeLists.txt
index 01592d1..8757e2c 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/lib/Frontend/CMakeLists.txt
@@ -4,23 +4,18 @@ add_clang_library(clangFrontend
ASTConsumers.cpp
ASTMerge.cpp
ASTUnit.cpp
- AnalysisConsumer.cpp
BoostConAction.cpp
CacheTokens.cpp
- CodeGenAction.cpp
CompilerInstance.cpp
CompilerInvocation.cpp
DeclXML.cpp
DependencyFile.cpp
DiagChecker.cpp
DocumentXML.cpp
- FixItRewriter.cpp
FrontendAction.cpp
FrontendActions.cpp
FrontendOptions.cpp
GeneratePCH.cpp
- HTMLDiagnostics.cpp
- HTMLPrint.cpp
InitHeaderSearch.cpp
InitPreprocessor.cpp
LangStandards.cpp
@@ -30,12 +25,8 @@ add_clang_library(clangFrontend
PCHWriter.cpp
PCHWriterDecl.cpp
PCHWriterStmt.cpp
- PlistDiagnostics.cpp
PrintParserCallbacks.cpp
PrintPreprocessedOutput.cpp
- RewriteMacros.cpp
- RewriteObjC.cpp
- RewriteTest.cpp
StmtXML.cpp
TextDiagnosticBuffer.cpp
TextDiagnosticPrinter.cpp
@@ -53,7 +44,10 @@ IF(MSVC)
ENDIF(MSVC)
add_dependencies(clangFrontend
+ ClangAttrClasses
+ ClangAttrList
ClangDiagnosticFrontend
ClangDiagnosticLex
ClangDiagnosticSema
+ ClangDeclNodes
ClangStmtNodes)
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CodeGenAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/CodeGenAction.cpp
deleted file mode 100644
index 3416aa8..0000000
--- a/contrib/llvm/tools/clang/lib/Frontend/CodeGenAction.cpp
+++ /dev/null
@@ -1,593 +0,0 @@
-//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Frontend/CodeGenAction.h"
-#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/TargetInfo.h"
-#include "clang/Basic/TargetOptions.h"
-#include "clang/AST/ASTConsumer.h"
-#include "clang/AST/ASTContext.h"
-#include "clang/AST/DeclGroup.h"
-#include "clang/CodeGen/CodeGenOptions.h"
-#include "clang/CodeGen/ModuleBuilder.h"
-#include "clang/Frontend/ASTConsumers.h"
-#include "clang/Frontend/CompilerInstance.h"
-#include "clang/Frontend/FrontendDiagnostic.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/PassManager.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/Assembly/PrintModulePass.h"
-#include "llvm/Analysis/CallGraph.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/Bitcode/ReaderWriter.h"
-#include "llvm/CodeGen/RegAllocRegistry.h"
-#include "llvm/CodeGen/SchedulerRegistry.h"
-#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/StandardPasses.h"
-#include "llvm/Support/Timer.h"
-#include "llvm/Target/SubtargetFeature.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetRegistry.h"
-using namespace clang;
-using namespace llvm;
-
-namespace {
- enum BackendAction {
- Backend_EmitAssembly, ///< Emit native assembly files
- Backend_EmitBC, ///< Emit LLVM bitcode files
- Backend_EmitLL, ///< Emit human-readable LLVM assembly
- Backend_EmitNothing, ///< Don't emit anything (benchmarking mode)
- Backend_EmitMCNull, ///< Run CodeGen, but don't emit anything
- Backend_EmitObj ///< Emit native object files
- };
-
- class BackendConsumer : public ASTConsumer {
- Diagnostic &Diags;
- BackendAction Action;
- const CodeGenOptions &CodeGenOpts;
- const LangOptions &LangOpts;
- const TargetOptions &TargetOpts;
- llvm::raw_ostream *AsmOutStream;
- llvm::formatted_raw_ostream FormattedOutStream;
- ASTContext *Context;
-
- Timer LLVMIRGeneration;
- Timer CodeGenerationTime;
-
- llvm::OwningPtr<CodeGenerator> Gen;
-
- llvm::OwningPtr<llvm::Module> TheModule;
- llvm::TargetData *TheTargetData;
-
- mutable FunctionPassManager *CodeGenPasses;
- mutable PassManager *PerModulePasses;
- mutable FunctionPassManager *PerFunctionPasses;
-
- FunctionPassManager *getCodeGenPasses() const;
- PassManager *getPerModulePasses() const;
- FunctionPassManager *getPerFunctionPasses() const;
-
- void CreatePasses();
-
- /// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR.
- ///
- /// \return True on success.
- bool AddEmitPasses();
-
- void EmitAssembly();
-
- public:
- BackendConsumer(BackendAction action, Diagnostic &_Diags,
- const LangOptions &langopts, const CodeGenOptions &compopts,
- const TargetOptions &targetopts, bool TimePasses,
- const std::string &infile, llvm::raw_ostream *OS,
- LLVMContext &C) :
- Diags(_Diags),
- Action(action),
- CodeGenOpts(compopts),
- LangOpts(langopts),
- TargetOpts(targetopts),
- AsmOutStream(OS),
- LLVMIRGeneration("LLVM IR Generation Time"),
- CodeGenerationTime("Code Generation Time"),
- Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)),
- TheTargetData(0),
- CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) {
-
- if (AsmOutStream)
- FormattedOutStream.setStream(*AsmOutStream,
- formatted_raw_ostream::PRESERVE_STREAM);
-
- llvm::TimePassesIsEnabled = TimePasses;
- }
-
- ~BackendConsumer() {
- delete TheTargetData;
- delete CodeGenPasses;
- delete PerModulePasses;
- delete PerFunctionPasses;
- }
-
- llvm::Module *takeModule() { return TheModule.take(); }
-
- virtual void Initialize(ASTContext &Ctx) {
- Context = &Ctx;
-
- if (llvm::TimePassesIsEnabled)
- LLVMIRGeneration.startTimer();
-
- Gen->Initialize(Ctx);
-
- TheModule.reset(Gen->GetModule());
- TheTargetData = new llvm::TargetData(Ctx.Target.getTargetDescription());
-
- if (llvm::TimePassesIsEnabled)
- LLVMIRGeneration.stopTimer();
- }
-
- virtual void HandleTopLevelDecl(DeclGroupRef D) {
- PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
- Context->getSourceManager(),
- "LLVM IR generation of declaration");
-
- if (llvm::TimePassesIsEnabled)
- LLVMIRGeneration.startTimer();
-
- Gen->HandleTopLevelDecl(D);
-
- if (llvm::TimePassesIsEnabled)
- LLVMIRGeneration.stopTimer();
- }
-
- virtual void HandleTranslationUnit(ASTContext &C) {
- {
- PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
- if (llvm::TimePassesIsEnabled)
- LLVMIRGeneration.startTimer();
-
- Gen->HandleTranslationUnit(C);
-
- if (llvm::TimePassesIsEnabled)
- LLVMIRGeneration.stopTimer();
- }
-
- // EmitAssembly times and registers crash info itself.
- EmitAssembly();
-
- // Force a flush here in case we never get released.
- if (AsmOutStream)
- FormattedOutStream.flush();
- }
-
- virtual void HandleTagDeclDefinition(TagDecl *D) {
- PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
- Context->getSourceManager(),
- "LLVM IR generation of declaration");
- Gen->HandleTagDeclDefinition(D);
- }
-
- virtual void CompleteTentativeDefinition(VarDecl *D) {
- Gen->CompleteTentativeDefinition(D);
- }
-
- virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
- Gen->HandleVTable(RD, DefinitionRequired);
- }
-
- static void InlineAsmDiagHandler(const llvm::SMDiagnostic &SM,void *Context,
- unsigned LocCookie) {
- SourceLocation Loc = SourceLocation::getFromRawEncoding(LocCookie);
- ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc);
- }
-
- void InlineAsmDiagHandler2(const llvm::SMDiagnostic &,
- SourceLocation LocCookie);
- };
-}
-
-FunctionPassManager *BackendConsumer::getCodeGenPasses() const {
- if (!CodeGenPasses) {
- CodeGenPasses = new FunctionPassManager(&*TheModule);
- CodeGenPasses->add(new TargetData(*TheTargetData));
- }
-
- return CodeGenPasses;
-}
-
-PassManager *BackendConsumer::getPerModulePasses() const {
- if (!PerModulePasses) {
- PerModulePasses = new PassManager();
- PerModulePasses->add(new TargetData(*TheTargetData));
- }
-
- return PerModulePasses;
-}
-
-FunctionPassManager *BackendConsumer::getPerFunctionPasses() const {
- if (!PerFunctionPasses) {
- PerFunctionPasses = new FunctionPassManager(&*TheModule);
- PerFunctionPasses->add(new TargetData(*TheTargetData));
- }
-
- return PerFunctionPasses;
-}
-
-bool BackendConsumer::AddEmitPasses() {
- if (Action == Backend_EmitNothing)
- return true;
-
- if (Action == Backend_EmitBC) {
- getPerModulePasses()->add(createBitcodeWriterPass(FormattedOutStream));
- return true;
- }
-
- if (Action == Backend_EmitLL) {
- getPerModulePasses()->add(createPrintModulePass(&FormattedOutStream));
- return true;
- }
-
- bool Fast = CodeGenOpts.OptimizationLevel == 0;
-
- // Create the TargetMachine for generating code.
- std::string Error;
- std::string Triple = TheModule->getTargetTriple();
- const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
- if (!TheTarget) {
- Diags.Report(diag::err_fe_unable_to_create_target) << Error;
- return false;
- }
-
- // FIXME: Expose these capabilities via actual APIs!!!! Aside from just
- // being gross, this is also totally broken if we ever care about
- // concurrency.
- llvm::NoFramePointerElim = CodeGenOpts.DisableFPElim;
- if (CodeGenOpts.FloatABI == "soft")
- llvm::FloatABIType = llvm::FloatABI::Soft;
- else if (CodeGenOpts.FloatABI == "hard")
- llvm::FloatABIType = llvm::FloatABI::Hard;
- else {
- assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
- llvm::FloatABIType = llvm::FloatABI::Default;
- }
- NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
- llvm::UseSoftFloat = CodeGenOpts.SoftFloat;
- UnwindTablesMandatory = CodeGenOpts.UnwindTables;
-
- TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose);
-
- TargetMachine::setFunctionSections(CodeGenOpts.FunctionSections);
- TargetMachine::setDataSections (CodeGenOpts.DataSections);
-
- // FIXME: Parse this earlier.
- if (CodeGenOpts.RelocationModel == "static") {
- TargetMachine::setRelocationModel(llvm::Reloc::Static);
- } else if (CodeGenOpts.RelocationModel == "pic") {
- TargetMachine::setRelocationModel(llvm::Reloc::PIC_);
- } else {
- assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
- "Invalid PIC model!");
- TargetMachine::setRelocationModel(llvm::Reloc::DynamicNoPIC);
- }
- // FIXME: Parse this earlier.
- if (CodeGenOpts.CodeModel == "small") {
- TargetMachine::setCodeModel(llvm::CodeModel::Small);
- } else if (CodeGenOpts.CodeModel == "kernel") {
- TargetMachine::setCodeModel(llvm::CodeModel::Kernel);
- } else if (CodeGenOpts.CodeModel == "medium") {
- TargetMachine::setCodeModel(llvm::CodeModel::Medium);
- } else if (CodeGenOpts.CodeModel == "large") {
- TargetMachine::setCodeModel(llvm::CodeModel::Large);
- } else {
- assert(CodeGenOpts.CodeModel.empty() && "Invalid code model!");
- TargetMachine::setCodeModel(llvm::CodeModel::Default);
- }
-
- std::vector<const char *> BackendArgs;
- BackendArgs.push_back("clang"); // Fake program name.
- if (!CodeGenOpts.DebugPass.empty()) {
- BackendArgs.push_back("-debug-pass");
- BackendArgs.push_back(CodeGenOpts.DebugPass.c_str());
- }
- if (!CodeGenOpts.LimitFloatPrecision.empty()) {
- BackendArgs.push_back("-limit-float-precision");
- BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str());
- }
- if (llvm::TimePassesIsEnabled)
- BackendArgs.push_back("-time-passes");
- BackendArgs.push_back(0);
- llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
- const_cast<char **>(&BackendArgs[0]));
-
- std::string FeaturesStr;
- if (TargetOpts.CPU.size() || TargetOpts.Features.size()) {
- SubtargetFeatures Features;
- Features.setCPU(TargetOpts.CPU);
- for (std::vector<std::string>::const_iterator
- it = TargetOpts.Features.begin(),
- ie = TargetOpts.Features.end(); it != ie; ++it)
- Features.AddFeature(*it);
- FeaturesStr = Features.getString();
- }
- TargetMachine *TM = TheTarget->createTargetMachine(Triple, FeaturesStr);
-
- if (CodeGenOpts.RelaxAll)
- TM->setMCRelaxAll(true);
-
- // Set register scheduler & allocation policy.
- RegisterScheduler::setDefault(createDefaultScheduler);
- RegisterRegAlloc::setDefault(Fast ? createLocalRegisterAllocator :
- createLinearScanRegisterAllocator);
-
- // Create the code generator passes.
- FunctionPassManager *PM = getCodeGenPasses();
- CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
-
- switch (CodeGenOpts.OptimizationLevel) {
- default: break;
- case 0: OptLevel = CodeGenOpt::None; break;
- case 3: OptLevel = CodeGenOpt::Aggressive; break;
- }
-
- // Normal mode, emit a .s or .o file by running the code generator. Note,
- // this also adds codegenerator level optimization passes.
- TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile;
- if (Action == Backend_EmitObj)
- CGFT = TargetMachine::CGFT_ObjectFile;
- else if (Action == Backend_EmitMCNull)
- CGFT = TargetMachine::CGFT_Null;
- else
- assert(Action == Backend_EmitAssembly && "Invalid action!");
- if (TM->addPassesToEmitFile(*PM, FormattedOutStream, CGFT, OptLevel,
- /*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
- Diags.Report(diag::err_fe_unable_to_interface_with_target);
- return false;
- }
-
- return true;
-}
-
-void BackendConsumer::CreatePasses() {
- unsigned OptLevel = CodeGenOpts.OptimizationLevel;
- CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
-
- // Handle disabling of LLVM optimization, where we want to preserve the
- // internal module before any optimization.
- if (CodeGenOpts.DisableLLVMOpts) {
- OptLevel = 0;
- Inlining = CodeGenOpts.NoInlining;
- }
-
- // In -O0 if checking is disabled, we don't even have per-function passes.
- if (CodeGenOpts.VerifyModule)
- getPerFunctionPasses()->add(createVerifierPass());
-
- // Assume that standard function passes aren't run for -O0.
- if (OptLevel > 0)
- llvm::createStandardFunctionPasses(getPerFunctionPasses(), OptLevel);
-
- llvm::Pass *InliningPass = 0;
- switch (Inlining) {
- case CodeGenOptions::NoInlining: break;
- case CodeGenOptions::NormalInlining: {
- // Set the inline threshold following llvm-gcc.
- //
- // FIXME: Derive these constants in a principled fashion.
- unsigned Threshold = 225;
- if (CodeGenOpts.OptimizeSize)
- Threshold = 75;
- else if (OptLevel > 2)
- Threshold = 275;
- InliningPass = createFunctionInliningPass(Threshold);
- break;
- }
- case CodeGenOptions::OnlyAlwaysInlining:
- InliningPass = createAlwaysInlinerPass(); // Respect always_inline
- break;
- }
-
- // For now we always create per module passes.
- PassManager *PM = getPerModulePasses();
- llvm::createStandardModulePasses(PM, OptLevel, CodeGenOpts.OptimizeSize,
- CodeGenOpts.UnitAtATime,
- CodeGenOpts.UnrollLoops,
- /*SimplifyLibCalls=*/!LangOpts.NoBuiltin,
- /*HaveExceptions=*/true,
- InliningPass);
-}
-
-/// EmitAssembly - Handle interaction with LLVM backend to generate
-/// actual machine code.
-void BackendConsumer::EmitAssembly() {
- // Silently ignore if we weren't initialized for some reason.
- if (!TheModule || !TheTargetData)
- return;
-
- TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : 0);
-
- // Make sure IR generation is happy with the module. This is
- // released by the module provider.
- Module *M = Gen->ReleaseModule();
- if (!M) {
- // The module has been released by IR gen on failures, do not
- // double free.
- TheModule.take();
- return;
- }
-
- assert(TheModule.get() == M &&
- "Unexpected module change during IR generation");
-
- CreatePasses();
- if (!AddEmitPasses())
- return;
-
- // Run passes. For now we do all passes at once, but eventually we
- // would like to have the option of streaming code generation.
-
- if (PerFunctionPasses) {
- PrettyStackTraceString CrashInfo("Per-function optimization");
-
- PerFunctionPasses->doInitialization();
- for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I)
- if (!I->isDeclaration())
- PerFunctionPasses->run(*I);
- PerFunctionPasses->doFinalization();
- }
-
- if (PerModulePasses) {
- PrettyStackTraceString CrashInfo("Per-module optimization passes");
- PerModulePasses->run(*M);
- }
-
- if (CodeGenPasses) {
- PrettyStackTraceString CrashInfo("Code generation");
-
- // Install an inline asm handler so that diagnostics get printed through our
- // diagnostics hooks.
- LLVMContext &Ctx = TheModule->getContext();
- void *OldHandler = Ctx.getInlineAsmDiagnosticHandler();
- void *OldContext = Ctx.getInlineAsmDiagnosticContext();
- Ctx.setInlineAsmDiagnosticHandler((void*)(intptr_t)InlineAsmDiagHandler,
- this);
-
- CodeGenPasses->doInitialization();
- for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I)
- if (!I->isDeclaration())
- CodeGenPasses->run(*I);
- CodeGenPasses->doFinalization();
-
- Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
- }
-}
-
-/// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr
-/// buffer to be a valid FullSourceLoc.
-static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D,
- SourceManager &CSM) {
- // Get both the clang and llvm source managers. The location is relative to
- // a memory buffer that the LLVM Source Manager is handling, so we need to add
- // a copy to the Clang source manager.
- const llvm::SourceMgr &LSM = *D.getSourceMgr();
-
- // We need to copy the underlying LLVM memory buffer because llvm::SourceMgr
- // already owns its copy and clang::SourceManager wants to own one as well.
- const MemoryBuffer *LBuf =
- LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
-
- // Create the copy and transfer ownership to clang::SourceManager.
- llvm::MemoryBuffer *CBuf =
- llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(),
- LBuf->getBufferIdentifier());
- FileID FID = CSM.createFileIDForMemBuffer(CBuf);
-
- // Translate the offset into the file.
- unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart();
- SourceLocation NewLoc =
- CSM.getLocForStartOfFile(FID).getFileLocWithOffset(Offset);
- return FullSourceLoc(NewLoc, CSM);
-}
-
-
-/// InlineAsmDiagHandler2 - This function is invoked when the backend hits an
-/// error parsing inline asm. The SMDiagnostic indicates the error relative to
-/// the temporary memory buffer that the inline asm parser has set up.
-void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
- SourceLocation LocCookie) {
- // There are a couple of different kinds of errors we could get here. First,
- // we re-format the SMDiagnostic in terms of a clang diagnostic.
-
- // Strip "error: " off the start of the message string.
- llvm::StringRef Message = D.getMessage();
- if (Message.startswith("error: "))
- Message = Message.substr(7);
-
- // There are two cases: the SMDiagnostic could have an inline asm source
- // location or it might not. If it does, translate the location.
- FullSourceLoc Loc;
- if (D.getLoc() != SMLoc())
- Loc = ConvertBackendLocation(D, Context->getSourceManager());
- Diags.Report(Loc, diag::err_fe_inline_asm).AddString(Message);
-
- // This could be a problem with no clang-level source location information.
- // In this case, LocCookie is invalid. If there is source level information,
- // print a "generated from" note.
- if (LocCookie.isValid())
- Diags.Report(FullSourceLoc(LocCookie, Context->getSourceManager()),
- diag::note_fe_inline_asm_here);
-}
-
-//
-
-CodeGenAction::CodeGenAction(unsigned _Act) : Act(_Act) {}
-
-CodeGenAction::~CodeGenAction() {}
-
-void CodeGenAction::EndSourceFileAction() {
- // If the consumer creation failed, do nothing.
- if (!getCompilerInstance().hasASTConsumer())
- return;
-
- // Steal the module from the consumer.
- BackendConsumer *Consumer = static_cast<BackendConsumer*>(
- &getCompilerInstance().getASTConsumer());
-
- TheModule.reset(Consumer->takeModule());
-}
-
-llvm::Module *CodeGenAction::takeModule() {
- return TheModule.take();
-}
-
-ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile) {
- BackendAction BA = static_cast<BackendAction>(Act);
- llvm::OwningPtr<llvm::raw_ostream> OS;
- switch (BA) {
- case Backend_EmitAssembly:
- OS.reset(CI.createDefaultOutputFile(false, InFile, "s"));
- break;
- case Backend_EmitLL:
- OS.reset(CI.createDefaultOutputFile(false, InFile, "ll"));
- break;
- case Backend_EmitBC:
- OS.reset(CI.createDefaultOutputFile(true, InFile, "bc"));
- break;
- case Backend_EmitNothing:
- break;
- case Backend_EmitMCNull:
- case Backend_EmitObj:
- OS.reset(CI.createDefaultOutputFile(true, InFile, "o"));
- break;
- }
- if (BA != Backend_EmitNothing && !OS)
- return 0;
-
- return new BackendConsumer(BA, CI.getDiagnostics(), CI.getLangOpts(),
- CI.getCodeGenOpts(), CI.getTargetOpts(),
- CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
- CI.getLLVMContext());
-}
-
-EmitAssemblyAction::EmitAssemblyAction()
- : CodeGenAction(Backend_EmitAssembly) {}
-
-EmitBCAction::EmitBCAction() : CodeGenAction(Backend_EmitBC) {}
-
-EmitLLVMAction::EmitLLVMAction() : CodeGenAction(Backend_EmitLL) {}
-
-EmitLLVMOnlyAction::EmitLLVMOnlyAction() : CodeGenAction(Backend_EmitNothing) {}
-
-EmitCodeGenOnlyAction::EmitCodeGenOnlyAction() : CodeGenAction(Backend_EmitMCNull) {}
-
-EmitObjAction::EmitObjAction() : CodeGenAction(Backend_EmitObj) {}
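
The one piece of tuning policy in the removed block above is the inline threshold chosen in CreatePasses(): 75 for -Os, 275 above -O2, and an llvm-gcc-derived default of 225. A minimal standalone sketch of just that decision, using a hypothetical Opts struct in place of clang's CodeGenOptions:

#include <cassert>

// Stand-in for the relevant fields of clang's CodeGenOptions.
struct Opts {
  unsigned OptimizationLevel; // 0..3
  bool OptimizeSize;          // -Os
};

// Mirrors the heuristic above: -Os wins, then -O3, else the default of 225.
static unsigned inlineThreshold(const Opts &O) {
  if (O.OptimizeSize)
    return 75;
  if (O.OptimizationLevel > 2)
    return 275;
  return 225;
}

int main() {
  assert(inlineThreshold({2, true}) == 75);   // -Os
  assert(inlineThreshold({3, false}) == 275); // -O3
  assert(inlineThreshold({2, false}) == 225); // default
}
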
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
index 2b25168..5037c83 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
@@ -37,7 +37,7 @@
using namespace clang;
CompilerInstance::CompilerInstance()
- : Invocation(new CompilerInvocation()) {
+ : Invocation(new CompilerInvocation()), Reader(0) {
}
CompilerInstance::~CompilerInstance() {
@@ -255,6 +255,8 @@ void CompilerInstance::createPCHExternalASTSource(llvm::StringRef Path) {
llvm::OwningPtr<ExternalASTSource> Source;
Source.reset(createPCHExternalASTSource(Path, getHeaderSearchOpts().Sysroot,
getPreprocessor(), getASTContext()));
+ // Remember the PCHReader, but in a non-owning way.
+ Reader = static_cast<PCHReader*>(Source.get());
getASTContext().setExternalSource(Source);
}
@@ -442,7 +444,7 @@ bool CompilerInstance::InitializeSourceManager(llvm::StringRef InputFile,
}
} else {
llvm::MemoryBuffer *SB = llvm::MemoryBuffer::getSTDIN();
- SourceMgr.createMainFileIDForMemBuffer(SB);
+ if (SB) SourceMgr.createMainFileIDForMemBuffer(SB);
if (SourceMgr.getMainFileID().isInvalid()) {
Diags.Report(diag::err_fe_error_reading_stdin);
return false;
@@ -489,27 +491,11 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
for (unsigned i = 0, e = getFrontendOpts().Inputs.size(); i != e; ++i) {
const std::string &InFile = getFrontendOpts().Inputs[i].second;
- // If we aren't using an AST file, set up the file and source managers and
- // the preprocessor.
- bool IsAST = getFrontendOpts().Inputs[i].first == FrontendOptions::IK_AST;
- if (!IsAST) {
- if (!i) {
- // Create a file manager object to provide access to and cache the
- // filesystem.
- createFileManager();
-
- // Create the source manager.
- createSourceManager();
- } else {
- // Reset the ID tables if we are reusing the SourceManager.
- getSourceManager().clearIDTables();
- }
-
- // Create the preprocessor.
- createPreprocessor();
- }
+ // Reset the ID tables if we are reusing the SourceManager.
+ if (hasSourceManager())
+ getSourceManager().clearIDTables();
- if (Act.BeginSourceFile(*this, InFile, IsAST)) {
+ if (Act.BeginSourceFile(*this, InFile, getFrontendOpts().Inputs[i].first)) {
Act.Execute();
Act.EndSourceFile();
}
@@ -530,7 +516,7 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
OS << " generated.\n";
}
- if (getFrontendOpts().ShowStats) {
+ if (getFrontendOpts().ShowStats && hasFileManager()) {
getFileManager().PrintStats();
OS << "\n";
}
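
The ExecuteAction hunks above move file- and source-manager creation out of the input loop (BeginSourceFile now creates them on demand) and keep only the ID-table reset for a reused SourceManager. A condensed sketch of the resulting loop shape, with hypothetical stand-in types rather than the real CompilerInstance:

#include <memory>
#include <string>
#include <vector>

struct SourceManager {
  void clearIDTables() { /* drop per-input file IDs, keep buffers */ }
};

struct Instance {
  std::unique_ptr<SourceManager> SM; // created lazily by BeginSourceFile
  bool hasSourceManager() const { return SM != nullptr; }
  SourceManager &getSourceManager() { return *SM; }
};

int main() {
  Instance CI;
  std::vector<std::string> Inputs = {"a.c", "b.c"};
  for (const std::string &In : Inputs) {
    // Reset the ID tables if we are reusing the SourceManager.
    if (CI.hasSourceManager())
      CI.getSourceManager().clearIDTables();
    // BeginSourceFile(CI, In, Kind); Execute(); EndSourceFile();
    (void)In;
  }
}
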
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
index ff372e1..53debdb 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
@@ -112,6 +112,8 @@ static void AnalyzerOptsToArgs(const AnalyzerOptions &Opts,
Res.push_back("-analyzer-experimental-checks");
if (Opts.EnableExperimentalInternalChecks)
Res.push_back("-analyzer-experimental-internal-checks");
+ if (Opts.EnableIdempotentOperationChecker)
+ Res.push_back("-analyzer-idempotent-operation");
}
static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
@@ -132,6 +134,8 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
Res.push_back("-fno-common");
if (Opts.NoImplicitFloat)
Res.push_back("-no-implicit-float");
+ if (Opts.OmitLeafFramePointer)
+ Res.push_back("-momit-leaf-frame-pointer");
if (Opts.OptimizeSize) {
assert(Opts.OptimizationLevel == 2 && "Invalid options!");
Res.push_back("-Os");
@@ -280,20 +284,21 @@ static void DiagnosticOptsToArgs(const DiagnosticOptions &Opts,
Res.push_back("-W" + Opts.Warnings[i]);
}
-static const char *getInputKindName(FrontendOptions::InputKind Kind) {
+static const char *getInputKindName(InputKind Kind) {
switch (Kind) {
- case FrontendOptions::IK_None: break;
- case FrontendOptions::IK_AST: return "ast";
- case FrontendOptions::IK_Asm: return "assembler-with-cpp";
- case FrontendOptions::IK_C: return "c";
- case FrontendOptions::IK_CXX: return "c++";
- case FrontendOptions::IK_ObjC: return "objective-c";
- case FrontendOptions::IK_ObjCXX: return "objective-c++";
- case FrontendOptions::IK_OpenCL: return "cl";
- case FrontendOptions::IK_PreprocessedC: return "cpp-output";
- case FrontendOptions::IK_PreprocessedCXX: return "c++-cpp-output";
- case FrontendOptions::IK_PreprocessedObjC: return "objective-c-cpp-output";
- case FrontendOptions::IK_PreprocessedObjCXX:return "objective-c++-cpp-output";
+ case IK_None: break;
+ case IK_AST: return "ast";
+ case IK_Asm: return "assembler-with-cpp";
+ case IK_C: return "c";
+ case IK_CXX: return "c++";
+ case IK_LLVM_IR: return "ir";
+ case IK_ObjC: return "objective-c";
+ case IK_ObjCXX: return "objective-c++";
+ case IK_OpenCL: return "cl";
+ case IK_PreprocessedC: return "cpp-output";
+ case IK_PreprocessedCXX: return "c++-cpp-output";
+ case IK_PreprocessedObjC: return "objective-c-cpp-output";
+ case IK_PreprocessedObjCXX:return "objective-c++-cpp-output";
}
llvm_unreachable("Unexpected language kind!");
@@ -348,6 +353,8 @@ static void FrontendOptsToArgs(const FrontendOptions &Opts,
Res.push_back("-disable-free");
if (Opts.RelocatablePCH)
Res.push_back("-relocatable-pch");
+ if (Opts.ChainedPCH)
+ Res.push_back("-chained-pch");
if (Opts.ShowHelp)
Res.push_back("-help");
if (Opts.ShowMacrosInCodeCompletion)
@@ -396,6 +403,10 @@ static void FrontendOptsToArgs(const FrontendOptions &Opts,
if (!Opts.ActionName.empty()) {
Res.push_back("-plugin");
Res.push_back(Opts.ActionName);
+ for(unsigned i = 0, e = Opts.PluginArgs.size(); i != e; ++i) {
+ Res.push_back("-plugin-arg-" + Opts.ActionName);
+ Res.push_back(Opts.PluginArgs[i]);
+ }
}
for (unsigned i = 0, e = Opts.Plugins.size(); i != e; ++i) {
Res.push_back("-load");
@@ -546,8 +557,11 @@ static void LangOptsToArgs(const LangOptions &Opts,
Res.push_back("-femit-all-decls");
if (Opts.MathErrno)
Res.push_back("-fmath-errno");
- if (Opts.OverflowChecking)
- Res.push_back("-ftrapv");
+ switch (Opts.getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined: break;
+ case LangOptions::SOB_Defined: Res.push_back("-fwrapv"); break;
+ case LangOptions::SOB_Trapping: Res.push_back("-ftrapv"); break;
+ }
if (Opts.HeinousExtensions)
Res.push_back("-fheinous-gnu-extensions");
// Optimize is implicit.
@@ -596,6 +610,9 @@ static void LangOptsToArgs(const LangOptions &Opts,
Res.push_back("protected");
}
}
+ if (Opts.InlineVisibilityHidden)
+ Res.push_back("-fvisibility-inlines-hidden");
+
if (Opts.getStackProtectorMode() != 0) {
Res.push_back("-stack-protector");
Res.push_back(llvm::utostr(Opts.getStackProtectorMode()));
@@ -681,6 +698,8 @@ static void TargetOptsToArgs(const TargetOptions &Opts,
Res.push_back("-target-abi");
Res.push_back(Opts.ABI);
}
+ Res.push_back("-cxx-abi");
+ Res.push_back(Opts.CXXABI);
for (unsigned i = 0, e = Opts.Features.size(); i != e; ++i) {
Res.push_back("-target-feature");
Res.push_back(Opts.Features[i]);
@@ -728,7 +747,7 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
// FIXME: Error handling.
if (Value == NumStores)
Diags.Report(diag::err_drv_invalid_value)
- << Args.getLastArg(OPT_O)->getAsString(Args) << Name;
+ << A->getAsString(Args) << Name;
else
Opts.AnalysisStoreOpt = Value;
}
@@ -743,7 +762,7 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
// FIXME: Error handling.
if (Value == NumConstraints)
Diags.Report(diag::err_drv_invalid_value)
- << Args.getLastArg(OPT_O)->getAsString(Args) << Name;
+ << A->getAsString(Args) << Name;
else
Opts.AnalysisConstraintsOpt = Value;
}
@@ -758,7 +777,7 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
// FIXME: Error handling.
if (Value == NUM_ANALYSIS_DIAG_CLIENTS)
Diags.Report(diag::err_drv_invalid_value)
- << Args.getLastArg(OPT_O)->getAsString(Args) << Name;
+ << A->getAsString(Args) << Name;
else
Opts.AnalysisDiagOpt = Value;
}
@@ -775,6 +794,8 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
Opts.EnableExperimentalChecks = Args.hasArg(OPT_analyzer_experimental_checks);
Opts.EnableExperimentalInternalChecks =
Args.hasArg(OPT_analyzer_experimental_internal_checks);
+ Opts.EnableIdempotentOperationChecker =
+ Args.hasArg(OPT_analyzer_idempotent_operation);
Opts.TrimGraph = Args.hasArg(OPT_trim_egraph);
Opts.MaxNodes = Args.getLastArgIntValue(OPT_analyzer_max_nodes, 150000,Diags);
Opts.MaxLoop = Args.getLastArgIntValue(OPT_analyzer_max_loop, 3, Diags);
@@ -808,6 +829,8 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.NoCommon = Args.hasArg(OPT_fno_common);
Opts.NoImplicitFloat = Args.hasArg(OPT_no_implicit_float);
Opts.OptimizeSize = Args.hasArg(OPT_Os);
+ Opts.SimplifyLibCalls = !(Args.hasArg(OPT_fno_builtin) ||
+ Args.hasArg(OPT_ffreestanding));
Opts.UnrollLoops = (Opts.OptimizationLevel > 1 && !Opts.OptimizeSize);
Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose);
@@ -820,6 +843,7 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.LimitFloatPrecision = Args.getLastArgValue(OPT_mlimit_float_precision);
Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss);
Opts.RelaxAll = Args.hasArg(OPT_mrelax_all);
+ Opts.OmitLeafFramePointer = Args.hasArg(OPT_momit_leaf_frame_pointer);
Opts.SoftFloat = Args.hasArg(OPT_msoft_float);
Opts.UnwindTables = Args.hasArg(OPT_munwind_tables);
Opts.RelocationModel = Args.getLastArgValue(OPT_mrelocation_model, "pic");
@@ -830,6 +854,8 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.MainFileName = Args.getLastArgValue(OPT_main_file_name);
Opts.VerifyModule = !Args.hasArg(OPT_disable_llvm_verifier);
+ Opts.InstrumentFunctions = Args.hasArg(OPT_finstrument_functions);
+
if (Arg *A = Args.getLastArg(OPT_fobjc_dispatch_method_EQ)) {
llvm::StringRef Name = A->getValue(Args);
unsigned Method = llvm::StringSwitch<unsigned>(Name)
@@ -866,7 +892,18 @@ static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Opts.ShowFixits = !Args.hasArg(OPT_fno_diagnostics_fixit_info);
Opts.ShowLocation = !Args.hasArg(OPT_fno_show_source_location);
Opts.ShowOptionNames = Args.hasArg(OPT_fdiagnostics_show_option);
-
+
+ llvm::StringRef ShowOverloads =
+ Args.getLastArgValue(OPT_fshow_overloads_EQ, "all");
+ if (ShowOverloads == "best")
+ Opts.ShowOverloads = Diagnostic::Ovl_Best;
+ else if (ShowOverloads == "all")
+ Opts.ShowOverloads = Diagnostic::Ovl_All;
+ else
+ Diags.Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_fshow_overloads_EQ)->getAsString(Args)
+ << ShowOverloads;
+
llvm::StringRef ShowCategory =
Args.getLastArgValue(OPT_fdiagnostics_show_category, "none");
if (ShowCategory == "none")
@@ -903,8 +940,8 @@ static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Opts.Warnings = Args.getAllArgValues(OPT_W);
}
-static FrontendOptions::InputKind
-ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Diagnostic &Diags) {
+static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
+ Diagnostic &Diags) {
using namespace cc1options;
Opts.ProgramAction = frontend::ParseSyntaxOnly;
if (const Arg *A = Args.getLastArg(OPT_Action_Group)) {
@@ -972,9 +1009,17 @@ ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Diagnostic &Diags) {
Opts.ProgramAction = frontend::RunPreprocessorOnly; break;
}
}
- if (const Arg *A = Args.getLastArg(OPT_plugin)) {
+
+ if (const Arg* A = Args.getLastArg(OPT_plugin)) {
+ Opts.Plugins.push_back(A->getValue(Args,0));
Opts.ProgramAction = frontend::PluginAction;
Opts.ActionName = A->getValue(Args);
+
+ for (arg_iterator it = Args.filtered_begin(OPT_plugin_arg),
+ end = Args.filtered_end(); it != end; ++it) {
+ if ((*it)->getValue(Args, 0) == Opts.ActionName)
+ Opts.PluginArgs.push_back((*it)->getValue(Args, 1));
+ }
}
if (const Arg *A = Args.getLastArg(OPT_code_completion_at)) {
@@ -991,6 +1036,7 @@ ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Diagnostic &Diags) {
Opts.OutputFile = Args.getLastArgValue(OPT_o);
Opts.Plugins = Args.getAllArgValues(OPT_load);
Opts.RelocatablePCH = Args.hasArg(OPT_relocatable_pch);
+ Opts.ChainedPCH = Args.hasArg(OPT_chained_pch);
Opts.ShowHelp = Args.hasArg(OPT_help);
Opts.ShowMacrosInCodeCompletion = Args.hasArg(OPT_code_completion_macros);
Opts.ShowCodePatternsInCodeCompletion
@@ -1002,28 +1048,29 @@ ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Diagnostic &Diags) {
Opts.ASTMergeFiles = Args.getAllArgValues(OPT_ast_merge);
Opts.LLVMArgs = Args.getAllArgValues(OPT_mllvm);
- FrontendOptions::InputKind DashX = FrontendOptions::IK_None;
+ InputKind DashX = IK_None;
if (const Arg *A = Args.getLastArg(OPT_x)) {
- DashX = llvm::StringSwitch<FrontendOptions::InputKind>(A->getValue(Args))
- .Case("c", FrontendOptions::IK_C)
- .Case("cl", FrontendOptions::IK_OpenCL)
- .Case("c", FrontendOptions::IK_C)
- .Case("cl", FrontendOptions::IK_OpenCL)
- .Case("c++", FrontendOptions::IK_CXX)
- .Case("objective-c", FrontendOptions::IK_ObjC)
- .Case("objective-c++", FrontendOptions::IK_ObjCXX)
- .Case("cpp-output", FrontendOptions::IK_PreprocessedC)
- .Case("assembler-with-cpp", FrontendOptions::IK_Asm)
- .Case("c++-cpp-output", FrontendOptions::IK_PreprocessedCXX)
- .Case("objective-c-cpp-output", FrontendOptions::IK_PreprocessedObjC)
- .Case("objective-c++-cpp-output", FrontendOptions::IK_PreprocessedObjCXX)
- .Case("c-header", FrontendOptions::IK_C)
- .Case("objective-c-header", FrontendOptions::IK_ObjC)
- .Case("c++-header", FrontendOptions::IK_CXX)
- .Case("objective-c++-header", FrontendOptions::IK_ObjCXX)
- .Case("ast", FrontendOptions::IK_AST)
- .Default(FrontendOptions::IK_None);
- if (DashX == FrontendOptions::IK_None)
+ DashX = llvm::StringSwitch<InputKind>(A->getValue(Args))
+ .Case("c", IK_C)
+ .Case("cl", IK_OpenCL)
+ .Case("c", IK_C)
+ .Case("cl", IK_OpenCL)
+ .Case("c++", IK_CXX)
+ .Case("objective-c", IK_ObjC)
+ .Case("objective-c++", IK_ObjCXX)
+ .Case("cpp-output", IK_PreprocessedC)
+ .Case("assembler-with-cpp", IK_Asm)
+ .Case("c++-cpp-output", IK_PreprocessedCXX)
+ .Case("objective-c-cpp-output", IK_PreprocessedObjC)
+ .Case("objective-c++-cpp-output", IK_PreprocessedObjCXX)
+ .Case("c-header", IK_C)
+ .Case("objective-c-header", IK_ObjC)
+ .Case("c++-header", IK_CXX)
+ .Case("objective-c++-header", IK_ObjCXX)
+ .Case("ast", IK_AST)
+ .Case("ir", IK_LLVM_IR)
+ .Default(IK_None);
+ if (DashX == IK_None)
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue(Args);
}
@@ -1034,8 +1081,8 @@ ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Diagnostic &Diags) {
if (Inputs.empty())
Inputs.push_back("-");
for (unsigned i = 0, e = Inputs.size(); i != e; ++i) {
- FrontendOptions::InputKind IK = DashX;
- if (IK == FrontendOptions::IK_None) {
+ InputKind IK = DashX;
+ if (IK == IK_None) {
IK = FrontendOptions::getInputKindForExtension(
llvm::StringRef(Inputs[i]).rsplit('.').second);
// FIXME: Remove this hack.
@@ -1077,51 +1124,51 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
// Add -I... and -F... options in order.
for (arg_iterator it = Args.filtered_begin(OPT_I, OPT_F),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath(it->getValue(Args), frontend::Angled, true,
- /*IsFramework=*/ it->getOption().matches(OPT_F));
+ Opts.AddPath((*it)->getValue(Args), frontend::Angled, true,
+ /*IsFramework=*/ (*it)->getOption().matches(OPT_F));
// Add -iprefix/-iwith-prefix/-iwithprefixbefore options.
llvm::StringRef Prefix = ""; // FIXME: This isn't the correct default prefix.
for (arg_iterator it = Args.filtered_begin(OPT_iprefix, OPT_iwithprefix,
OPT_iwithprefixbefore),
ie = Args.filtered_end(); it != ie; ++it) {
- if (it->getOption().matches(OPT_iprefix))
- Prefix = it->getValue(Args);
- else if (it->getOption().matches(OPT_iwithprefix))
- Opts.AddPath(Prefix.str() + it->getValue(Args),
+ const Arg *A = *it;
+ if (A->getOption().matches(OPT_iprefix))
+ Prefix = A->getValue(Args);
+ else if (A->getOption().matches(OPT_iwithprefix))
+ Opts.AddPath(Prefix.str() + A->getValue(Args),
frontend::System, false, false);
else
- Opts.AddPath(Prefix.str() + it->getValue(Args),
+ Opts.AddPath(Prefix.str() + A->getValue(Args),
frontend::Angled, false, false);
}
for (arg_iterator it = Args.filtered_begin(OPT_idirafter),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath(it->getValue(Args), frontend::After, true, false);
+ Opts.AddPath((*it)->getValue(Args), frontend::After, true, false);
for (arg_iterator it = Args.filtered_begin(OPT_iquote),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath(it->getValue(Args), frontend::Quoted, true, false);
+ Opts.AddPath((*it)->getValue(Args), frontend::Quoted, true, false);
for (arg_iterator it = Args.filtered_begin(OPT_isystem),
ie = Args.filtered_end(); it != ie; ++it)
- Opts.AddPath(it->getValue(Args), frontend::System, true, false);
+ Opts.AddPath((*it)->getValue(Args), frontend::System, true, false);
// FIXME: Need options for the various environment variables!
}
-static void ParseLangArgs(LangOptions &Opts, ArgList &Args,
- FrontendOptions::InputKind IK,
+static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Diagnostic &Diags) {
// FIXME: Cleanup per-file based stuff.
// Set some properties which depend solely on the input kind; it would be nice
// to move these to the language standard, and have the driver resolve the
// input kind + language standard.
- if (IK == FrontendOptions::IK_Asm) {
+ if (IK == IK_Asm) {
Opts.AsmPreprocessor = 1;
- } else if (IK == FrontendOptions::IK_ObjC ||
- IK == FrontendOptions::IK_ObjCXX ||
- IK == FrontendOptions::IK_PreprocessedObjC ||
- IK == FrontendOptions::IK_PreprocessedObjCXX) {
+ } else if (IK == IK_ObjC ||
+ IK == IK_ObjCXX ||
+ IK == IK_PreprocessedObjC ||
+ IK == IK_PreprocessedObjCXX) {
Opts.ObjC1 = Opts.ObjC2 = 1;
}
@@ -1140,23 +1187,24 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args,
if (LangStd == LangStandard::lang_unspecified) {
// Based on the base language, pick one.
switch (IK) {
- case FrontendOptions::IK_None:
- case FrontendOptions::IK_AST:
+ case IK_None:
+ case IK_AST:
+ case IK_LLVM_IR:
assert(0 && "Invalid input kind!");
- case FrontendOptions::IK_OpenCL:
+ case IK_OpenCL:
LangStd = LangStandard::lang_opencl;
break;
- case FrontendOptions::IK_Asm:
- case FrontendOptions::IK_C:
- case FrontendOptions::IK_PreprocessedC:
- case FrontendOptions::IK_ObjC:
- case FrontendOptions::IK_PreprocessedObjC:
+ case IK_Asm:
+ case IK_C:
+ case IK_PreprocessedC:
+ case IK_ObjC:
+ case IK_PreprocessedObjC:
LangStd = LangStandard::lang_gnu99;
break;
- case FrontendOptions::IK_CXX:
- case FrontendOptions::IK_PreprocessedCXX:
- case FrontendOptions::IK_ObjCXX:
- case FrontendOptions::IK_PreprocessedObjCXX:
+ case IK_CXX:
+ case IK_PreprocessedCXX:
+ case IK_ObjCXX:
+ case IK_PreprocessedObjCXX:
LangStd = LangStandard::lang_gnucxx98;
break;
}
@@ -1222,7 +1270,13 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_invalid_value)
<< Args.getLastArg(OPT_fvisibility)->getAsString(Args) << Vis;
- Opts.OverflowChecking = Args.hasArg(OPT_ftrapv);
+ if (Args.hasArg(OPT_fvisibility_inlines_hidden))
+ Opts.InlineVisibilityHidden = 1;
+
+ if (Args.hasArg(OPT_ftrapv))
+ Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);
+ else if (Args.hasArg(OPT_fwrapv))
+ Opts.setSignedOverflowBehavior(LangOptions::SOB_Defined);
// Mimicking gcc's behavior, trigraphs are only enabled if -trigraphs
// is specified, or -std is set to a conforming mode.
@@ -1270,6 +1324,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.Static = Args.hasArg(OPT_static_define);
Opts.DumpRecordLayouts = Args.hasArg(OPT_fdump_record_layouts);
Opts.DumpVTableLayouts = Args.hasArg(OPT_fdump_vtable_layouts);
+ Opts.SpellChecking = !Args.hasArg(OPT_fno_spell_checking);
Opts.NoBitFieldTypeAlign = Args.hasArg(OPT_fno_bitfield_type_align);
Opts.OptimizeSize = 0;
@@ -1311,10 +1366,10 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
// Add macros from the command line.
for (arg_iterator it = Args.filtered_begin(OPT_D, OPT_U),
ie = Args.filtered_end(); it != ie; ++it) {
- if (it->getOption().matches(OPT_D))
- Opts.addMacroDef(it->getValue(Args));
+ if ((*it)->getOption().matches(OPT_D))
+ Opts.addMacroDef((*it)->getValue(Args));
else
- Opts.addMacroUndef(it->getValue(Args));
+ Opts.addMacroUndef((*it)->getValue(Args));
}
Opts.MacroIncludes = Args.getAllArgValues(OPT_imacros);
@@ -1323,16 +1378,17 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
for (arg_iterator it = Args.filtered_begin(OPT_include, OPT_include_pch,
OPT_include_pth),
ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
// PCH is handled specially; we need to extract the original include path.
- if (it->getOption().matches(OPT_include_pch)) {
+ if (A->getOption().matches(OPT_include_pch)) {
std::string OriginalFile =
- PCHReader::getOriginalSourceFile(it->getValue(Args), Diags);
+ PCHReader::getOriginalSourceFile(A->getValue(Args), Diags);
if (OriginalFile.empty())
continue;
Opts.Includes.push_back(OriginalFile);
} else
- Opts.Includes.push_back(it->getValue(Args));
+ Opts.Includes.push_back(A->getValue(Args));
}
// Include 'altivec.h' if -faltivec option present
@@ -1341,11 +1397,12 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
for (arg_iterator it = Args.filtered_begin(OPT_remap_file),
ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
std::pair<llvm::StringRef,llvm::StringRef> Split =
- llvm::StringRef(it->getValue(Args)).split(';');
+ llvm::StringRef(A->getValue(Args)).split(';');
if (Split.second.empty()) {
- Diags.Report(diag::err_drv_invalid_remap_file) << it->getAsString(Args);
+ Diags.Report(diag::err_drv_invalid_remap_file) << A->getAsString(Args);
continue;
}
@@ -1366,6 +1423,7 @@ static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args) {
using namespace cc1options;
Opts.ABI = Args.getLastArgValue(OPT_target_abi);
+ Opts.CXXABI = Args.getLastArgValue(OPT_cxx_abi);
Opts.CPU = Args.getLastArgValue(OPT_target_cpu);
Opts.Triple = Args.getLastArgValue(OPT_triple);
Opts.Features = Args.getAllArgValues(OPT_target_feature);
@@ -1373,6 +1431,10 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args) {
// Use the host triple if unspecified.
if (Opts.Triple.empty())
Opts.Triple = llvm::sys::getHostTriple();
+
+ // Use the Itanium C++ ABI if unspecified.
+ if (Opts.CXXABI.empty())
+ Opts.CXXABI = "itanium";
}
//
@@ -1395,16 +1457,15 @@ void CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
// Issue errors on unknown arguments.
for (arg_iterator it = Args->filtered_begin(OPT_UNKNOWN),
ie = Args->filtered_end(); it != ie; ++it)
- Diags.Report(diag::err_drv_unknown_argument) << it->getAsString(*Args);
+ Diags.Report(diag::err_drv_unknown_argument) << (*it)->getAsString(*Args);
ParseAnalyzerArgs(Res.getAnalyzerOpts(), *Args, Diags);
ParseCodeGenArgs(Res.getCodeGenOpts(), *Args, Diags);
ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), *Args);
ParseDiagnosticArgs(Res.getDiagnosticOpts(), *Args, Diags);
- FrontendOptions::InputKind DashX =
- ParseFrontendArgs(Res.getFrontendOpts(), *Args, Diags);
+ InputKind DashX = ParseFrontendArgs(Res.getFrontendOpts(), *Args, Diags);
ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), *Args);
- if (DashX != FrontendOptions::IK_AST)
+ if (DashX != IK_AST && DashX != IK_LLVM_IR)
ParseLangArgs(Res.getLangOpts(), *Args, DashX, Diags);
ParsePreprocessorArgs(Res.getPreprocessorOpts(), *Args, Diags);
ParsePreprocessorOutputArgs(Res.getPreprocessorOutputOpts(), *Args);
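
Most of the CompilerInvocation changes above are plumbing; the one semantic change is replacing the boolean OverflowChecking with a three-valued signed-overflow setting. A standalone sketch of the flag mapping, with a hypothetical argument list in place of clang's ArgList:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

enum SignedOverflowBehavior {
  SOB_Undefined, // the C default
  SOB_Defined,   // -fwrapv: two's-complement wrapping is defined
  SOB_Trapping   // -ftrapv: trap on overflow
};

static SignedOverflowBehavior
overflowBehavior(const std::vector<std::string> &Args) {
  auto has = [&](const char *Flag) {
    return std::find(Args.begin(), Args.end(), Flag) != Args.end();
  };
  // Matches the precedence above: -ftrapv is checked first.
  if (has("-ftrapv")) return SOB_Trapping;
  if (has("-fwrapv")) return SOB_Defined;
  return SOB_Undefined;
}

int main() {
  assert(overflowBehavior({"-ftrapv"}) == SOB_Trapping);
  assert(overflowBehavior({"-fwrapv"}) == SOB_Defined);
  assert(overflowBehavior({"-O2"}) == SOB_Undefined);
}
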
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
index 87fc122..dbbf69c 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
@@ -25,25 +25,28 @@ FrontendAction::FrontendAction() : Instance(0) {}
FrontendAction::~FrontendAction() {}
-void FrontendAction::setCurrentFile(llvm::StringRef Value, ASTUnit *AST) {
+void FrontendAction::setCurrentFile(llvm::StringRef Value, InputKind Kind,
+ ASTUnit *AST) {
CurrentFile = Value;
+ CurrentFileKind = Kind;
CurrentASTUnit.reset(AST);
}
bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
llvm::StringRef Filename,
- bool IsAST) {
+ InputKind InputKind) {
assert(!Instance && "Already processing a source file!");
assert(!Filename.empty() && "Unexpected empty filename!");
- setCurrentFile(Filename);
+ setCurrentFile(Filename, InputKind);
setCompilerInstance(&CI);
// AST files follow a very different path, since they share objects via the
// AST unit.
- if (IsAST) {
+ if (InputKind == IK_AST) {
assert(!usesPreprocessorOnly() &&
"Attempt to pass AST file to preprocessor only action!");
- assert(hasASTSupport() && "This action does not have AST support!");
+ assert(hasASTFileSupport() &&
+ "This action does not have AST file support!");
llvm::IntrusiveRefCntPtr<Diagnostic> Diags(&CI.getDiagnostics());
std::string Error;
@@ -51,7 +54,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!AST)
goto failure;
- setCurrentFile(Filename, AST);
+ setCurrentFile(Filename, InputKind, AST);
// Set the shared objects, these are reset when we finish processing the
// file, otherwise the CompilerInstance will happily destroy them.
@@ -72,6 +75,30 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
return true;
}
+ // Set up the file and source managers, if needed.
+ if (!CI.hasFileManager())
+ CI.createFileManager();
+ if (!CI.hasSourceManager())
+ CI.createSourceManager();
+
+ // IR files bypass the rest of initialization.
+ if (InputKind == IK_LLVM_IR) {
+ assert(hasIRSupport() &&
+ "This action does not have IR file support!");
+
+ // Inform the diagnostic client we are processing a source file.
+ CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), 0);
+
+ // Initialize the action.
+ if (!BeginSourceFileAction(CI, Filename))
+ goto failure;
+
+ return true;
+ }
+
+ // Set up the preprocessor.
+ CI.createPreprocessor();
+
// Inform the diagnostic client we are processing a source file.
CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(),
&CI.getPreprocessor());
@@ -84,11 +111,10 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
/// action.
if (!usesPreprocessorOnly()) {
CI.createASTContext();
- CI.setASTConsumer(CreateASTConsumer(CI, Filename));
- if (!CI.hasASTConsumer())
- goto failure;
- /// Use PCH?
+ /// Use PCH? If so, we want the PCHReader active before the consumer
+ /// is created, because the consumer might be interested in the reader
+ /// (e.g. the PCH writer for chaining).
if (!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) {
assert(hasPCHSupport() && "This action does not have PCH support!");
CI.createPCHExternalASTSource(
@@ -96,6 +122,10 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!CI.getASTContext().getExternalSource())
goto failure;
}
+
+ CI.setASTConsumer(CreateASTConsumer(CI, Filename));
+ if (!CI.hasASTConsumer())
+ goto failure;
}
// Initialize builtin info as long as we aren't using an external AST
@@ -119,7 +149,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
}
CI.getDiagnosticClient().EndSourceFile();
- setCurrentFile("");
+ setCurrentFile("", IK_None);
setCompilerInstance(0);
return false;
}
@@ -198,7 +228,7 @@ void FrontendAction::EndSourceFile() {
}
setCompilerInstance(0);
- setCurrentFile("");
+ setCurrentFile("", IK_None);
}
//===----------------------------------------------------------------------===//
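
BeginSourceFile above now dispatches three ways on InputKind: AST files share state through an ASTUnit, LLVM IR files skip preprocessor and AST setup entirely, and everything else runs the full frontend path, with the PCH reader created before the consumer so a chained-PCH writer can see it. A heavily condensed, hypothetical control-flow sketch (not the real signatures):

#include <cassert>
#include <string>

enum InputKind { IK_None, IK_AST, IK_LLVM_IR, IK_C /* ... */ };

// Skeleton of the dispatch; the real method also wires up diagnostic
// clients and handles the failure paths.
static std::string beginSourceFilePath(InputKind Kind) {
  if (Kind == IK_AST)
    return "ast-unit";    // shares objects via the ASTUnit
  // File and source managers are created on demand at this point.
  if (Kind == IK_LLVM_IR)
    return "ir-bypass";   // no preprocessor, no ASTContext
  // createPreprocessor(), then the PCH reader (if any), then the consumer.
  return "full-frontend";
}

int main() {
  assert(beginSourceFilePath(IK_AST) == "ast-unit");
  assert(beginSourceFilePath(IK_LLVM_IR) == "ir-bypass");
  assert(beginSourceFilePath(IK_C) == "full-frontend");
}
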
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
index 6cd960b..3a53dee 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
@@ -9,14 +9,13 @@
#include "clang/Frontend/FrontendActions.h"
#include "clang/AST/ASTConsumer.h"
+#include "clang/Lex/Pragma.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Parse/Parser.h"
#include "clang/Basic/FileManager.h"
-#include "clang/Frontend/AnalysisConsumer.h"
#include "clang/Frontend/ASTConsumers.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
-#include "clang/Frontend/FixItRewriter.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/Utils.h"
#include "llvm/ADT/OwningPtr.h"
@@ -39,13 +38,6 @@ void InitOnlyAction::ExecuteAction() {
// AST Consumer Actions
//===----------------------------------------------------------------------===//
-ASTConsumer *AnalysisAction::CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile) {
- return CreateAnalysisConsumer(CI.getPreprocessor(),
- CI.getFrontendOpts().OutputFile,
- CI.getAnalyzerOpts());
-}
-
ASTConsumer *ASTPrintAction::CreateASTConsumer(CompilerInstance &CI,
llvm::StringRef InFile) {
if (llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, InFile))
@@ -88,17 +80,11 @@ ASTConsumer *GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI,
if (!OS)
return 0;
- if (CI.getFrontendOpts().RelocatablePCH)
- return CreatePCHGenerator(CI.getPreprocessor(), OS, Sysroot.c_str());
-
- return CreatePCHGenerator(CI.getPreprocessor(), OS);
-}
-
-ASTConsumer *HTMLPrintAction::CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile) {
- if (llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, InFile))
- return CreateHTMLPrinter(OS, CI.getPreprocessor());
- return 0;
+ PCHReader *Chain = CI.getInvocation().getFrontendOpts().ChainedPCH ?
+ CI.getPCHReader() : 0;
+ const char *isysroot = CI.getFrontendOpts().RelocatablePCH ?
+ Sysroot.c_str() : 0;
+ return CreatePCHGenerator(CI.getPreprocessor(), OS, Chain, isysroot);
}
ASTConsumer *InheritanceViewAction::CreateASTConsumer(CompilerInstance &CI,
@@ -106,57 +92,6 @@ ASTConsumer *InheritanceViewAction::CreateASTConsumer(CompilerInstance &CI,
return CreateInheritanceViewer(CI.getFrontendOpts().ViewClassInheritance);
}
-FixItAction::FixItAction() {}
-FixItAction::~FixItAction() {}
-
-ASTConsumer *FixItAction::CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile) {
- return new ASTConsumer();
-}
-
-class FixItActionSuffixInserter : public FixItPathRewriter {
- std::string NewSuffix;
-
-public:
- explicit FixItActionSuffixInserter(std::string NewSuffix)
- : NewSuffix(NewSuffix) {}
-
- std::string RewriteFilename(const std::string &Filename) {
- llvm::sys::Path Path(Filename);
- std::string Suffix = Path.getSuffix();
- Path.eraseSuffix();
- Path.appendSuffix(NewSuffix + "." + Suffix);
- return Path.c_str();
- }
-};
-
-bool FixItAction::BeginSourceFileAction(CompilerInstance &CI,
- llvm::StringRef Filename) {
- const FrontendOptions &FEOpts = getCompilerInstance().getFrontendOpts();
- if (!FEOpts.FixItSuffix.empty()) {
- PathRewriter.reset(new FixItActionSuffixInserter(FEOpts.FixItSuffix));
- } else {
- PathRewriter.reset();
- }
- Rewriter.reset(new FixItRewriter(CI.getDiagnostics(), CI.getSourceManager(),
- CI.getLangOpts(), PathRewriter.get()));
- return true;
-}
-
-void FixItAction::EndSourceFileAction() {
- // Otherwise rewrite all files.
- Rewriter->WriteFixedFiles();
-}
-
-ASTConsumer *RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile) {
- if (llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp"))
- return CreateObjCRewriter(InFile, OS,
- CI.getDiagnostics(), CI.getLangOpts(),
- CI.getDiagnosticOpts().NoRewriteMacros);
- return 0;
-}
-
ASTConsumer *SyntaxOnlyAction::CreateASTConsumer(CompilerInstance &CI,
llvm::StringRef InFile) {
return new ASTConsumer();
@@ -223,6 +158,9 @@ void ParseOnlyAction::ExecuteAction() {
void PreprocessOnlyAction::ExecuteAction() {
Preprocessor &PP = getCompilerInstance().getPreprocessor();
+ // Ignore unknown pragmas.
+ PP.AddPragmaHandler(new EmptyPragmaHandler());
+
Token Tok;
// Start parsing the specified input file.
PP.EnterMainSourceFile();
@@ -254,19 +192,3 @@ void PrintPreprocessedAction::ExecuteAction() {
DoPrintPreprocessedInput(CI.getPreprocessor(), OS,
CI.getPreprocessorOutputOpts());
}
-
-void RewriteMacrosAction::ExecuteAction() {
- CompilerInstance &CI = getCompilerInstance();
- llvm::raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile());
- if (!OS) return;
-
- RewriteMacrosInInput(CI.getPreprocessor(), OS);
-}
-
-void RewriteTestAction::ExecuteAction() {
- CompilerInstance &CI = getCompilerInstance();
- llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, getCurrentFile());
- if (!OS) return;
-
- DoRewriteTest(CI.getPreprocessor(), OS);
-}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp
index bd91638..9dfee24 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp
@@ -11,8 +11,7 @@
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
-FrontendOptions::InputKind
-FrontendOptions::getInputKindForExtension(llvm::StringRef Extension) {
+InputKind FrontendOptions::getInputKindForExtension(llvm::StringRef Extension) {
return llvm::StringSwitch<InputKind>(Extension)
.Case("ast", IK_AST)
.Case("c", IK_C)
@@ -27,5 +26,6 @@ FrontendOptions::getInputKindForExtension(llvm::StringRef Extension) {
.Cases("C", "cc", "cp", IK_CXX)
.Cases("cpp", "CPP", "c++", "cxx", "hpp", IK_CXX)
.Case("cl", IK_OpenCL)
+ .Cases("ll", "bc", IK_LLVM_IR)
.Default(IK_C);
}
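
The added .Cases line above routes .ll and .bc files to the new IK_LLVM_IR kind, while unknown extensions still fall back to C. The same mapping as a standalone sketch, with a plain if-chain standing in for llvm::StringSwitch:

#include <cassert>
#include <string>

enum InputKind { IK_C, IK_CXX, IK_AST, IK_OpenCL, IK_LLVM_IR };

static InputKind kindForExtension(const std::string &Ext) {
  if (Ext == "ast")               return IK_AST;
  if (Ext == "ll" || Ext == "bc") return IK_LLVM_IR; // new in this change
  if (Ext == "cl")                return IK_OpenCL;
  if (Ext == "cpp" || Ext == "cxx" || Ext == "cc" || Ext == "C")
    return IK_CXX;
  return IK_C; // unrecognized extensions fall back to C
}

int main() {
  assert(kindForExtension("bc") == IK_LLVM_IR);
  assert(kindForExtension("cxx") == IK_CXX);
  assert(kindForExtension("weird") == IK_C);
}
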
diff --git a/contrib/llvm/tools/clang/lib/Frontend/GeneratePCH.cpp b/contrib/llvm/tools/clang/lib/Frontend/GeneratePCH.cpp
index 6251bac..2f3df94 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/GeneratePCH.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/GeneratePCH.cpp
@@ -32,20 +32,24 @@ namespace {
llvm::raw_ostream *Out;
Sema *SemaPtr;
MemorizeStatCalls *StatCalls; // owned by the FileManager
+ std::vector<unsigned char> Buffer;
+ llvm::BitstreamWriter Stream;
+ PCHWriter Writer;
public:
- explicit PCHGenerator(const Preprocessor &PP,
- const char *isysroot,
- llvm::raw_ostream *Out);
+ PCHGenerator(const Preprocessor &PP, PCHReader *Chain,
+ const char *isysroot, llvm::raw_ostream *Out);
virtual void InitializeSema(Sema &S) { SemaPtr = &S; }
virtual void HandleTranslationUnit(ASTContext &Ctx);
};
}
PCHGenerator::PCHGenerator(const Preprocessor &PP,
+ PCHReader *Chain,
const char *isysroot,
llvm::raw_ostream *OS)
- : PP(PP), isysroot(isysroot), Out(OS), SemaPtr(0), StatCalls(0) {
+ : PP(PP), isysroot(isysroot), Out(OS), SemaPtr(0), StatCalls(0),
+ Stream(Buffer), Writer(Stream, Chain) {
// Install a stat() listener to keep track of all of the stat()
// calls.
@@ -57,11 +61,6 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
if (PP.getDiagnostics().hasErrorOccurred())
return;
- // Write the PCH contents into a buffer
- std::vector<unsigned char> Buffer;
- llvm::BitstreamWriter Stream(Buffer);
- PCHWriter Writer(Stream);
-
// Emit the PCH file
assert(SemaPtr && "No Sema?");
Writer.WritePCH(*SemaPtr, StatCalls, isysroot);
@@ -71,10 +70,14 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
// Make sure it hits disk now.
Out->flush();
+
+ // Free up some memory, in case the process is kept alive.
+ Buffer.clear();
}
ASTConsumer *clang::CreatePCHGenerator(const Preprocessor &PP,
llvm::raw_ostream *OS,
+ PCHReader *Chain,
const char *isysroot) {
- return new PCHGenerator(PP, isysroot, OS);
+ return new PCHGenerator(PP, Chain, isysroot, OS);
}
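
The PCHGenerator change above is as much a lifetime fix as a feature: the bitstream buffer and writer become members built in the constructor, so the writer exists early enough to be chained to a reader, and the buffer is cleared once the output has been flushed. A minimal sketch of that pattern with stand-in types (note that vector::clear() alone keeps the allocation):

#include <cstdio>
#include <vector>

struct Generator {
  // Member rather than local: the writer that fills this must outlive
  // a single HandleTranslationUnit call when chaining.
  std::vector<unsigned char> Buffer;

  void handleTranslationUnit(std::FILE *Out) {
    Buffer.assign(1024, 0); // stand-in for Writer.WritePCH(...)
    std::fwrite(Buffer.data(), 1, Buffer.size(), Out);
    std::fflush(Out);       // make sure it hits disk now
    // Free up some memory, in case the process is kept alive.
    Buffer.clear();
    Buffer.shrink_to_fit();
  }
};

int main() {
  Generator G;
  G.handleTranslationUnit(stdout);
}
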
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
index 9490705..fcfee712 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
@@ -550,6 +550,8 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
System, true, false, false);
AddPath("/lib/gcc/i686-pc-cygwin/3.4.4/include/c++",
System, true, false, false);
+ AddPath("/lib/gcc/i686-pc-cygwin/3.4.4/include/c++/i686-pc-cygwin",
+ System, true, false, false);
break;
case llvm::Triple::MinGW64:
// Try gcc 4.4.0
@@ -564,10 +566,35 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.3.0");
break;
case llvm::Triple::Darwin:
- AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
- "i686-apple-darwin10", "", "x86_64", triple);
- AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0",
- "i686-apple-darwin8", "", "", triple);
+ switch (triple.getArch()) {
+ default: break;
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "powerpc-apple-darwin10", "", "ppc64",
+ triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0",
+ "powerpc-apple-darwin10", "", "ppc64",
+ triple);
+ break;
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "i686-apple-darwin10", "", "x86_64", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0",
+ "i686-apple-darwin8", "", "", triple);
+ break;
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "arm-apple-darwin10", "v7", "", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "arm-apple-darwin10", "v6", "", triple);
+ break;
+ }
break;
case llvm::Triple::DragonFly:
AddPath("/usr/include/c++/4.1", System, true, false, false);
@@ -591,6 +618,8 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
"x86_64-linux-gnu", "32", "", triple);
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.3",
"i486-linux-gnu", "", "64", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.3",
+ "arm-linux-gnueabi", "", "", triple);
// Ubuntu 8.04.4 LTS "Hardy Heron" -- gcc-4.2.4
// Ubuntu 8.04.[0-3] LTS "Hardy Heron" -- gcc-4.2.3
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2",
@@ -607,6 +636,10 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
// Redhat based distros.
//===------------------------------------------------------------------===//
// Fedora 13
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4.4",
+ "x86_64-redhat-linux", "32", "", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4.4",
+ "i686-redhat-linux","", "", triple);
// Fedora 12
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4.3",
"x86_64-redhat-linux", "32", "", triple);
@@ -694,6 +727,11 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) {
// FreeBSD 8.0
// FreeBSD 7.3
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2", "", "", "", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2/backward", "", "", "", triple);
+ break;
+ case llvm::Triple::Minix:
+ AddGnuCPlusPlusIncludePaths("/usr/gnu/include/c++/4.4.3",
+ "", "", "", triple);
break;
case llvm::Triple::Solaris:
// Solaris - Fall through..
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
index 2b35c8e..889b6e5 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
@@ -83,8 +83,8 @@ static void AddImplicitIncludeMacros(MacroBuilder &Builder,
static void AddImplicitIncludePTH(MacroBuilder &Builder, Preprocessor &PP,
llvm::StringRef ImplicitIncludePTH) {
PTHManager *P = PP.getPTHManager();
- assert(P && "No PTHManager.");
- const char *OriginalFile = P->getOriginalSourceFile();
+ // Null check 'P' in the corner case where it couldn't be created.
+ const char *OriginalFile = P ? P->getOriginalSourceFile() : 0;
if (!OriginalFile) {
PP.getDiagnostics().Report(diag::err_fe_pth_file_has_no_source_header)
@@ -195,9 +195,21 @@ static void DefineTypeWidth(llvm::StringRef MacroName, TargetInfo::IntType Ty,
Builder.defineMacro(MacroName, llvm::Twine(TI.getTypeWidth(Ty)));
}
+static void DefineTypeSizeof(llvm::StringRef MacroName, unsigned BitWidth,
+ const TargetInfo &TI, MacroBuilder &Builder) {
+ Builder.defineMacro(MacroName,
+ llvm::Twine(BitWidth / TI.getCharWidth()));
+}
+
static void DefineExactWidthIntType(TargetInfo::IntType Ty,
const TargetInfo &TI, MacroBuilder &Builder) {
int TypeWidth = TI.getTypeWidth(Ty);
+
+ // Use the target specified int64 type, when appropriate, so that [u]int64_t
+ // ends up being defined in terms of the correct type.
+ if (TypeWidth == 64)
+ Ty = TI.getInt64Type();
+
DefineType("__INT" + llvm::Twine(TypeWidth) + "_TYPE__", Ty, Builder);
llvm::StringRef ConstSuffix(TargetInfo::getTypeConstantSuffix(Ty));
@@ -293,6 +305,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (LangOpts.Exceptions)
Builder.defineMacro("__EXCEPTIONS");
+ if (LangOpts.RTTI)
+ Builder.defineMacro("__GXX_RTTI");
if (LangOpts.SjLjExceptions)
Builder.defineMacro("__USING_SJLJ_EXCEPTIONS__");
@@ -350,6 +364,23 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DefineTypeSize("__WCHAR_MAX__", TI.getWCharType(), TI, Builder);
DefineTypeSize("__INTMAX_MAX__", TI.getIntMaxType(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_DOUBLE__", TI.getDoubleWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_FLOAT__", TI.getFloatWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_INT__", TI.getIntWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_LONG__", TI.getLongWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_LONG_DOUBLE__",TI.getLongDoubleWidth(),TI,Builder);
+ DefineTypeSizeof("__SIZEOF_LONG_LONG__", TI.getLongLongWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_POINTER__", TI.getPointerWidth(0), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_SHORT__", TI.getShortWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_PTRDIFF_T__",
+ TI.getTypeWidth(TI.getPtrDiffType(0)), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_SIZE_T__",
+ TI.getTypeWidth(TI.getSizeType()), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_WCHAR_T__",
+ TI.getTypeWidth(TI.getWCharType()), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_WINT_T__",
+ TI.getTypeWidth(TI.getWIntType()), TI, Builder);
+
DefineType("__INTMAX_TYPE__", TI.getIntMaxType(), Builder);
DefineType("__UINTMAX_TYPE__", TI.getUIntMaxType(), Builder);
DefineTypeWidth("__INTMAX_WIDTH__", TI.getIntMaxType(), TI, Builder);
@@ -364,6 +395,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DefineType("__WINT_TYPE__", TI.getWIntType(), Builder);
DefineTypeWidth("__WINT_WIDTH__", TI.getWIntType(), TI, Builder);
DefineTypeWidth("__SIG_ATOMIC_WIDTH__", TI.getSigAtomicType(), TI, Builder);
+ DefineType("__CHAR16_TYPE__", TI.getChar16Type(), Builder);
+ DefineType("__CHAR32_TYPE__", TI.getChar32Type(), Builder);
DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat());
DefineFloatMacros(Builder, "DBL", &TI.getDoubleFormat());
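
DefineTypeSizeof above computes each __SIZEOF_*__ macro as the type's bit width divided by the target's char width, rather than hard-coding byte counts. A standalone sketch of the arithmetic, using a plain map in place of MacroBuilder:

#include <cassert>
#include <map>
#include <string>

static void defineTypeSizeof(std::map<std::string, unsigned> &Macros,
                             const std::string &Name, unsigned BitWidth,
                             unsigned CharWidth) {
  Macros[Name] = BitWidth / CharWidth; // sizeof in target chars
}

int main() {
  std::map<std::string, unsigned> M;
  defineTypeSizeof(M, "__SIZEOF_INT__", 32, 8);
  defineTypeSizeof(M, "__SIZEOF_POINTER__", 64, 8);
  assert(M["__SIZEOF_INT__"] == 4);
  assert(M["__SIZEOF_POINTER__"] == 8);
}
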
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Makefile b/contrib/llvm/tools/clang/lib/Frontend/Makefile
index 9e1a864..3eb4bc9 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/Makefile
+++ b/contrib/llvm/tools/clang/lib/Frontend/Makefile
@@ -7,11 +7,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangFrontend
BUILD_ARCHIVE = 1
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHReader.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHReader.cpp
index 88e9b9d..00aee49 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PCHReader.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PCHReader.cpp
@@ -13,6 +13,7 @@
#include "clang/Frontend/PCHReader.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/PCHDeserializationListener.h"
#include "clang/Frontend/Utils.h"
#include "../Sema/Sema.h" // FIXME: move Sema headers elsewhere
#include "clang/AST/ASTConsumer.h"
@@ -93,7 +94,7 @@ PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
PARSE_LANGOPT_IMPORTANT(Blocks, diag::warn_pch_blocks);
PARSE_LANGOPT_BENIGN(EmitAllDecls);
PARSE_LANGOPT_IMPORTANT(MathErrno, diag::warn_pch_math_errno);
- PARSE_LANGOPT_IMPORTANT(OverflowChecking, diag::warn_pch_overflow_checking);
+ PARSE_LANGOPT_BENIGN(getSignedOverflowBehavior());
PARSE_LANGOPT_IMPORTANT(HeinousExtensions,
diag::warn_pch_heinous_extensions);
// FIXME: Most of the options below are benign if the macro wasn't
@@ -124,6 +125,7 @@ PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
PARSE_LANGOPT_IMPORTANT(OpenCL, diag::warn_pch_opencl);
PARSE_LANGOPT_BENIGN(CatchUndefined);
PARSE_LANGOPT_IMPORTANT(ElideConstructors, diag::warn_pch_elide_constructors);
+ PARSE_LANGOPT_BENIGN(SpellChecking);
#undef PARSE_LANGOPT_IMPORTANT
#undef PARSE_LANGOPT_BENIGN
@@ -139,8 +141,86 @@ bool PCHValidator::ReadTargetTriple(llvm::StringRef Triple) {
return true;
}
-bool PCHValidator::ReadPredefinesBuffer(llvm::StringRef PCHPredef,
- FileID PCHBufferID,
+struct EmptyStringRef {
+ bool operator ()(llvm::StringRef r) const { return r.empty(); }
+};
+struct EmptyBlock {
+ bool operator ()(const PCHPredefinesBlock &r) const { return r.Data.empty(); }
+};
+
+static bool EqualConcatenations(llvm::SmallVector<llvm::StringRef, 2> L,
+ PCHPredefinesBlocks R) {
+ // First, sum up the lengths.
+ unsigned LL = 0, RL = 0;
+ for (unsigned I = 0, N = L.size(); I != N; ++I) {
+ LL += L[I].size();
+ }
+ for (unsigned I = 0, N = R.size(); I != N; ++I) {
+ RL += R[I].Data.size();
+ }
+ if (LL != RL)
+ return false;
+ if (LL == 0 && RL == 0)
+ return true;
+
+ // Kick out empty parts; they confuse the algorithm below.
+ L.erase(std::remove_if(L.begin(), L.end(), EmptyStringRef()), L.end());
+ R.erase(std::remove_if(R.begin(), R.end(), EmptyBlock()), R.end());
+
+ // Do it the hard way. At this point, both vectors must be non-empty.
+ llvm::StringRef LR = L[0], RR = R[0].Data;
+ unsigned LI = 0, RI = 0, LN = L.size(), RN = R.size();
+ for (;;) {
+ // Compare the current pieces.
+ if (LR.size() == RR.size()) {
+ // If they're the same length, it's pretty easy.
+ if (LR != RR)
+ return false;
+ // Both pieces are done, advance.
+ ++LI;
+ ++RI;
+ // If either string is done, they're both done, since they're the same
+ // length.
+ if (LI == LN) {
+ assert(RI == RN && "Strings not the same length after all?");
+ return true;
+ }
+ LR = L[LI];
+ RR = R[RI].Data;
+ } else if (LR.size() < RR.size()) {
+ // Right piece is longer.
+ if (!RR.startswith(LR))
+ return false;
+ ++LI;
+ assert(LI != LN && "Strings not the same length after all?");
+ RR = RR.substr(LR.size());
+ LR = L[LI];
+ } else {
+ // Left piece is longer.
+ if (!LR.startswith(RR))
+ return false;
+ ++RI;
+ assert(RI != RN && "Strings not the same length after all?");
+ LR = LR.substr(RR.size());
+ RR = R[RI].Data;
+ }
+ }
+}
+
+static std::pair<FileID, llvm::StringRef::size_type>
+FindMacro(const PCHPredefinesBlocks &Buffers, llvm::StringRef MacroDef) {
+ std::pair<FileID, llvm::StringRef::size_type> Res;
+ for (unsigned I = 0, N = Buffers.size(); I != N; ++I) {
+ Res.second = Buffers[I].Data.find(MacroDef);
+ if (Res.second != llvm::StringRef::npos) {
+ Res.first = Buffers[I].BufferID;
+ break;
+ }
+ }
+ return Res;
+}
+
+bool PCHValidator::ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
llvm::StringRef OriginalFileName,
std::string &SuggestedPredefines) {
// We are in the context of an implicit include, so the predefines buffer will
@@ -159,9 +239,15 @@ bool PCHValidator::ReadPredefinesBuffer(llvm::StringRef PCHPredef,
return true;
}
- // If the predefines is equal to the joined left and right halves, we're done!
- if (Left.size() + Right.size() == PCHPredef.size() &&
- PCHPredef.startswith(Left) && PCHPredef.endswith(Right))
+ // If the concatenation of all the PCH buffers is equal to the adjusted
+ // command line, we're done.
+ // We build a SmallVector of the command line here, because we'll eventually
+ // need to support an arbitrary number of pieces anyway (when we have chained
+ // PCH reading).
+ llvm::SmallVector<llvm::StringRef, 2> CommandLine;
+ CommandLine.push_back(Left);
+ CommandLine.push_back(Right);
+ if (EqualConcatenations(CommandLine, Buffers))
return false;
SourceManager &SourceMgr = PP.getSourceManager();
@@ -169,7 +255,8 @@ bool PCHValidator::ReadPredefinesBuffer(llvm::StringRef PCHPredef,
// The predefines buffers are different. Determine what the differences are,
// and whether they require us to reject the PCH file.
llvm::SmallVector<llvm::StringRef, 8> PCHLines;
- PCHPredef.split(PCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+ for (unsigned I = 0, N = Buffers.size(); I != N; ++I)
+ Buffers[I].Data.split(PCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
llvm::SmallVector<llvm::StringRef, 8> CmdLineLines;
Left.split(CmdLineLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
@@ -234,10 +321,12 @@ bool PCHValidator::ReadPredefinesBuffer(llvm::StringRef PCHPredef,
<< MacroName;
// Show the definition of this macro within the PCH file.
- llvm::StringRef::size_type Offset = PCHPredef.find(Missing);
- assert(Offset != llvm::StringRef::npos && "Unable to find macro!");
- SourceLocation PCHMissingLoc = SourceMgr.getLocForStartOfFile(PCHBufferID)
- .getFileLocWithOffset(Offset);
+ std::pair<FileID, llvm::StringRef::size_type> MacroLoc =
+ FindMacro(Buffers, Missing);
+ assert(MacroLoc.second!=llvm::StringRef::npos && "Unable to find macro!");
+ SourceLocation PCHMissingLoc =
+ SourceMgr.getLocForStartOfFile(MacroLoc.first)
+ .getFileLocWithOffset(MacroLoc.second);
Reader.Diag(PCHMissingLoc, diag::note_pch_macro_defined_as) << MacroName;
ConflictingDefines = true;
@@ -255,10 +344,12 @@ bool PCHValidator::ReadPredefinesBuffer(llvm::StringRef PCHPredef,
}
// Show the definition of this macro within the PCH file.
- llvm::StringRef::size_type Offset = PCHPredef.find(Missing);
- assert(Offset != llvm::StringRef::npos && "Unable to find macro!");
- SourceLocation PCHMissingLoc = SourceMgr.getLocForStartOfFile(PCHBufferID)
- .getFileLocWithOffset(Offset);
+ std::pair<FileID, llvm::StringRef::size_type> MacroLoc =
+ FindMacro(Buffers, Missing);
+ assert(MacroLoc.second!=llvm::StringRef::npos && "Unable to find macro!");
+ SourceLocation PCHMissingLoc =
+ SourceMgr.getLocForStartOfFile(MacroLoc.first)
+ .getFileLocWithOffset(MacroLoc.second);
Reader.Diag(PCHMissingLoc, diag::note_using_macro_def_from_pch);
}
@@ -323,10 +414,10 @@ void PCHValidator::ReadCounter(unsigned Value) {
PCHReader::PCHReader(Preprocessor &PP, ASTContext *Context,
const char *isysroot)
- : Listener(new PCHValidator(PP, *this)), SourceMgr(PP.getSourceManager()),
- FileMgr(PP.getFileManager()), Diags(PP.getDiagnostics()),
- SemaObj(0), PP(&PP), Context(Context), StatCache(0), Consumer(0),
- IdentifierTableData(0), IdentifierLookupTable(0),
+ : Listener(new PCHValidator(PP, *this)), DeserializationListener(0),
+ SourceMgr(PP.getSourceManager()), FileMgr(PP.getFileManager()),
+ Diags(PP.getDiagnostics()), SemaObj(0), PP(&PP), Context(Context),
+ StatCache(0), Consumer(0), IdentifierTableData(0), IdentifierLookupTable(0),
IdentifierOffsets(0),
MethodPoolLookupTable(0), MethodPoolLookupTableData(0),
TotalSelectorsInMethodPool(0), SelectorOffsets(0),
@@ -342,8 +433,8 @@ PCHReader::PCHReader(Preprocessor &PP, ASTContext *Context,
PCHReader::PCHReader(SourceManager &SourceMgr, FileManager &FileMgr,
Diagnostic &Diags, const char *isysroot)
- : SourceMgr(SourceMgr), FileMgr(FileMgr), Diags(Diags),
- SemaObj(0), PP(0), Context(0), StatCache(0), Consumer(0),
+ : DeserializationListener(0), SourceMgr(SourceMgr), FileMgr(FileMgr),
+ Diags(Diags), SemaObj(0), PP(0), Context(0), StatCache(0), Consumer(0),
IdentifierTableData(0), IdentifierLookupTable(0),
IdentifierOffsets(0),
MethodPoolLookupTable(0), MethodPoolLookupTableData(0),
@@ -360,14 +451,6 @@ PCHReader::PCHReader(SourceManager &SourceMgr, FileManager &FileMgr,
PCHReader::~PCHReader() {}
-Expr *PCHReader::ReadDeclExpr() {
- return dyn_cast_or_null<Expr>(ReadStmt(DeclsCursor));
-}
-
-Expr *PCHReader::ReadTypeExpr() {
- return dyn_cast_or_null<Expr>(ReadStmt(DeclsCursor));
-}
-
namespace {
class PCHMethodPoolLookupTrait {
@@ -616,27 +699,18 @@ void PCHReader::Error(const char *Msg) {
Diag(diag::err_fe_pch_malformed) << Msg;
}
-/// \brief Check the contents of the predefines buffer against the
-/// contents of the predefines buffer used to build the PCH file.
-///
-/// The contents of the two predefines buffers should be the same. If
-/// not, then some command-line option changed the preprocessor state
-/// and we must reject the PCH file.
-///
-/// \param PCHPredef The start of the predefines buffer in the PCH
-/// file.
-///
-/// \param PCHPredefLen The length of the predefines buffer in the PCH
-/// file.
+/// \brief Check the contents of the concatenation of all predefines buffers in
+/// the PCH chain against the contents of the predefines buffer of the current
+/// compiler invocation.
///
-/// \param PCHBufferID The FileID for the PCH predefines buffer.
+/// The contents should be the same. If not, then some command-line option
+/// changed the preprocessor state and we probably have to reject the PCH file.
///
/// \returns true if there was a mismatch (in which case the PCH file
/// should be ignored), or false otherwise.
-bool PCHReader::CheckPredefinesBuffer(llvm::StringRef PCHPredef,
- FileID PCHBufferID) {
+bool PCHReader::CheckPredefinesBuffers() {
if (Listener)
- return Listener->ReadPredefinesBuffer(PCHPredef, PCHBufferID,
+ return Listener->ReadPredefinesBuffer(PCHPredefinesBuffers,
ActualOriginalFileName,
SuggestedPredefines);
return false;
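
A minimal sketch of the piecewise comparison a helper like EqualConcatenations has to perform: walk two lists of fragments in lockstep and compare overlapping runs, never materializing either concatenation. This is an illustration of the technique only, with std::string_view standing in for llvm::StringRef; it is not the actual clang helper.

#include <algorithm>
#include <string_view>
#include <vector>

// Compare the concatenation of L against the concatenation of R without
// building either string: advance a cursor through each list of pieces.
static bool EqualConcatenations(const std::vector<std::string_view> &L,
                                const std::vector<std::string_view> &R) {
  size_t li = 0, ri = 0;  // current piece on each side
  size_t lo = 0, ro = 0;  // offset within the current piece
  for (;;) {
    // Skip pieces that are fully consumed.
    while (li != L.size() && lo == L[li].size()) { ++li; lo = 0; }
    while (ri != R.size() && ro == R[ri].size()) { ++ri; ro = 0; }
    if (li == L.size() || ri == R.size())
      return li == L.size() && ri == R.size();  // equal iff both ended
    // Compare the longest run available on both sides.
    size_t n = std::min(L[li].size() - lo, R[ri].size() - ro);
    if (L[li].substr(lo, n) != R[ri].substr(ro, n))
      return false;
    lo += n; ro += n;
  }
}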
@@ -667,16 +741,17 @@ bool PCHReader::ParseLineTable(llvm::SmallVectorImpl<uint64_t> &Record) {
// Parse the line entries
std::vector<LineEntry> Entries;
while (Idx < Record.size()) {
- int FID = FileIDs[Record[Idx++]];
+ int FID = Record[Idx++];
// Extract the line entries
unsigned NumEntries = Record[Idx++];
+ assert(NumEntries && "NumEntries is zero");
Entries.clear();
Entries.reserve(NumEntries);
for (unsigned I = 0; I != NumEntries; ++I) {
unsigned FileOffset = Record[Idx++];
unsigned LineNo = Record[Idx++];
- int FilenameID = Record[Idx++];
+ int FilenameID = FileIDs[Record[Idx++]];
SrcMgr::CharacteristicKind FileKind
= (SrcMgr::CharacteristicKind)Record[Idx++];
unsigned IncludeOffset = Record[Idx++];
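
The hunk above swaps which value gets remapped: the per-block file ID is stored directly, while the per-entry filename index is what must be translated through the FileIDs table built for this PCH. A cut-down sketch of the corrected decode loop; the record layout, LineEntry, and FileIDs here are simplified stand-ins, not clang's actual structures.

#include <cassert>
#include <cstdint>
#include <vector>

struct LineEntry { unsigned FileOffset, LineNo; int FilenameID; };

std::vector<LineEntry> decodeEntries(const std::vector<uint64_t> &Record,
                                     const std::vector<int> &FileIDs) {
  size_t Idx = 0;
  std::vector<LineEntry> Entries;
  unsigned NumEntries = static_cast<unsigned>(Record[Idx++]);
  assert(NumEntries && "empty line-entry block");
  Entries.reserve(NumEntries);
  for (unsigned I = 0; I != NumEntries; ++I) {
    unsigned FileOffset = static_cast<unsigned>(Record[Idx++]);
    unsigned LineNo = static_cast<unsigned>(Record[Idx++]);
    // The stored value indexes the PCH-local filename table, so it must
    // be remapped; copying it raw (the old code) aliases unrelated files.
    int FilenameID = FileIDs[Record[Idx++]];
    Entries.push_back({FileOffset, LineNo, FilenameID});
  }
  return Entries;
}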
@@ -964,9 +1039,11 @@ PCHReader::PCHReadResult PCHReader::ReadSLocEntryRecord(unsigned ID) {
FileID BufferID = SourceMgr.createFileIDForMemBuffer(Buffer, ID, Offset);
if (strcmp(Name, "<built-in>") == 0) {
- PCHPredefinesBufferID = BufferID;
- PCHPredefines = BlobStart;
- PCHPredefinesLen = BlobLen - 1;
+ PCHPredefinesBlock Block = {
+ BufferID,
+ llvm::StringRef(BlobStart, BlobLen - 1)
+ };
+ PCHPredefinesBuffers.push_back(Block);
}
break;
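
With predefines now collected as a list of (buffer, data) blocks, lookups such as the FindMacro calls earlier in this diff must report which block a string lives in, not just an offset. A self-contained sketch of that two-part result, under simplified types (unsigned for FileID, std::string_view for llvm::StringRef):

#include <string_view>
#include <utility>
#include <vector>

struct PredefinesBlock {
  unsigned BufferID;            // stand-in for clang's FileID
  std::string_view Data;
};

std::pair<unsigned, size_t>
FindMacro(const std::vector<PredefinesBlock> &Buffers, std::string_view Macro) {
  for (const PredefinesBlock &B : Buffers) {
    size_t Pos = B.Data.find(Macro);
    if (Pos != std::string_view::npos)
      return {B.BufferID, Pos};           // found: block + offset within it
  }
  return {0, std::string_view::npos};     // not found in any block
}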
@@ -1512,6 +1589,22 @@ PCHReader::ReadPCHBlock() {
ExtVectorDecls.swap(Record);
break;
+ case pch::VTABLE_USES:
+ if (!VTableUses.empty()) {
+ Error("duplicate VTABLE_USES record in PCH file");
+ return Failure;
+ }
+ VTableUses.swap(Record);
+ break;
+
+ case pch::DYNAMIC_CLASSES:
+ if (!DynamicClasses.empty()) {
+ Error("duplicate DYNAMIC_CLASSES record in PCH file");
+ return Failure;
+ }
+ DynamicClasses.swap(Record);
+ break;
+
case pch::ORIGINAL_FILE_NAME:
ActualOriginalFileName.assign(BlobStart, BlobLen);
OriginalFileName = ActualOriginalFileName;
@@ -1626,8 +1719,7 @@ PCHReader::PCHReadResult PCHReader::ReadPCH(const std::string &FileName) {
}
// Check the predefines buffer.
- if (CheckPredefinesBuffer(llvm::StringRef(PCHPredefines, PCHPredefinesLen),
- PCHPredefinesBufferID))
+ if (CheckPredefinesBuffers())
return IgnorePCH;
if (PP) {
@@ -1693,7 +1785,7 @@ void PCHReader::InitializeContext(ASTContext &Ctx) {
PP->setExternalSource(this);
// Load the translation unit declaration
- ReadDeclRecord(DeclOffsets[0], 0);
+ GetTranslationUnitDecl();
// Load the special types.
Context->setBuiltinVaListType(
@@ -1776,6 +1868,9 @@ void PCHReader::InitializeContext(ASTContext &Ctx) {
Context->ObjCSelRedefinitionType = GetType(ObjCSelRedef);
if (unsigned String = SpecialTypes[pch::SPECIAL_TYPE_NS_CONSTANT_STRING])
Context->setNSConstantStringType(GetType(String));
+
+ if (SpecialTypes[pch::SPECIAL_TYPE_INT128_INSTALLED])
+ Context->setInt128Installed();
}
/// \brief Retrieve the name of the original source file name
@@ -1915,7 +2010,8 @@ bool PCHReader::ParseLanguageOptions(
PARSE_LANGOPT(Blocks);
PARSE_LANGOPT(EmitAllDecls);
PARSE_LANGOPT(MathErrno);
- PARSE_LANGOPT(OverflowChecking);
+ LangOpts.setSignedOverflowBehavior((LangOptions::SignedOverflowBehaviorTy)
+ Record[Idx++]);
PARSE_LANGOPT(HeinousExtensions);
PARSE_LANGOPT(Optimize);
PARSE_LANGOPT(OptimizeSize);
@@ -1926,13 +2022,10 @@ bool PCHReader::ParseLanguageOptions(
PARSE_LANGOPT(AccessControl);
PARSE_LANGOPT(CharIsSigned);
PARSE_LANGOPT(ShortWChar);
- LangOpts.setGCMode((LangOptions::GCMode)Record[Idx]);
- ++Idx;
- LangOpts.setVisibilityMode((LangOptions::VisibilityMode)Record[Idx]);
- ++Idx;
+ LangOpts.setGCMode((LangOptions::GCMode)Record[Idx++]);
+ LangOpts.setVisibilityMode((LangOptions::VisibilityMode)Record[Idx++]);
LangOpts.setStackProtectorMode((LangOptions::StackProtectorMode)
- Record[Idx]);
- ++Idx;
+ Record[Idx++]);
PARSE_LANGOPT(InstantiationDepth);
PARSE_LANGOPT(OpenCL);
PARSE_LANGOPT(CatchUndefined);
@@ -1959,6 +2052,8 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) {
// after reading this type.
SavedStreamPosition SavedPosition(DeclsCursor);
+ ReadingKindTracker ReadingKind(Read_Type, *this);
+
// Note that we are loading a type record.
LoadingTypeOrDecl Loading(*this);
@@ -2022,7 +2117,7 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) {
}
case pch::TYPE_MEMBER_POINTER: {
- if (Record.size() != 1) {
+ if (Record.size() != 2) {
Error("Incorrect encoding of member pointer type");
return QualType();
}
@@ -2054,26 +2149,26 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) {
unsigned IndexTypeQuals = Record[2];
SourceLocation LBLoc = SourceLocation::getFromRawEncoding(Record[3]);
SourceLocation RBLoc = SourceLocation::getFromRawEncoding(Record[4]);
- return Context->getVariableArrayType(ElementType, ReadTypeExpr(),
+ return Context->getVariableArrayType(ElementType, ReadExpr(),
ASM, IndexTypeQuals,
SourceRange(LBLoc, RBLoc));
}
case pch::TYPE_VECTOR: {
- if (Record.size() != 4) {
+ if (Record.size() != 3) {
Error("incorrect encoding of vector type in PCH file");
return QualType();
}
QualType ElementType = GetType(Record[0]);
unsigned NumElements = Record[1];
- bool AltiVec = Record[2];
- bool Pixel = Record[3];
- return Context->getVectorType(ElementType, NumElements, AltiVec, Pixel);
+ unsigned AltiVecSpec = Record[2];
+ return Context->getVectorType(ElementType, NumElements,
+ (VectorType::AltiVecSpecific)AltiVecSpec);
}
case pch::TYPE_EXT_VECTOR: {
- if (Record.size() != 4) {
+ if (Record.size() != 3) {
Error("incorrect encoding of extended vector type in PCH file");
return QualType();
}
@@ -2123,15 +2218,18 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) {
return Context->getTypeDeclType(
cast<UnresolvedUsingTypenameDecl>(GetDecl(Record[0])));
- case pch::TYPE_TYPEDEF:
- if (Record.size() != 1) {
+ case pch::TYPE_TYPEDEF: {
+ if (Record.size() != 2) {
Error("incorrect encoding of typedef type");
return QualType();
}
- return Context->getTypeDeclType(cast<TypedefDecl>(GetDecl(Record[0])));
+ TypedefDecl *Decl = cast<TypedefDecl>(GetDecl(Record[0]));
+ QualType Canonical = GetType(Record[1]);
+ return Context->getTypedefType(Decl, Canonical);
+ }
case pch::TYPE_TYPEOF_EXPR:
- return Context->getTypeOfExprType(ReadTypeExpr());
+ return Context->getTypeOfExprType(ReadExpr());
case pch::TYPE_TYPEOF: {
if (Record.size() != 1) {
@@ -2143,32 +2241,36 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) {
}
case pch::TYPE_DECLTYPE:
- return Context->getDecltypeType(ReadTypeExpr());
+ return Context->getDecltypeType(ReadExpr());
- case pch::TYPE_RECORD:
- if (Record.size() != 1) {
+ case pch::TYPE_RECORD: {
+ if (Record.size() != 2) {
Error("incorrect encoding of record type");
return QualType();
}
- return Context->getTypeDeclType(cast<RecordDecl>(GetDecl(Record[0])));
+ bool IsDependent = Record[0];
+ QualType T = Context->getRecordType(cast<RecordDecl>(GetDecl(Record[1])));
+ T->Dependent = IsDependent;
+ return T;
+ }
- case pch::TYPE_ENUM:
- if (Record.size() != 1) {
+ case pch::TYPE_ENUM: {
+ if (Record.size() != 2) {
Error("incorrect encoding of enum type");
return QualType();
}
- return Context->getTypeDeclType(cast<EnumDecl>(GetDecl(Record[0])));
+ bool IsDependent = Record[0];
+ QualType T = Context->getEnumType(cast<EnumDecl>(GetDecl(Record[1])));
+ T->Dependent = IsDependent;
+ return T;
+ }
case pch::TYPE_ELABORATED: {
- if (Record.size() != 2) {
- Error("incorrect encoding of elaborated type");
- return QualType();
- }
- unsigned Tag = Record[1];
- // FIXME: Deserialize the qualifier (C++ only)
- return Context->getElaboratedType((ElaboratedTypeKeyword) Tag,
- /* NNS */ 0,
- GetType(Record[0]));
+ unsigned Idx = 0;
+ ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx);
+ QualType NamedType = GetType(Record[Idx++]);
+ return Context->getElaboratedType(Keyword, NNS, NamedType);
}
case pch::TYPE_OBJC_INTERFACE: {
@@ -2205,7 +2307,77 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) {
case pch::TYPE_INJECTED_CLASS_NAME: {
CXXRecordDecl *D = cast<CXXRecordDecl>(GetDecl(Record[0]));
QualType TST = GetType(Record[1]); // probably derivable
- return Context->getInjectedClassNameType(D, TST);
+ // FIXME: ASTContext::getInjectedClassNameType is not currently suitable
+ // for PCH reading; too many interdependencies.
+ return
+ QualType(new (*Context, TypeAlignment) InjectedClassNameType(D, TST), 0);
+ }
+
+ case pch::TYPE_TEMPLATE_TYPE_PARM: {
+ unsigned Idx = 0;
+ unsigned Depth = Record[Idx++];
+ unsigned Index = Record[Idx++];
+ bool Pack = Record[Idx++];
+ IdentifierInfo *Name = GetIdentifierInfo(Record, Idx);
+ return Context->getTemplateTypeParmType(Depth, Index, Pack, Name);
+ }
+
+ case pch::TYPE_DEPENDENT_NAME: {
+ unsigned Idx = 0;
+ ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx);
+ const IdentifierInfo *Name = this->GetIdentifierInfo(Record, Idx);
+ QualType Canon = GetType(Record[Idx++]);
+ return Context->getDependentNameType(Keyword, NNS, Name, Canon);
+ }
+
+ case pch::TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION: {
+ unsigned Idx = 0;
+ ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx);
+ const IdentifierInfo *Name = this->GetIdentifierInfo(Record, Idx);
+ unsigned NumArgs = Record[Idx++];
+ llvm::SmallVector<TemplateArgument, 8> Args;
+ Args.reserve(NumArgs);
+ while (NumArgs--)
+ Args.push_back(ReadTemplateArgument(Record, Idx));
+ return Context->getDependentTemplateSpecializationType(Keyword, NNS, Name,
+ Args.size(), Args.data());
+ }
+
+ case pch::TYPE_DEPENDENT_SIZED_ARRAY: {
+ unsigned Idx = 0;
+
+ // ArrayType
+ QualType ElementType = GetType(Record[Idx++]);
+ ArrayType::ArraySizeModifier ASM
+ = (ArrayType::ArraySizeModifier)Record[Idx++];
+ unsigned IndexTypeQuals = Record[Idx++];
+
+ // DependentSizedArrayType
+ Expr *NumElts = ReadExpr();
+ SourceRange Brackets = ReadSourceRange(Record, Idx);
+
+ return Context->getDependentSizedArrayType(ElementType, NumElts, ASM,
+ IndexTypeQuals, Brackets);
+ }
+
+ case pch::TYPE_TEMPLATE_SPECIALIZATION: {
+ unsigned Idx = 0;
+ bool IsDependent = Record[Idx++];
+ TemplateName Name = ReadTemplateName(Record, Idx);
+ llvm::SmallVector<TemplateArgument, 8> Args;
+ ReadTemplateArgumentList(Args, Record, Idx);
+ QualType Canon = GetType(Record[Idx++]);
+ QualType T;
+ if (Canon.isNull())
+ T = Context->getCanonicalTemplateSpecializationType(Name, Args.data(),
+ Args.size());
+ else
+ T = Context->getTemplateSpecializationType(Name, Args.data(),
+ Args.size(), Canon);
+ T->Dependent = IsDependent;
+ return T;
}
}
// Suppress a GCC warning
@@ -2272,7 +2444,7 @@ void TypeLocReader::VisitArrayTypeLoc(ArrayTypeLoc TL) {
TL.setLBracketLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
TL.setRBracketLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
if (Record[Idx++])
- TL.setSizeExpr(Reader.ReadDeclExpr());
+ TL.setSizeExpr(Reader.ReadExpr());
else
TL.setSizeExpr(0);
}
@@ -2367,6 +2539,18 @@ void TypeLocReader::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
TL.setQualifierRange(Reader.ReadSourceRange(Record, Idx));
TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
}
+void TypeLocReader::VisitDependentTemplateSpecializationTypeLoc(
+ DependentTemplateSpecializationTypeLoc TL) {
+ TL.setKeywordLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setQualifierRange(Reader.ReadSourceRange(Record, Idx));
+ TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setLAngleLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TL.setRAngleLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
+ TL.setArgLocInfo(I,
+ Reader.GetTemplateArgumentLocInfo(TL.getTypePtr()->getArg(I).getKind(),
+ Record, Idx));
+}
void TypeLocReader::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
}
@@ -2443,8 +2627,12 @@ QualType PCHReader::GetType(pch::TypeID ID) {
Index -= pch::NUM_PREDEF_TYPE_IDS;
//assert(Index < TypesLoaded.size() && "Type index out-of-range");
- if (TypesLoaded[Index].isNull())
+ if (TypesLoaded[Index].isNull()) {
TypesLoaded[Index] = ReadTypeRecord(TypeOffsets[Index]);
+ TypesLoaded[Index]->setFromPCH();
+ if (DeserializationListener)
+ DeserializationListener->TypeRead(ID, TypesLoaded[Index]);
+ }
return TypesLoaded[Index].withFastQualifiers(FastQuals);
}
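
The pattern here, deserializing a slot at most once and notifying an observer exactly when it first materializes, generalizes beyond types. A compact sketch with illustrative names throughout (LazyTable, ReadRecord, Listener are not clang APIs):

#include <cstdint>
#include <functional>
#include <vector>

template <typename T>
struct LazyTable {
  std::vector<T *> Loaded;                       // pre-sized; null until used
  std::function<T *(uint32_t)> ReadRecord;       // deserializer
  std::function<void(uint32_t, T *)> Listener;   // may be empty

  T *get(uint32_t ID) {
    if (!Loaded[ID]) {
      Loaded[ID] = ReadRecord(ID);
      if (Listener)                // notify only on first deserialization
        Listener(ID, Loaded[ID]);
    }
    return Loaded[ID];
  }
};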
@@ -2455,16 +2643,13 @@ PCHReader::GetTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
unsigned &Index) {
switch (Kind) {
case TemplateArgument::Expression:
- return ReadDeclExpr();
+ return ReadExpr();
case TemplateArgument::Type:
return GetTypeSourceInfo(Record, Index);
case TemplateArgument::Template: {
- SourceLocation
- QualStart = SourceLocation::getFromRawEncoding(Record[Index++]),
- QualEnd = SourceLocation::getFromRawEncoding(Record[Index++]),
- TemplateNameLoc = SourceLocation::getFromRawEncoding(Record[Index++]);
- return TemplateArgumentLocInfo(SourceRange(QualStart, QualEnd),
- TemplateNameLoc);
+ SourceRange QualifierRange = ReadSourceRange(Record, Index);
+ SourceLocation TemplateNameLoc = ReadSourceLocation(Record, Index);
+ return TemplateArgumentLocInfo(QualifierRange, TemplateNameLoc);
}
case TemplateArgument::Null:
case TemplateArgument::Integral:
@@ -2476,6 +2661,32 @@ PCHReader::GetTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
return TemplateArgumentLocInfo();
}
+TemplateArgumentLoc
+PCHReader::ReadTemplateArgumentLoc(const RecordData &Record, unsigned &Index) {
+ TemplateArgument Arg = ReadTemplateArgument(Record, Index);
+
+ if (Arg.getKind() == TemplateArgument::Expression) {
+ if (Record[Index++]) // bool InfoHasSameExpr.
+ return TemplateArgumentLoc(Arg, TemplateArgumentLocInfo(Arg.getAsExpr()));
+ }
+ return TemplateArgumentLoc(Arg, GetTemplateArgumentLocInfo(Arg.getKind(),
+ Record, Index));
+}
+
+Decl *PCHReader::GetExternalDecl(uint32_t ID) {
+ return GetDecl(ID);
+}
+
+TranslationUnitDecl *PCHReader::GetTranslationUnitDecl() {
+ if (!DeclsLoaded[0]) {
+ ReadDeclRecord(DeclOffsets[0], 0);
+ if (DeserializationListener)
+ DeserializationListener->DeclRead(0, DeclsLoaded[0]);
+ }
+
+ return cast<TranslationUnitDecl>(DeclsLoaded[0]);
+}
+
Decl *PCHReader::GetDecl(pch::DeclID ID) {
if (ID == 0)
return 0;
@@ -2486,8 +2697,11 @@ Decl *PCHReader::GetDecl(pch::DeclID ID) {
}
unsigned Index = ID - 1;
- if (!DeclsLoaded[Index])
+ if (!DeclsLoaded[Index]) {
ReadDeclRecord(DeclOffsets[Index], Index);
+ if (DeserializationListener)
+ DeserializationListener->DeclRead(ID, DeclsLoaded[Index]);
+ }
return DeclsLoaded[Index];
}
@@ -2497,15 +2711,15 @@ Decl *PCHReader::GetDecl(pch::DeclID ID) {
/// This operation will read a new statement from the external
/// source each time it is called, and is meant to be used via a
/// LazyOffsetPtr (which is used by Decls for the body of functions, etc).
-Stmt *PCHReader::GetDeclStmt(uint64_t Offset) {
+Stmt *PCHReader::GetExternalDeclStmt(uint64_t Offset) {
// Since we know that this statement is part of a decl, make sure to use the
// decl cursor to read it.
DeclsCursor.JumpToBit(Offset);
- return ReadStmt(DeclsCursor);
+ return ReadStmtFromStream(DeclsCursor);
}
-bool PCHReader::ReadDeclsLexicallyInContext(DeclContext *DC,
- llvm::SmallVectorImpl<pch::DeclID> &Decls) {
+bool PCHReader::FindExternalLexicalDecls(const DeclContext *DC,
+ llvm::SmallVectorImpl<Decl*> &Decls) {
assert(DC->hasExternalLexicalStorage() &&
"DeclContext has no lexical decls in storage");
@@ -2531,20 +2745,22 @@ bool PCHReader::ReadDeclsLexicallyInContext(DeclContext *DC,
}
// Load all of the declaration IDs
- Decls.clear();
- Decls.insert(Decls.end(), Record.begin(), Record.end());
+ for (RecordData::iterator I = Record.begin(), E = Record.end(); I != E; ++I)
+ Decls.push_back(GetDecl(*I));
++NumLexicalDeclContextsRead;
return false;
}
-bool PCHReader::ReadDeclsVisibleInContext(DeclContext *DC,
- llvm::SmallVectorImpl<VisibleDeclaration> &Decls) {
+DeclContext::lookup_result
+PCHReader::FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) {
assert(DC->hasExternalVisibleStorage() &&
"DeclContext has no visible decls in storage");
uint64_t Offset = DeclContextOffsets[DC].second;
if (Offset == 0) {
Error("DeclContext has no visible decls in storage");
- return true;
+ return DeclContext::lookup_result(DeclContext::lookup_iterator(),
+ DeclContext::lookup_iterator());
}
// Keep track of where we are in the stream, then jump back there
@@ -2559,13 +2775,16 @@ bool PCHReader::ReadDeclsVisibleInContext(DeclContext *DC,
unsigned RecCode = DeclsCursor.ReadRecord(Code, Record);
if (RecCode != pch::DECL_CONTEXT_VISIBLE) {
Error("Expected visible block");
- return true;
+ return DeclContext::lookup_result(DeclContext::lookup_iterator(),
+ DeclContext::lookup_iterator());
}
- if (Record.size() == 0)
- return false;
-
- Decls.clear();
+ llvm::SmallVector<VisibleDeclaration, 64> Decls;
+ if (Record.empty()) {
+ SetExternalVisibleDecls(DC, Decls);
+ return DeclContext::lookup_result(DeclContext::lookup_iterator(),
+ DeclContext::lookup_iterator());
+ }
unsigned Idx = 0;
while (Idx < Record.size()) {
@@ -2580,7 +2799,18 @@ bool PCHReader::ReadDeclsVisibleInContext(DeclContext *DC,
}
++NumVisibleDeclContextsRead;
- return false;
+
+ SetExternalVisibleDecls(DC, Decls);
+ return const_cast<DeclContext*>(DC)->lookup(Name);
+}
+
+void PCHReader::PassInterestingDeclsToConsumer() {
+ assert(Consumer);
+ while (!InterestingDecls.empty()) {
+ DeclGroupRef DG(InterestingDecls.front());
+ InterestingDecls.pop_front();
+ Consumer->HandleTopLevelDecl(DG);
+ }
}
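
PassInterestingDeclsToConsumer drains a queue rather than iterating a vector because HandleTopLevelDecl can itself trigger deserialization that enqueues further decls. A minimal sketch of that re-entrancy-safe drain, with all names illustrative:

#include <deque>

struct Decl {};
struct Consumer { void HandleTopLevelDecl(Decl *) {} };

struct ReaderQueue {
  std::deque<Decl *> InterestingDecls;
  Consumer *C = nullptr;

  void noteInteresting(Decl *D) { InterestingDecls.push_back(D); }

  // Called only when no type/decl load is in flight.
  void drain() {
    while (!InterestingDecls.empty()) {
      Decl *D = InterestingDecls.front();
      InterestingDecls.pop_front();  // pop first: the callback may enqueue
      C->HandleTopLevelDecl(D);      // more interesting decls
    }
  }
};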
void PCHReader::StartTranslationUnit(ASTConsumer *Consumer) {
@@ -2590,15 +2820,12 @@ void PCHReader::StartTranslationUnit(ASTConsumer *Consumer) {
return;
for (unsigned I = 0, N = ExternalDefinitions.size(); I != N; ++I) {
- // Force deserialization of this decl, which will cause it to be passed to
- // the consumer (or queued).
+ // Force deserialization of this decl, which will cause it to be queued for
+ // passing to the consumer.
GetDecl(ExternalDefinitions[I]);
}
- for (unsigned I = 0, N = InterestingDecls.size(); I != N; ++I) {
- DeclGroupRef DG(InterestingDecls[I]);
- Consumer->HandleTopLevelDecl(DG);
- }
+ PassInterestingDeclsToConsumer();
}
void PCHReader::PrintStats() {
@@ -2708,6 +2935,26 @@ void PCHReader::InitializeSema(Sema &S) {
for (unsigned I = 0, N = ExtVectorDecls.size(); I != N; ++I)
SemaObj->ExtVectorDecls.push_back(
cast<TypedefDecl>(GetDecl(ExtVectorDecls[I])));
+
+ // FIXME: Do VTable uses and dynamic classes deserialize too much?
+ // Can we cut them down before writing them?
+
+ // If there were any VTable uses, deserialize the information and add it
+ // to Sema's vector and map of VTable uses.
+ unsigned Idx = 0;
+ for (unsigned I = 0, N = VTableUses[Idx++]; I != N; ++I) {
+ CXXRecordDecl *Class = cast<CXXRecordDecl>(GetDecl(VTableUses[Idx++]));
+ SourceLocation Loc = ReadSourceLocation(VTableUses, Idx);
+ bool DefinitionRequired = VTableUses[Idx++];
+ SemaObj->VTableUses.push_back(std::make_pair(Class, Loc));
+ SemaObj->VTablesUsed[Class] = DefinitionRequired;
+ }
+
+ // If there were any dynamic classes declarations, deserialize them
+ // and add them to Sema's vector of such declarations.
+ for (unsigned I = 0, N = DynamicClasses.size(); I != N; ++I)
+ SemaObj->DynamicClasses.push_back(
+ cast<CXXRecordDecl>(GetDecl(DynamicClasses[I])));
}
IdentifierInfo* PCHReader::get(const char *NameStart, const char *NameEnd) {
@@ -2853,11 +3100,11 @@ Selector PCHReader::DecodeSelector(unsigned ID) {
return SelectorsLoaded[Index];
}
-Selector PCHReader::GetSelector(uint32_t ID) {
+Selector PCHReader::GetExternalSelector(uint32_t ID) {
return DecodeSelector(ID);
}
-uint32_t PCHReader::GetNumKnownSelectors() {
+uint32_t PCHReader::GetNumExternalSelectors() {
return TotalNumSelectors + 1;
}
@@ -2901,6 +3148,126 @@ PCHReader::ReadDeclarationName(const RecordData &Record, unsigned &Idx) {
return DeclarationName();
}
+TemplateName
+PCHReader::ReadTemplateName(const RecordData &Record, unsigned &Idx) {
+ TemplateName::NameKind Kind = (TemplateName::NameKind)Record[Idx++];
+ switch (Kind) {
+ case TemplateName::Template:
+ return TemplateName(cast_or_null<TemplateDecl>(GetDecl(Record[Idx++])));
+
+ case TemplateName::OverloadedTemplate: {
+ unsigned size = Record[Idx++];
+ UnresolvedSet<8> Decls;
+ while (size--)
+ Decls.addDecl(cast<NamedDecl>(GetDecl(Record[Idx++])));
+
+ return Context->getOverloadedTemplateName(Decls.begin(), Decls.end());
+ }
+
+ case TemplateName::QualifiedTemplate: {
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx);
+ bool hasTemplKeyword = Record[Idx++];
+ TemplateDecl *Template = cast<TemplateDecl>(GetDecl(Record[Idx++]));
+ return Context->getQualifiedTemplateName(NNS, hasTemplKeyword, Template);
+ }
+
+ case TemplateName::DependentTemplate: {
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx);
+ if (Record[Idx++]) // isIdentifier
+ return Context->getDependentTemplateName(NNS,
+ GetIdentifierInfo(Record, Idx));
+ return Context->getDependentTemplateName(NNS,
+ (OverloadedOperatorKind)Record[Idx++]);
+ }
+ }
+
+ assert(0 && "Unhandled template name kind!");
+ return TemplateName();
+}
+
+TemplateArgument
+PCHReader::ReadTemplateArgument(const RecordData &Record, unsigned &Idx) {
+ switch ((TemplateArgument::ArgKind)Record[Idx++]) {
+ case TemplateArgument::Null:
+ return TemplateArgument();
+ case TemplateArgument::Type:
+ return TemplateArgument(GetType(Record[Idx++]));
+ case TemplateArgument::Declaration:
+ return TemplateArgument(GetDecl(Record[Idx++]));
+ case TemplateArgument::Integral: {
+ llvm::APSInt Value = ReadAPSInt(Record, Idx);
+ QualType T = GetType(Record[Idx++]);
+ return TemplateArgument(Value, T);
+ }
+ case TemplateArgument::Template:
+ return TemplateArgument(ReadTemplateName(Record, Idx));
+ case TemplateArgument::Expression:
+ return TemplateArgument(ReadExpr());
+ case TemplateArgument::Pack: {
+ unsigned NumArgs = Record[Idx++];
+ llvm::SmallVector<TemplateArgument, 8> Args;
+ Args.reserve(NumArgs);
+ while (NumArgs--)
+ Args.push_back(ReadTemplateArgument(Record, Idx));
+ TemplateArgument TemplArg;
+ TemplArg.setArgumentPack(Args.data(), Args.size(), /*CopyArgs=*/true);
+ return TemplArg;
+ }
+ }
+
+ assert(0 && "Unhandled template argument kind!");
+ return TemplateArgument();
+}
+
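
ReadTemplateArgument decodes a kind-tagged, recursive record: every argument begins with its kind, and a Pack is followed by a count and that many nested arguments. A stripped-down sketch of the same layout; the enum and payloads are simplified, not clang's actual encoding.

#include <cstdint>
#include <vector>

enum class ArgKind : uint64_t { Null, Type, Pack };

struct Arg { ArgKind Kind; uint64_t Payload; std::vector<Arg> Elements; };

Arg readArg(const std::vector<uint64_t> &Record, unsigned &Idx) {
  switch (static_cast<ArgKind>(Record[Idx++])) {
  case ArgKind::Null: return {ArgKind::Null, 0, {}};
  case ArgKind::Type: return {ArgKind::Type, Record[Idx++], {}};
  case ArgKind::Pack: {
    Arg A{ArgKind::Pack, 0, {}};
    uint64_t N = Record[Idx++];
    A.Elements.reserve(N);
    while (N--)                       // recurse for each pack element
      A.Elements.push_back(readArg(Record, Idx));
    return A;
  }
  }
  return {ArgKind::Null, 0, {}};      // unreachable for well-formed records
}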
+TemplateParameterList *
+PCHReader::ReadTemplateParameterList(const RecordData &Record, unsigned &Idx) {
+ SourceLocation TemplateLoc = ReadSourceLocation(Record, Idx);
+ SourceLocation LAngleLoc = ReadSourceLocation(Record, Idx);
+ SourceLocation RAngleLoc = ReadSourceLocation(Record, Idx);
+
+ unsigned NumParams = Record[Idx++];
+ llvm::SmallVector<NamedDecl *, 16> Params;
+ Params.reserve(NumParams);
+ while (NumParams--)
+ Params.push_back(cast<NamedDecl>(GetDecl(Record[Idx++])));
+
+ TemplateParameterList* TemplateParams =
+ TemplateParameterList::Create(*Context, TemplateLoc, LAngleLoc,
+ Params.data(), Params.size(), RAngleLoc);
+ return TemplateParams;
+}
+
+void
+PCHReader::
+ReadTemplateArgumentList(llvm::SmallVector<TemplateArgument, 8> &TemplArgs,
+ const RecordData &Record, unsigned &Idx) {
+ unsigned NumTemplateArgs = Record[Idx++];
+ TemplArgs.reserve(NumTemplateArgs);
+ while (NumTemplateArgs--)
+ TemplArgs.push_back(ReadTemplateArgument(Record, Idx));
+}
+
+/// \brief Read a UnresolvedSet structure.
+void PCHReader::ReadUnresolvedSet(UnresolvedSetImpl &Set,
+ const RecordData &Record, unsigned &Idx) {
+ unsigned NumDecls = Record[Idx++];
+ while (NumDecls--) {
+ NamedDecl *D = cast<NamedDecl>(GetDecl(Record[Idx++]));
+ AccessSpecifier AS = (AccessSpecifier)Record[Idx++];
+ Set.addDecl(D, AS);
+ }
+}
+
+CXXBaseSpecifier
+PCHReader::ReadCXXBaseSpecifier(const RecordData &Record, unsigned &Idx) {
+ bool isVirtual = static_cast<bool>(Record[Idx++]);
+ bool isBaseOfClass = static_cast<bool>(Record[Idx++]);
+ AccessSpecifier AS = static_cast<AccessSpecifier>(Record[Idx++]);
+ QualType T = GetType(Record[Idx++]);
+ SourceRange Range = ReadSourceRange(Record, Idx);
+ return CXXBaseSpecifier(Range, isVirtual, isBaseOfClass, AS, T);
+}
+
NestedNameSpecifier *
PCHReader::ReadNestedNameSpecifier(const RecordData &Record, unsigned &Idx) {
unsigned N = Record[Idx++];
@@ -2934,16 +3301,17 @@ PCHReader::ReadNestedNameSpecifier(const RecordData &Record, unsigned &Idx) {
// No associated value, and there can't be a prefix.
break;
}
- Prev = NNS;
}
+ Prev = NNS;
}
return NNS;
}
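
The one-line move above fixes prefix chaining: Prev must advance after every component, not only inside the switch cases that happened to touch it, or later components silently lose their prefix. A sketch of the corrected loop shape, using a simplified linked-prefix type (std::deque keeps element addresses stable across push_back):

#include <cstdint>
#include <deque>
#include <vector>

struct NNS { uint64_t Value; NNS *Prefix; };

NNS *readSpecifier(const std::vector<uint64_t> &Record, unsigned &Idx,
                   std::deque<NNS> &Storage) {
  unsigned N = static_cast<unsigned>(Record[Idx++]);
  NNS *Prev = nullptr, *Cur = nullptr;
  for (unsigned I = 0; I != N; ++I) {
    Storage.push_back(NNS{Record[Idx++], Prev});
    Cur = &Storage.back();
    Prev = Cur;  // hoisted out of the per-kind switch, as in the patch
  }
  return Cur;    // outermost component; earlier ones reachable via Prefix
}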
SourceRange
PCHReader::ReadSourceRange(const RecordData &Record, unsigned &Idx) {
- return SourceRange(SourceLocation::getFromRawEncoding(Record[Idx++]),
- SourceLocation::getFromRawEncoding(Record[Idx++]));
+ SourceLocation beg = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ SourceLocation end = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ return SourceRange(beg, end);
}
/// \brief Read an integral value
@@ -3090,6 +3458,11 @@ PCHReader::LoadingTypeOrDecl::~LoadingTypeOrDecl() {
true);
Reader.PendingIdentifierInfos.pop_front();
}
+
+ // We are not in recursive loading, so it's safe to pass the "interesting"
+ // decls to the consumer.
+ if (Reader.Consumer)
+ Reader.PassInterestingDeclsToConsumer();
}
Reader.CurrentlyLoadingTypeOrDecl = Parent;
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHReaderDecl.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHReaderDecl.cpp
index 1ef0441..742f0e4 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PCHReaderDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PCHReaderDecl.cpp
@@ -27,16 +27,19 @@ using namespace clang;
// Declaration deserialization
//===----------------------------------------------------------------------===//
-namespace {
+namespace clang {
class PCHDeclReader : public DeclVisitor<PCHDeclReader, void> {
PCHReader &Reader;
const PCHReader::RecordData &Record;
unsigned &Idx;
+ pch::TypeID TypeIDForTypeDecl;
public:
PCHDeclReader(PCHReader &Reader, const PCHReader::RecordData &Record,
unsigned &Idx)
- : Reader(Reader), Record(Record), Idx(Idx) { }
+ : Reader(Reader), Record(Record), Idx(Idx), TypeIDForTypeDecl(0) { }
+
+ void Visit(Decl *D);
void VisitDecl(Decl *D);
void VisitTranslationUnitDecl(TranslationUnitDecl *TU);
@@ -46,7 +49,7 @@ namespace {
void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
void VisitTypeDecl(TypeDecl *TD);
void VisitTypedefDecl(TypedefDecl *TD);
- void VisitUnresolvedUsingTypename(UnresolvedUsingTypenameDecl *D);
+ void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
void VisitTagDecl(TagDecl *TD);
void VisitEnumDecl(EnumDecl *ED);
void VisitRecordDecl(RecordDecl *RD);
@@ -58,7 +61,7 @@ namespace {
void VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
void VisitValueDecl(ValueDecl *VD);
void VisitEnumConstantDecl(EnumConstantDecl *ECD);
- void VisitUnresolvedUsingValue(UnresolvedUsingValueDecl *D);
+ void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
void VisitDeclaratorDecl(DeclaratorDecl *DD);
void VisitFunctionDecl(FunctionDecl *FD);
void VisitCXXMethodDecl(CXXMethodDecl *D);
@@ -72,19 +75,21 @@ namespace {
void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
void VisitTemplateDecl(TemplateDecl *D);
void VisitClassTemplateDecl(ClassTemplateDecl *D);
- void visitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
- void VisitUsing(UsingDecl *D);
- void VisitUsingShadow(UsingShadowDecl *D);
+ void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingShadowDecl(UsingShadowDecl *D);
void VisitLinkageSpecDecl(LinkageSpecDecl *D);
void VisitFileScopeAsmDecl(FileScopeAsmDecl *AD);
+ void VisitAccessSpecDecl(AccessSpecDecl *D);
+ void VisitFriendDecl(FriendDecl *D);
void VisitFriendTemplateDecl(FriendTemplateDecl *D);
void VisitStaticAssertDecl(StaticAssertDecl *D);
void VisitBlockDecl(BlockDecl *BD);
std::pair<uint64_t, uint64_t> VisitDeclContext(DeclContext *DC);
- // FIXME: Reorder according to DeclNodes.def?
+ // FIXME: Reorder according to DeclNodes.td?
void VisitObjCMethodDecl(ObjCMethodDecl *D);
void VisitObjCContainerDecl(ObjCContainerDecl *D);
void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
@@ -103,6 +108,19 @@ namespace {
};
}
+void PCHDeclReader::Visit(Decl *D) {
+ DeclVisitor<PCHDeclReader, void>::Visit(D);
+
+ if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
+ // If we have a fully initialized TypeDecl, we can safely read its type now.
+ TD->setTypeForDecl(Reader.GetType(TypeIDForTypeDecl).getTypePtr());
+ } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // FunctionDecl's body was written last after all other Stmts/Exprs.
+ if (Record[Idx++])
+ FD->setLazyBody(Reader.getDeclsCursor().GetCurrentBitNo());
+ }
+}
+
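
The new Visit hook makes reading two-phase: kind-specific visitors stash the type ID, and only once the decl is fully constructed does the (possibly recursive) type load run. A minimal sketch of that deferral, with illustrative names throughout:

#include <cstdint>
#include <functional>

struct Type {};
struct TypeDecl { Type *TypeForDecl = nullptr; };

struct DeclReader {
  uint64_t TypeIDForTypeDecl = 0;            // stashed, not yet resolved
  std::function<Type *(uint64_t)> GetType;   // may recursively deserialize

  // Phase 1: record the ID while reading fields. Never call GetType here;
  // it could re-enter the reader and observe a half-read decl.
  void visitTypeDecl(uint64_t ID) { TypeIDForTypeDecl = ID; }

  // Phase 2: the decl is complete; resolving the type is now safe.
  void finish(TypeDecl &TD) { TD.TypeForDecl = GetType(TypeIDForTypeDecl); }
};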
void PCHDeclReader::VisitDecl(Decl *D) {
D->setDeclContext(cast_or_null<DeclContext>(Reader.GetDecl(Record[Idx++])));
D->setLexicalDeclContext(
@@ -110,7 +128,7 @@ void PCHDeclReader::VisitDecl(Decl *D) {
D->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
D->setInvalidDecl(Record[Idx++]);
if (Record[Idx++])
- D->addAttr(Reader.ReadAttributes());
+ D->initAttrs(Reader.ReadAttributes());
D->setImplicit(Record[Idx++]);
D->setUsed(Record[Idx++]);
D->setAccess((AccessSpecifier)Record[Idx++]);
@@ -130,21 +148,18 @@ void PCHDeclReader::VisitNamedDecl(NamedDecl *ND) {
void PCHDeclReader::VisitTypeDecl(TypeDecl *TD) {
VisitNamedDecl(TD);
- TD->setTypeForDecl(Reader.GetType(Record[Idx++]).getTypePtr());
+ // Delay type reading until after we have fully initialized the decl.
+ TypeIDForTypeDecl = Record[Idx++];
}
void PCHDeclReader::VisitTypedefDecl(TypedefDecl *TD) {
- // Note that we cannot use VisitTypeDecl here, because we need to
- // set the underlying type of the typedef *before* we try to read
- // the type associated with the TypedefDecl.
- VisitNamedDecl(TD);
- uint64_t TypeData = Record[Idx++];
+ VisitTypeDecl(TD);
TD->setTypeSourceInfo(Reader.GetTypeSourceInfo(Record, Idx));
- TD->setTypeForDecl(Reader.GetType(TypeData).getTypePtr());
}
void PCHDeclReader::VisitTagDecl(TagDecl *TD) {
VisitTypeDecl(TD);
+ TD->IdentifierNamespace = Record[Idx++];
TD->setPreviousDeclaration(
cast_or_null<TagDecl>(Reader.GetDecl(Record[Idx++])));
TD->setTagKind((TagDecl::TagKind)Record[Idx++]);
@@ -163,7 +178,8 @@ void PCHDeclReader::VisitEnumDecl(EnumDecl *ED) {
ED->setPromotionType(Reader.GetType(Record[Idx++]));
ED->setNumPositiveBits(Record[Idx++]);
ED->setNumNegativeBits(Record[Idx++]);
- // FIXME: C++ InstantiatedFrom
+ ED->setInstantiationOfMemberEnum(
+ cast_or_null<EnumDecl>(Reader.GetDecl(Record[Idx++])));
}
void PCHDeclReader::VisitRecordDecl(RecordDecl *RD) {
@@ -181,7 +197,7 @@ void PCHDeclReader::VisitValueDecl(ValueDecl *VD) {
void PCHDeclReader::VisitEnumConstantDecl(EnumConstantDecl *ECD) {
VisitValueDecl(ECD);
if (Record[Idx++])
- ECD->setInitExpr(Reader.ReadDeclExpr());
+ ECD->setInitExpr(Reader.ReadExpr());
ECD->setInitVal(Reader.ReadAPSInt(Record, Idx));
}
@@ -195,9 +211,83 @@ void PCHDeclReader::VisitDeclaratorDecl(DeclaratorDecl *DD) {
void PCHDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
VisitDeclaratorDecl(FD);
- if (Record[Idx++])
- FD->setLazyBody(Reader.getDeclsCursor().GetCurrentBitNo());
- FD->setPreviousDeclaration(
+
+ FD->IdentifierNamespace = Record[Idx++];
+ switch ((FunctionDecl::TemplatedKind)Record[Idx++]) {
+ default: assert(false && "Unhandled TemplatedKind!");
+ break;
+ case FunctionDecl::TK_NonTemplate:
+ break;
+ case FunctionDecl::TK_FunctionTemplate:
+ FD->setDescribedFunctionTemplate(
+ cast<FunctionTemplateDecl>(Reader.GetDecl(Record[Idx++])));
+ break;
+ case FunctionDecl::TK_MemberSpecialization: {
+ FunctionDecl *InstFD = cast<FunctionDecl>(Reader.GetDecl(Record[Idx++]));
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = Reader.ReadSourceLocation(Record, Idx);
+ FD->setInstantiationOfMemberFunction(InstFD, TSK);
+ FD->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
+ break;
+ }
+ case FunctionDecl::TK_FunctionTemplateSpecialization: {
+ FunctionTemplateDecl *Template
+ = cast<FunctionTemplateDecl>(Reader.GetDecl(Record[Idx++]));
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+
+ // Template arguments.
+ llvm::SmallVector<TemplateArgument, 8> TemplArgs;
+ Reader.ReadTemplateArgumentList(TemplArgs, Record, Idx);
+
+ // Template args as written.
+ llvm::SmallVector<TemplateArgumentLoc, 8> TemplArgLocs;
+ SourceLocation LAngleLoc, RAngleLoc;
+ if (Record[Idx++]) { // TemplateArgumentsAsWritten != 0
+ unsigned NumTemplateArgLocs = Record[Idx++];
+ TemplArgLocs.reserve(NumTemplateArgLocs);
+ for (unsigned i=0; i != NumTemplateArgLocs; ++i)
+ TemplArgLocs.push_back(Reader.ReadTemplateArgumentLoc(Record, Idx));
+
+ LAngleLoc = Reader.ReadSourceLocation(Record, Idx);
+ RAngleLoc = Reader.ReadSourceLocation(Record, Idx);
+ }
+
+ SourceLocation POI = Reader.ReadSourceLocation(Record, Idx);
+
+ FD->setFunctionTemplateSpecialization(Template, TemplArgs.size(),
+ TemplArgs.data(), TSK,
+ TemplArgLocs.size(),
+ TemplArgLocs.data(),
+ LAngleLoc, RAngleLoc, POI);
+ break;
+ }
+ case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
+ // Templates.
+ UnresolvedSet<8> TemplDecls;
+ unsigned NumTemplates = Record[Idx++];
+ while (NumTemplates--)
+ TemplDecls.addDecl(cast<NamedDecl>(Reader.GetDecl(Record[Idx++])));
+
+ // Templates args.
+ TemplateArgumentListInfo TemplArgs;
+ unsigned NumArgs = Record[Idx++];
+ while (NumArgs--)
+ TemplArgs.addArgument(Reader.ReadTemplateArgumentLoc(Record, Idx));
+ TemplArgs.setLAngleLoc(Reader.ReadSourceLocation(Record, Idx));
+ TemplArgs.setRAngleLoc(Reader.ReadSourceLocation(Record, Idx));
+
+ FD->setDependentTemplateSpecialization(*Reader.getContext(),
+ TemplDecls, TemplArgs);
+ break;
+ }
+ }
+
+ // FunctionDecl's body is handled last, in PCHDeclReader::Visit,
+ // after everything else is read.
+
+ // Avoid side effects and invariant checking of FunctionDecl's
+ // setPreviousDeclaration.
+ FD->redeclarable_base::setPreviousDeclaration(
cast_or_null<FunctionDecl>(Reader.GetDecl(Record[Idx++])));
FD->setStorageClass((FunctionDecl::StorageClass)Record[Idx++]);
FD->setStorageClassAsWritten((FunctionDecl::StorageClass)Record[Idx++]);
@@ -211,7 +301,6 @@ void PCHDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
FD->setCopyAssignment(Record[Idx++]);
FD->setHasImplicitReturnZero(Record[Idx++]);
FD->setLocEnd(SourceLocation::getFromRawEncoding(Record[Idx++]));
- // FIXME: C++ TemplateOrInstantiation
// Read in the parameters.
unsigned NumParams = Record[Idx++];
@@ -220,11 +309,6 @@ void PCHDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
for (unsigned I = 0; I != NumParams; ++I)
Params.push_back(cast<ParmVarDecl>(Reader.GetDecl(Record[Idx++])));
FD->setParams(Params.data(), NumParams);
-
- // FIXME: order this properly w.r.t. friendness
- // FIXME: this same thing needs to happen for function templates
- if (FD->isOverloadedOperator() && !FD->getDeclContext()->isRecord())
- FD->setNonMemberOperator();
}
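
Two things in this hunk deserve a note. First, the templated-kind switch reads a discriminator and then a kind-specific payload. Second, the previous-declaration link is set through the Redeclarable base to bypass derived-class invariant checks that cannot hold mid-deserialization. A sketch of that base-setter bypass under simplified types (this is not clang's Redeclarable):

template <typename T>
struct Redeclarable {
  T *Previous = nullptr;
  void setPreviousDeclaration(T *Prev) { Previous = Prev; }
};

struct Function : Redeclarable<Function> {
  void setPreviousDeclaration(Function *Prev) {
    // Derived setter: would enforce invariants that assume *this is
    // fully constructed; unsafe while the decl is still being read.
    Redeclarable<Function>::setPreviousDeclaration(Prev);
  }
};

void linkDuringDeserialization(Function &F, Function *Prev) {
  // Go straight to the base implementation, as the patch does with
  // FD->redeclarable_base::setPreviousDeclaration(...).
  F.Redeclarable<Function>::setPreviousDeclaration(Prev);
}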
void PCHDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
@@ -232,7 +316,7 @@ void PCHDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
if (Record[Idx++]) {
// In practice, this won't be executed (since method definitions
// don't occur in header files).
- MD->setBody(Reader.ReadDeclStmt());
+ MD->setBody(Reader.ReadStmt());
MD->setSelfDecl(cast<ImplicitParamDecl>(Reader.GetDecl(Record[Idx++])));
MD->setCmdDecl(cast<ImplicitParamDecl>(Reader.GetDecl(Record[Idx++])));
}
@@ -374,10 +458,12 @@ void PCHDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) {
void PCHDeclReader::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
VisitNamedDecl(D);
D->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- D->setType(Reader.GetType(Record[Idx++]));
+ D->setType(Reader.GetTypeSourceInfo(Record, Idx));
// FIXME: stable encoding
D->setPropertyAttributes(
(ObjCPropertyDecl::PropertyAttributeKind)Record[Idx++]);
+ D->setPropertyAttributesAsWritten(
+ (ObjCPropertyDecl::PropertyAttributeKind)Record[Idx++]);
// FIXME: stable encoding
D->setPropertyImplementation(
(ObjCPropertyDecl::PropertyControl)Record[Idx++]);
@@ -424,7 +510,12 @@ void PCHDeclReader::VisitFieldDecl(FieldDecl *FD) {
VisitDeclaratorDecl(FD);
FD->setMutable(Record[Idx++]);
if (Record[Idx++])
- FD->setBitWidth(Reader.ReadDeclExpr());
+ FD->setBitWidth(Reader.ReadExpr());
+ if (!FD->getDeclName()) {
+ FieldDecl *Tmpl = cast_or_null<FieldDecl>(Reader.GetDecl(Record[Idx++]));
+ if (Tmpl)
+ Reader.getContext()->setInstantiatedFromUnnamedFieldDecl(FD, Tmpl);
+ }
}
void PCHDeclReader::VisitVarDecl(VarDecl *VD) {
@@ -439,7 +530,14 @@ void PCHDeclReader::VisitVarDecl(VarDecl *VD) {
VD->setPreviousDeclaration(
cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
if (Record[Idx++])
- VD->setInit(Reader.ReadDeclExpr());
+ VD->setInit(Reader.ReadExpr());
+
+ if (Record[Idx++]) { // HasMemberSpecializationInfo.
+ VarDecl *Tmpl = cast<VarDecl>(Reader.GetDecl(Record[Idx++]));
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = Reader.ReadSourceLocation(Record, Idx);
+ Reader.getContext()->setInstantiatedFromStaticDataMember(VD, Tmpl, TSK,POI);
+ }
}
void PCHDeclReader::VisitImplicitParamDecl(ImplicitParamDecl *PD) {
@@ -450,16 +548,19 @@ void PCHDeclReader::VisitParmVarDecl(ParmVarDecl *PD) {
VisitVarDecl(PD);
PD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record[Idx++]);
PD->setHasInheritedDefaultArg(Record[Idx++]);
+ if (Record[Idx++]) // hasUninstantiatedDefaultArg.
+ PD->setUninstantiatedDefaultArg(Reader.ReadExpr());
}
void PCHDeclReader::VisitFileScopeAsmDecl(FileScopeAsmDecl *AD) {
VisitDecl(AD);
- AD->setAsmString(cast<StringLiteral>(Reader.ReadDeclExpr()));
+ AD->setAsmString(cast<StringLiteral>(Reader.ReadExpr()));
}
void PCHDeclReader::VisitBlockDecl(BlockDecl *BD) {
VisitDecl(BD);
- BD->setBody(cast_or_null<CompoundStmt>(Reader.ReadDeclStmt()));
+ BD->setBody(cast_or_null<CompoundStmt>(Reader.ReadStmt()));
+ BD->setSignatureAsWritten(Reader.GetTypeSourceInfo(Record, Idx));
unsigned NumParams = Record[Idx++];
llvm::SmallVector<ParmVarDecl *, 16> Params;
Params.reserve(NumParams);
@@ -481,13 +582,9 @@ void PCHDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
D->setNextNamespace(
cast_or_null<NamespaceDecl>(Reader.GetDecl(Record[Idx++])));
- // Only read one reference--the original or anonymous namespace.
bool IsOriginal = Record[Idx++];
- if (IsOriginal)
- D->setAnonymousNamespace(
- cast_or_null<NamespaceDecl>(Reader.GetDecl(Record[Idx++])));
- else
- D->setOriginalNamespace(
+ D->OrigOrAnonNamespace.setInt(IsOriginal);
+ D->OrigOrAnonNamespace.setPointer(
cast_or_null<NamespaceDecl>(Reader.GetDecl(Record[Idx++])));
}
@@ -501,7 +598,7 @@ void PCHDeclReader::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
D->setAliasedNamespace(cast<NamedDecl>(Reader.GetDecl(Record[Idx++])));
}
-void PCHDeclReader::VisitUsing(UsingDecl *D) {
+void PCHDeclReader::VisitUsingDecl(UsingDecl *D) {
VisitNamedDecl(D);
D->setUsingLocation(Reader.ReadSourceLocation(Record, Idx));
D->setNestedNameRange(Reader.ReadSourceRange(Record, Idx));
@@ -512,15 +609,24 @@ void PCHDeclReader::VisitUsing(UsingDecl *D) {
// would avoid existence checks.
unsigned NumShadows = Record[Idx++];
for(unsigned I = 0; I != NumShadows; ++I) {
- D->addShadowDecl(cast<UsingShadowDecl>(Reader.GetDecl(Record[Idx++])));
+ // Avoid the invariant checking of UsingDecl::addShadowDecl; the decl may
+ // still be initializing.
+ D->Shadows.insert(cast<UsingShadowDecl>(Reader.GetDecl(Record[Idx++])));
}
D->setTypeName(Record[Idx++]);
+ NamedDecl *Pattern = cast_or_null<NamedDecl>(Reader.GetDecl(Record[Idx++]));
+ if (Pattern)
+ Reader.getContext()->setInstantiatedFromUsingDecl(D, Pattern);
}
-void PCHDeclReader::VisitUsingShadow(UsingShadowDecl *D) {
+void PCHDeclReader::VisitUsingShadowDecl(UsingShadowDecl *D) {
VisitNamedDecl(D);
D->setTargetDecl(cast<NamedDecl>(Reader.GetDecl(Record[Idx++])));
D->setUsingDecl(cast<UsingDecl>(Reader.GetDecl(Record[Idx++])));
+ UsingShadowDecl *Pattern
+ = cast_or_null<UsingShadowDecl>(Reader.GetDecl(Record[Idx++]));
+ if (Pattern)
+ Reader.getContext()->setInstantiatedFromUsingShadowDecl(D, Pattern);
}
void PCHDeclReader::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
@@ -534,14 +640,14 @@ void PCHDeclReader::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
Reader.GetDecl(Record[Idx++])));
}
-void PCHDeclReader::VisitUnresolvedUsingValue(UnresolvedUsingValueDecl *D) {
+void PCHDeclReader::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
VisitValueDecl(D);
D->setTargetNestedNameRange(Reader.ReadSourceRange(Record, Idx));
D->setUsingLoc(Reader.ReadSourceLocation(Record, Idx));
D->setTargetNestedNameSpecifier(Reader.ReadNestedNameSpecifier(Record, Idx));
}
-void PCHDeclReader::VisitUnresolvedUsingTypename(
+void PCHDeclReader::VisitUnresolvedUsingTypenameDecl(
UnresolvedUsingTypenameDecl *D) {
VisitTypeDecl(D);
D->setTargetNestedNameRange(Reader.ReadSourceRange(Record, Idx));
@@ -551,28 +657,196 @@ void PCHDeclReader::VisitUnresolvedUsingTypename(
}
void PCHDeclReader::VisitCXXRecordDecl(CXXRecordDecl *D) {
- // assert(false && "cannot read CXXRecordDecl");
+ ASTContext &C = *Reader.getContext();
+
+ // We need to allocate the DefinitionData struct ahead of VisitRecordDecl
+ // so that the other CXXRecordDecls can get a pointer even when the owner
+ // is still initializing.
+ bool OwnsDefinitionData = false;
+ enum DataOwnership { Data_NoDefData, Data_Owner, Data_NotOwner };
+ switch ((DataOwnership)Record[Idx++]) {
+ default:
+ assert(0 && "Out of sync with PCHDeclWriter or messed up reading");
+ case Data_NoDefData:
+ break;
+ case Data_Owner:
+ OwnsDefinitionData = true;
+ D->DefinitionData = new (C) struct CXXRecordDecl::DefinitionData(D);
+ break;
+ case Data_NotOwner:
+ D->DefinitionData
+ = cast<CXXRecordDecl>(Reader.GetDecl(Record[Idx++]))->DefinitionData;
+ break;
+ }
+
VisitRecordDecl(D);
+
+ if (OwnsDefinitionData) {
+ assert(D->DefinitionData);
+ struct CXXRecordDecl::DefinitionData &Data = *D->DefinitionData;
+
+ Data.UserDeclaredConstructor = Record[Idx++];
+ Data.UserDeclaredCopyConstructor = Record[Idx++];
+ Data.UserDeclaredCopyAssignment = Record[Idx++];
+ Data.UserDeclaredDestructor = Record[Idx++];
+ Data.Aggregate = Record[Idx++];
+ Data.PlainOldData = Record[Idx++];
+ Data.Empty = Record[Idx++];
+ Data.Polymorphic = Record[Idx++];
+ Data.Abstract = Record[Idx++];
+ Data.HasTrivialConstructor = Record[Idx++];
+ Data.HasTrivialCopyConstructor = Record[Idx++];
+ Data.HasTrivialCopyAssignment = Record[Idx++];
+ Data.HasTrivialDestructor = Record[Idx++];
+ Data.ComputedVisibleConversions = Record[Idx++];
+ Data.DeclaredDefaultConstructor = Record[Idx++];
+ Data.DeclaredCopyConstructor = Record[Idx++];
+ Data.DeclaredCopyAssignment = Record[Idx++];
+ Data.DeclaredDestructor = Record[Idx++];
+
+ // setBases() is unsuitable since it may try to iterate the bases of an
+ // uninitialized base.
+ Data.NumBases = Record[Idx++];
+ Data.Bases = new (C) CXXBaseSpecifier[Data.NumBases];
+ for (unsigned i = 0; i != Data.NumBases; ++i)
+ Data.Bases[i] = Reader.ReadCXXBaseSpecifier(Record, Idx);
+
+ // FIXME: Make VBases lazily computed when needed to avoid storing them.
+ Data.NumVBases = Record[Idx++];
+ Data.VBases = new (C) CXXBaseSpecifier[Data.NumVBases];
+ for (unsigned i = 0; i != Data.NumVBases; ++i)
+ Data.VBases[i] = Reader.ReadCXXBaseSpecifier(Record, Idx);
+
+ Reader.ReadUnresolvedSet(Data.Conversions, Record, Idx);
+ Reader.ReadUnresolvedSet(Data.VisibleConversions, Record, Idx);
+ assert(Data.Definition && "Data.Definition should be already set!");
+ Data.FirstFriend
+ = cast_or_null<FriendDecl>(Reader.GetDecl(Record[Idx++]));
+ }
+
+ enum CXXRecKind {
+ CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
+ };
+ switch ((CXXRecKind)Record[Idx++]) {
+ default:
+ assert(false && "Out of sync with PCHDeclWriter::VisitCXXRecordDecl?");
+ case CXXRecNotTemplate:
+ break;
+ case CXXRecTemplate:
+ D->setDescribedClassTemplate(
+ cast<ClassTemplateDecl>(Reader.GetDecl(Record[Idx++])));
+ break;
+ case CXXRecMemberSpecialization: {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(Reader.GetDecl(Record[Idx++]));
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = Reader.ReadSourceLocation(Record, Idx);
+ D->setInstantiationOfMemberClass(RD, TSK);
+ D->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
+ break;
+ }
+ }
}
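
The DefinitionData handling above follows an owner/non-owner scheme: exactly one decl in the redeclaration chain allocates the shared struct, and it is allocated before VisitRecordDecl runs so that any decl loaded recursively in the meantime can already point at it. A cut-down sketch of the idea; the types and names are illustrative, and the real code arena-allocates rather than using plain new:

#include <cstdint>

struct DefData { unsigned NumBases = 0; /* flags, bases, ... */ };

enum Ownership : uint64_t { NoDefData, Owner, NotOwner };

struct ClassDecl { DefData *Data = nullptr; };

void readDefinitionData(ClassDecl &D, Ownership Kind, ClassDecl *Canonical) {
  switch (Kind) {
  case NoDefData:
    break;                    // plain forward declaration: no shared data
  case Owner:
    D.Data = new DefData();   // allocate *before* reading members, so a
    break;                    // recursive load already sees a valid pointer
  case NotOwner:
    D.Data = Canonical->Data; // share the owner's allocation
    break;
  }
}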
void PCHDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) {
- // assert(false && "cannot read CXXMethodDecl");
VisitFunctionDecl(D);
+ unsigned NumOverridenMethods = Record[Idx++];
+ while (NumOverridenMethods--) {
+ CXXMethodDecl *MD = cast<CXXMethodDecl>(Reader.GetDecl(Record[Idx++]));
+ // Avoid the invariant checking of CXXMethodDecl::addOverriddenMethod;
+ // MD may still be initializing.
+ Reader.getContext()->addOverriddenMethod(D, MD);
+ }
}
void PCHDeclReader::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
- // assert(false && "cannot read CXXConstructorDecl");
VisitCXXMethodDecl(D);
+
+ D->IsExplicitSpecified = Record[Idx++];
+ D->ImplicitlyDefined = Record[Idx++];
+
+ unsigned NumInitializers = Record[Idx++];
+ D->NumBaseOrMemberInitializers = NumInitializers;
+ if (NumInitializers) {
+ ASTContext &C = *Reader.getContext();
+
+ D->BaseOrMemberInitializers
+ = new (C) CXXBaseOrMemberInitializer*[NumInitializers];
+ for (unsigned i=0; i != NumInitializers; ++i) {
+ TypeSourceInfo *BaseClassInfo = 0;
+ bool IsBaseVirtual = false;
+ FieldDecl *Member = 0;
+
+ bool IsBaseInitializer = Record[Idx++];
+ if (IsBaseInitializer) {
+ BaseClassInfo = Reader.GetTypeSourceInfo(Record, Idx);
+ IsBaseVirtual = Record[Idx++];
+ } else {
+ Member = cast<FieldDecl>(Reader.GetDecl(Record[Idx++]));
+ }
+ SourceLocation MemberLoc = Reader.ReadSourceLocation(Record, Idx);
+ Expr *Init = Reader.ReadExpr();
+ FieldDecl *AnonUnionMember
+ = cast_or_null<FieldDecl>(Reader.GetDecl(Record[Idx++]));
+ SourceLocation LParenLoc = Reader.ReadSourceLocation(Record, Idx);
+ SourceLocation RParenLoc = Reader.ReadSourceLocation(Record, Idx);
+ bool IsWritten = Record[Idx++];
+ unsigned SourceOrderOrNumArrayIndices;
+ llvm::SmallVector<VarDecl *, 8> Indices;
+ if (IsWritten) {
+ SourceOrderOrNumArrayIndices = Record[Idx++];
+ } else {
+ SourceOrderOrNumArrayIndices = Record[Idx++];
+ Indices.reserve(SourceOrderOrNumArrayIndices);
+ for (unsigned i=0; i != SourceOrderOrNumArrayIndices; ++i)
+ Indices.push_back(cast<VarDecl>(Reader.GetDecl(Record[Idx++])));
+ }
+
+ CXXBaseOrMemberInitializer *BOMInit;
+ if (IsBaseInitializer) {
+ BOMInit = new (C) CXXBaseOrMemberInitializer(C, BaseClassInfo,
+ IsBaseVirtual, LParenLoc,
+ Init, RParenLoc);
+ } else if (IsWritten) {
+ BOMInit = new (C) CXXBaseOrMemberInitializer(C, Member, MemberLoc,
+ LParenLoc, Init, RParenLoc);
+ } else {
+ BOMInit = CXXBaseOrMemberInitializer::Create(C, Member, MemberLoc,
+ LParenLoc, Init, RParenLoc,
+ Indices.data(),
+ Indices.size());
+ }
+
+ BOMInit->setAnonUnionMember(AnonUnionMember);
+ D->BaseOrMemberInitializers[i] = BOMInit;
+ }
+ }
}
void PCHDeclReader::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
- // assert(false && "cannot read CXXDestructorDecl");
VisitCXXMethodDecl(D);
+
+ D->ImplicitlyDefined = Record[Idx++];
+ D->OperatorDelete = cast_or_null<FunctionDecl>(Reader.GetDecl(Record[Idx++]));
}
void PCHDeclReader::VisitCXXConversionDecl(CXXConversionDecl *D) {
- // assert(false && "cannot read CXXConversionDecl");
VisitCXXMethodDecl(D);
+ D->IsExplicitSpecified = Record[Idx++];
+}
+
+void PCHDeclReader::VisitAccessSpecDecl(AccessSpecDecl *D) {
+ VisitDecl(D);
+ D->setColonLoc(Reader.ReadSourceLocation(Record, Idx));
+}
+
+void PCHDeclReader::VisitFriendDecl(FriendDecl *D) {
+ VisitDecl(D);
+ if (Record[Idx++])
+ D->Friend = Reader.GetTypeSourceInfo(Record, Idx);
+ else
+ D->Friend = cast<NamedDecl>(Reader.GetDecl(Record[Idx++]));
+ D->NextFriend = cast_or_null<FriendDecl>(Reader.GetDecl(Record[Idx++]));
+ D->FriendLoc = Reader.ReadSourceLocation(Record, Idx);
}
void PCHDeclReader::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
@@ -580,37 +854,171 @@ void PCHDeclReader::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
}
void PCHDeclReader::VisitTemplateDecl(TemplateDecl *D) {
- assert(false && "cannot read TemplateDecl");
+ VisitNamedDecl(D);
+
+ NamedDecl *TemplatedDecl
+ = cast_or_null<NamedDecl>(Reader.GetDecl(Record[Idx++]));
+ TemplateParameterList* TemplateParams
+ = Reader.ReadTemplateParameterList(Record, Idx);
+ D->init(TemplatedDecl, TemplateParams);
}
void PCHDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
- assert(false && "cannot read ClassTemplateDecl");
+ VisitTemplateDecl(D);
+
+ D->IdentifierNamespace = Record[Idx++];
+ ClassTemplateDecl *PrevDecl =
+ cast_or_null<ClassTemplateDecl>(Reader.GetDecl(Record[Idx++]));
+ D->setPreviousDeclaration(PrevDecl);
+ if (PrevDecl == 0) {
+ // This ClassTemplateDecl owns a CommonPtr; read it.
+
+ // FoldingSets are filled in VisitClassTemplateSpecializationDecl.
+ unsigned size = Record[Idx++];
+ while (size--)
+ cast<ClassTemplateSpecializationDecl>(Reader.GetDecl(Record[Idx++]));
+
+ size = Record[Idx++];
+ while (size--)
+ cast<ClassTemplatePartialSpecializationDecl>(
+ Reader.GetDecl(Record[Idx++]));
+
+ // InjectedClassNameType is computed.
+
+ if (ClassTemplateDecl *CTD
+ = cast_or_null<ClassTemplateDecl>(Reader.GetDecl(Record[Idx++]))) {
+ D->setInstantiatedFromMemberTemplate(CTD);
+ if (Record[Idx++])
+ D->setMemberSpecialization();
+ }
+ }
}
void PCHDeclReader::VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *D) {
- assert(false && "cannot read ClassTemplateSpecializationDecl");
+ VisitCXXRecordDecl(D);
+
+ if (Decl *InstD = Reader.GetDecl(Record[Idx++])) {
+ if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(InstD)) {
+ D->setInstantiationOf(CTD);
+ } else {
+ llvm::SmallVector<TemplateArgument, 8> TemplArgs;
+ Reader.ReadTemplateArgumentList(TemplArgs, Record, Idx);
+ D->setInstantiationOf(cast<ClassTemplatePartialSpecializationDecl>(InstD),
+ TemplArgs.data(), TemplArgs.size());
+ }
+ }
+
+ // Explicit info.
+ if (TypeSourceInfo *TyInfo = Reader.GetTypeSourceInfo(Record, Idx)) {
+ D->setTypeAsWritten(TyInfo);
+ D->setExternLoc(Reader.ReadSourceLocation(Record, Idx));
+ D->setTemplateKeywordLoc(Reader.ReadSourceLocation(Record, Idx));
+ }
+
+ llvm::SmallVector<TemplateArgument, 8> TemplArgs;
+ Reader.ReadTemplateArgumentList(TemplArgs, Record, Idx);
+ D->initTemplateArgs(TemplArgs.data(), TemplArgs.size());
+ SourceLocation POI = Reader.ReadSourceLocation(Record, Idx);
+ if (POI.isValid())
+ D->setPointOfInstantiation(POI);
+ D->setSpecializationKind((TemplateSpecializationKind)Record[Idx++]);
+
+ if (Record[Idx++]) { // IsKeptInFoldingSet.
+ ClassTemplateDecl *CanonPattern
+ = cast<ClassTemplateDecl>(Reader.GetDecl(Record[Idx++]));
+ if (ClassTemplatePartialSpecializationDecl *Partial
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
+ CanonPattern->getPartialSpecializations().InsertNode(Partial);
+ } else {
+ CanonPattern->getSpecializations().InsertNode(D);
+ }
+ }
}
void PCHDeclReader::VisitClassTemplatePartialSpecializationDecl(
ClassTemplatePartialSpecializationDecl *D) {
- assert(false && "cannot read ClassTemplatePartialSpecializationDecl");
+ VisitClassTemplateSpecializationDecl(D);
+
+ D->initTemplateParameters(Reader.ReadTemplateParameterList(Record, Idx));
+
+ TemplateArgumentListInfo ArgInfos;
+ unsigned NumArgs = Record[Idx++];
+ while (NumArgs--)
+ ArgInfos.addArgument(Reader.ReadTemplateArgumentLoc(Record, Idx));
+ D->initTemplateArgsAsWritten(ArgInfos);
+
+ D->setSequenceNumber(Record[Idx++]);
+
+ // These are read from, and set on, the first declaration only.
+ if (D->getPreviousDeclaration() == 0) {
+ D->setInstantiatedFromMember(
+ cast_or_null<ClassTemplatePartialSpecializationDecl>(
+ Reader.GetDecl(Record[Idx++])));
+ if (Record[Idx++])
+ D->setMemberSpecialization();
+ }
}
-void PCHDeclReader::visitFunctionTemplateDecl(FunctionTemplateDecl *D) {
- assert(false && "cannot read FunctionTemplateDecl");
+void PCHDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ VisitTemplateDecl(D);
+
+ D->IdentifierNamespace = Record[Idx++];
+ FunctionTemplateDecl *PrevDecl =
+ cast_or_null<FunctionTemplateDecl>(Reader.GetDecl(Record[Idx++]));
+ D->setPreviousDeclaration(PrevDecl);
+ if (PrevDecl == 0) {
+ // This FunctionTemplateDecl owns a CommonPtr; read it.
+
+ // Read the function specialization declarations.
+ // FunctionTemplateDecl's FunctionTemplateSpecializationInfos are filled
+ // through the specialized FunctionDecl's setFunctionTemplateSpecialization.
+ unsigned NumSpecs = Record[Idx++];
+ while (NumSpecs--)
+ Reader.GetDecl(Record[Idx++]);
+
+ if (FunctionTemplateDecl *CTD
+ = cast_or_null<FunctionTemplateDecl>(Reader.GetDecl(Record[Idx++]))) {
+ D->setInstantiatedFromMemberTemplate(CTD);
+ if (Record[Idx++])
+ D->setMemberSpecialization();
+ }
+ }
}
void PCHDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
- assert(false && "cannot read TemplateTypeParmDecl");
+ VisitTypeDecl(D);
+
+ D->setDeclaredWithTypename(Record[Idx++]);
+ D->setParameterPack(Record[Idx++]);
+
+ bool Inherited = Record[Idx++];
+ TypeSourceInfo *DefArg = Reader.GetTypeSourceInfo(Record, Idx);
+ D->setDefaultArgument(DefArg, Inherited);
}
void PCHDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
- assert(false && "cannot read NonTypeTemplateParmDecl");
+ VisitVarDecl(D);
+ // TemplateParmPosition.
+ D->setDepth(Record[Idx++]);
+ D->setPosition(Record[Idx++]);
+ // Rest of NonTypeTemplateParmDecl.
+ if (Record[Idx++]) {
+ Expr *DefArg = Reader.ReadExpr();
+ bool Inherited = Record[Idx++];
+ D->setDefaultArgument(DefArg, Inherited);
+ }
}
void PCHDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
- assert(false && "cannot read TemplateTemplateParmDecl");
+ VisitTemplateDecl(D);
+ // TemplateParmPosition.
+ D->setDepth(Record[Idx++]);
+ D->setPosition(Record[Idx++]);
+ // Rest of TemplateTemplateParmDecl.
+ TemplateArgumentLoc Arg = Reader.ReadTemplateArgumentLoc(Record, Idx);
+ bool IsInherited = Record[Idx++];
+ D->setDefaultArgument(Arg, IsInherited);
}
void PCHDeclReader::VisitStaticAssertDecl(StaticAssertDecl *D) {
@@ -641,24 +1049,24 @@ Attr *PCHReader::ReadAttributes() {
(void)RecCode;
#define SIMPLE_ATTR(Name) \
- case Attr::Name: \
+ case attr::Name: \
New = ::new (*Context) Name##Attr(); \
break
#define STRING_ATTR(Name) \
- case Attr::Name: \
+ case attr::Name: \
New = ::new (*Context) Name##Attr(*Context, ReadString(Record, Idx)); \
break
#define UNSIGNED_ATTR(Name) \
- case Attr::Name: \
+ case attr::Name: \
New = ::new (*Context) Name##Attr(Record[Idx++]); \
break
Attr *Attrs = 0;
while (Idx < Record.size()) {
Attr *New = 0;
- Attr::Kind Kind = (Attr::Kind)Record[Idx++];
+ attr::Kind Kind = (attr::Kind)Record[Idx++];
bool IsInherited = Record[Idx++];
switch (Kind) {
@@ -674,14 +1082,14 @@ Attr *PCHReader::ReadAttributes() {
STRING_ATTR(AsmLabel);
SIMPLE_ATTR(BaseCheck);
- case Attr::Blocks:
+ case attr::Blocks:
New = ::new (*Context) BlocksAttr(
(BlocksAttr::BlocksAttrTypes)Record[Idx++]);
break;
SIMPLE_ATTR(CDecl);
- case Attr::Cleanup:
+ case attr::Cleanup:
New = ::new (*Context) CleanupAttr(
cast<FunctionDecl>(GetDecl(Record[Idx++])));
break;
@@ -695,7 +1103,7 @@ Attr *PCHReader::ReadAttributes() {
SIMPLE_ATTR(FastCall);
SIMPLE_ATTR(Final);
- case Attr::Format: {
+ case attr::Format: {
std::string Type = ReadString(Record, Idx);
unsigned FormatIdx = Record[Idx++];
unsigned FirstArg = Record[Idx++];
@@ -703,13 +1111,13 @@ Attr *PCHReader::ReadAttributes() {
break;
}
- case Attr::FormatArg: {
+ case attr::FormatArg: {
unsigned FormatIdx = Record[Idx++];
New = ::new (*Context) FormatArgAttr(FormatIdx);
break;
}
- case Attr::Sentinel: {
+ case attr::Sentinel: {
int sentinel = Record[Idx++];
int nullPos = Record[Idx++];
New = ::new (*Context) SentinelAttr(sentinel, nullPos);
@@ -719,15 +1127,15 @@ Attr *PCHReader::ReadAttributes() {
SIMPLE_ATTR(GNUInline);
SIMPLE_ATTR(Hiding);
- case Attr::IBActionKind:
+ case attr::IBAction:
New = ::new (*Context) IBActionAttr();
break;
- case Attr::IBOutletKind:
+ case attr::IBOutlet:
New = ::new (*Context) IBOutletAttr();
break;
- case Attr::IBOutletCollectionKind: {
+ case attr::IBOutletCollection: {
ObjCInterfaceDecl *D =
cast_or_null<ObjCInterfaceDecl>(GetDecl(Record[Idx++]));
New = ::new (*Context) IBOutletCollectionAttr(D);
@@ -740,7 +1148,7 @@ Attr *PCHReader::ReadAttributes() {
SIMPLE_ATTR(NoReturn);
SIMPLE_ATTR(NoThrow);
- case Attr::NonNull: {
+ case attr::NonNull: {
unsigned Size = Record[Idx++];
llvm::SmallVector<unsigned, 16> ArgNums;
ArgNums.insert(ArgNums.end(), &Record[Idx], &Record[Idx] + Size);
@@ -749,7 +1157,7 @@ Attr *PCHReader::ReadAttributes() {
break;
}
- case Attr::ReqdWorkGroupSize: {
+ case attr::ReqdWorkGroupSize: {
unsigned X = Record[Idx++];
unsigned Y = Record[Idx++];
unsigned Z = Record[Idx++];
@@ -777,7 +1185,7 @@ Attr *PCHReader::ReadAttributes() {
SIMPLE_ATTR(Unused);
SIMPLE_ATTR(Used);
- case Attr::Visibility:
+ case attr::Visibility:
New = ::new (*Context) VisibilityAttr(
(VisibilityAttr::VisibilityTypes)Record[Idx++]);
break;
@@ -848,6 +1256,8 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) {
// after reading this declaration.
SavedStreamPosition SavedPosition(DeclsCursor);
+ ReadingKindTracker ReadingKind(Read_Decl, *this);
+
// Note that we are loading a declaration record.
LoadingTypeOrDecl Loading(*this);
@@ -872,11 +1282,10 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) {
D = TypedefDecl::Create(*Context, 0, SourceLocation(), 0, 0);
break;
case pch::DECL_ENUM:
- D = EnumDecl::Create(*Context, 0, SourceLocation(), 0, SourceLocation(), 0);
+ D = EnumDecl::Create(*Context, Decl::EmptyShell());
break;
case pch::DECL_RECORD:
- D = RecordDecl::Create(*Context, TTK_Struct, 0, SourceLocation(),
- 0, SourceLocation(), 0);
+ D = RecordDecl::Create(*Context, Decl::EmptyShell());
break;
case pch::DECL_ENUM_CONSTANT:
D = EnumConstantDecl::Create(*Context, 0, SourceLocation(), 0, QualType(),
@@ -923,8 +1332,7 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) {
DeclarationName());
break;
case pch::DECL_CXX_RECORD:
- D = CXXRecordDecl::Create(*Context, TTK_Struct, 0,
- SourceLocation(), 0, SourceLocation(), 0);
+ D = CXXRecordDecl::Create(*Context, Decl::EmptyShell());
break;
case pch::DECL_CXX_METHOD:
D = CXXMethodDecl::Create(*Context, 0, SourceLocation(), DeclarationName(),
@@ -939,36 +1347,40 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) {
case pch::DECL_CXX_CONVERSION:
D = CXXConversionDecl::Create(*Context, Decl::EmptyShell());
break;
+ case pch::DECL_ACCESS_SPEC:
+ D = AccessSpecDecl::Create(*Context, AS_none, 0, SourceLocation(),
+ SourceLocation());
+ break;
case pch::DECL_FRIEND:
- assert(false && "cannot read FriendDecl");
+ D = FriendDecl::Create(*Context, Decl::EmptyShell());
break;
case pch::DECL_FRIEND_TEMPLATE:
assert(false && "cannot read FriendTemplateDecl");
break;
- case pch::DECL_TEMPLATE:
- // FIXME: Should TemplateDecl be ABSTRACT_DECL???
- assert(false && "TemplateDecl should be abstract!");
- break;
case pch::DECL_CLASS_TEMPLATE:
- assert(false && "cannot read ClassTemplateDecl");
+ D = ClassTemplateDecl::Create(*Context, 0, SourceLocation(),
+ DeclarationName(), 0, 0, 0);
break;
case pch::DECL_CLASS_TEMPLATE_SPECIALIZATION:
- assert(false && "cannot read ClasstemplateSpecializationDecl");
+ D = ClassTemplateSpecializationDecl::Create(*Context, Decl::EmptyShell());
break;
case pch::DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION:
- assert(false && "cannot read ClassTemplatePartialSpecializationDecl");
+ D = ClassTemplatePartialSpecializationDecl::Create(*Context,
+ Decl::EmptyShell());
break;
case pch::DECL_FUNCTION_TEMPLATE:
- assert(false && "cannot read FunctionTemplateDecl");
+ D = FunctionTemplateDecl::Create(*Context, 0, SourceLocation(),
+ DeclarationName(), 0, 0);
break;
case pch::DECL_TEMPLATE_TYPE_PARM:
- assert(false && "cannot read TemplateTypeParmDecl");
+ D = TemplateTypeParmDecl::Create(*Context, Decl::EmptyShell());
break;
case pch::DECL_NON_TYPE_TEMPLATE_PARM:
- assert(false && "cannot read NonTypeTemplateParmDecl");
+ D = NonTypeTemplateParmDecl::Create(*Context, 0, SourceLocation(), 0,0,0,
+ QualType(),0);
break;
case pch::DECL_TEMPLATE_TEMPLATE_PARM:
- assert(false && "cannot read TemplateTemplateParmDecl");
+ D = TemplateTemplateParmDecl::Create(*Context, 0, SourceLocation(),0,0,0,0);
break;
case pch::DECL_STATIC_ASSERT:
assert(false && "cannot read StaticAssertDecl");
@@ -1013,7 +1425,7 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) {
break;
case pch::DECL_OBJC_PROPERTY:
D = ObjCPropertyDecl::Create(*Context, 0, SourceLocation(), 0, SourceLocation(),
- QualType());
+ 0);
break;
case pch::DECL_OBJC_PROPERTY_IMPL:
D = ObjCPropertyImplDecl::Create(*Context, 0, SourceLocation(),
@@ -1062,16 +1474,11 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) {
assert(Idx == Record.size());
// If we have deserialized a declaration that has a definition the
- // AST consumer might need to know about, notify the consumer
- // about that definition now or queue it for later.
- if (isConsumerInterestedIn(D)) {
- if (Consumer) {
- DeclGroupRef DG(D);
- Consumer->HandleTopLevelDecl(DG);
- } else {
- InterestingDecls.push_back(D);
- }
- }
+  // AST consumer might need to know about, queue it.
+  // We don't pass it to the consumer immediately because we may be in the
+  // middle of recursive loading, and some declarations may still be initializing.
+ if (isConsumerInterestedIn(D))
+ InterestingDecls.push_back(D);
return D;
}
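
ReadDeclRecord shows two patterns worth calling out: declarations are now created as empty shells (`Decl::EmptyShell()`) and filled in by the visitor afterwards, and "interesting" declarations are queued rather than handed to the consumer, since reading can recurse into half-built nodes. A toy sketch of that two-phase shape (all names below are hypothetical, not clang API):

#include <cstdio>
#include <vector>

// Illustrative two-phase shape of ReadDeclRecord: allocate an empty shell
// keyed off the record kind, let a "visit" step fill the fields, and queue
// interesting results instead of delivering them immediately.
struct Shell { int Kind; int Field; };

int main() {
  std::vector<Shell> All;
  std::vector<unsigned> InterestingQueue;  // indices, drained after reading

  const int Records[][2] = { { 1, 0 }, { 2, 7 } };  // (kind, payload)
  for (unsigned i = 0; i != 2; ++i) {
    Shell S;                  // phase 1: empty shell for this record kind
    S.Kind = Records[i][0];
    S.Field = 0;

    S.Field = Records[i][1];  // phase 2: the visitor fills the fields

    All.push_back(S);
    if (S.Field != 0)         // "interesting": queue, don't notify yet
      InterestingQueue.push_back((unsigned)(All.size() - 1));
  }

  // Only once all (possibly recursive) reads finish is the queue drained.
  for (unsigned i = 0; i != InterestingQueue.size(); ++i)
    std::printf("interesting decl at index %u\n", InterestingQueue[i]);
}
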
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHReaderStmt.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHReaderStmt.cpp
index 3931adb..ace62d7 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PCHReaderStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PCHReaderStmt.cpp
@@ -17,17 +17,17 @@
#include "clang/AST/StmtVisitor.h"
using namespace clang;
-namespace {
- class PCHStmtReader : public StmtVisitor<PCHStmtReader, unsigned> {
+namespace clang {
+
+ class PCHStmtReader : public StmtVisitor<PCHStmtReader> {
PCHReader &Reader;
const PCHReader::RecordData &Record;
unsigned &Idx;
- llvm::SmallVectorImpl<Stmt *> &StmtStack;
public:
PCHStmtReader(PCHReader &Reader, const PCHReader::RecordData &Record,
- unsigned &Idx, llvm::SmallVectorImpl<Stmt *> &StmtStack)
- : Reader(Reader), Record(Record), Idx(Idx), StmtStack(StmtStack) { }
+ unsigned &Idx)
+ : Reader(Reader), Record(Record), Idx(Idx) { }
/// \brief The number of record fields required for the Stmt class
/// itself.
@@ -36,180 +36,201 @@ namespace {
/// \brief The number of record fields required for the Expr class
/// itself.
static const unsigned NumExprFields = NumStmtFields + 3;
-
- // Each of the Visit* functions reads in part of the expression
- // from the given record and the current expression stack, then
- // return the total number of operands that it read from the
- // expression stack.
-
- unsigned VisitStmt(Stmt *S);
- unsigned VisitNullStmt(NullStmt *S);
- unsigned VisitCompoundStmt(CompoundStmt *S);
- unsigned VisitSwitchCase(SwitchCase *S);
- unsigned VisitCaseStmt(CaseStmt *S);
- unsigned VisitDefaultStmt(DefaultStmt *S);
- unsigned VisitLabelStmt(LabelStmt *S);
- unsigned VisitIfStmt(IfStmt *S);
- unsigned VisitSwitchStmt(SwitchStmt *S);
- unsigned VisitWhileStmt(WhileStmt *S);
- unsigned VisitDoStmt(DoStmt *S);
- unsigned VisitForStmt(ForStmt *S);
- unsigned VisitGotoStmt(GotoStmt *S);
- unsigned VisitIndirectGotoStmt(IndirectGotoStmt *S);
- unsigned VisitContinueStmt(ContinueStmt *S);
- unsigned VisitBreakStmt(BreakStmt *S);
- unsigned VisitReturnStmt(ReturnStmt *S);
- unsigned VisitDeclStmt(DeclStmt *S);
- unsigned VisitAsmStmt(AsmStmt *S);
- unsigned VisitExpr(Expr *E);
- unsigned VisitPredefinedExpr(PredefinedExpr *E);
- unsigned VisitDeclRefExpr(DeclRefExpr *E);
- unsigned VisitIntegerLiteral(IntegerLiteral *E);
- unsigned VisitFloatingLiteral(FloatingLiteral *E);
- unsigned VisitImaginaryLiteral(ImaginaryLiteral *E);
- unsigned VisitStringLiteral(StringLiteral *E);
- unsigned VisitCharacterLiteral(CharacterLiteral *E);
- unsigned VisitParenExpr(ParenExpr *E);
- unsigned VisitUnaryOperator(UnaryOperator *E);
- unsigned VisitOffsetOfExpr(OffsetOfExpr *E);
- unsigned VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E);
- unsigned VisitArraySubscriptExpr(ArraySubscriptExpr *E);
- unsigned VisitCallExpr(CallExpr *E);
- unsigned VisitMemberExpr(MemberExpr *E);
- unsigned VisitCastExpr(CastExpr *E);
- unsigned VisitBinaryOperator(BinaryOperator *E);
- unsigned VisitCompoundAssignOperator(CompoundAssignOperator *E);
- unsigned VisitConditionalOperator(ConditionalOperator *E);
- unsigned VisitImplicitCastExpr(ImplicitCastExpr *E);
- unsigned VisitExplicitCastExpr(ExplicitCastExpr *E);
- unsigned VisitCStyleCastExpr(CStyleCastExpr *E);
- unsigned VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
- unsigned VisitExtVectorElementExpr(ExtVectorElementExpr *E);
- unsigned VisitInitListExpr(InitListExpr *E);
- unsigned VisitDesignatedInitExpr(DesignatedInitExpr *E);
- unsigned VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
- unsigned VisitVAArgExpr(VAArgExpr *E);
- unsigned VisitAddrLabelExpr(AddrLabelExpr *E);
- unsigned VisitStmtExpr(StmtExpr *E);
- unsigned VisitTypesCompatibleExpr(TypesCompatibleExpr *E);
- unsigned VisitChooseExpr(ChooseExpr *E);
- unsigned VisitGNUNullExpr(GNUNullExpr *E);
- unsigned VisitShuffleVectorExpr(ShuffleVectorExpr *E);
- unsigned VisitBlockExpr(BlockExpr *E);
- unsigned VisitBlockDeclRefExpr(BlockDeclRefExpr *E);
- unsigned VisitObjCStringLiteral(ObjCStringLiteral *E);
- unsigned VisitObjCEncodeExpr(ObjCEncodeExpr *E);
- unsigned VisitObjCSelectorExpr(ObjCSelectorExpr *E);
- unsigned VisitObjCProtocolExpr(ObjCProtocolExpr *E);
- unsigned VisitObjCIvarRefExpr(ObjCIvarRefExpr *E);
- unsigned VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
- unsigned VisitObjCImplicitSetterGetterRefExpr(
+
+  /// \brief Read and initialize an ExplicitTemplateArgumentList structure.
+ void ReadExplicitTemplateArgumentList(ExplicitTemplateArgumentList &ArgList,
+ unsigned NumTemplateArgs);
+
+ void VisitStmt(Stmt *S);
+ void VisitNullStmt(NullStmt *S);
+ void VisitCompoundStmt(CompoundStmt *S);
+ void VisitSwitchCase(SwitchCase *S);
+ void VisitCaseStmt(CaseStmt *S);
+ void VisitDefaultStmt(DefaultStmt *S);
+ void VisitLabelStmt(LabelStmt *S);
+ void VisitIfStmt(IfStmt *S);
+ void VisitSwitchStmt(SwitchStmt *S);
+ void VisitWhileStmt(WhileStmt *S);
+ void VisitDoStmt(DoStmt *S);
+ void VisitForStmt(ForStmt *S);
+ void VisitGotoStmt(GotoStmt *S);
+ void VisitIndirectGotoStmt(IndirectGotoStmt *S);
+ void VisitContinueStmt(ContinueStmt *S);
+ void VisitBreakStmt(BreakStmt *S);
+ void VisitReturnStmt(ReturnStmt *S);
+ void VisitDeclStmt(DeclStmt *S);
+ void VisitAsmStmt(AsmStmt *S);
+ void VisitExpr(Expr *E);
+ void VisitPredefinedExpr(PredefinedExpr *E);
+ void VisitDeclRefExpr(DeclRefExpr *E);
+ void VisitIntegerLiteral(IntegerLiteral *E);
+ void VisitFloatingLiteral(FloatingLiteral *E);
+ void VisitImaginaryLiteral(ImaginaryLiteral *E);
+ void VisitStringLiteral(StringLiteral *E);
+ void VisitCharacterLiteral(CharacterLiteral *E);
+ void VisitParenExpr(ParenExpr *E);
+ void VisitParenListExpr(ParenListExpr *E);
+ void VisitUnaryOperator(UnaryOperator *E);
+ void VisitOffsetOfExpr(OffsetOfExpr *E);
+ void VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E);
+ void VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ void VisitCallExpr(CallExpr *E);
+ void VisitMemberExpr(MemberExpr *E);
+ void VisitCastExpr(CastExpr *E);
+ void VisitBinaryOperator(BinaryOperator *E);
+ void VisitCompoundAssignOperator(CompoundAssignOperator *E);
+ void VisitConditionalOperator(ConditionalOperator *E);
+ void VisitImplicitCastExpr(ImplicitCastExpr *E);
+ void VisitExplicitCastExpr(ExplicitCastExpr *E);
+ void VisitCStyleCastExpr(CStyleCastExpr *E);
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
+ void VisitExtVectorElementExpr(ExtVectorElementExpr *E);
+ void VisitInitListExpr(InitListExpr *E);
+ void VisitDesignatedInitExpr(DesignatedInitExpr *E);
+ void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
+ void VisitVAArgExpr(VAArgExpr *E);
+ void VisitAddrLabelExpr(AddrLabelExpr *E);
+ void VisitStmtExpr(StmtExpr *E);
+ void VisitTypesCompatibleExpr(TypesCompatibleExpr *E);
+ void VisitChooseExpr(ChooseExpr *E);
+ void VisitGNUNullExpr(GNUNullExpr *E);
+ void VisitShuffleVectorExpr(ShuffleVectorExpr *E);
+ void VisitBlockExpr(BlockExpr *E);
+ void VisitBlockDeclRefExpr(BlockDeclRefExpr *E);
+ void VisitObjCStringLiteral(ObjCStringLiteral *E);
+ void VisitObjCEncodeExpr(ObjCEncodeExpr *E);
+ void VisitObjCSelectorExpr(ObjCSelectorExpr *E);
+ void VisitObjCProtocolExpr(ObjCProtocolExpr *E);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E);
+ void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
+ void VisitObjCImplicitSetterGetterRefExpr(
ObjCImplicitSetterGetterRefExpr *E);
- unsigned VisitObjCMessageExpr(ObjCMessageExpr *E);
- unsigned VisitObjCSuperExpr(ObjCSuperExpr *E);
- unsigned VisitObjCIsaExpr(ObjCIsaExpr *E);
-
- unsigned VisitObjCForCollectionStmt(ObjCForCollectionStmt *);
- unsigned VisitObjCAtCatchStmt(ObjCAtCatchStmt *);
- unsigned VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *);
- unsigned VisitObjCAtTryStmt(ObjCAtTryStmt *);
- unsigned VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *);
- unsigned VisitObjCAtThrowStmt(ObjCAtThrowStmt *);
-
- unsigned VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E);
- unsigned VisitCXXConstructExpr(CXXConstructExpr *E);
- unsigned VisitCXXNamedCastExpr(CXXNamedCastExpr *E);
- unsigned VisitCXXStaticCastExpr(CXXStaticCastExpr *E);
- unsigned VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E);
- unsigned VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E);
- unsigned VisitCXXConstCastExpr(CXXConstCastExpr *E);
- unsigned VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E);
- unsigned VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
- unsigned VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E);
- unsigned VisitCXXTypeidExpr(CXXTypeidExpr *E);
- unsigned VisitCXXThisExpr(CXXThisExpr *E);
- unsigned VisitCXXThrowExpr(CXXThrowExpr *E);
- unsigned VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E);
- unsigned VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+ void VisitObjCMessageExpr(ObjCMessageExpr *E);
+ void VisitObjCSuperExpr(ObjCSuperExpr *E);
+ void VisitObjCIsaExpr(ObjCIsaExpr *E);
+
+ void VisitObjCForCollectionStmt(ObjCForCollectionStmt *);
+ void VisitObjCAtCatchStmt(ObjCAtCatchStmt *);
+ void VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *);
+ void VisitObjCAtTryStmt(ObjCAtTryStmt *);
+ void VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *);
+ void VisitObjCAtThrowStmt(ObjCAtThrowStmt *);
+
+ void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E);
+ void VisitCXXConstructExpr(CXXConstructExpr *E);
+ void VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E);
+ void VisitCXXNamedCastExpr(CXXNamedCastExpr *E);
+ void VisitCXXStaticCastExpr(CXXStaticCastExpr *E);
+ void VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E);
+ void VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E);
+ void VisitCXXConstCastExpr(CXXConstCastExpr *E);
+ void VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E);
+ void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
+ void VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E);
+ void VisitCXXTypeidExpr(CXXTypeidExpr *E);
+ void VisitCXXThisExpr(CXXThisExpr *E);
+ void VisitCXXThrowExpr(CXXThrowExpr *E);
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E);
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+ void VisitCXXBindReferenceExpr(CXXBindReferenceExpr *E);
- unsigned VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E);
- unsigned VisitCXXNewExpr(CXXNewExpr *E);
+ void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
+ void VisitCXXNewExpr(CXXNewExpr *E);
+ void VisitCXXDeleteExpr(CXXDeleteExpr *E);
+ void VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E);
- unsigned VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
+ void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
+
+ void VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E);
+ void VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E);
+ void VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E);
+
+ void VisitOverloadExpr(OverloadExpr *E);
+ void VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E);
+ void VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E);
+
+ void VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E);
};
}
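
This class rewrite is the heart of the PCHReaderStmt change: previously each Visit method returned how many operands it had consumed from a shared post-order StmtStack; now the visitors return void and pull sub-statements on demand through Reader.ReadSubStmt()/ReadSubExpr(), letting recursion carry the tree structure. A compilable sketch of the new recursive-descent shape, over a toy prefix encoding (0 = leaf(value), 1 = add(lhs, rhs)):

#include <cstdio>
#include <vector>

// Sketch of the refactor's new shape: sub-trees are read on demand and the
// call stack, not an explicit StmtStack, carries the structure.
struct Reader {
  const std::vector<int> &Stream;
  unsigned Idx;

  int readSubExpr() {          // analogous to Reader.ReadSubExpr()
    int Op = Stream[Idx++];
    if (Op == 0)
      return Stream[Idx++];    // leaf: the next field is the value
    int LHS = readSubExpr();   // operands are read recursively,
    int RHS = readSubExpr();   // in the order they were written
    return LHS + RHS;
  }
};

int main() {
  // Encodes add(add(leaf 1, leaf 2), leaf 3).
  const int Enc[] = { 1, 1, 0, 1, 0, 2, 0, 3 };
  std::vector<int> Stream(Enc, Enc + sizeof(Enc) / sizeof(Enc[0]));
  Reader R = { Stream, 0 };
  std::printf("value = %d\n", R.readSubExpr());  // prints 6
}
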
-unsigned PCHStmtReader::VisitStmt(Stmt *S) {
+void PCHStmtReader::
+ReadExplicitTemplateArgumentList(ExplicitTemplateArgumentList &ArgList,
+ unsigned NumTemplateArgs) {
+ TemplateArgumentListInfo ArgInfo;
+ ArgInfo.setLAngleLoc(Reader.ReadSourceLocation(Record, Idx));
+ ArgInfo.setRAngleLoc(Reader.ReadSourceLocation(Record, Idx));
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ ArgInfo.addArgument(Reader.ReadTemplateArgumentLoc(Record, Idx));
+ ArgList.initializeFrom(ArgInfo);
+}
+
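
ReadExplicitTemplateArgumentList stages arguments into a growable local TemplateArgumentListInfo and then commits them via initializeFrom into storage that was sized when the node was created. A toy version of that build-then-commit pattern (ArgListBuilder and FixedArgList are illustrative stand-ins):

#include <cstdio>
#include <vector>

struct ArgListBuilder {
  std::vector<int> Args;
  void add(int A) { Args.push_back(A); }
};

struct FixedArgList {
  int *Args;
  unsigned NumArgs;
  // Analogous to ExplicitTemplateArgumentList::initializeFrom(...): copy
  // the staged arguments into storage of a size fixed at creation time.
  void initializeFrom(const ArgListBuilder &B) {
    for (unsigned i = 0; i != NumArgs && i != B.Args.size(); ++i)
      Args[i] = B.Args[i];
  }
};

int main() {
  ArgListBuilder B;
  B.add(10);
  B.add(20);

  int Storage[2];                   // sized when the node was created
  FixedArgList L = { Storage, 2 };
  L.initializeFrom(B);
  std::printf("%d %d\n", L.Args[0], L.Args[1]);
}
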
+void PCHStmtReader::VisitStmt(Stmt *S) {
assert(Idx == NumStmtFields && "Incorrect statement field count");
- return 0;
}
-unsigned PCHStmtReader::VisitNullStmt(NullStmt *S) {
+void PCHStmtReader::VisitNullStmt(NullStmt *S) {
VisitStmt(S);
S->setSemiLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitCompoundStmt(CompoundStmt *S) {
+void PCHStmtReader::VisitCompoundStmt(CompoundStmt *S) {
VisitStmt(S);
+ llvm::SmallVector<Stmt *, 16> Stmts;
unsigned NumStmts = Record[Idx++];
- S->setStmts(*Reader.getContext(),
- StmtStack.data() + StmtStack.size() - NumStmts, NumStmts);
+ while (NumStmts--)
+ Stmts.push_back(Reader.ReadSubStmt());
+ S->setStmts(*Reader.getContext(), Stmts.data(), Stmts.size());
S->setLBracLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setRBracLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return NumStmts;
}
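
Source locations throughout these visitors travel as single integers and are rebuilt with SourceLocation::getFromRawEncoding. A sketch of that opaque-handle round trip (the Loc class below is a toy stand-in, not clang's SourceLocation):

#include <cassert>
#include <cstdint>
#include <cstdio>

// The object is reduced to one integer for the record and reconstituted
// with a getFromRawEncoding-style factory.
class Loc {
  uint32_t ID;
public:
  Loc() : ID(0) {}
  uint32_t getRawEncoding() const { return ID; }
  static Loc getFromRawEncoding(uint32_t Raw) {
    Loc L;
    L.ID = Raw;
    return L;
  }
};

int main() {
  Loc L = Loc::getFromRawEncoding(1234);  // as read from Record[Idx++]
  uint32_t Raw = L.getRawEncoding();      // as written by the PCH writer
  assert(Loc::getFromRawEncoding(Raw).getRawEncoding() == 1234);
  std::printf("raw=%u\n", Raw);
}
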
-unsigned PCHStmtReader::VisitSwitchCase(SwitchCase *S) {
+void PCHStmtReader::VisitSwitchCase(SwitchCase *S) {
VisitStmt(S);
Reader.RecordSwitchCaseID(S, Record[Idx++]);
- return 0;
}
-unsigned PCHStmtReader::VisitCaseStmt(CaseStmt *S) {
+void PCHStmtReader::VisitCaseStmt(CaseStmt *S) {
VisitSwitchCase(S);
- S->setLHS(cast<Expr>(StmtStack[StmtStack.size() - 3]));
- S->setRHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
- S->setSubStmt(StmtStack.back());
+ S->setLHS(Reader.ReadSubExpr());
+ S->setRHS(Reader.ReadSubExpr());
+ S->setSubStmt(Reader.ReadSubStmt());
S->setCaseLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setEllipsisLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 3;
}
-unsigned PCHStmtReader::VisitDefaultStmt(DefaultStmt *S) {
+void PCHStmtReader::VisitDefaultStmt(DefaultStmt *S) {
VisitSwitchCase(S);
- S->setSubStmt(StmtStack.back());
+ S->setSubStmt(Reader.ReadSubStmt());
S->setDefaultLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 1;
}
-unsigned PCHStmtReader::VisitLabelStmt(LabelStmt *S) {
+void PCHStmtReader::VisitLabelStmt(LabelStmt *S) {
VisitStmt(S);
S->setID(Reader.GetIdentifierInfo(Record, Idx));
- S->setSubStmt(StmtStack.back());
+ S->setSubStmt(Reader.ReadSubStmt());
S->setIdentLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
Reader.RecordLabelStmt(S, Record[Idx++]);
- return 1;
}
-unsigned PCHStmtReader::VisitIfStmt(IfStmt *S) {
+void PCHStmtReader::VisitIfStmt(IfStmt *S) {
VisitStmt(S);
- S->setConditionVariable(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
- S->setCond(cast<Expr>(StmtStack[StmtStack.size() - 3]));
- S->setThen(StmtStack[StmtStack.size() - 2]);
- S->setElse(StmtStack[StmtStack.size() - 1]);
+ S->setConditionVariable(*Reader.getContext(),
+ cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
+ S->setCond(Reader.ReadSubExpr());
+ S->setThen(Reader.ReadSubStmt());
+ S->setElse(Reader.ReadSubStmt());
S->setIfLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setElseLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 3;
}
-unsigned PCHStmtReader::VisitSwitchStmt(SwitchStmt *S) {
+void PCHStmtReader::VisitSwitchStmt(SwitchStmt *S) {
VisitStmt(S);
- S->setConditionVariable(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
- S->setCond(cast<Expr>(StmtStack[StmtStack.size() - 2]));
- S->setBody(StmtStack.back());
+ S->setConditionVariable(*Reader.getContext(),
+ cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
+ S->setCond(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
S->setSwitchLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
SwitchCase *PrevSC = 0;
for (unsigned N = Record.size(); Idx != N; ++Idx) {
@@ -224,78 +245,71 @@ unsigned PCHStmtReader::VisitSwitchStmt(SwitchStmt *S) {
SC->Retain();
PrevSC = SC;
}
- return 2;
}
-unsigned PCHStmtReader::VisitWhileStmt(WhileStmt *S) {
+void PCHStmtReader::VisitWhileStmt(WhileStmt *S) {
VisitStmt(S);
- S->setConditionVariable(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
- S->setCond(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
- S->setBody(StmtStack.back());
+ S->setConditionVariable(*Reader.getContext(),
+ cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
+ S->setCond(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
S->setWhileLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 2;
}
-unsigned PCHStmtReader::VisitDoStmt(DoStmt *S) {
+void PCHStmtReader::VisitDoStmt(DoStmt *S) {
VisitStmt(S);
- S->setCond(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
- S->setBody(StmtStack.back());
+ S->setCond(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
S->setDoLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setWhileLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 2;
}
-unsigned PCHStmtReader::VisitForStmt(ForStmt *S) {
+void PCHStmtReader::VisitForStmt(ForStmt *S) {
VisitStmt(S);
- S->setInit(StmtStack[StmtStack.size() - 4]);
- S->setCond(cast_or_null<Expr>(StmtStack[StmtStack.size() - 3]));
- S->setConditionVariable(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
- S->setInc(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
- S->setBody(StmtStack.back());
+ S->setInit(Reader.ReadSubStmt());
+ S->setCond(Reader.ReadSubExpr());
+ S->setConditionVariable(*Reader.getContext(),
+ cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
+ S->setInc(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
S->setForLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 4;
}
-unsigned PCHStmtReader::VisitGotoStmt(GotoStmt *S) {
+void PCHStmtReader::VisitGotoStmt(GotoStmt *S) {
VisitStmt(S);
Reader.SetLabelOf(S, Record[Idx++]);
S->setGotoLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setLabelLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+void PCHStmtReader::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
VisitStmt(S);
S->setGotoLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setStarLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- S->setTarget(cast_or_null<Expr>(StmtStack.back()));
- return 1;
+ S->setTarget(Reader.ReadSubExpr());
}
-unsigned PCHStmtReader::VisitContinueStmt(ContinueStmt *S) {
+void PCHStmtReader::VisitContinueStmt(ContinueStmt *S) {
VisitStmt(S);
S->setContinueLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitBreakStmt(BreakStmt *S) {
+void PCHStmtReader::VisitBreakStmt(BreakStmt *S) {
VisitStmt(S);
S->setBreakLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitReturnStmt(ReturnStmt *S) {
+void PCHStmtReader::VisitReturnStmt(ReturnStmt *S) {
VisitStmt(S);
- S->setRetValue(cast_or_null<Expr>(StmtStack.back()));
+ S->setRetValue(Reader.ReadSubExpr());
S->setReturnLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setNRVOCandidate(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
- return 1;
}
-unsigned PCHStmtReader::VisitDeclStmt(DeclStmt *S) {
+void PCHStmtReader::VisitDeclStmt(DeclStmt *S) {
VisitStmt(S);
S->setStartLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setEndLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
@@ -312,10 +326,9 @@ unsigned PCHStmtReader::VisitDeclStmt(DeclStmt *S) {
Decls.data(),
Decls.size())));
}
- return 0;
}
-unsigned PCHStmtReader::VisitAsmStmt(AsmStmt *S) {
+void PCHStmtReader::VisitAsmStmt(AsmStmt *S) {
VisitStmt(S);
unsigned NumOutputs = Record[Idx++];
unsigned NumInputs = Record[Idx++];
@@ -326,9 +339,7 @@ unsigned PCHStmtReader::VisitAsmStmt(AsmStmt *S) {
S->setSimple(Record[Idx++]);
S->setMSAsm(Record[Idx++]);
- unsigned StackIdx
- = StmtStack.size() - (NumOutputs*2 + NumInputs*2 + NumClobbers + 1);
- S->setAsmString(cast_or_null<StringLiteral>(StmtStack[StackIdx++]));
+ S->setAsmString(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
// Outputs and inputs
llvm::SmallVector<IdentifierInfo *, 16> Names;
@@ -336,71 +347,76 @@ unsigned PCHStmtReader::VisitAsmStmt(AsmStmt *S) {
llvm::SmallVector<Stmt*, 16> Exprs;
for (unsigned I = 0, N = NumOutputs + NumInputs; I != N; ++I) {
Names.push_back(Reader.GetIdentifierInfo(Record, Idx));
- Constraints.push_back(cast_or_null<StringLiteral>(StmtStack[StackIdx++]));
- Exprs.push_back(StmtStack[StackIdx++]);
+ Constraints.push_back(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
+ Exprs.push_back(Reader.ReadSubStmt());
}
// Constraints
llvm::SmallVector<StringLiteral*, 16> Clobbers;
for (unsigned I = 0; I != NumClobbers; ++I)
- Clobbers.push_back(cast_or_null<StringLiteral>(StmtStack[StackIdx++]));
+ Clobbers.push_back(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
S->setOutputsAndInputsAndClobbers(*Reader.getContext(),
Names.data(), Constraints.data(),
Exprs.data(), NumOutputs, NumInputs,
Clobbers.data(), NumClobbers);
-
- assert(StackIdx == StmtStack.size() && "Error deserializing AsmStmt");
- return NumOutputs*2 + NumInputs*2 + NumClobbers + 1;
}
-unsigned PCHStmtReader::VisitExpr(Expr *E) {
+void PCHStmtReader::VisitExpr(Expr *E) {
VisitStmt(E);
E->setType(Reader.GetType(Record[Idx++]));
E->setTypeDependent(Record[Idx++]);
E->setValueDependent(Record[Idx++]);
assert(Idx == NumExprFields && "Incorrect expression field count");
- return 0;
}
-unsigned PCHStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
+void PCHStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
VisitExpr(E);
E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setIdentType((PredefinedExpr::IdentType)Record[Idx++]);
- return 0;
}
-unsigned PCHStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
+void PCHStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
VisitExpr(E);
+
+ bool HasQualifier = Record[Idx++];
+ unsigned NumTemplateArgs = Record[Idx++];
+
+ E->DecoratedD.setInt((HasQualifier? DeclRefExpr::HasQualifierFlag : 0) |
+ (NumTemplateArgs ? DeclRefExpr::HasExplicitTemplateArgumentListFlag : 0));
+
+ if (HasQualifier) {
+ E->getNameQualifier()->NNS = Reader.ReadNestedNameSpecifier(Record, Idx);
+ E->getNameQualifier()->Range = Reader.ReadSourceRange(Record, Idx);
+ }
+
+ if (NumTemplateArgs)
+ ReadExplicitTemplateArgumentList(*E->getExplicitTemplateArgumentList(),
+ NumTemplateArgs);
+
E->setDecl(cast<ValueDecl>(Reader.GetDecl(Record[Idx++])));
- E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
- // FIXME: read qualifier
- // FIXME: read explicit template arguments
- return 0;
+ E->setLocation(Reader.ReadSourceLocation(Record, Idx));
}
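
The DeclRefExpr read above leads with two fields that say which optional trailers exist, packs them into spare bits (DecoratedD.setInt), and only then deserializes the flagged trailers. A small sketch of that flag-gated layout read (the enum values and readDeclRef are hypothetical):

#include <cstdio>

enum {
  HasQualifierFlag    = 0x1,
  HasTemplateArgsFlag = 0x2
};

// Two leading record fields decide which optional trailers to read.
static void readDeclRef(bool HasQualifier, unsigned NumTemplateArgs) {
  unsigned Flags = (HasQualifier ? HasQualifierFlag : 0) |
                   (NumTemplateArgs ? HasTemplateArgsFlag : 0);

  if (Flags & HasQualifierFlag)
    std::printf("  reading qualifier trailer\n");
  if (Flags & HasTemplateArgsFlag)
    std::printf("  reading %u template-argument trailers\n", NumTemplateArgs);
  if (!Flags)
    std::printf("  no optional trailers\n");
}

int main() {
  readDeclRef(false, 0);
  readDeclRef(true, 2);
}
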
-unsigned PCHStmtReader::VisitIntegerLiteral(IntegerLiteral *E) {
+void PCHStmtReader::VisitIntegerLiteral(IntegerLiteral *E) {
VisitExpr(E);
E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setValue(Reader.ReadAPInt(Record, Idx));
- return 0;
}
-unsigned PCHStmtReader::VisitFloatingLiteral(FloatingLiteral *E) {
+void PCHStmtReader::VisitFloatingLiteral(FloatingLiteral *E) {
VisitExpr(E);
E->setValue(Reader.ReadAPFloat(Record, Idx));
E->setExact(Record[Idx++]);
E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitImaginaryLiteral(ImaginaryLiteral *E) {
+void PCHStmtReader::VisitImaginaryLiteral(ImaginaryLiteral *E) {
VisitExpr(E);
- E->setSubExpr(cast<Expr>(StmtStack.back()));
- return 1;
+ E->setSubExpr(Reader.ReadSubExpr());
}
-unsigned PCHStmtReader::VisitStringLiteral(StringLiteral *E) {
+void PCHStmtReader::VisitStringLiteral(StringLiteral *E) {
VisitExpr(E);
unsigned Len = Record[Idx++];
assert(Record[Idx] == E->getNumConcatenated() &&
@@ -416,35 +432,41 @@ unsigned PCHStmtReader::VisitStringLiteral(StringLiteral *E) {
// Read source locations
for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
E->setStrTokenLoc(I, SourceLocation::getFromRawEncoding(Record[Idx++]));
-
- return 0;
}
-unsigned PCHStmtReader::VisitCharacterLiteral(CharacterLiteral *E) {
+void PCHStmtReader::VisitCharacterLiteral(CharacterLiteral *E) {
VisitExpr(E);
E->setValue(Record[Idx++]);
E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setWide(Record[Idx++]);
- return 0;
}
-unsigned PCHStmtReader::VisitParenExpr(ParenExpr *E) {
+void PCHStmtReader::VisitParenExpr(ParenExpr *E) {
VisitExpr(E);
E->setLParen(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParen(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setSubExpr(cast<Expr>(StmtStack.back()));
- return 1;
+ E->setSubExpr(Reader.ReadSubExpr());
+}
+
+void PCHStmtReader::VisitParenListExpr(ParenListExpr *E) {
+ VisitExpr(E);
+ unsigned NumExprs = Record[Idx++];
+ E->Exprs = new (*Reader.getContext()) Stmt*[NumExprs];
+ for (unsigned i = 0; i != NumExprs; ++i)
+ E->Exprs[i] = Reader.ReadSubStmt();
+ E->NumExprs = NumExprs;
+ E->LParenLoc = Reader.ReadSourceLocation(Record, Idx);
+ E->RParenLoc = Reader.ReadSourceLocation(Record, Idx);
}
-unsigned PCHStmtReader::VisitUnaryOperator(UnaryOperator *E) {
+void PCHStmtReader::VisitUnaryOperator(UnaryOperator *E) {
VisitExpr(E);
- E->setSubExpr(cast<Expr>(StmtStack.back()));
+ E->setSubExpr(Reader.ReadSubExpr());
E->setOpcode((UnaryOperator::Opcode)Record[Idx++]);
E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 1;
}
-unsigned PCHStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
+void PCHStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
typedef OffsetOfExpr::OffsetOfNode Node;
VisitExpr(E);
assert(E->getNumComponents() == Record[Idx]);
@@ -482,153 +504,141 @@ unsigned PCHStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
}
for (unsigned I = 0, N = E->getNumExpressions(); I != N; ++I)
- E->setIndexExpr(I, cast_or_null<Expr>(StmtStack[StmtStack.size() - N + I]));
-
- return E->getNumExpressions();
+ E->setIndexExpr(I, Reader.ReadSubExpr());
}
-unsigned PCHStmtReader::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
+void PCHStmtReader::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
VisitExpr(E);
E->setSizeof(Record[Idx++]);
if (Record[Idx] == 0) {
- E->setArgument(cast<Expr>(StmtStack.back()));
+ E->setArgument(Reader.ReadSubExpr());
++Idx;
} else {
E->setArgument(Reader.GetTypeSourceInfo(Record, Idx));
}
E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return E->isArgumentType()? 0 : 1;
}
-unsigned PCHStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+void PCHStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
VisitExpr(E);
- E->setLHS(cast<Expr>(StmtStack[StmtStack.size() - 2]));
- E->setRHS(cast<Expr>(StmtStack[StmtStack.size() - 1]));
+ E->setLHS(Reader.ReadSubExpr());
+ E->setRHS(Reader.ReadSubExpr());
E->setRBracketLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 2;
}
-unsigned PCHStmtReader::VisitCallExpr(CallExpr *E) {
+void PCHStmtReader::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
E->setNumArgs(*Reader.getContext(), Record[Idx++]);
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setCallee(cast<Expr>(StmtStack[StmtStack.size() - E->getNumArgs() - 1]));
+ E->setCallee(Reader.ReadSubExpr());
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
- E->setArg(I, cast<Expr>(StmtStack[StmtStack.size() - N + I]));
- return E->getNumArgs() + 1;
+ E->setArg(I, Reader.ReadSubExpr());
}
-unsigned PCHStmtReader::VisitMemberExpr(MemberExpr *E) {
- VisitExpr(E);
- E->setBase(cast<Expr>(StmtStack.back()));
- E->setMemberDecl(cast<ValueDecl>(Reader.GetDecl(Record[Idx++])));
- E->setMemberLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setArrow(Record[Idx++]);
- return 1;
+void PCHStmtReader::VisitMemberExpr(MemberExpr *E) {
+  // Don't call VisitExpr; this node is fully initialized at creation.
+ assert(E->getStmtClass() == Stmt::MemberExprClass &&
+ "It's a subclass, we must advance Idx!");
}
-unsigned PCHStmtReader::VisitObjCIsaExpr(ObjCIsaExpr *E) {
+void PCHStmtReader::VisitObjCIsaExpr(ObjCIsaExpr *E) {
VisitExpr(E);
- E->setBase(cast<Expr>(StmtStack.back()));
+ E->setBase(Reader.ReadSubExpr());
E->setIsaMemberLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setArrow(Record[Idx++]);
- return 1;
}
-unsigned PCHStmtReader::VisitCastExpr(CastExpr *E) {
+void PCHStmtReader::VisitCastExpr(CastExpr *E) {
VisitExpr(E);
- E->setSubExpr(cast<Expr>(StmtStack.back()));
+ E->setSubExpr(Reader.ReadSubExpr());
E->setCastKind((CastExpr::CastKind)Record[Idx++]);
- return 1;
+ CXXBaseSpecifierArray &BasePath = E->getBasePath();
+ unsigned NumBaseSpecs = Record[Idx++];
+ while (NumBaseSpecs--) {
+    // FIXME: These get leaked.
+ CXXBaseSpecifier *BaseSpec = new (*Reader.getContext()) CXXBaseSpecifier;
+ *BaseSpec = Reader.ReadCXXBaseSpecifier(Record, Idx);
+ BasePath.push_back(BaseSpec);
+ }
}
-unsigned PCHStmtReader::VisitBinaryOperator(BinaryOperator *E) {
+void PCHStmtReader::VisitBinaryOperator(BinaryOperator *E) {
VisitExpr(E);
- E->setLHS(cast<Expr>(StmtStack.end()[-2]));
- E->setRHS(cast<Expr>(StmtStack.end()[-1]));
+ E->setLHS(Reader.ReadSubExpr());
+ E->setRHS(Reader.ReadSubExpr());
E->setOpcode((BinaryOperator::Opcode)Record[Idx++]);
E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 2;
}
-unsigned PCHStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+void PCHStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
VisitBinaryOperator(E);
E->setComputationLHSType(Reader.GetType(Record[Idx++]));
E->setComputationResultType(Reader.GetType(Record[Idx++]));
- return 2;
}
-unsigned PCHStmtReader::VisitConditionalOperator(ConditionalOperator *E) {
+void PCHStmtReader::VisitConditionalOperator(ConditionalOperator *E) {
VisitExpr(E);
- E->setCond(cast<Expr>(StmtStack[StmtStack.size() - 3]));
- E->setLHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
- E->setRHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 1]));
+ E->setCond(Reader.ReadSubExpr());
+ E->setLHS(Reader.ReadSubExpr());
+ E->setRHS(Reader.ReadSubExpr());
E->setQuestionLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 3;
}
-unsigned PCHStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+void PCHStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) {
VisitCastExpr(E);
E->setLvalueCast(Record[Idx++]);
- return 1;
}
-unsigned PCHStmtReader::VisitExplicitCastExpr(ExplicitCastExpr *E) {
+void PCHStmtReader::VisitExplicitCastExpr(ExplicitCastExpr *E) {
VisitCastExpr(E);
E->setTypeInfoAsWritten(Reader.GetTypeSourceInfo(Record, Idx));
- return 1;
}
-unsigned PCHStmtReader::VisitCStyleCastExpr(CStyleCastExpr *E) {
+void PCHStmtReader::VisitCStyleCastExpr(CStyleCastExpr *E) {
VisitExplicitCastExpr(E);
E->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 1;
}
-unsigned PCHStmtReader::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+void PCHStmtReader::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
VisitExpr(E);
E->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setTypeSourceInfo(Reader.GetTypeSourceInfo(Record, Idx));
- E->setInitializer(cast<Expr>(StmtStack.back()));
+ E->setInitializer(Reader.ReadSubExpr());
E->setFileScope(Record[Idx++]);
- return 1;
}
-unsigned PCHStmtReader::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
+void PCHStmtReader::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
VisitExpr(E);
- E->setBase(cast<Expr>(StmtStack.back()));
+ E->setBase(Reader.ReadSubExpr());
E->setAccessor(Reader.GetIdentifierInfo(Record, Idx));
E->setAccessorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 1;
}
-unsigned PCHStmtReader::VisitInitListExpr(InitListExpr *E) {
+void PCHStmtReader::VisitInitListExpr(InitListExpr *E) {
VisitExpr(E);
unsigned NumInits = Record[Idx++];
E->reserveInits(*Reader.getContext(), NumInits);
for (unsigned I = 0; I != NumInits; ++I)
- E->updateInit(*Reader.getContext(), I,
- cast<Expr>(StmtStack[StmtStack.size() - NumInits - 1 + I]));
- E->setSyntacticForm(cast_or_null<InitListExpr>(StmtStack.back()));
+ E->updateInit(*Reader.getContext(), I, Reader.ReadSubExpr());
+ E->setSyntacticForm(cast_or_null<InitListExpr>(Reader.ReadSubStmt()));
E->setLBraceLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRBraceLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setInitializedFieldInUnion(
cast_or_null<FieldDecl>(Reader.GetDecl(Record[Idx++])));
E->sawArrayRangeDesignator(Record[Idx++]);
- return NumInits + 1;
}
-unsigned PCHStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
+void PCHStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
typedef DesignatedInitExpr::Designator Designator;
VisitExpr(E);
unsigned NumSubExprs = Record[Idx++];
assert(NumSubExprs == E->getNumSubExprs() && "Wrong number of subexprs");
for (unsigned I = 0; I != NumSubExprs; ++I)
- E->setSubExpr(I, cast<Expr>(StmtStack[StmtStack.size() - NumSubExprs + I]));
+ E->setSubExpr(I, Reader.ReadSubExpr());
E->setEqualOrColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setGNUSyntax(Record[Idx++]);
@@ -683,143 +693,128 @@ unsigned PCHStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
}
E->setDesignators(*Reader.getContext(),
Designators.data(), Designators.size());
-
- return NumSubExprs;
}
-unsigned PCHStmtReader::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+void PCHStmtReader::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
VisitExpr(E);
- return 0;
}
-unsigned PCHStmtReader::VisitVAArgExpr(VAArgExpr *E) {
+void PCHStmtReader::VisitVAArgExpr(VAArgExpr *E) {
VisitExpr(E);
- E->setSubExpr(cast<Expr>(StmtStack.back()));
+ E->setSubExpr(Reader.ReadSubExpr());
E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 1;
}
-unsigned PCHStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) {
+void PCHStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) {
VisitExpr(E);
E->setAmpAmpLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setLabelLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
Reader.SetLabelOf(E, Record[Idx++]);
- return 0;
}
-unsigned PCHStmtReader::VisitStmtExpr(StmtExpr *E) {
+void PCHStmtReader::VisitStmtExpr(StmtExpr *E) {
VisitExpr(E);
E->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setSubStmt(cast_or_null<CompoundStmt>(StmtStack.back()));
- return 1;
+ E->setSubStmt(cast_or_null<CompoundStmt>(Reader.ReadSubStmt()));
}
-unsigned PCHStmtReader::VisitTypesCompatibleExpr(TypesCompatibleExpr *E) {
+void PCHStmtReader::VisitTypesCompatibleExpr(TypesCompatibleExpr *E) {
VisitExpr(E);
E->setArgType1(Reader.GetType(Record[Idx++]));
E->setArgType2(Reader.GetType(Record[Idx++]));
E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitChooseExpr(ChooseExpr *E) {
+void PCHStmtReader::VisitChooseExpr(ChooseExpr *E) {
VisitExpr(E);
- E->setCond(cast<Expr>(StmtStack[StmtStack.size() - 3]));
- E->setLHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
- E->setRHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 1]));
+ E->setCond(Reader.ReadSubExpr());
+ E->setLHS(Reader.ReadSubExpr());
+ E->setRHS(Reader.ReadSubExpr());
E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 3;
}
-unsigned PCHStmtReader::VisitGNUNullExpr(GNUNullExpr *E) {
+void PCHStmtReader::VisitGNUNullExpr(GNUNullExpr *E) {
VisitExpr(E);
E->setTokenLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+void PCHStmtReader::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
VisitExpr(E);
+ llvm::SmallVector<Expr *, 16> Exprs;
unsigned NumExprs = Record[Idx++];
- E->setExprs(*Reader.getContext(),
- (Expr **)&StmtStack[StmtStack.size() - NumExprs], NumExprs);
+ while (NumExprs--)
+ Exprs.push_back(Reader.ReadSubExpr());
+ E->setExprs(*Reader.getContext(), Exprs.data(), Exprs.size());
E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return NumExprs;
}
-unsigned PCHStmtReader::VisitBlockExpr(BlockExpr *E) {
+void PCHStmtReader::VisitBlockExpr(BlockExpr *E) {
VisitExpr(E);
E->setBlockDecl(cast_or_null<BlockDecl>(Reader.GetDecl(Record[Idx++])));
E->setHasBlockDeclRefExprs(Record[Idx++]);
- return 0;
}
-unsigned PCHStmtReader::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
+void PCHStmtReader::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
VisitExpr(E);
E->setDecl(cast<ValueDecl>(Reader.GetDecl(Record[Idx++])));
E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setByRef(Record[Idx++]);
E->setConstQualAdded(Record[Idx++]);
- return 0;
+ E->setCopyConstructorExpr(Reader.ReadSubExpr());
}
//===----------------------------------------------------------------------===//
// Objective-C Expressions and Statements
-unsigned PCHStmtReader::VisitObjCStringLiteral(ObjCStringLiteral *E) {
+void PCHStmtReader::VisitObjCStringLiteral(ObjCStringLiteral *E) {
VisitExpr(E);
- E->setString(cast<StringLiteral>(StmtStack.back()));
+ E->setString(cast<StringLiteral>(Reader.ReadSubStmt()));
E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 1;
}
-unsigned PCHStmtReader::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+void PCHStmtReader::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
VisitExpr(E);
E->setEncodedTypeSourceInfo(Reader.GetTypeSourceInfo(Record, Idx));
E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+void PCHStmtReader::VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
VisitExpr(E);
E->setSelector(Reader.GetSelector(Record, Idx));
E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+void PCHStmtReader::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
VisitExpr(E);
E->setProtocol(cast<ObjCProtocolDecl>(Reader.GetDecl(Record[Idx++])));
E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+void PCHStmtReader::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
VisitExpr(E);
E->setDecl(cast<ObjCIvarDecl>(Reader.GetDecl(Record[Idx++])));
E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setBase(cast<Expr>(StmtStack.back()));
+ E->setBase(Reader.ReadSubExpr());
E->setIsArrow(Record[Idx++]);
E->setIsFreeIvar(Record[Idx++]);
- return 1;
}
-unsigned PCHStmtReader::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+void PCHStmtReader::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
VisitExpr(E);
E->setProperty(cast<ObjCPropertyDecl>(Reader.GetDecl(Record[Idx++])));
E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setBase(cast<Expr>(StmtStack.back()));
- return 1;
+ E->setBase(Reader.ReadSubExpr());
}
-unsigned PCHStmtReader::VisitObjCImplicitSetterGetterRefExpr(
+void PCHStmtReader::VisitObjCImplicitSetterGetterRefExpr(
ObjCImplicitSetterGetterRefExpr *E) {
VisitExpr(E);
E->setGetterMethod(
@@ -828,13 +823,12 @@ unsigned PCHStmtReader::VisitObjCImplicitSetterGetterRefExpr(
cast_or_null<ObjCMethodDecl>(Reader.GetDecl(Record[Idx++])));
E->setInterfaceDecl(
cast_or_null<ObjCInterfaceDecl>(Reader.GetDecl(Record[Idx++])));
- E->setBase(cast_or_null<Expr>(StmtStack.back()));
+ E->setBase(Reader.ReadSubExpr());
E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setClassLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 1;
}
-unsigned PCHStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+void PCHStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
VisitExpr(E);
assert(Record[Idx] == E->getNumArgs());
++Idx;
@@ -842,8 +836,7 @@ unsigned PCHStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
= static_cast<ObjCMessageExpr::ReceiverKind>(Record[Idx++]);
switch (Kind) {
case ObjCMessageExpr::Instance:
- E->setInstanceReceiver(
- cast_or_null<Expr>(StmtStack[StmtStack.size() - E->getNumArgs() - 1]));
+ E->setInstanceReceiver(Reader.ReadSubExpr());
break;
case ObjCMessageExpr::Class:
@@ -870,193 +863,185 @@ unsigned PCHStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
E->setRightLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
- E->setArg(I, cast<Expr>(StmtStack[StmtStack.size() - N + I]));
- return E->getNumArgs() + (Kind == ObjCMessageExpr::Instance);
+ E->setArg(I, Reader.ReadSubExpr());
}
-unsigned PCHStmtReader::VisitObjCSuperExpr(ObjCSuperExpr *E) {
+void PCHStmtReader::VisitObjCSuperExpr(ObjCSuperExpr *E) {
VisitExpr(E);
E->setLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+void PCHStmtReader::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
VisitStmt(S);
- S->setElement(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 3]));
- S->setCollection(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
- S->setBody(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 1]));
+ S->setElement(Reader.ReadSubStmt());
+ S->setCollection(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
S->setForLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 3;
}
-unsigned PCHStmtReader::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+void PCHStmtReader::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
VisitStmt(S);
- S->setCatchBody(cast_or_null<Stmt>(StmtStack.back()));
+ S->setCatchBody(Reader.ReadSubStmt());
S->setCatchParamDecl(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
S->setAtCatchLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 1;
}
-unsigned PCHStmtReader::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+void PCHStmtReader::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
VisitStmt(S);
- S->setFinallyBody(StmtStack.back());
+ S->setFinallyBody(Reader.ReadSubStmt());
S->setAtFinallyLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 1;
}
-unsigned PCHStmtReader::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+void PCHStmtReader::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
VisitStmt(S);
assert(Record[Idx] == S->getNumCatchStmts());
++Idx;
bool HasFinally = Record[Idx++];
- for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
- unsigned Offset = StmtStack.size() - N - HasFinally + I;
- S->setCatchStmt(I, cast_or_null<ObjCAtCatchStmt>(StmtStack[Offset]));
- }
+ S->setTryBody(Reader.ReadSubStmt());
+ for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I)
+ S->setCatchStmt(I, cast_or_null<ObjCAtCatchStmt>(Reader.ReadSubStmt()));
- unsigned TryOffset
- = StmtStack.size() - S->getNumCatchStmts() - HasFinally - 1;
- S->setTryBody(cast_or_null<Stmt>(StmtStack[TryOffset]));
if (HasFinally)
- S->setFinallyStmt(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 1]));
+ S->setFinallyStmt(Reader.ReadSubStmt());
S->setAtTryLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 1 + S->getNumCatchStmts() + HasFinally;
}
-unsigned PCHStmtReader::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+void PCHStmtReader::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
VisitStmt(S);
- S->setSynchExpr(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 2]));
- S->setSynchBody(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 1]));
+ S->setSynchExpr(Reader.ReadSubStmt());
+ S->setSynchBody(Reader.ReadSubStmt());
S->setAtSynchronizedLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 2;
}
-unsigned PCHStmtReader::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+void PCHStmtReader::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
VisitStmt(S);
- S->setThrowExpr(StmtStack.back());
+ S->setThrowExpr(Reader.ReadSubStmt());
S->setThrowLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 1;
}
//===----------------------------------------------------------------------===//
// C++ Expressions and Statements
-unsigned PCHStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
- unsigned num = VisitCallExpr(E);
+void PCHStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ VisitCallExpr(E);
E->setOperator((OverloadedOperatorKind)Record[Idx++]);
- return num;
}
-unsigned PCHStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) {
+void PCHStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) {
VisitExpr(E);
+ E->NumArgs = Record[Idx++];
+ if (E->NumArgs)
+ E->Args = new (*Reader.getContext()) Stmt*[E->NumArgs];
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ E->setArg(I, Reader.ReadSubExpr());
E->setConstructor(cast<CXXConstructorDecl>(Reader.GetDecl(Record[Idx++])));
E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setElidable(Record[Idx++]);
E->setRequiresZeroInitialization(Record[Idx++]);
- for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
- E->setArg(I, cast<Expr>(StmtStack[StmtStack.size() - N + I]));
E->setConstructionKind((CXXConstructExpr::ConstructionKind)Record[Idx++]);
- return E->getNumArgs();
}
-unsigned PCHStmtReader::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
- unsigned num = VisitExplicitCastExpr(E);
+void PCHStmtReader::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
+ VisitCXXConstructExpr(E);
+ E->TyBeginLoc = Reader.ReadSourceLocation(Record, Idx);
+ E->RParenLoc = Reader.ReadSourceLocation(Record, Idx);
+}
+
+void PCHStmtReader::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
+ VisitExplicitCastExpr(E);
E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return num;
}
-unsigned PCHStmtReader::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) {
+void PCHStmtReader::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) {
return VisitCXXNamedCastExpr(E);
}
-unsigned PCHStmtReader::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E) {
+void PCHStmtReader::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E) {
return VisitCXXNamedCastExpr(E);
}
-unsigned PCHStmtReader::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E) {
+void PCHStmtReader::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E) {
return VisitCXXNamedCastExpr(E);
}
-unsigned PCHStmtReader::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
+void PCHStmtReader::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
return VisitCXXNamedCastExpr(E);
}
-unsigned PCHStmtReader::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
- unsigned num = VisitExplicitCastExpr(E);
+void PCHStmtReader::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
+ VisitExplicitCastExpr(E);
E->setTypeBeginLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return num;
}
-unsigned PCHStmtReader::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
+void PCHStmtReader::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
VisitExpr(E);
E->setValue(Record[Idx++]);
E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
+void PCHStmtReader::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
VisitExpr(E);
E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
+void PCHStmtReader::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
VisitExpr(E);
E->setSourceRange(Reader.ReadSourceRange(Record, Idx));
if (E->isTypeOperand()) { // typeid(int)
E->setTypeOperandSourceInfo(Reader.GetTypeSourceInfo(Record, Idx));
- return 0;
+ return;
}
// typeid(42+2)
- E->setExprOperand(cast<Expr>(StmtStack.back()));
- return 1;
+ E->setExprOperand(Reader.ReadSubExpr());
}
-unsigned PCHStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
+void PCHStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
VisitExpr(E);
E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setImplicit(Record[Idx++]);
- return 0;
}
-unsigned PCHStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) {
+void PCHStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) {
VisitExpr(E);
E->setThrowLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- E->setSubExpr(cast<Expr>(StmtStack.back()));
- return 1;
+ E->setSubExpr(Reader.ReadSubExpr());
}
-unsigned PCHStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+void PCHStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
VisitExpr(E);
- E->setUsedLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
- bool HasStoredExpr = Record[Idx++];
- if (!HasStoredExpr) return 0;
- E->setExpr(cast<Expr>(StmtStack.back()));
- return 1;
+
+  assert(Record[Idx] == E->Param.getInt() && "We messed up at creation?");
+  ++Idx; // HasOtherExprStored and SubExpr were handled during creation.
+ E->Param.setPointer(cast<ParmVarDecl>(Reader.GetDecl(Record[Idx++])));
+ E->Loc = Reader.ReadSourceLocation(Record, Idx);
}
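
`E->Param.getInt()` and `E->Param.setPointer(...)` above are the pointer/int-pair idiom: an aligned pointer's low bits are always zero, so a small integer rides in them. A self-contained toy stand-in for llvm::PointerIntPair, assuming the pointee is at least 2-byte aligned:

#include <cassert>
#include <cstdint>
#include <cstdio>

template <typename T>
class PtrBoolPair {
  uintptr_t Value;
public:
  PtrBoolPair() : Value(0) {}
  void setPointer(T *P) {
    assert((reinterpret_cast<uintptr_t>(P) & 1) == 0 && "unaligned pointer");
    Value = reinterpret_cast<uintptr_t>(P) | (Value & 1);
  }
  void setInt(bool B) { Value = (Value & ~uintptr_t(1)) | (B ? 1 : 0); }
  T *getPointer() const {
    return reinterpret_cast<T*>(Value & ~uintptr_t(1));
  }
  bool getInt() const { return (Value & 1) != 0; }
};

int main() {
  int Param = 7;
  PtrBoolPair<int> P;
  P.setInt(true);        // a "has stored expression"-style flag
  P.setPointer(&Param);  // the declaration pointer itself
  std::printf("int=%d *ptr=%d\n", (int)P.getInt(), *P.getPointer());
}
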
-unsigned PCHStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+void PCHStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
VisitExpr(E);
E->setTemporary(Reader.ReadCXXTemporary(Record, Idx));
- E->setSubExpr(cast<Expr>(StmtStack.back()));
- return 1;
+ E->setSubExpr(Reader.ReadSubExpr());
}
-unsigned PCHStmtReader::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+void PCHStmtReader::VisitCXXBindReferenceExpr(CXXBindReferenceExpr *E) {
+ VisitExpr(E);
+ E->SubExpr = Reader.ReadSubExpr();
+ E->ExtendsLifetime = Record[Idx++];
+ E->RequiresTemporaryCopy = Record[Idx++];
+}
+
+void PCHStmtReader::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
VisitExpr(E);
E->setTypeBeginLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
- return 0;
}
-unsigned PCHStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
+void PCHStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
VisitExpr(E);
E->setGlobalNew(Record[Idx++]);
- E->setParenTypeId(Record[Idx++]);
E->setHasInitializer(Record[Idx++]);
bool isArray = Record[Idx++];
unsigned NumPlacementArgs = Record[Idx++];
@@ -1066,6 +1051,10 @@ unsigned PCHStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
cast_or_null<FunctionDecl>(Reader.GetDecl(Record[Idx++])));
E->setConstructor(
cast_or_null<CXXConstructorDecl>(Reader.GetDecl(Record[Idx++])));
+ SourceRange TypeIdParens;
+ TypeIdParens.setBegin(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ TypeIdParens.setEnd(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->TypeIdParens = TypeIdParens;
E->setStartLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
E->setEndLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
@@ -1073,17 +1062,41 @@ unsigned PCHStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
NumCtorArgs);
// Install all the subexpressions.
- unsigned TotalSubExprs = E->raw_arg_end()-E->raw_arg_begin();
- unsigned SSIdx = StmtStack.size()-TotalSubExprs;
for (CXXNewExpr::raw_arg_iterator I = E->raw_arg_begin(),e = E->raw_arg_end();
I != e; ++I)
- *I = StmtStack[SSIdx++];
-
- return TotalSubExprs;
+ *I = Reader.ReadSubStmt();
+}
+
+void PCHStmtReader::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ VisitExpr(E);
+ E->setGlobalDelete(Record[Idx++]);
+ E->setArrayForm(Record[Idx++]);
+ E->setOperatorDelete(
+ cast_or_null<FunctionDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setArgument(Reader.ReadSubExpr());
+ E->setStartLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
}
+void PCHStmtReader::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
+  VisitExpr(E);
+ E->setBase(Reader.ReadSubExpr());
+ E->setArrow(Record[Idx++]);
+ E->setOperatorLoc(Reader.ReadSourceLocation(Record, Idx));
+ E->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx));
+ E->setQualifierRange(Reader.ReadSourceRange(Record, Idx));
+ E->setScopeTypeInfo(Reader.GetTypeSourceInfo(Record, Idx));
+ E->setColonColonLoc(Reader.ReadSourceLocation(Record, Idx));
+ E->setTildeLoc(Reader.ReadSourceLocation(Record, Idx));
+
+ IdentifierInfo *II = Reader.GetIdentifierInfo(Record, Idx);
+ if (II)
+ E->setDestroyedType(II, Reader.ReadSourceLocation(Record, Idx));
+ else
+ E->setDestroyedType(Reader.GetTypeSourceInfo(Record, Idx));
+}
+
-unsigned PCHStmtReader::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+void PCHStmtReader::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
VisitExpr(E);
unsigned NumTemps = Record[Idx++];
if (NumTemps) {
@@ -1091,23 +1104,152 @@ unsigned PCHStmtReader::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
for (unsigned i = 0; i != NumTemps; ++i)
E->setTemporary(i, Reader.ReadCXXTemporary(Record, Idx));
}
- E->setSubExpr(cast<Expr>(StmtStack.back()));
- return 1;
+ E->setSubExpr(Reader.ReadSubExpr());
+}
+
+void
+PCHStmtReader::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
+ VisitExpr(E);
+
+ unsigned NumTemplateArgs = Record[Idx++];
+ assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgs() &&
+ "Read wrong record during creation ?");
+ if (E->hasExplicitTemplateArgs())
+ ReadExplicitTemplateArgumentList(*E->getExplicitTemplateArgumentList(),
+ NumTemplateArgs);
+
+ E->setBase(Reader.ReadSubExpr());
+ E->setBaseType(Reader.GetType(Record[Idx++]));
+ E->setArrow(Record[Idx++]);
+ E->setOperatorLoc(Reader.ReadSourceLocation(Record, Idx));
+ E->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx));
+ E->setQualifierRange(Reader.ReadSourceRange(Record, Idx));
+ E->setFirstQualifierFoundInScope(
+ cast_or_null<NamedDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setMember(Reader.ReadDeclarationName(Record, Idx));
+ E->setMemberLoc(Reader.ReadSourceLocation(Record, Idx));
+}
+
+void
+PCHStmtReader::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
+ VisitExpr(E);
+
+ unsigned NumTemplateArgs = Record[Idx++];
+ assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgs() &&
+ "Read wrong record during creation ?");
+ if (E->hasExplicitTemplateArgs())
+ ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(),
+ NumTemplateArgs);
+
+ E->setDeclName(Reader.ReadDeclarationName(Record, Idx));
+ E->setLocation(Reader.ReadSourceLocation(Record, Idx));
+ E->setQualifierRange(Reader.ReadSourceRange(Record, Idx));
+ E->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx));
+}
+
+void
+PCHStmtReader::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
+ VisitExpr(E);
+  assert(Record[Idx] == E->arg_size() && "Read wrong record during creation?");
+ ++Idx; // NumArgs;
+ for (unsigned I = 0, N = E->arg_size(); I != N; ++I)
+ E->setArg(I, Reader.ReadSubExpr());
+ E->setTypeBeginLoc(Reader.ReadSourceLocation(Record, Idx));
+ E->setTypeAsWritten(Reader.GetType(Record[Idx++]));
+ E->setLParenLoc(Reader.ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(Reader.ReadSourceLocation(Record, Idx));
+}
+
+void PCHStmtReader::VisitOverloadExpr(OverloadExpr *E) {
+ VisitExpr(E);
+
+ unsigned NumTemplateArgs = Record[Idx++];
+ assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgs() &&
+ "Read wrong record during creation ?");
+ if (E->hasExplicitTemplateArgs())
+ ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(),
+ NumTemplateArgs);
+
+ unsigned NumDecls = Record[Idx++];
+ UnresolvedSet<8> Decls;
+ for (unsigned i = 0; i != NumDecls; ++i) {
+ NamedDecl *D = cast<NamedDecl>(Reader.GetDecl(Record[Idx++]));
+ AccessSpecifier AS = (AccessSpecifier)Record[Idx++];
+ Decls.addDecl(D, AS);
+ }
+ E->initializeResults(*Reader.getContext(), Decls.begin(), Decls.end());
+
+ E->setName(Reader.ReadDeclarationName(Record, Idx));
+ E->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx));
+ E->setQualifierRange(Reader.ReadSourceRange(Record, Idx));
+ E->setNameLoc(Reader.ReadSourceLocation(Record, Idx));
+}
+
+void PCHStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
+ VisitOverloadExpr(E);
+ E->setArrow(Record[Idx++]);
+ E->setHasUnresolvedUsing(Record[Idx++]);
+ E->setBase(Reader.ReadSubExpr());
+ E->setBaseType(Reader.GetType(Record[Idx++]));
+ E->setOperatorLoc(Reader.ReadSourceLocation(Record, Idx));
+}
+
+void PCHStmtReader::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
+ VisitOverloadExpr(E);
+ E->setRequiresADL(Record[Idx++]);
+ E->setOverloaded(Record[Idx++]);
+ E->setNamingClass(cast_or_null<CXXRecordDecl>(Reader.GetDecl(Record[Idx++])));
+}
+
+void PCHStmtReader::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
+ VisitExpr(E);
+ E->UTT = (UnaryTypeTrait)Record[Idx++];
+ SourceRange Range = Reader.ReadSourceRange(Record, Idx);
+ E->Loc = Range.getBegin();
+ E->RParen = Range.getEnd();
+ E->QueriedType = Reader.GetType(Record[Idx++]);
+}
+
+Stmt *PCHReader::ReadStmt() {
+ switch (ReadingKind) {
+ case Read_Decl:
+ case Read_Type:
+ // Read a statement from the current DeclCursor.
+ return ReadStmtFromStream(DeclsCursor);
+ case Read_Stmt:
+ return ReadSubStmt();
+ }
+
+  llvm_unreachable("ReadingKind not set?");
+ return 0;
}
+Expr *PCHReader::ReadExpr() {
+ return cast_or_null<Expr>(ReadStmt());
+}
+
+Expr *PCHReader::ReadSubExpr() {
+ return cast_or_null<Expr>(ReadSubStmt());
+}
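ReadStmtFromStream below saves and restores the reader's mode through an RAII helper whose definition is not part of this hunk; a minimal sketch of what it presumably looks like:

    // Minimal sketch (assumed, not shown in this patch) of the RAII tracker:
    // switch the reader into the new mode, restore the old one on scope exit.
    class ReadingKindTracker {
      PCHReader &Reader;
      unsigned PrevKind;               // one of Read_Decl/Read_Type/Read_Stmt
    public:
      ReadingKindTracker(unsigned NewKind, PCHReader &R)
        : Reader(R), PrevKind(R.ReadingKind) { R.ReadingKind = NewKind; }
      ~ReadingKindTracker() { Reader.ReadingKind = PrevKind; }
    };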
// Within the bitstream, expressions are stored in Reverse Polish
// Notation, with each of the subexpressions preceding the
-// expression they are stored in. To evaluate expressions, we
-// continue reading expressions and placing them on the stack, with
-// expressions having operands removing those operands from the
+// expression they are stored in. Subexpressions are stored from last to first.
+// To evaluate expressions, we continue reading expressions and placing them on
+// the stack, with expressions having operands removing those operands from the
// stack. Evaluation terminates when we see a STMT_STOP record, and
// the single remaining expression on the stack is our result.
-Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) {
+Stmt *PCHReader::ReadStmtFromStream(llvm::BitstreamCursor &Cursor) {
+
+ ReadingKindTracker ReadingKind(Read_Stmt, *this);
+
+#ifndef NDEBUG
+ unsigned PrevNumStmts = StmtStack.size();
+#endif
+
RecordData Record;
unsigned Idx;
- llvm::SmallVector<Stmt *, 16> StmtStack;
- PCHStmtReader Reader(*this, Record, Idx, StmtStack);
+ PCHStmtReader Reader(*this, Record, Idx);
Stmt::EmptyShell Empty;
while (true) {
@@ -1221,7 +1363,9 @@ Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) {
break;
case pch::EXPR_DECL_REF:
- S = new (Context) DeclRefExpr(Empty);
+ S = DeclRefExpr::CreateEmpty(*Context,
+ /*HasQualifier=*/Record[PCHStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[PCHStmtReader::NumExprFields + 1]);
break;
case pch::EXPR_INTEGER_LITERAL:
@@ -1249,6 +1393,10 @@ Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) {
S = new (Context) ParenExpr(Empty);
break;
+ case pch::EXPR_PAREN_LIST:
+ S = new (Context) ParenListExpr(Empty);
+ break;
+
case pch::EXPR_UNARY_OPERATOR:
S = new (Context) UnaryOperator(Empty);
break;
@@ -1271,9 +1419,43 @@ Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) {
S = new (Context) CallExpr(*Context, Stmt::CallExprClass, Empty);
break;
- case pch::EXPR_MEMBER:
- S = new (Context) MemberExpr(Empty);
+ case pch::EXPR_MEMBER: {
+ // We load everything here and fully initialize it at creation.
+ // That way we can use MemberExpr::Create and don't have to duplicate its
+ // logic with a MemberExpr::CreateEmpty.
+
+ assert(Idx == 0);
+ NestedNameSpecifier *NNS = 0;
+ SourceRange QualifierRange;
+ if (Record[Idx++]) { // HasQualifier.
+ NNS = ReadNestedNameSpecifier(Record, Idx);
+ QualifierRange = ReadSourceRange(Record, Idx);
+ }
+
+ TemplateArgumentListInfo ArgInfo;
+ unsigned NumTemplateArgs = Record[Idx++];
+ if (NumTemplateArgs) {
+ ArgInfo.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ ArgInfo.setRAngleLoc(ReadSourceLocation(Record, Idx));
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ ArgInfo.addArgument(ReadTemplateArgumentLoc(Record, Idx));
+ }
+
+ NamedDecl *FoundD = cast_or_null<NamedDecl>(GetDecl(Record[Idx++]));
+ AccessSpecifier AS = (AccessSpecifier)Record[Idx++];
+ DeclAccessPair FoundDecl = DeclAccessPair::make(FoundD, AS);
+
+ QualType T = GetType(Record[Idx++]);
+ Expr *Base = ReadSubExpr();
+ ValueDecl *MemberD = cast<ValueDecl>(GetDecl(Record[Idx++]));
+ SourceLocation MemberLoc = ReadSourceLocation(Record, Idx);
+ bool IsArrow = Record[Idx++];
+
+ S = MemberExpr::Create(*Context, Base, IsArrow, NNS, QualifierRange,
+ MemberD, FoundDecl, MemberLoc,
+ NumTemplateArgs ? &ArgInfo : 0, T);
break;
+ }
case pch::EXPR_BINARY_OPERATOR:
S = new (Context) BinaryOperator(Empty);
@@ -1414,8 +1596,11 @@ Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) {
break;
case pch::EXPR_CXX_CONSTRUCT:
- S = new (Context) CXXConstructExpr(Empty, *Context,
- Record[PCHStmtReader::NumExprFields + 2]);
+ S = new (Context) CXXConstructExpr(Empty);
+ break;
+
+ case pch::EXPR_CXX_TEMPORARY_OBJECT:
+ S = new (Context) CXXTemporaryObjectExpr(Empty);
break;
case pch::EXPR_CXX_STATIC_CAST:
@@ -1457,43 +1642,86 @@ Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) {
case pch::EXPR_CXX_THROW:
S = new (Context) CXXThrowExpr(Empty);
break;
- case pch::EXPR_CXX_DEFAULT_ARG:
- S = new (Context) CXXDefaultArgExpr(Empty);
+ case pch::EXPR_CXX_DEFAULT_ARG: {
+ bool HasOtherExprStored = Record[PCHStmtReader::NumExprFields];
+ if (HasOtherExprStored) {
+ Expr *SubExpr = ReadSubExpr();
+ S = CXXDefaultArgExpr::Create(*Context, SourceLocation(), 0, SubExpr);
+ } else
+ S = new (Context) CXXDefaultArgExpr(Empty);
break;
+ }
case pch::EXPR_CXX_BIND_TEMPORARY:
S = new (Context) CXXBindTemporaryExpr(Empty);
break;
+ case pch::EXPR_CXX_BIND_REFERENCE:
+ S = new (Context) CXXBindReferenceExpr(Empty);
+ break;
- case pch::EXPR_CXX_ZERO_INIT_VALUE:
- S = new (Context) CXXZeroInitValueExpr(Empty);
+ case pch::EXPR_CXX_SCALAR_VALUE_INIT:
+ S = new (Context) CXXScalarValueInitExpr(Empty);
break;
case pch::EXPR_CXX_NEW:
S = new (Context) CXXNewExpr(Empty);
break;
-
+ case pch::EXPR_CXX_DELETE:
+ S = new (Context) CXXDeleteExpr(Empty);
+ break;
+ case pch::EXPR_CXX_PSEUDO_DESTRUCTOR:
+ S = new (Context) CXXPseudoDestructorExpr(Empty);
+ break;
case pch::EXPR_CXX_EXPR_WITH_TEMPORARIES:
S = new (Context) CXXExprWithTemporaries(Empty);
break;
+
+ case pch::EXPR_CXX_DEPENDENT_SCOPE_MEMBER:
+ S = CXXDependentScopeMemberExpr::CreateEmpty(*Context,
+ /*NumTemplateArgs=*/Record[PCHStmtReader::NumExprFields]);
+ break;
+
+ case pch::EXPR_CXX_DEPENDENT_SCOPE_DECL_REF:
+ S = DependentScopeDeclRefExpr::CreateEmpty(*Context,
+ /*NumTemplateArgs=*/Record[PCHStmtReader::NumExprFields]);
+ break;
+
+ case pch::EXPR_CXX_UNRESOLVED_CONSTRUCT:
+ S = CXXUnresolvedConstructExpr::CreateEmpty(*Context,
+ /*NumArgs=*/Record[PCHStmtReader::NumExprFields]);
+ break;
+
+ case pch::EXPR_CXX_UNRESOLVED_MEMBER:
+ S = UnresolvedMemberExpr::CreateEmpty(*Context,
+ /*NumTemplateArgs=*/Record[PCHStmtReader::NumExprFields]);
+ break;
+
+ case pch::EXPR_CXX_UNRESOLVED_LOOKUP:
+ S = UnresolvedLookupExpr::CreateEmpty(*Context,
+ /*NumTemplateArgs=*/Record[PCHStmtReader::NumExprFields]);
+ break;
+
+ case pch::EXPR_CXX_UNARY_TYPE_TRAIT:
+ S = new (Context) UnaryTypeTraitExpr(Empty);
+ break;
}
-
+
// We hit a STMT_STOP, so we're done with this expression.
if (Finished)
break;
++NumStatementsRead;
- if (S) {
- unsigned NumSubStmts = Reader.Visit(S);
- while (NumSubStmts > 0) {
- StmtStack.pop_back();
- --NumSubStmts;
- }
- }
+ if (S)
+ Reader.Visit(S);
assert(Idx == Record.size() && "Invalid deserialization of statement");
StmtStack.push_back(S);
}
- assert(StmtStack.size() == 1 && "Extra expressions on stack!");
- return StmtStack.back();
+
+#ifndef NDEBUG
+ assert(StmtStack.size() > PrevNumStmts && "Read too many sub stmts!");
+ assert(StmtStack.size() == PrevNumStmts + 1 && "Extra expressions on stack!");
+#endif
+
+ return StmtStack.pop_back_val();
}
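A hypothetical trace makes the stack discipline concrete: for the expression 1 + 2 the subexpressions are stored from last to first, so the stream and StmtStack evolve as follows (record names illustrative):

    // Hypothetical record stream for "1 + 2":
    //   EXPR_INTEGER_LITERAL 2   -> visited, pushed: StmtStack = [2]
    //   EXPR_INTEGER_LITERAL 1   -> visited, pushed: StmtStack = [2, 1]
    //   EXPR_BINARY_OPERATOR +   -> Visit() calls ReadSubExpr() twice; the
    //                               first pop yields the LHS (1), the second
    //                               the RHS (2); result pushed: [1+2]
    //   STMT_STOP                -> loop ends; pop_back_val() returns 1+2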
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHWriter.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHWriter.cpp
index 3d5b7d8..093c1e3 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PCHWriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PCHWriter.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLocVisitor.h"
+#include "clang/Frontend/PCHReader.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/Preprocessor.h"
@@ -61,9 +62,7 @@ namespace {
#define TYPE(Class, Base) void Visit##Class##Type(const Class##Type *T);
#define ABSTRACT_TYPE(Class, Base)
-#define DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
- void VisitInjectedClassNameType(const InjectedClassNameType *T);
};
}
@@ -130,8 +129,7 @@ void PCHTypeWriter::VisitVariableArrayType(const VariableArrayType *T) {
void PCHTypeWriter::VisitVectorType(const VectorType *T) {
Writer.AddTypeRef(T->getElementType(), Record);
Record.push_back(T->getNumElements());
- Record.push_back(T->isAltiVec());
- Record.push_back(T->isPixel());
+ Record.push_back(T->getAltiVecSpecific());
Code = pch::TYPE_VECTOR;
}
@@ -169,16 +167,15 @@ void PCHTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
Code = pch::TYPE_FUNCTION_PROTO;
}
-#if 0
-// For when we want it....
void PCHTypeWriter::VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
Writer.AddDeclRef(T->getDecl(), Record);
Code = pch::TYPE_UNRESOLVED_USING;
}
-#endif
void PCHTypeWriter::VisitTypedefType(const TypedefType *T) {
Writer.AddDeclRef(T->getDecl(), Record);
+  assert(!T->isCanonicalUnqualified() && "Invalid typedef?");
+ Writer.AddTypeRef(T->getCanonicalTypeInternal(), Record);
Code = pch::TYPE_TYPEDEF;
}
@@ -198,6 +195,7 @@ void PCHTypeWriter::VisitDecltypeType(const DecltypeType *T) {
}
void PCHTypeWriter::VisitTagType(const TagType *T) {
+ Record.push_back(T->isDependentType());
Writer.AddDeclRef(T->getDecl(), Record);
assert(!T->isBeingDefined() &&
"Cannot serialize in the middle of a type definition");
@@ -224,15 +222,70 @@ PCHTypeWriter::VisitSubstTemplateTypeParmType(
void
PCHTypeWriter::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
+ Record.push_back(T->isDependentType());
+ Writer.AddTemplateName(T->getTemplateName(), Record);
+ Record.push_back(T->getNumArgs());
+ for (TemplateSpecializationType::iterator ArgI = T->begin(), ArgE = T->end();
+ ArgI != ArgE; ++ArgI)
+ Writer.AddTemplateArgument(*ArgI, Record);
+ Writer.AddTypeRef(T->isCanonicalUnqualified() ? QualType()
+ : T->getCanonicalTypeInternal(),
+ Record);
+ Code = pch::TYPE_TEMPLATE_SPECIALIZATION;
+}
+
+void
+PCHTypeWriter::VisitDependentSizedArrayType(const DependentSizedArrayType *T) {
+ VisitArrayType(T);
+ Writer.AddStmt(T->getSizeExpr());
+ Writer.AddSourceRange(T->getBracketsRange(), Record);
+ Code = pch::TYPE_DEPENDENT_SIZED_ARRAY;
+}
+
+void
+PCHTypeWriter::VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *T) {
// FIXME: Serialize this type (C++ only)
- assert(false && "Cannot serialize template specialization types");
+ assert(false && "Cannot serialize dependent sized extended vector types");
+}
+
+void
+PCHTypeWriter::VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
+ Record.push_back(T->getDepth());
+ Record.push_back(T->getIndex());
+ Record.push_back(T->isParameterPack());
+ Writer.AddIdentifierRef(T->getName(), Record);
+ Code = pch::TYPE_TEMPLATE_TYPE_PARM;
+}
+
+void
+PCHTypeWriter::VisitDependentNameType(const DependentNameType *T) {
+ Record.push_back(T->getKeyword());
+ Writer.AddNestedNameSpecifier(T->getQualifier(), Record);
+ Writer.AddIdentifierRef(T->getIdentifier(), Record);
+ Writer.AddTypeRef(T->isCanonicalUnqualified() ? QualType()
+ : T->getCanonicalTypeInternal(),
+ Record);
+ Code = pch::TYPE_DEPENDENT_NAME;
+}
+
+void
+PCHTypeWriter::VisitDependentTemplateSpecializationType(
+ const DependentTemplateSpecializationType *T) {
+ Record.push_back(T->getKeyword());
+ Writer.AddNestedNameSpecifier(T->getQualifier(), Record);
+ Writer.AddIdentifierRef(T->getIdentifier(), Record);
+ Record.push_back(T->getNumArgs());
+ for (DependentTemplateSpecializationType::iterator
+ I = T->begin(), E = T->end(); I != E; ++I)
+ Writer.AddTemplateArgument(*I, Record);
+ Code = pch::TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION;
}
void PCHTypeWriter::VisitElaboratedType(const ElaboratedType *T) {
- Writer.AddTypeRef(T->getNamedType(), Record);
Record.push_back(T->getKeyword());
- // FIXME: Serialize the qualifier (C++ only)
- assert(T->getQualifier() == 0 && "Cannot serialize qualified name types");
+ Writer.AddNestedNameSpecifier(T->getQualifier(), Record);
+ Writer.AddTypeRef(T->getNamedType(), Record);
Code = pch::TYPE_ELABORATED;
}
@@ -394,7 +447,8 @@ void TypeLocWriter::VisitTemplateSpecializationTypeLoc(
Writer.AddSourceLocation(TL.getLAngleLoc(), Record);
Writer.AddSourceLocation(TL.getRAngleLoc(), Record);
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
- Writer.AddTemplateArgumentLoc(TL.getArgLoc(i), Record);
+ Writer.AddTemplateArgumentLocInfo(TL.getArgLoc(i).getArgument().getKind(),
+ TL.getArgLoc(i).getLocInfo(), Record);
}
void TypeLocWriter::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
Writer.AddSourceLocation(TL.getKeywordLoc(), Record);
@@ -408,6 +462,17 @@ void TypeLocWriter::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
Writer.AddSourceRange(TL.getQualifierRange(), Record);
Writer.AddSourceLocation(TL.getNameLoc(), Record);
}
+void TypeLocWriter::VisitDependentTemplateSpecializationTypeLoc(
+ DependentTemplateSpecializationTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getKeywordLoc(), Record);
+ Writer.AddSourceRange(TL.getQualifierRange(), Record);
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+ Writer.AddSourceLocation(TL.getLAngleLoc(), Record);
+ Writer.AddSourceLocation(TL.getRAngleLoc(), Record);
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
+ Writer.AddTemplateArgumentLocInfo(TL.getArgLoc(I).getArgument().getKind(),
+ TL.getArgLoc(I).getLocInfo(), Record);
+}
void TypeLocWriter::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
Writer.AddSourceLocation(TL.getNameLoc(), Record);
}
@@ -564,6 +629,7 @@ void PCHWriter::WriteBlockInfoBlock() {
RECORD(VERSION_CONTROL_BRANCH_REVISION);
RECORD(UNUSED_STATIC_FUNCS);
RECORD(MACRO_DEFINITION_OFFSETS);
+ RECORD(CHAINED_METADATA);
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
@@ -683,24 +749,27 @@ void PCHWriter::WriteMetadata(ASTContext &Context, const char *isysroot) {
// Metadata
const TargetInfo &Target = Context.Target;
BitCodeAbbrev *MetaAbbrev = new BitCodeAbbrev();
- MetaAbbrev->Add(BitCodeAbbrevOp(pch::METADATA));
+ MetaAbbrev->Add(BitCodeAbbrevOp(
+ Chain ? pch::CHAINED_METADATA : pch::METADATA));
MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // PCH major
MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // PCH minor
MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang major
MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang minor
MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable
- MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Target triple
+ // Target triple or chained PCH name
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
unsigned MetaAbbrevCode = Stream.EmitAbbrev(MetaAbbrev);
RecordData Record;
- Record.push_back(pch::METADATA);
+ Record.push_back(Chain ? pch::CHAINED_METADATA : pch::METADATA);
Record.push_back(pch::VERSION_MAJOR);
Record.push_back(pch::VERSION_MINOR);
Record.push_back(CLANG_VERSION_MAJOR);
Record.push_back(CLANG_VERSION_MINOR);
Record.push_back(isysroot != 0);
- const std::string &TripleStr = Target.getTriple().getTriple();
- Stream.EmitRecordWithBlob(MetaAbbrevCode, Record, TripleStr);
+ // FIXME: This writes the absolute path for chained headers.
+  const std::string &BlobStr =
+      Chain ? Chain->getFileName() : Target.getTriple().getTriple();
+ Stream.EmitRecordWithBlob(MetaAbbrevCode, Record, BlobStr);
// Original file name
SourceManager &SM = Context.getSourceManager();
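The metadata record above uses LLVM bitstream abbreviations: the operand layout declared on the BitCodeAbbrev must be mirrored positionally by the values pushed into Record, with the blob supplied separately. A condensed sketch of the pattern, restricted to the calls already visible here:

    BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
    Abbrev->Add(BitCodeAbbrevOp(pch::METADATA));               // record code
    Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16));  // 16-bit field
    Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));       // trailing blob
    unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);

    RecordData Record;
    Record.push_back(pch::METADATA);        // must mirror the layout above
    Record.push_back(pch::VERSION_MAJOR);   // fills the Fixed(16) slot
    Stream.EmitRecordWithBlob(AbbrevCode, Record, "blob contents");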
@@ -779,11 +848,8 @@ void PCHWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
Record.push_back(LangOpts.MathErrno); // Math functions must respect errno
// (modulo the platform support).
- Record.push_back(LangOpts.OverflowChecking); // Extension to call a handler function when
- // signed integer arithmetic overflows.
-
- Record.push_back(LangOpts.HeinousExtensions); // Extensions that we really don't like and
- // may be ripped out at any time.
+ Record.push_back(LangOpts.getSignedOverflowBehavior());
+ Record.push_back(LangOpts.HeinousExtensions);
Record.push_back(LangOpts.Optimize); // Whether __OPTIMIZE__ should be defined.
Record.push_back(LangOpts.OptimizeSize); // Whether __OPTIMIZE_SIZE__ should be
@@ -807,6 +873,7 @@ void PCHWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
Record.push_back(LangOpts.OpenCL);
Record.push_back(LangOpts.CatchUndefined);
Record.push_back(LangOpts.ElideConstructors);
+ Record.push_back(LangOpts.SpellChecking);
Stream.EmitRecord(pch::LANGUAGE_OPTIONS, Record);
}
@@ -866,8 +933,7 @@ public:
} // end anonymous namespace
/// \brief Write the stat() system call cache to the PCH file.
-void PCHWriter::WriteStatCache(MemorizeStatCalls &StatCalls,
- const char *isysroot) {
+void PCHWriter::WriteStatCache(MemorizeStatCalls &StatCalls) {
// Build the on-disk hash table containing information about every
// stat() call.
OnDiskChainedHashTableGenerator<PCHStatCacheTrait> Generator;
@@ -876,7 +942,6 @@ void PCHWriter::WriteStatCache(MemorizeStatCalls &StatCalls,
StatEnd = StatCalls.end();
Stat != StatEnd; ++Stat, ++NumStatEntries) {
const char *Filename = Stat->first();
- Filename = adjustFilenameForRelocatablePCH(Filename, isysroot);
Generator.insert(Filename, Stat->second);
}
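The generator accumulates key/data pairs and then serializes the finished chained hash table in a single pass; the Emit step is assumed from the generator's usual interface and is not shown in this hunk.

    // Hedged sketch: populate the table, then flush it to a byte buffer that
    // is emitted as one blob record (Emit() and its return are assumed here).
    OnDiskChainedHashTableGenerator<PCHStatCacheTrait> Generator;
    for (MemorizeStatCalls::iterator Stat = StatCalls.begin(),
         StatEnd = StatCalls.end(); Stat != StatEnd; ++Stat)
      Generator.insert(Stat->first(), Stat->second);

    llvm::SmallString<4096> TableBytes;
    llvm::raw_svector_ostream Out(TableBytes);
    uint32_t BucketOffset = Generator.Emit(Out);  // offset of the bucket array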
@@ -1347,16 +1412,7 @@ void PCHWriter::WriteType(QualType T) {
#define TYPE(Class, Base) \
case Type::Class: W.Visit##Class##Type(cast<Class##Type>(T)); break;
#define ABSTRACT_TYPE(Class, Base)
-#define DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
-
- // For all of the dependent type nodes (which only occur in C++
- // templates), produce an error.
-#define TYPE(Class, Base)
-#define DEPENDENT_TYPE(Class, Base) case Type::Class:
-#include "clang/AST/TypeNodes.def"
- assert(false && "Cannot serialize dependent type nodes");
- break;
}
}
@@ -1402,11 +1458,16 @@ uint64_t PCHWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
if (DC->getPrimaryContext() != DC)
return 0;
- // Since there is no name lookup into functions or methods, and we
- // perform name lookup for the translation unit via the
- // IdentifierInfo chains, don't bother to build a
- // visible-declarations table for these entities.
- if (DC->isFunctionOrMethod() || DC->isTranslationUnit())
+ // Since there is no name lookup into functions or methods, don't bother to
+ // build a visible-declarations table for these entities.
+ if (DC->isFunctionOrMethod())
+ return 0;
+
+ // If not in C++, we perform name lookup for the translation unit via the
+ // IdentifierInfo chains, don't bother to build a visible-declarations table.
+ // FIXME: In C++ we need the visible declarations in order to "see" the
+  // friend declarations; is there a way to do this without writing the table?
+ if (DC->isTranslationUnit() && !Context.getLangOptions().CPlusPlus)
return 0;
  // Force the DeclContext to build its name-lookup table.
@@ -1832,66 +1893,66 @@ void PCHWriter::WriteAttributeRecord(const Attr *Attr) {
default:
assert(0 && "Does not support PCH writing for this attribute yet!");
break;
- case Attr::Alias:
+ case attr::Alias:
AddString(cast<AliasAttr>(Attr)->getAliasee(), Record);
break;
- case Attr::AlignMac68k:
+ case attr::AlignMac68k:
break;
- case Attr::Aligned:
+ case attr::Aligned:
Record.push_back(cast<AlignedAttr>(Attr)->getAlignment());
break;
- case Attr::AlwaysInline:
+ case attr::AlwaysInline:
break;
- case Attr::AnalyzerNoReturn:
+ case attr::AnalyzerNoReturn:
break;
- case Attr::Annotate:
+ case attr::Annotate:
AddString(cast<AnnotateAttr>(Attr)->getAnnotation(), Record);
break;
- case Attr::AsmLabel:
+ case attr::AsmLabel:
AddString(cast<AsmLabelAttr>(Attr)->getLabel(), Record);
break;
- case Attr::BaseCheck:
+ case attr::BaseCheck:
break;
- case Attr::Blocks:
+ case attr::Blocks:
Record.push_back(cast<BlocksAttr>(Attr)->getType()); // FIXME: stable
break;
- case Attr::CDecl:
+ case attr::CDecl:
break;
- case Attr::Cleanup:
+ case attr::Cleanup:
AddDeclRef(cast<CleanupAttr>(Attr)->getFunctionDecl(), Record);
break;
- case Attr::Const:
+ case attr::Const:
break;
- case Attr::Constructor:
+ case attr::Constructor:
Record.push_back(cast<ConstructorAttr>(Attr)->getPriority());
break;
- case Attr::DLLExport:
- case Attr::DLLImport:
- case Attr::Deprecated:
+ case attr::DLLExport:
+ case attr::DLLImport:
+ case attr::Deprecated:
break;
- case Attr::Destructor:
+ case attr::Destructor:
Record.push_back(cast<DestructorAttr>(Attr)->getPriority());
break;
- case Attr::FastCall:
- case Attr::Final:
+ case attr::FastCall:
+ case attr::Final:
break;
- case Attr::Format: {
+ case attr::Format: {
const FormatAttr *Format = cast<FormatAttr>(Attr);
AddString(Format->getType(), Record);
Record.push_back(Format->getFormatIdx());
@@ -1899,93 +1960,93 @@ void PCHWriter::WriteAttributeRecord(const Attr *Attr) {
break;
}
- case Attr::FormatArg: {
+ case attr::FormatArg: {
const FormatArgAttr *Format = cast<FormatArgAttr>(Attr);
Record.push_back(Format->getFormatIdx());
break;
}
- case Attr::Sentinel : {
+ case attr::Sentinel : {
const SentinelAttr *Sentinel = cast<SentinelAttr>(Attr);
Record.push_back(Sentinel->getSentinel());
Record.push_back(Sentinel->getNullPos());
break;
}
- case Attr::GNUInline:
- case Attr::Hiding:
- case Attr::IBActionKind:
- case Attr::IBOutletKind:
- case Attr::Malloc:
- case Attr::NoDebug:
- case Attr::NoInline:
- case Attr::NoReturn:
- case Attr::NoThrow:
+ case attr::GNUInline:
+ case attr::Hiding:
+ case attr::IBAction:
+ case attr::IBOutlet:
+ case attr::Malloc:
+ case attr::NoDebug:
+ case attr::NoInline:
+ case attr::NoReturn:
+ case attr::NoThrow:
break;
- case Attr::IBOutletCollectionKind: {
+ case attr::IBOutletCollection: {
const IBOutletCollectionAttr *ICA = cast<IBOutletCollectionAttr>(Attr);
AddDeclRef(ICA->getClass(), Record);
break;
}
- case Attr::NonNull: {
+ case attr::NonNull: {
const NonNullAttr *NonNull = cast<NonNullAttr>(Attr);
Record.push_back(NonNull->size());
Record.insert(Record.end(), NonNull->begin(), NonNull->end());
break;
}
- case Attr::CFReturnsNotRetained:
- case Attr::CFReturnsRetained:
- case Attr::NSReturnsNotRetained:
- case Attr::NSReturnsRetained:
- case Attr::ObjCException:
- case Attr::ObjCNSObject:
- case Attr::Overloadable:
- case Attr::Override:
+ case attr::CFReturnsNotRetained:
+ case attr::CFReturnsRetained:
+ case attr::NSReturnsNotRetained:
+ case attr::NSReturnsRetained:
+ case attr::ObjCException:
+ case attr::ObjCNSObject:
+ case attr::Overloadable:
+ case attr::Override:
break;
- case Attr::MaxFieldAlignment:
+ case attr::MaxFieldAlignment:
Record.push_back(cast<MaxFieldAlignmentAttr>(Attr)->getAlignment());
break;
- case Attr::Packed:
+ case attr::Packed:
break;
- case Attr::Pure:
+ case attr::Pure:
break;
- case Attr::Regparm:
+ case attr::Regparm:
Record.push_back(cast<RegparmAttr>(Attr)->getNumParams());
break;
- case Attr::ReqdWorkGroupSize:
+ case attr::ReqdWorkGroupSize:
Record.push_back(cast<ReqdWorkGroupSizeAttr>(Attr)->getXDim());
Record.push_back(cast<ReqdWorkGroupSizeAttr>(Attr)->getYDim());
Record.push_back(cast<ReqdWorkGroupSizeAttr>(Attr)->getZDim());
break;
- case Attr::Section:
+ case attr::Section:
AddString(cast<SectionAttr>(Attr)->getName(), Record);
break;
- case Attr::StdCall:
- case Attr::TransparentUnion:
- case Attr::Unavailable:
- case Attr::Unused:
- case Attr::Used:
+ case attr::StdCall:
+ case attr::TransparentUnion:
+ case attr::Unavailable:
+ case attr::Unused:
+ case attr::Used:
break;
- case Attr::Visibility:
+ case attr::Visibility:
// FIXME: stable encoding
Record.push_back(cast<VisibilityAttr>(Attr)->getVisibility());
break;
- case Attr::WarnUnusedResult:
- case Attr::Weak:
- case Attr::WeakRef:
- case Attr::WeakImport:
+ case attr::WarnUnusedResult:
+ case attr::Weak:
+ case attr::WeakRef:
+ case attr::WeakImport:
break;
}
}
@@ -2012,18 +2073,16 @@ void PCHWriter::SetSelectorOffset(Selector Sel, uint32_t Offset) {
SelectorOffsets[ID - 1] = Offset;
}
-PCHWriter::PCHWriter(llvm::BitstreamWriter &Stream)
- : Stream(Stream), NextTypeID(pch::NUM_PREDEF_TYPE_IDS),
- NumStatements(0), NumMacros(0), NumLexicalDeclContexts(0),
- NumVisibleDeclContexts(0) { }
+PCHWriter::PCHWriter(llvm::BitstreamWriter &Stream, PCHReader *Chain)
+ : Stream(Stream), Chain(Chain), NextTypeID(pch::NUM_PREDEF_TYPE_IDS),
+ CollectedStmts(&StmtsToEmit), NumStatements(0), NumMacros(0),
+ NumLexicalDeclContexts(0), NumVisibleDeclContexts(0) {
+ if (Chain)
+ Chain->setDeserializationListener(this);
+}
void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls,
const char *isysroot) {
- using namespace llvm;
-
- ASTContext &Context = SemaRef.Context;
- Preprocessor &PP = SemaRef.PP;
-
// Emit the file header.
Stream.Emit((unsigned)'C', 8);
Stream.Emit((unsigned)'P', 8);
@@ -2032,6 +2091,19 @@ void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls,
WriteBlockInfoBlock();
+ if (Chain)
+ WritePCHChain(SemaRef, StatCalls, isysroot);
+ else
+ WritePCHCore(SemaRef, StatCalls, isysroot);
+}
+
+void PCHWriter::WritePCHCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+ const char *isysroot) {
+ using namespace llvm;
+
+ ASTContext &Context = SemaRef.Context;
+ Preprocessor &PP = SemaRef.PP;
+
// The translation unit is the first declaration we'll emit.
DeclIDs[Context.getTranslationUnitDecl()] = 1;
DeclTypesToEmit.push(Context.getTranslationUnitDecl());
@@ -2077,13 +2149,27 @@ void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls,
for (unsigned I = 0, N = SemaRef.ExtVectorDecls.size(); I != N; ++I)
AddDeclRef(SemaRef.ExtVectorDecls[I], ExtVectorDecls);
+ // Build a record containing all of the VTable uses information.
+ RecordData VTableUses;
+ VTableUses.push_back(SemaRef.VTableUses.size());
+ for (unsigned I = 0, N = SemaRef.VTableUses.size(); I != N; ++I) {
+ AddDeclRef(SemaRef.VTableUses[I].first, VTableUses);
+ AddSourceLocation(SemaRef.VTableUses[I].second, VTableUses);
+ VTableUses.push_back(SemaRef.VTablesUsed[SemaRef.VTableUses[I].first]);
+ }
+
+  // Build a record containing all of the dynamic class declarations.
+ RecordData DynamicClasses;
+ for (unsigned I = 0, N = SemaRef.DynamicClasses.size(); I != N; ++I)
+ AddDeclRef(SemaRef.DynamicClasses[I], DynamicClasses);
+
// Write the remaining PCH contents.
RecordData Record;
Stream.EnterSubblock(pch::PCH_BLOCK_ID, 5);
WriteMetadata(Context, isysroot);
WriteLanguageOptions(Context.getLangOptions());
if (StatCalls && !isysroot)
- WriteStatCache(*StatCalls, isysroot);
+ WriteStatCache(*StatCalls);
WriteSourceManagerBlock(Context.getSourceManager(), PP, isysroot);
// Write the record of special types.
Record.clear();
@@ -2104,6 +2190,7 @@ void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls,
AddTypeRef(Context.getRawBlockdescriptorExtendedType(), Record);
AddTypeRef(Context.ObjCSelRedefinitionType, Record);
AddTypeRef(Context.getRawNSConstantStringType(), Record);
+ Record.push_back(Context.isInt128Installed());
Stream.EmitRecord(pch::SPECIAL_TYPES, Record);
// Keep writing types and declarations until all types and
@@ -2171,6 +2258,14 @@ void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls,
if (!ExtVectorDecls.empty())
Stream.EmitRecord(pch::EXT_VECTOR_DECLS, ExtVectorDecls);
+ // Write the record containing VTable uses information.
+ if (!VTableUses.empty())
+ Stream.EmitRecord(pch::VTABLE_USES, VTableUses);
+
+  // Write the record containing the dynamic class declarations.
+ if (!DynamicClasses.empty())
+ Stream.EmitRecord(pch::DYNAMIC_CLASSES, DynamicClasses);
+
// Some simple statistics
Record.clear();
Record.push_back(NumStatements);
@@ -2181,6 +2276,64 @@ void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls,
Stream.ExitBlock();
}
+void PCHWriter::WritePCHChain(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+ const char *isysroot) {
+ using namespace llvm;
+
+ ASTContext &Context = SemaRef.Context;
+ Preprocessor &PP = SemaRef.PP;
+ (void)PP;
+
+ RecordData Record;
+ Stream.EnterSubblock(pch::PCH_BLOCK_ID, 5);
+ WriteMetadata(Context, isysroot);
+ // FIXME: StatCache
+ // FIXME: Source manager block
+
+ // The special types are in the chained PCH.
+
+ // We don't start with the translation unit, but with its decls that
+ // don't come from the other PCH.
+ const TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
+ // FIXME: We don't want to iterate over everything here, because it needlessly
+ // deserializes the entire original PCH. Instead we only want to iterate over
+ // the stuff that's already there.
+ // All in good time, though.
+ for (DeclContext::decl_iterator I = TU->decls_begin(), E = TU->decls_end();
+ I != E; ++I) {
+ if ((*I)->getPCHLevel() == 0) {
+ (*I)->dump();
+ DeclTypesToEmit.push(*I);
+ }
+ }
+
+ Stream.EnterSubblock(pch::DECLTYPES_BLOCK_ID, 3);
+ WriteDeclsBlockAbbrevs();
+ while (!DeclTypesToEmit.empty()) {
+ DeclOrType DOT = DeclTypesToEmit.front();
+ DeclTypesToEmit.pop();
+ if (DOT.isType())
+ WriteType(DOT.getType());
+ else
+ WriteDecl(Context, DOT.getDecl());
+ }
+ Stream.ExitBlock();
+
+ // FIXME: Preprocessor
+ // FIXME: Method pool
+ // FIXME: Identifier table
+ // FIXME: Type offsets
+ // FIXME: Declaration offsets
+ // FIXME: External unnamed definitions
+ // FIXME: Tentative definitions
+ // FIXME: Unused static functions
+ // FIXME: Locally-scoped external definitions
+ // FIXME: ext_vector type names
+ // FIXME: Dynamic classes declarations
+ // FIXME: Statistics
+ Stream.ExitBlock();
+}
+
void PCHWriter::AddSourceLocation(SourceLocation Loc, RecordData &Record) {
Record.push_back(Loc.getRawEncoding());
}
@@ -2249,20 +2402,19 @@ void PCHWriter::AddCXXTemporary(const CXXTemporary *Temp, RecordData &Record) {
AddDeclRef(Temp->getDestructor(), Record);
}
-void PCHWriter::AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg,
- RecordData &Record) {
- switch (Arg.getArgument().getKind()) {
+void PCHWriter::AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
+ const TemplateArgumentLocInfo &Arg,
+ RecordData &Record) {
+ switch (Kind) {
case TemplateArgument::Expression:
- AddStmt(Arg.getLocInfo().getAsExpr());
+ AddStmt(Arg.getAsExpr());
break;
case TemplateArgument::Type:
- AddTypeSourceInfo(Arg.getLocInfo().getAsTypeSourceInfo(), Record);
+ AddTypeSourceInfo(Arg.getAsTypeSourceInfo(), Record);
break;
case TemplateArgument::Template:
- Record.push_back(
- Arg.getTemplateQualifierRange().getBegin().getRawEncoding());
- Record.push_back(Arg.getTemplateQualifierRange().getEnd().getRawEncoding());
- Record.push_back(Arg.getTemplateNameLoc().getRawEncoding());
+ AddSourceRange(Arg.getTemplateQualifierRange(), Record);
+ AddSourceLocation(Arg.getTemplateNameLoc(), Record);
break;
case TemplateArgument::Null:
case TemplateArgument::Integral:
@@ -2272,6 +2424,21 @@ void PCHWriter::AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg,
}
}
+void PCHWriter::AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg,
+ RecordData &Record) {
+ AddTemplateArgument(Arg.getArgument(), Record);
+
+ if (Arg.getArgument().getKind() == TemplateArgument::Expression) {
+ bool InfoHasSameExpr
+ = Arg.getArgument().getAsExpr() == Arg.getLocInfo().getAsExpr();
+ Record.push_back(InfoHasSameExpr);
+ if (InfoHasSameExpr)
+ return; // Avoid storing the same expr twice.
+ }
+ AddTemplateArgumentLocInfo(Arg.getArgument().getKind(), Arg.getLocInfo(),
+ Record);
+}
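The InfoHasSameExpr flag keeps an expression from being serialized twice, which obliges the reader to branch the same way. A hypothetical reader-side mirror (helper names assumed, not part of this patch):

    // Hypothetical mirror of AddTemplateArgumentLoc: consume the record in
    // exactly the order it was written.
    TemplateArgument Arg = ReadTemplateArgument(Record, Idx); // assumed helper
    if (Arg.getKind() == TemplateArgument::Expression) {
      bool InfoHasSameExpr = Record[Idx++];
      if (InfoHasSameExpr)   // reuse the expression already read for Arg
        return TemplateArgumentLoc(Arg,
                                   TemplateArgumentLocInfo(Arg.getAsExpr()));
    }
    return TemplateArgumentLoc(Arg,
                               ReadTemplateArgumentLocInfo(Arg.getKind(),
                                                           Record, Idx));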
+
void PCHWriter::AddTypeSourceInfo(TypeSourceInfo *TInfo, RecordData &Record) {
if (TInfo == 0) {
AddTypeRef(QualType(), Record);
@@ -2459,3 +2626,123 @@ void PCHWriter::AddNestedNameSpecifier(NestedNameSpecifier *NNS,
}
}
}
+
+void PCHWriter::AddTemplateName(TemplateName Name, RecordData &Record) {
+ TemplateName::NameKind Kind = Name.getKind();
+ Record.push_back(Kind);
+ switch (Kind) {
+ case TemplateName::Template:
+ AddDeclRef(Name.getAsTemplateDecl(), Record);
+ break;
+
+ case TemplateName::OverloadedTemplate: {
+ OverloadedTemplateStorage *OvT = Name.getAsOverloadedTemplate();
+ Record.push_back(OvT->size());
+ for (OverloadedTemplateStorage::iterator I = OvT->begin(), E = OvT->end();
+ I != E; ++I)
+ AddDeclRef(*I, Record);
+ break;
+ }
+
+ case TemplateName::QualifiedTemplate: {
+ QualifiedTemplateName *QualT = Name.getAsQualifiedTemplateName();
+ AddNestedNameSpecifier(QualT->getQualifier(), Record);
+ Record.push_back(QualT->hasTemplateKeyword());
+ AddDeclRef(QualT->getTemplateDecl(), Record);
+ break;
+ }
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DepT = Name.getAsDependentTemplateName();
+ AddNestedNameSpecifier(DepT->getQualifier(), Record);
+ Record.push_back(DepT->isIdentifier());
+ if (DepT->isIdentifier())
+ AddIdentifierRef(DepT->getIdentifier(), Record);
+ else
+ Record.push_back(DepT->getOperator());
+ break;
+ }
+ }
+}
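Since the kind tag is written first, it fully determines the shape of the rest of the record; for instance, a dependent template name would serialize roughly as:

    // Illustrative record layout for a DependentTemplateName such as
    // "T::template apply":
    //   [ TemplateName::DependentTemplate,   // kind tag, written first
    //     <nested-name-specifier for T::>,   // AddNestedNameSpecifier
    //     1,                                 // DepT->isIdentifier()
    //     <identifier ref for "apply"> ]     // AddIdentifierRef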
+
+void PCHWriter::AddTemplateArgument(const TemplateArgument &Arg,
+ RecordData &Record) {
+ Record.push_back(Arg.getKind());
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ break;
+ case TemplateArgument::Type:
+ AddTypeRef(Arg.getAsType(), Record);
+ break;
+ case TemplateArgument::Declaration:
+ AddDeclRef(Arg.getAsDecl(), Record);
+ break;
+ case TemplateArgument::Integral:
+ AddAPSInt(*Arg.getAsIntegral(), Record);
+ AddTypeRef(Arg.getIntegralType(), Record);
+ break;
+ case TemplateArgument::Template:
+ AddTemplateName(Arg.getAsTemplate(), Record);
+ break;
+ case TemplateArgument::Expression:
+ AddStmt(Arg.getAsExpr());
+ break;
+ case TemplateArgument::Pack:
+ Record.push_back(Arg.pack_size());
+ for (TemplateArgument::pack_iterator I=Arg.pack_begin(), E=Arg.pack_end();
+ I != E; ++I)
+ AddTemplateArgument(*I, Record);
+ break;
+ }
+}
+
+void
+PCHWriter::AddTemplateParameterList(const TemplateParameterList *TemplateParams,
+ RecordData &Record) {
+ assert(TemplateParams && "No TemplateParams!");
+ AddSourceLocation(TemplateParams->getTemplateLoc(), Record);
+ AddSourceLocation(TemplateParams->getLAngleLoc(), Record);
+ AddSourceLocation(TemplateParams->getRAngleLoc(), Record);
+ Record.push_back(TemplateParams->size());
+ for (TemplateParameterList::const_iterator
+ P = TemplateParams->begin(), PEnd = TemplateParams->end();
+ P != PEnd; ++P)
+ AddDeclRef(*P, Record);
+}
+
+/// \brief Emit a template argument list.
+void
+PCHWriter::AddTemplateArgumentList(const TemplateArgumentList *TemplateArgs,
+ RecordData &Record) {
+ assert(TemplateArgs && "No TemplateArgs!");
+ Record.push_back(TemplateArgs->flat_size());
+ for (int i=0, e = TemplateArgs->flat_size(); i != e; ++i)
+ AddTemplateArgument(TemplateArgs->get(i), Record);
+}
+
+
+void
+PCHWriter::AddUnresolvedSet(const UnresolvedSetImpl &Set, RecordData &Record) {
+ Record.push_back(Set.size());
+ for (UnresolvedSetImpl::const_iterator
+ I = Set.begin(), E = Set.end(); I != E; ++I) {
+ AddDeclRef(I.getDecl(), Record);
+ Record.push_back(I.getAccess());
+ }
+}
+
+void PCHWriter::AddCXXBaseSpecifier(const CXXBaseSpecifier &Base,
+ RecordData &Record) {
+ Record.push_back(Base.isVirtual());
+ Record.push_back(Base.isBaseOfClass());
+ Record.push_back(Base.getAccessSpecifierAsWritten());
+ AddTypeRef(Base.getType(), Record);
+ AddSourceRange(Base.getSourceRange(), Record);
+}
+
+void PCHWriter::TypeRead(pch::TypeID ID, QualType T) {
+}
+
+void PCHWriter::DeclRead(pch::DeclID ID, const Decl *D) {
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHWriterDecl.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHWriterDecl.cpp
index cc58e8e..bc4452e 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PCHWriterDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PCHWriterDecl.cpp
@@ -25,7 +25,7 @@ using namespace clang;
// Declaration serialization
//===----------------------------------------------------------------------===//
-namespace {
+namespace clang {
class PCHDeclWriter : public DeclVisitor<PCHDeclWriter, void> {
PCHWriter &Writer;
@@ -40,6 +40,8 @@ namespace {
PCHWriter::RecordData &Record)
: Writer(Writer), Context(Context), Record(Record) {
}
+
+ void Visit(Decl *D);
void VisitDecl(Decl *D);
void VisitTranslationUnitDecl(TranslationUnitDecl *D);
@@ -49,7 +51,7 @@ namespace {
void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
void VisitTypeDecl(TypeDecl *D);
void VisitTypedefDecl(TypedefDecl *D);
- void VisitUnresolvedUsingTypename(UnresolvedUsingTypenameDecl *D);
+ void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
void VisitTagDecl(TagDecl *D);
void VisitEnumDecl(EnumDecl *D);
void VisitRecordDecl(RecordDecl *D);
@@ -61,7 +63,7 @@ namespace {
void VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
void VisitValueDecl(ValueDecl *D);
void VisitEnumConstantDecl(EnumConstantDecl *D);
- void VisitUnresolvedUsingValue(UnresolvedUsingValueDecl *D);
+ void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
void VisitDeclaratorDecl(DeclaratorDecl *D);
void VisitFunctionDecl(FunctionDecl *D);
void VisitCXXMethodDecl(CXXMethodDecl *D);
@@ -75,12 +77,14 @@ namespace {
void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
void VisitTemplateDecl(TemplateDecl *D);
void VisitClassTemplateDecl(ClassTemplateDecl *D);
- void visitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
- void VisitUsing(UsingDecl *D);
- void VisitUsingShadow(UsingShadowDecl *D);
+ void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingShadowDecl(UsingShadowDecl *D);
void VisitLinkageSpecDecl(LinkageSpecDecl *D);
void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
+ void VisitAccessSpecDecl(AccessSpecDecl *D);
+ void VisitFriendDecl(FriendDecl *D);
void VisitFriendTemplateDecl(FriendTemplateDecl *D);
void VisitStaticAssertDecl(StaticAssertDecl *D);
void VisitBlockDecl(BlockDecl *D);
@@ -89,7 +93,7 @@ namespace {
uint64_t VisibleOffset);
- // FIXME: Put in the same order is DeclNodes.def?
+  // FIXME: Put in the same order as DeclNodes.td?
void VisitObjCMethodDecl(ObjCMethodDecl *D);
void VisitObjCContainerDecl(ObjCContainerDecl *D);
void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
@@ -108,6 +112,19 @@ namespace {
};
}
+void PCHDeclWriter::Visit(Decl *D) {
+ DeclVisitor<PCHDeclWriter>::Visit(D);
+
+ // Handle FunctionDecl's body here and write it after all other Stmts/Exprs
+ // have been written. We want it last because we will not read it back when
+  // retrieving it from the PCH; we'll just lazily set the offset.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ Record.push_back(FD->isThisDeclarationADefinition());
+ if (FD->isThisDeclarationADefinition())
+ Writer.AddStmt(FD->getBody());
+ }
+}
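Writing the body last keeps it out of the fixed-order part of the record, so the reader can note its offset instead of deserializing it eagerly. A hypothetical sketch of that reader step (the cursor accessor is assumed):

    // Hypothetical mirror in the decl reader: record where the body starts so
    // FunctionDecl::getBody() can deserialize it lazily on first use.
    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      if (Record[Idx++])   // isThisDeclarationADefinition
        FD->setLazyBody(Reader.getDeclsCursor().GetCurrentBitNo());
    }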
+
void PCHDeclWriter::VisitDecl(Decl *D) {
Writer.AddDeclRef(cast_or_null<Decl>(D->getDeclContext()), Record);
Writer.AddDeclRef(cast_or_null<Decl>(D->getLexicalDeclContext()), Record);
@@ -115,7 +132,7 @@ void PCHDeclWriter::VisitDecl(Decl *D) {
Record.push_back(D->isInvalidDecl());
Record.push_back(D->hasAttrs());
Record.push_back(D->isImplicit());
- Record.push_back(D->isUsed());
+ Record.push_back(D->isUsed(false));
Record.push_back(D->getAccess());
Record.push_back(D->getPCHLevel());
}
@@ -144,6 +161,7 @@ void PCHDeclWriter::VisitTypedefDecl(TypedefDecl *D) {
void PCHDeclWriter::VisitTagDecl(TagDecl *D) {
VisitTypeDecl(D);
+ Record.push_back(D->getIdentifierNamespace());
Writer.AddDeclRef(D->getPreviousDeclaration(), Record);
Record.push_back((unsigned)D->getTagKind()); // FIXME: stable encoding
Record.push_back(D->isDefinition());
@@ -160,7 +178,7 @@ void PCHDeclWriter::VisitEnumDecl(EnumDecl *D) {
Writer.AddTypeRef(D->getPromotionType(), Record);
Record.push_back(D->getNumPositiveBits());
Record.push_back(D->getNumNegativeBits());
- // FIXME: C++ InstantiatedFrom
+ Writer.AddDeclRef(D->getInstantiatedFromMemberEnum(), Record);
Code = pch::DECL_ENUM;
}
@@ -195,9 +213,70 @@ void PCHDeclWriter::VisitDeclaratorDecl(DeclaratorDecl *D) {
void PCHDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
VisitDeclaratorDecl(D);
- Record.push_back(D->isThisDeclarationADefinition());
- if (D->isThisDeclarationADefinition())
- Writer.AddStmt(D->getBody());
+ Record.push_back(D->getIdentifierNamespace());
+ Record.push_back(D->getTemplatedKind());
+ switch (D->getTemplatedKind()) {
+ default: assert(false && "Unhandled TemplatedKind!");
+ break;
+ case FunctionDecl::TK_NonTemplate:
+ break;
+ case FunctionDecl::TK_FunctionTemplate:
+ Writer.AddDeclRef(D->getDescribedFunctionTemplate(), Record);
+ break;
+ case FunctionDecl::TK_MemberSpecialization: {
+ MemberSpecializationInfo *MemberInfo = D->getMemberSpecializationInfo();
+ Writer.AddDeclRef(MemberInfo->getInstantiatedFrom(), Record);
+ Record.push_back(MemberInfo->getTemplateSpecializationKind());
+ Writer.AddSourceLocation(MemberInfo->getPointOfInstantiation(), Record);
+ break;
+ }
+ case FunctionDecl::TK_FunctionTemplateSpecialization: {
+ FunctionTemplateSpecializationInfo *
+ FTSInfo = D->getTemplateSpecializationInfo();
+ // We want it canonical to guarantee that it has a Common*.
+ Writer.AddDeclRef(FTSInfo->getTemplate()->getCanonicalDecl(), Record);
+ Record.push_back(FTSInfo->getTemplateSpecializationKind());
+
+ // Template arguments.
+ Writer.AddTemplateArgumentList(FTSInfo->TemplateArguments, Record);
+
+ // Template args as written.
+ Record.push_back(FTSInfo->TemplateArgumentsAsWritten != 0);
+ if (FTSInfo->TemplateArgumentsAsWritten) {
+ Record.push_back(FTSInfo->TemplateArgumentsAsWritten->size());
+ for (int i=0, e = FTSInfo->TemplateArgumentsAsWritten->size(); i!=e; ++i)
+ Writer.AddTemplateArgumentLoc((*FTSInfo->TemplateArgumentsAsWritten)[i],
+ Record);
+ Writer.AddSourceLocation(FTSInfo->TemplateArgumentsAsWritten->getLAngleLoc(),
+ Record);
+ Writer.AddSourceLocation(FTSInfo->TemplateArgumentsAsWritten->getRAngleLoc(),
+ Record);
+ }
+
+ Writer.AddSourceLocation(FTSInfo->getPointOfInstantiation(), Record);
+ break;
+ }
+ case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
+ DependentFunctionTemplateSpecializationInfo *
+ DFTSInfo = D->getDependentSpecializationInfo();
+
+ // Templates.
+ Record.push_back(DFTSInfo->getNumTemplates());
+ for (int i=0, e = DFTSInfo->getNumTemplates(); i != e; ++i)
+ Writer.AddDeclRef(DFTSInfo->getTemplate(i), Record);
+
+    // Template args.
+ Record.push_back(DFTSInfo->getNumTemplateArgs());
+ for (int i=0, e = DFTSInfo->getNumTemplateArgs(); i != e; ++i)
+ Writer.AddTemplateArgumentLoc(DFTSInfo->getTemplateArg(i), Record);
+ Writer.AddSourceLocation(DFTSInfo->getLAngleLoc(), Record);
+ Writer.AddSourceLocation(DFTSInfo->getRAngleLoc(), Record);
+ break;
+ }
+ }
+
+ // FunctionDecl's body is handled last at PCHWriterDecl::Visit,
+ // after everything else is written.
Writer.AddDeclRef(D->getPreviousDeclaration(), Record);
Record.push_back(D->getStorageClass()); // FIXME: stable encoding
@@ -211,7 +290,6 @@ void PCHDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
Record.push_back(D->isTrivial());
Record.push_back(D->isCopyAssignment());
Record.push_back(D->hasImplicitReturnZero());
- // FIXME: C++ TemplateOrInstantiation???
Writer.AddSourceLocation(D->getLocEnd(), Record);
Record.push_back(D->param_size());
@@ -357,9 +435,10 @@ void PCHDeclWriter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D) {
void PCHDeclWriter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
VisitNamedDecl(D);
Writer.AddSourceLocation(D->getAtLoc(), Record);
- Writer.AddTypeRef(D->getType(), Record);
+ Writer.AddTypeSourceInfo(D->getTypeSourceInfo(), Record);
// FIXME: stable encoding
Record.push_back((unsigned)D->getPropertyAttributes());
+ Record.push_back((unsigned)D->getPropertyAttributesAsWritten());
// FIXME: stable encoding
Record.push_back((unsigned)D->getPropertyImplementation());
Writer.AddDeclarationName(D->getGetterName(), Record);
@@ -404,6 +483,8 @@ void PCHDeclWriter::VisitFieldDecl(FieldDecl *D) {
Record.push_back(D->getBitWidth()? 1 : 0);
if (D->getBitWidth())
Writer.AddStmt(D->getBitWidth());
+ if (!D->getDeclName())
+ Writer.AddDeclRef(Context.getInstantiatedFromUnnamedFieldDecl(D), Record);
Code = pch::DECL_FIELD;
}
@@ -420,6 +501,16 @@ void PCHDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(D->getInit() ? 1 : 0);
if (D->getInit())
Writer.AddStmt(D->getInit());
+
+ MemberSpecializationInfo *SpecInfo
+ = D->isStaticDataMember() ? D->getMemberSpecializationInfo() : 0;
+ Record.push_back(SpecInfo != 0);
+ if (SpecInfo) {
+ Writer.AddDeclRef(SpecInfo->getInstantiatedFrom(), Record);
+ Record.push_back(SpecInfo->getTemplateSpecializationKind());
+ Writer.AddSourceLocation(SpecInfo->getPointOfInstantiation(), Record);
+ }
+
Code = pch::DECL_VAR;
}
@@ -432,6 +523,9 @@ void PCHDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
VisitVarDecl(D);
Record.push_back(D->getObjCDeclQualifier()); // FIXME: stable encoding
Record.push_back(D->hasInheritedDefaultArg());
+ Record.push_back(D->hasUninstantiatedDefaultArg());
+ if (D->hasUninstantiatedDefaultArg())
+ Writer.AddStmt(D->getUninstantiatedDefaultArg());
Code = pch::DECL_PARM_VAR;
// If the assumptions about the DECL_PARM_VAR abbrev are true, use it. Here
@@ -440,14 +534,15 @@ void PCHDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
if (!D->getTypeSourceInfo() &&
!D->hasAttrs() &&
!D->isImplicit() &&
- !D->isUsed() &&
+ !D->isUsed(false) &&
D->getAccess() == AS_none &&
D->getPCHLevel() == 0 &&
D->getStorageClass() == 0 &&
!D->hasCXXDirectInitializer() && // Can params have this ever?
D->getObjCDeclQualifier() == 0 &&
!D->hasInheritedDefaultArg() &&
- D->getInit() == 0) // No default expr.
+ D->getInit() == 0 &&
+ !D->hasUninstantiatedDefaultArg()) // No default expr.
AbbrevToUse = Writer.getParmVarDeclAbbrev();
// Check things we know are true of *every* PARM_VAR_DECL, which is more than
@@ -458,6 +553,8 @@ void PCHDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
assert(!D->isDeclaredInCondition() && "PARM_VAR_DECL can't be in condition");
assert(!D->isExceptionVariable() && "PARM_VAR_DECL can't be exception var");
assert(D->getPreviousDeclaration() == 0 && "PARM_VAR_DECL can't be redecl");
+ assert(!D->isStaticDataMember() &&
+ "PARM_VAR_DECL can't be static data member");
}
void PCHDeclWriter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
@@ -469,6 +566,7 @@ void PCHDeclWriter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
void PCHDeclWriter::VisitBlockDecl(BlockDecl *D) {
VisitDecl(D);
Writer.AddStmt(D->getBody());
+ Writer.AddTypeSourceInfo(D->getSignatureAsWritten(), Record);
Record.push_back(D->param_size());
for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
P != PEnd; ++P)
@@ -510,7 +608,7 @@ void PCHDeclWriter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
Code = pch::DECL_NAMESPACE_ALIAS;
}
-void PCHDeclWriter::VisitUsing(UsingDecl *D) {
+void PCHDeclWriter::VisitUsingDecl(UsingDecl *D) {
VisitNamedDecl(D);
Writer.AddSourceRange(D->getNestedNameRange(), Record);
Writer.AddSourceLocation(D->getUsingLocation(), Record);
@@ -520,13 +618,15 @@ void PCHDeclWriter::VisitUsing(UsingDecl *D) {
PEnd = D->shadow_end(); P != PEnd; ++P)
Writer.AddDeclRef(*P, Record);
Record.push_back(D->isTypeName());
+ Writer.AddDeclRef(Context.getInstantiatedFromUsingDecl(D), Record);
Code = pch::DECL_USING;
}
-void PCHDeclWriter::VisitUsingShadow(UsingShadowDecl *D) {
+void PCHDeclWriter::VisitUsingShadowDecl(UsingShadowDecl *D) {
VisitNamedDecl(D);
Writer.AddDeclRef(D->getTargetDecl(), Record);
Writer.AddDeclRef(D->getUsingDecl(), Record);
+ Writer.AddDeclRef(Context.getInstantiatedFromUsingShadowDecl(D), Record);
Code = pch::DECL_USING_SHADOW;
}
@@ -541,7 +641,7 @@ void PCHDeclWriter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
Code = pch::DECL_USING_DIRECTIVE;
}
-void PCHDeclWriter::VisitUnresolvedUsingValue(UnresolvedUsingValueDecl *D) {
+void PCHDeclWriter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
VisitValueDecl(D);
Writer.AddSourceRange(D->getTargetNestedNameRange(), Record);
Writer.AddSourceLocation(D->getUsingLoc(), Record);
@@ -549,7 +649,7 @@ void PCHDeclWriter::VisitUnresolvedUsingValue(UnresolvedUsingValueDecl *D) {
Code = pch::DECL_UNRESOLVED_USING_VALUE;
}
-void PCHDeclWriter::VisitUnresolvedUsingTypename(
+void PCHDeclWriter::VisitUnresolvedUsingTypenameDecl(
UnresolvedUsingTypenameDecl *D) {
VisitTypeDecl(D);
Writer.AddSourceRange(D->getTargetNestedNameRange(), Record);
@@ -560,71 +660,324 @@ void PCHDeclWriter::VisitUnresolvedUsingTypename(
}
void PCHDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
- // assert(false && "cannot write CXXRecordDecl");
+ // See comments at PCHDeclReader::VisitCXXRecordDecl about why this happens
+ // before VisitRecordDecl.
+ enum { Data_NoDefData, Data_Owner, Data_NotOwner };
+ bool OwnsDefinitionData = false;
+ if (D->DefinitionData) {
+ assert(D->DefinitionData->Definition &&
+ "DefinitionData don't point to a definition decl!");
+ OwnsDefinitionData = D->DefinitionData->Definition == D;
+ if (OwnsDefinitionData) {
+ Record.push_back(Data_Owner);
+ } else {
+ Record.push_back(Data_NotOwner);
+ Writer.AddDeclRef(D->DefinitionData->Definition, Record);
+ }
+ } else
+ Record.push_back(Data_NoDefData);
+
VisitRecordDecl(D);
+
+ if (OwnsDefinitionData) {
+ assert(D->DefinitionData);
+ struct CXXRecordDecl::DefinitionData &Data = *D->DefinitionData;
+
+ Record.push_back(Data.UserDeclaredConstructor);
+ Record.push_back(Data.UserDeclaredCopyConstructor);
+ Record.push_back(Data.UserDeclaredCopyAssignment);
+ Record.push_back(Data.UserDeclaredDestructor);
+ Record.push_back(Data.Aggregate);
+ Record.push_back(Data.PlainOldData);
+ Record.push_back(Data.Empty);
+ Record.push_back(Data.Polymorphic);
+ Record.push_back(Data.Abstract);
+ Record.push_back(Data.HasTrivialConstructor);
+ Record.push_back(Data.HasTrivialCopyConstructor);
+ Record.push_back(Data.HasTrivialCopyAssignment);
+ Record.push_back(Data.HasTrivialDestructor);
+ Record.push_back(Data.ComputedVisibleConversions);
+ Record.push_back(Data.DeclaredDefaultConstructor);
+ Record.push_back(Data.DeclaredCopyConstructor);
+ Record.push_back(Data.DeclaredCopyAssignment);
+ Record.push_back(Data.DeclaredDestructor);
+
+ Record.push_back(D->getNumBases());
+ for (CXXRecordDecl::base_class_iterator I = D->bases_begin(),
+ E = D->bases_end(); I != E; ++I)
+ Writer.AddCXXBaseSpecifier(*I, Record);
+
+ // FIXME: Make VBases lazily computed when needed to avoid storing them.
+ Record.push_back(D->getNumVBases());
+ for (CXXRecordDecl::base_class_iterator I = D->vbases_begin(),
+ E = D->vbases_end(); I != E; ++I)
+ Writer.AddCXXBaseSpecifier(*I, Record);
+
+ Writer.AddUnresolvedSet(Data.Conversions, Record);
+ Writer.AddUnresolvedSet(Data.VisibleConversions, Record);
+ // Data.Definition is written at the top.
+ Writer.AddDeclRef(Data.FirstFriend, Record);
+ }
+
+ enum {
+ CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
+ };
+ if (ClassTemplateDecl *TemplD = D->getDescribedClassTemplate()) {
+ Record.push_back(CXXRecTemplate);
+ Writer.AddDeclRef(TemplD, Record);
+ } else if (MemberSpecializationInfo *MSInfo
+ = D->getMemberSpecializationInfo()) {
+ Record.push_back(CXXRecMemberSpecialization);
+ Writer.AddDeclRef(MSInfo->getInstantiatedFrom(), Record);
+ Record.push_back(MSInfo->getTemplateSpecializationKind());
+ Writer.AddSourceLocation(MSInfo->getPointOfInstantiation(), Record);
+ } else {
+ Record.push_back(CXXRecNotTemplate);
+ }
+
Code = pch::DECL_CXX_RECORD;
}
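
The Data_Owner / Data_NotOwner tag is written before VisitRecordDecl runs so the reader can decide, before anything else, whether to materialize the shared DefinitionData or merely link to the declaration that owns it. A minimal standalone sketch of that tagging scheme; every name and type below is illustrative, not Clang's:

#include <cstdint>
#include <memory>
#include <vector>

enum DataTag : uint64_t { Data_NoDefData, Data_Owner, Data_NotOwner };

struct Definition { bool PlainOldData = false; };
struct Redecl {
  int ID;                               // stands in for a PCH DeclID
  std::shared_ptr<Definition> DefData;  // shared by all redeclarations
};

// Writer side: tag first, then either the payload (owner) or a reference.
void writeRedecl(std::vector<uint64_t> &Rec, const Redecl &D, int OwnerID) {
  if (!D.DefData) { Rec.push_back(Data_NoDefData); return; }
  if (D.ID == OwnerID) {
    Rec.push_back(Data_Owner);
    Rec.push_back(D.DefData->PlainOldData);  // payload follows the tag
  } else {
    Rec.push_back(Data_NotOwner);
    Rec.push_back(OwnerID);                  // reader links to the owner
  }
}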
void PCHDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) {
- // assert(false && "cannot write CXXMethodDecl");
VisitFunctionDecl(D);
+ Record.push_back(D->size_overridden_methods());
+ for (CXXMethodDecl::method_iterator
+ I = D->begin_overridden_methods(), E = D->end_overridden_methods();
+ I != E; ++I)
+ Writer.AddDeclRef(*I, Record);
Code = pch::DECL_CXX_METHOD;
}
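
The overridden-methods list follows the record convention used throughout these writers: a count first, then exactly that many elements, so the reader never scans for a terminator. A self-contained sketch of the convention, with a plain vector standing in for the PCH record:

#include <cstddef>
#include <cstdint>
#include <vector>

using Record = std::vector<uint64_t>;

void writeList(Record &Rec, const std::vector<uint64_t> &IDs) {
  Rec.push_back(IDs.size());  // the reader learns the count up front
  for (uint64_t ID : IDs)
    Rec.push_back(ID);
}

std::vector<uint64_t> readList(const Record &Rec, size_t &Idx) {
  size_t N = Rec[Idx++];
  std::vector<uint64_t> IDs(Rec.begin() + Idx, Rec.begin() + Idx + N);
  Idx += N;
  return IDs;
}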
void PCHDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
- // assert(false && "cannot write CXXConstructorDecl");
VisitCXXMethodDecl(D);
+
+ Record.push_back(D->IsExplicitSpecified);
+ Record.push_back(D->ImplicitlyDefined);
+
+ Record.push_back(D->NumBaseOrMemberInitializers);
+ for (unsigned i=0; i != D->NumBaseOrMemberInitializers; ++i) {
+ CXXBaseOrMemberInitializer *Init = D->BaseOrMemberInitializers[i];
+
+ Record.push_back(Init->isBaseInitializer());
+ if (Init->isBaseInitializer()) {
+ Writer.AddTypeSourceInfo(Init->getBaseClassInfo(), Record);
+ Record.push_back(Init->isBaseVirtual());
+ } else {
+ Writer.AddDeclRef(Init->getMember(), Record);
+ }
+ Writer.AddSourceLocation(Init->getMemberLocation(), Record);
+ Writer.AddStmt(Init->getInit());
+ Writer.AddDeclRef(Init->getAnonUnionMember(), Record);
+ Writer.AddSourceLocation(Init->getLParenLoc(), Record);
+ Writer.AddSourceLocation(Init->getRParenLoc(), Record);
+ Record.push_back(Init->isWritten());
+ if (Init->isWritten()) {
+ Record.push_back(Init->getSourceOrder());
+ } else {
+ Record.push_back(Init->getNumArrayIndices());
+ for (unsigned i=0, e=Init->getNumArrayIndices(); i != e; ++i)
+ Writer.AddDeclRef(Init->getArrayIndex(i), Record);
+ }
+ }
+
Code = pch::DECL_CXX_CONSTRUCTOR;
}
void PCHDeclWriter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
- // assert(false && "cannot write CXXDestructorDecl");
VisitCXXMethodDecl(D);
+
+ Record.push_back(D->ImplicitlyDefined);
+ Writer.AddDeclRef(D->OperatorDelete, Record);
+
Code = pch::DECL_CXX_DESTRUCTOR;
}
void PCHDeclWriter::VisitCXXConversionDecl(CXXConversionDecl *D) {
- // assert(false && "cannot write CXXConversionDecl");
VisitCXXMethodDecl(D);
+ Record.push_back(D->IsExplicitSpecified);
Code = pch::DECL_CXX_CONVERSION;
}
+void PCHDeclWriter::VisitAccessSpecDecl(AccessSpecDecl *D) {
+ VisitDecl(D);
+ Writer.AddSourceLocation(D->getColonLoc(), Record);
+ Code = pch::DECL_ACCESS_SPEC;
+}
+
+void PCHDeclWriter::VisitFriendDecl(FriendDecl *D) {
+ VisitDecl(D);
+ Record.push_back(D->Friend.is<TypeSourceInfo*>());
+ if (D->Friend.is<TypeSourceInfo*>())
+ Writer.AddTypeSourceInfo(D->Friend.get<TypeSourceInfo*>(), Record);
+ else
+ Writer.AddDeclRef(D->Friend.get<NamedDecl*>(), Record);
+ Writer.AddDeclRef(D->NextFriend, Record);
+ Writer.AddSourceLocation(D->FriendLoc, Record);
+ Code = pch::DECL_FRIEND;
+}
+
void PCHDeclWriter::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
assert(false && "cannot write FriendTemplateDecl");
}
void PCHDeclWriter::VisitTemplateDecl(TemplateDecl *D) {
- assert(false && "cannot write TemplateDecl");
+ VisitNamedDecl(D);
+
+ Writer.AddDeclRef(D->getTemplatedDecl(), Record);
+ Writer.AddTemplateParameterList(D->getTemplateParameters(), Record);
+}
+
+static bool IsKeptInFoldingSet(ClassTemplateSpecializationDecl *D) {
+ return D->getTypeForDecl()->getAsCXXRecordDecl() == D;
}
void PCHDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
- assert(false && "cannot write ClassTemplateDecl");
+ VisitTemplateDecl(D);
+
+ Record.push_back(D->getIdentifierNamespace());
+ Writer.AddDeclRef(D->getPreviousDeclaration(), Record);
+ if (D->getPreviousDeclaration() == 0) {
+ // This ClassTemplateDecl owns the CommonPtr; write it.
+ assert(D->isCanonicalDecl());
+
+ typedef llvm::FoldingSet<ClassTemplateSpecializationDecl> CTSDSetTy;
+ CTSDSetTy &CTSDSet = D->getSpecializations();
+ Record.push_back(CTSDSet.size());
+ for (CTSDSetTy::iterator I=CTSDSet.begin(), E = CTSDSet.end(); I!=E; ++I) {
+ assert(IsKeptInFoldingSet(&*I));
+ Writer.AddDeclRef(&*I, Record);
+ }
+
+ typedef llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> CTPSDSetTy;
+ CTPSDSetTy &CTPSDSet = D->getPartialSpecializations();
+ Record.push_back(CTPSDSet.size());
+ for (CTPSDSetTy::iterator I=CTPSDSet.begin(), E=CTPSDSet.end(); I!=E; ++I) {
+ assert(IsKeptInFoldingSet(&*I));
+ Writer.AddDeclRef(&*I, Record);
+ }
+
+ // InjectedClassNameType is computed, no need to write it.
+
+ Writer.AddDeclRef(D->getInstantiatedFromMemberTemplate(), Record);
+ if (D->getInstantiatedFromMemberTemplate())
+ Record.push_back(D->isMemberSpecialization());
+ }
+ Code = pch::DECL_CLASS_TEMPLATE;
}
void PCHDeclWriter::VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *D) {
- assert(false && "cannot write ClassTemplateSpecializationDecl");
+ VisitCXXRecordDecl(D);
+
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *> InstFrom
+ = D->getSpecializedTemplateOrPartial();
+ if (InstFrom.is<ClassTemplateDecl *>()) {
+ Writer.AddDeclRef(InstFrom.get<ClassTemplateDecl *>(), Record);
+ } else {
+ Writer.AddDeclRef(InstFrom.get<ClassTemplatePartialSpecializationDecl *>(),
+ Record);
+ Writer.AddTemplateArgumentList(&D->getTemplateInstantiationArgs(), Record);
+ }
+
+ // Explicit info.
+ Writer.AddTypeSourceInfo(D->getTypeAsWritten(), Record);
+ if (D->getTypeAsWritten()) {
+ Writer.AddSourceLocation(D->getExternLoc(), Record);
+ Writer.AddSourceLocation(D->getTemplateKeywordLoc(), Record);
+ }
+
+ Writer.AddTemplateArgumentList(&D->getTemplateArgs(), Record);
+ Writer.AddSourceLocation(D->getPointOfInstantiation(), Record);
+ Record.push_back(D->getSpecializationKind());
+
+ bool IsInFoldingSet = IsKeptInFoldingSet(D);
+ Record.push_back(IsInFoldingSet);
+ if (IsInFoldingSet) {
+ // When reading, we'll add it to the folding set of this one.
+ Writer.AddDeclRef(D->getSpecializedTemplate()->getCanonicalDecl(), Record);
+ }
+
+ Code = pch::DECL_CLASS_TEMPLATE_SPECIALIZATION;
}
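
The IsInFoldingSet flag round-trips folding-set membership: when it is set, a reference to the canonical template follows, and the reader re-registers the specialization there. A rough sketch of the read side, assuming integer IDs for declarations as in the PCH format; the names are invented:

#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

using Record = std::vector<uint64_t>;

// canonical template ID -> IDs of its registered specializations
std::map<uint64_t, std::vector<uint64_t>> Specializations;

void readSpecialization(const Record &Rec, size_t &Idx, uint64_t SpecID) {
  bool InFoldingSet = Rec[Idx++] != 0;
  if (InFoldingSet) {
    uint64_t CanonicalTemplate = Rec[Idx++];
    Specializations[CanonicalTemplate].push_back(SpecID);
  }
}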
void PCHDeclWriter::VisitClassTemplatePartialSpecializationDecl(
ClassTemplatePartialSpecializationDecl *D) {
- assert(false && "cannot write ClassTemplatePartialSpecializationDecl");
+ VisitClassTemplateSpecializationDecl(D);
+
+ Writer.AddTemplateParameterList(D->getTemplateParameters(), Record);
+
+ Record.push_back(D->getNumTemplateArgsAsWritten());
+ for (int i = 0, e = D->getNumTemplateArgsAsWritten(); i != e; ++i)
+ Writer.AddTemplateArgumentLoc(D->getTemplateArgsAsWritten()[i], Record);
+
+ Record.push_back(D->getSequenceNumber());
+
+ // These are read/set from/to the first declaration.
+ if (D->getPreviousDeclaration() == 0) {
+ Writer.AddDeclRef(D->getInstantiatedFromMember(), Record);
+ Record.push_back(D->isMemberSpecialization());
+ }
+
+ Code = pch::DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION;
}
-void PCHDeclWriter::visitFunctionTemplateDecl(FunctionTemplateDecl *D) {
- assert(false && "cannot write FunctionTemplateDecl");
+void PCHDeclWriter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ VisitTemplateDecl(D);
+
+ Record.push_back(D->getIdentifierNamespace());
+ Writer.AddDeclRef(D->getPreviousDeclaration(), Record);
+ if (D->getPreviousDeclaration() == 0) {
+ // This FunctionTemplateDecl owns the CommonPtr; write it.
+
+ // Write the function specialization declarations.
+ Record.push_back(D->getSpecializations().size());
+ for (llvm::FoldingSet<FunctionTemplateSpecializationInfo>::iterator
+ I = D->getSpecializations().begin(),
+ E = D->getSpecializations().end() ; I != E; ++I)
+ Writer.AddDeclRef(I->Function, Record);
+
+ Writer.AddDeclRef(D->getInstantiatedFromMemberTemplate(), Record);
+ if (D->getInstantiatedFromMemberTemplate())
+ Record.push_back(D->isMemberSpecialization());
+ }
+ Code = pch::DECL_FUNCTION_TEMPLATE;
}
void PCHDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
- assert(false && "cannot write TemplateTypeParmDecl");
+ VisitTypeDecl(D);
+
+ Record.push_back(D->wasDeclaredWithTypename());
+ Record.push_back(D->isParameterPack());
+ Record.push_back(D->defaultArgumentWasInherited());
+ Writer.AddTypeSourceInfo(D->getDefaultArgumentInfo(), Record);
+
+ Code = pch::DECL_TEMPLATE_TYPE_PARM;
}
void PCHDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
- assert(false && "cannot write NonTypeTemplateParmDecl");
+ VisitVarDecl(D);
+ // TemplateParmPosition.
+ Record.push_back(D->getDepth());
+ Record.push_back(D->getPosition());
+ // Rest of NonTypeTemplateParmDecl.
+ Record.push_back(D->getDefaultArgument() != 0);
+ if (D->getDefaultArgument()) {
+ Writer.AddStmt(D->getDefaultArgument());
+ Record.push_back(D->defaultArgumentWasInherited());
+ }
+ Code = pch::DECL_NON_TYPE_TEMPLATE_PARM;
}
void PCHDeclWriter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
- assert(false && "cannot write TemplateTemplateParmDecl");
+ VisitTemplateDecl(D);
+ // TemplateParmPosition.
+ Record.push_back(D->getDepth());
+ Record.push_back(D->getPosition());
+ // Rest of TemplateTemplateParmDecl.
+ Writer.AddTemplateArgumentLoc(D->getDefaultArgument(), Record);
+ Record.push_back(D->defaultArgumentWasInherited());
+ Code = pch::DECL_TEMPLATE_TEMPLATE_PARM;
}
void PCHDeclWriter::VisitStaticAssertDecl(StaticAssertDecl *D) {
@@ -687,9 +1040,11 @@ void PCHWriter::WriteDeclsBlockAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isNRVOVariable
Abv->Add(BitCodeAbbrevOp(0)); // PrevDecl
Abv->Add(BitCodeAbbrevOp(0)); // HasInit
+ Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo
// ParmVarDecl
Abv->Add(BitCodeAbbrevOp(0)); // ObjCDeclQualifier
Abv->Add(BitCodeAbbrevOp(0)); // HasInheritedDefaultArg
+ Abv->Add(BitCodeAbbrevOp(0)); // HasUninstantiatedDefaultArg
ParmVarDeclAbbrev = Stream.EmitAbbrev(Abv);
}
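
The two added zeros keep the abbreviation in lockstep with the fields VisitVarDecl and VisitParmVarDecl now emit: a fixed abbreviation describes an exact record shape, so the writer and the abbreviation must grow together. A toy model of that invariant; this is not LLVM's bitstream API:

#include <cassert>
#include <cstdint>
#include <vector>

struct Abbrev {
  std::vector<uint64_t> Defaults;  // one slot per field, like the 0s above
};

void emitWithAbbrev(const Abbrev &A, const std::vector<uint64_t> &Record) {
  // If the writer grows a field (HasMemberSpecializationInfo,
  // HasUninstantiatedDefaultArg) without growing the abbreviation,
  // this is the mismatch that would bite.
  assert(Record.size() == A.Defaults.size() && "record/abbreviation skew");
  // ... encode Record against A's operand list ...
}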
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHWriterStmt.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHWriterStmt.cpp
index a9ee435..7537728 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PCHWriterStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PCHWriterStmt.cpp
@@ -22,7 +22,7 @@ using namespace clang;
// Statement/expression serialization
//===----------------------------------------------------------------------===//
-namespace {
+namespace clang {
class PCHStmtWriter : public StmtVisitor<PCHStmtWriter, void> {
PCHWriter &Writer;
PCHWriter::RecordData &Record;
@@ -32,6 +32,9 @@ namespace {
PCHStmtWriter(PCHWriter &Writer, PCHWriter::RecordData &Record)
: Writer(Writer), Record(Record) { }
+
+ void
+ AddExplicitTemplateArgumentList(const ExplicitTemplateArgumentList &Args);
void VisitStmt(Stmt *S);
void VisitNullStmt(NullStmt *S);
@@ -61,6 +64,7 @@ namespace {
void VisitStringLiteral(StringLiteral *E);
void VisitCharacterLiteral(CharacterLiteral *E);
void VisitParenExpr(ParenExpr *E);
+ void VisitParenListExpr(ParenListExpr *E);
void VisitUnaryOperator(UnaryOperator *E);
void VisitOffsetOfExpr(OffsetOfExpr *E);
void VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E);
@@ -114,6 +118,7 @@ namespace {
void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E);
void VisitCXXMemberCallExpr(CXXMemberCallExpr *E);
void VisitCXXConstructExpr(CXXConstructExpr *E);
+ void VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E);
void VisitCXXNamedCastExpr(CXXNamedCastExpr *E);
void VisitCXXStaticCastExpr(CXXStaticCastExpr *E);
void VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E);
@@ -127,14 +132,34 @@ namespace {
void VisitCXXThrowExpr(CXXThrowExpr *E);
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E);
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
-
- void VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E);
+ void VisitCXXBindReferenceExpr(CXXBindReferenceExpr *E);
+
+ void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
void VisitCXXNewExpr(CXXNewExpr *E);
-
+ void VisitCXXDeleteExpr(CXXDeleteExpr *E);
+ void VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E);
+
void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
+ void VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E);
+ void VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E);
+ void VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E);
+
+ void VisitOverloadExpr(OverloadExpr *E);
+ void VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E);
+ void VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E);
+
+ void VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E);
};
}
+void PCHStmtWriter::
+AddExplicitTemplateArgumentList(const ExplicitTemplateArgumentList &Args) {
+ Writer.AddSourceLocation(Args.LAngleLoc, Record);
+ Writer.AddSourceLocation(Args.RAngleLoc, Record);
+ for (unsigned i=0; i != Args.NumTemplateArgs; ++i)
+ Writer.AddTemplateArgumentLoc(Args.getTemplateArgs()[i], Record);
+}
+
void PCHStmtWriter::VisitStmt(Stmt *S) {
}
@@ -149,7 +174,7 @@ void PCHStmtWriter::VisitCompoundStmt(CompoundStmt *S) {
Record.push_back(S->size());
for (CompoundStmt::body_iterator CS = S->body_begin(), CSEnd = S->body_end();
CS != CSEnd; ++CS)
- Writer.WriteSubStmt(*CS);
+ Writer.AddStmt(*CS);
Writer.AddSourceLocation(S->getLBracLoc(), Record);
Writer.AddSourceLocation(S->getRBracLoc(), Record);
Code = pch::STMT_COMPOUND;
@@ -157,14 +182,14 @@ void PCHStmtWriter::VisitCompoundStmt(CompoundStmt *S) {
void PCHStmtWriter::VisitSwitchCase(SwitchCase *S) {
VisitStmt(S);
- Record.push_back(Writer.RecordSwitchCaseID(S));
+ Record.push_back(Writer.getSwitchCaseID(S));
}
void PCHStmtWriter::VisitCaseStmt(CaseStmt *S) {
VisitSwitchCase(S);
- Writer.WriteSubStmt(S->getLHS());
- Writer.WriteSubStmt(S->getRHS());
- Writer.WriteSubStmt(S->getSubStmt());
+ Writer.AddStmt(S->getLHS());
+ Writer.AddStmt(S->getRHS());
+ Writer.AddStmt(S->getSubStmt());
Writer.AddSourceLocation(S->getCaseLoc(), Record);
Writer.AddSourceLocation(S->getEllipsisLoc(), Record);
Writer.AddSourceLocation(S->getColonLoc(), Record);
@@ -173,7 +198,7 @@ void PCHStmtWriter::VisitCaseStmt(CaseStmt *S) {
void PCHStmtWriter::VisitDefaultStmt(DefaultStmt *S) {
VisitSwitchCase(S);
- Writer.WriteSubStmt(S->getSubStmt());
+ Writer.AddStmt(S->getSubStmt());
Writer.AddSourceLocation(S->getDefaultLoc(), Record);
Writer.AddSourceLocation(S->getColonLoc(), Record);
Code = pch::STMT_DEFAULT;
@@ -182,7 +207,7 @@ void PCHStmtWriter::VisitDefaultStmt(DefaultStmt *S) {
void PCHStmtWriter::VisitLabelStmt(LabelStmt *S) {
VisitStmt(S);
Writer.AddIdentifierRef(S->getID(), Record);
- Writer.WriteSubStmt(S->getSubStmt());
+ Writer.AddStmt(S->getSubStmt());
Writer.AddSourceLocation(S->getIdentLoc(), Record);
Record.push_back(Writer.GetLabelID(S));
Code = pch::STMT_LABEL;
@@ -191,9 +216,9 @@ void PCHStmtWriter::VisitLabelStmt(LabelStmt *S) {
void PCHStmtWriter::VisitIfStmt(IfStmt *S) {
VisitStmt(S);
Writer.AddDeclRef(S->getConditionVariable(), Record);
- Writer.WriteSubStmt(S->getCond());
- Writer.WriteSubStmt(S->getThen());
- Writer.WriteSubStmt(S->getElse());
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getThen());
+ Writer.AddStmt(S->getElse());
Writer.AddSourceLocation(S->getIfLoc(), Record);
Writer.AddSourceLocation(S->getElseLoc(), Record);
Code = pch::STMT_IF;
@@ -202,28 +227,28 @@ void PCHStmtWriter::VisitIfStmt(IfStmt *S) {
void PCHStmtWriter::VisitSwitchStmt(SwitchStmt *S) {
VisitStmt(S);
Writer.AddDeclRef(S->getConditionVariable(), Record);
- Writer.WriteSubStmt(S->getCond());
- Writer.WriteSubStmt(S->getBody());
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getBody());
Writer.AddSourceLocation(S->getSwitchLoc(), Record);
for (SwitchCase *SC = S->getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase())
- Record.push_back(Writer.getSwitchCaseID(SC));
+ Record.push_back(Writer.RecordSwitchCaseID(SC));
Code = pch::STMT_SWITCH;
}
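
Note the deliberate swap relative to the old code: the SwitchStmt now records (assigns) an ID for every case in its list via RecordSwitchCaseID, and each SwitchCase merely looks its ID up via getSwitchCaseID, so the cross-references resolve no matter which record the reader hits first. A minimal sketch of that two-call protocol, with hypothetical names:

#include <cassert>
#include <map>

struct SwitchCaseIDs {
  std::map<const void *, unsigned> IDs;
  unsigned NextID = 0;
  unsigned record(const void *SC) {     // RecordSwitchCaseID analogue
    assert(!IDs.count(SC) && "case recorded twice");
    return IDs[SC] = NextID++;
  }
  unsigned get(const void *SC) const {  // getSwitchCaseID analogue
    return IDs.at(SC);                  // must have been recorded already
  }
};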
void PCHStmtWriter::VisitWhileStmt(WhileStmt *S) {
VisitStmt(S);
Writer.AddDeclRef(S->getConditionVariable(), Record);
- Writer.WriteSubStmt(S->getCond());
- Writer.WriteSubStmt(S->getBody());
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getBody());
Writer.AddSourceLocation(S->getWhileLoc(), Record);
Code = pch::STMT_WHILE;
}
void PCHStmtWriter::VisitDoStmt(DoStmt *S) {
VisitStmt(S);
- Writer.WriteSubStmt(S->getCond());
- Writer.WriteSubStmt(S->getBody());
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getBody());
Writer.AddSourceLocation(S->getDoLoc(), Record);
Writer.AddSourceLocation(S->getWhileLoc(), Record);
Writer.AddSourceLocation(S->getRParenLoc(), Record);
@@ -232,11 +257,11 @@ void PCHStmtWriter::VisitDoStmt(DoStmt *S) {
void PCHStmtWriter::VisitForStmt(ForStmt *S) {
VisitStmt(S);
- Writer.WriteSubStmt(S->getInit());
- Writer.WriteSubStmt(S->getCond());
+ Writer.AddStmt(S->getInit());
+ Writer.AddStmt(S->getCond());
Writer.AddDeclRef(S->getConditionVariable(), Record);
- Writer.WriteSubStmt(S->getInc());
- Writer.WriteSubStmt(S->getBody());
+ Writer.AddStmt(S->getInc());
+ Writer.AddStmt(S->getBody());
Writer.AddSourceLocation(S->getForLoc(), Record);
Writer.AddSourceLocation(S->getLParenLoc(), Record);
Writer.AddSourceLocation(S->getRParenLoc(), Record);
@@ -255,7 +280,7 @@ void PCHStmtWriter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
VisitStmt(S);
Writer.AddSourceLocation(S->getGotoLoc(), Record);
Writer.AddSourceLocation(S->getStarLoc(), Record);
- Writer.WriteSubStmt(S->getTarget());
+ Writer.AddStmt(S->getTarget());
Code = pch::STMT_INDIRECT_GOTO;
}
@@ -273,7 +298,7 @@ void PCHStmtWriter::VisitBreakStmt(BreakStmt *S) {
void PCHStmtWriter::VisitReturnStmt(ReturnStmt *S) {
VisitStmt(S);
- Writer.WriteSubStmt(S->getRetValue());
+ Writer.AddStmt(S->getRetValue());
Writer.AddSourceLocation(S->getReturnLoc(), Record);
Writer.AddDeclRef(S->getNRVOCandidate(), Record);
Code = pch::STMT_RETURN;
@@ -299,25 +324,25 @@ void PCHStmtWriter::VisitAsmStmt(AsmStmt *S) {
Record.push_back(S->isVolatile());
Record.push_back(S->isSimple());
Record.push_back(S->isMSAsm());
- Writer.WriteSubStmt(S->getAsmString());
+ Writer.AddStmt(S->getAsmString());
// Outputs
for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) {
Writer.AddIdentifierRef(S->getOutputIdentifier(I), Record);
- Writer.WriteSubStmt(S->getOutputConstraintLiteral(I));
- Writer.WriteSubStmt(S->getOutputExpr(I));
+ Writer.AddStmt(S->getOutputConstraintLiteral(I));
+ Writer.AddStmt(S->getOutputExpr(I));
}
// Inputs
for (unsigned I = 0, N = S->getNumInputs(); I != N; ++I) {
Writer.AddIdentifierRef(S->getInputIdentifier(I), Record);
- Writer.WriteSubStmt(S->getInputConstraintLiteral(I));
- Writer.WriteSubStmt(S->getInputExpr(I));
+ Writer.AddStmt(S->getInputConstraintLiteral(I));
+ Writer.AddStmt(S->getInputExpr(I));
}
// Clobbers
for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I)
- Writer.WriteSubStmt(S->getClobber(I));
+ Writer.AddStmt(S->getClobber(I));
Code = pch::STMT_ASM;
}
@@ -338,10 +363,23 @@ void PCHStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) {
void PCHStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
VisitExpr(E);
+
+ Record.push_back(E->hasQualifier());
+ unsigned NumTemplateArgs = E->getNumTemplateArgs();
+ assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgumentList() &&
+ "Template args list with no args ?");
+ Record.push_back(NumTemplateArgs);
+
+ if (E->hasQualifier()) {
+ Writer.AddNestedNameSpecifier(E->getQualifier(), Record);
+ Writer.AddSourceRange(E->getQualifierRange(), Record);
+ }
+
+ if (NumTemplateArgs)
+ AddExplicitTemplateArgumentList(*E->getExplicitTemplateArgumentList());
+
Writer.AddDeclRef(E->getDecl(), Record);
Writer.AddSourceLocation(E->getLocation(), Record);
- // FIXME: write qualifier
- // FIXME: write explicit template arguments
Code = pch::EXPR_DECL_REF;
}
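
hasQualifier and NumTemplateArgs lead the DeclRefExpr record because the qualifier and the template arguments live in storage trailing the node, and the reader must know their sizes before it can allocate. An illustrative sketch of trailing-storage allocation; this is not Clang's allocator:

#include <cstdint>
#include <cstdlib>

struct RefNode {
  uint64_t NumArgs;  // header; NumArgs trailing slots follow in memory
};

RefNode *allocateRef(uint64_t NumArgs) {
  void *Mem = std::malloc(sizeof(RefNode) + NumArgs * sizeof(uint64_t));
  RefNode *N = static_cast<RefNode *>(Mem);
  N->NumArgs = NumArgs;
  return N;  // the caller fills the trailing slots and later std::free()s it
}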
@@ -362,7 +400,7 @@ void PCHStmtWriter::VisitFloatingLiteral(FloatingLiteral *E) {
void PCHStmtWriter::VisitImaginaryLiteral(ImaginaryLiteral *E) {
VisitExpr(E);
- Writer.WriteSubStmt(E->getSubExpr());
+ Writer.AddStmt(E->getSubExpr());
Code = pch::EXPR_IMAGINARY_LITERAL;
}
@@ -394,13 +432,23 @@ void PCHStmtWriter::VisitParenExpr(ParenExpr *E) {
VisitExpr(E);
Writer.AddSourceLocation(E->getLParen(), Record);
Writer.AddSourceLocation(E->getRParen(), Record);
- Writer.WriteSubStmt(E->getSubExpr());
+ Writer.AddStmt(E->getSubExpr());
Code = pch::EXPR_PAREN;
}
+void PCHStmtWriter::VisitParenListExpr(ParenListExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->NumExprs);
+ for (unsigned i=0; i != E->NumExprs; ++i)
+ Writer.AddStmt(E->Exprs[i]);
+ Writer.AddSourceLocation(E->LParenLoc, Record);
+ Writer.AddSourceLocation(E->RParenLoc, Record);
+ Code = pch::EXPR_PAREN_LIST;
+}
+
void PCHStmtWriter::VisitUnaryOperator(UnaryOperator *E) {
VisitExpr(E);
- Writer.WriteSubStmt(E->getSubExpr());
+ Writer.AddStmt(E->getSubExpr());
Record.push_back(E->getOpcode()); // FIXME: stable encoding
Writer.AddSourceLocation(E->getOperatorLoc(), Record);
Code = pch::EXPR_UNARY_OPERATOR;
@@ -438,7 +486,7 @@ void PCHStmtWriter::VisitOffsetOfExpr(OffsetOfExpr *E) {
}
}
for (unsigned I = 0, N = E->getNumExpressions(); I != N; ++I)
- Writer.WriteSubStmt(E->getIndexExpr(I));
+ Writer.AddStmt(E->getIndexExpr(I));
Code = pch::EXPR_OFFSETOF;
}
@@ -449,7 +497,7 @@ void PCHStmtWriter::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
Writer.AddTypeSourceInfo(E->getArgumentTypeInfo(), Record);
else {
Record.push_back(0);
- Writer.WriteSubStmt(E->getArgumentExpr());
+ Writer.AddStmt(E->getArgumentExpr());
}
Writer.AddSourceLocation(E->getOperatorLoc(), Record);
Writer.AddSourceLocation(E->getRParenLoc(), Record);
@@ -458,8 +506,8 @@ void PCHStmtWriter::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
void PCHStmtWriter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
VisitExpr(E);
- Writer.WriteSubStmt(E->getLHS());
- Writer.WriteSubStmt(E->getRHS());
+ Writer.AddStmt(E->getLHS());
+ Writer.AddStmt(E->getRHS());
Writer.AddSourceLocation(E->getRBracketLoc(), Record);
Code = pch::EXPR_ARRAY_SUBSCRIPT;
}
@@ -468,27 +516,48 @@ void PCHStmtWriter::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumArgs());
Writer.AddSourceLocation(E->getRParenLoc(), Record);
- Writer.WriteSubStmt(E->getCallee());
+ Writer.AddStmt(E->getCallee());
for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
Arg != ArgEnd; ++Arg)
- Writer.WriteSubStmt(*Arg);
+ Writer.AddStmt(*Arg);
Code = pch::EXPR_CALL;
}
void PCHStmtWriter::VisitMemberExpr(MemberExpr *E) {
- VisitExpr(E);
- Writer.WriteSubStmt(E->getBase());
+ // Don't call VisitExpr, we'll write everything here.
+
+ Record.push_back(E->hasQualifier());
+ if (E->hasQualifier()) {
+ Writer.AddNestedNameSpecifier(E->getQualifier(), Record);
+ Writer.AddSourceRange(E->getQualifierRange(), Record);
+ }
+
+ unsigned NumTemplateArgs = E->getNumTemplateArgs();
+ assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgumentList() &&
+ "Template args list with no args ?");
+ Record.push_back(NumTemplateArgs);
+ if (NumTemplateArgs) {
+ Writer.AddSourceLocation(E->getLAngleLoc(), Record);
+ Writer.AddSourceLocation(E->getRAngleLoc(), Record);
+ for (unsigned i=0; i != NumTemplateArgs; ++i)
+ Writer.AddTemplateArgumentLoc(E->getTemplateArgs()[i], Record);
+ }
+
+ DeclAccessPair FoundDecl = E->getFoundDecl();
+ Writer.AddDeclRef(FoundDecl.getDecl(), Record);
+ Record.push_back(FoundDecl.getAccess());
+
+ Writer.AddTypeRef(E->getType(), Record);
+ Writer.AddStmt(E->getBase());
Writer.AddDeclRef(E->getMemberDecl(), Record);
Writer.AddSourceLocation(E->getMemberLoc(), Record);
Record.push_back(E->isArrow());
- // FIXME: C++ nested-name-specifier
- // FIXME: C++ template argument list
Code = pch::EXPR_MEMBER;
}
void PCHStmtWriter::VisitObjCIsaExpr(ObjCIsaExpr *E) {
VisitExpr(E);
- Writer.WriteSubStmt(E->getBase());
+ Writer.AddStmt(E->getBase());
Writer.AddSourceLocation(E->getIsaMemberLoc(), Record);
Record.push_back(E->isArrow());
Code = pch::EXPR_OBJC_ISA;
@@ -496,14 +565,19 @@ void PCHStmtWriter::VisitObjCIsaExpr(ObjCIsaExpr *E) {
void PCHStmtWriter::VisitCastExpr(CastExpr *E) {
VisitExpr(E);
- Writer.WriteSubStmt(E->getSubExpr());
+ Writer.AddStmt(E->getSubExpr());
Record.push_back(E->getCastKind()); // FIXME: stable encoding
+ CXXBaseSpecifierArray &BasePath = E->getBasePath();
+ Record.push_back(BasePath.size());
+ for (CXXBaseSpecifierArray::iterator I = BasePath.begin(),
+ BEnd = BasePath.end(); I != BEnd; ++I)
+ Writer.AddCXXBaseSpecifier(**I, Record);
}
void PCHStmtWriter::VisitBinaryOperator(BinaryOperator *E) {
VisitExpr(E);
- Writer.WriteSubStmt(E->getLHS());
- Writer.WriteSubStmt(E->getRHS());
+ Writer.AddStmt(E->getLHS());
+ Writer.AddStmt(E->getRHS());
Record.push_back(E->getOpcode()); // FIXME: stable encoding
Writer.AddSourceLocation(E->getOperatorLoc(), Record);
Code = pch::EXPR_BINARY_OPERATOR;
@@ -518,9 +592,9 @@ void PCHStmtWriter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
void PCHStmtWriter::VisitConditionalOperator(ConditionalOperator *E) {
VisitExpr(E);
- Writer.WriteSubStmt(E->getCond());
- Writer.WriteSubStmt(E->getLHS());
- Writer.WriteSubStmt(E->getRHS());
+ Writer.AddStmt(E->getCond());
+ Writer.AddStmt(E->getLHS());
+ Writer.AddStmt(E->getRHS());
Writer.AddSourceLocation(E->getQuestionLoc(), Record);
Writer.AddSourceLocation(E->getColonLoc(), Record);
Code = pch::EXPR_CONDITIONAL_OPERATOR;
@@ -548,14 +622,14 @@ void PCHStmtWriter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
VisitExpr(E);
Writer.AddSourceLocation(E->getLParenLoc(), Record);
Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
- Writer.WriteSubStmt(E->getInitializer());
+ Writer.AddStmt(E->getInitializer());
Record.push_back(E->isFileScope());
Code = pch::EXPR_COMPOUND_LITERAL;
}
void PCHStmtWriter::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
VisitExpr(E);
- Writer.WriteSubStmt(E->getBase());
+ Writer.AddStmt(E->getBase());
Writer.AddIdentifierRef(&E->getAccessor(), Record);
Writer.AddSourceLocation(E->getAccessorLoc(), Record);
Code = pch::EXPR_EXT_VECTOR_ELEMENT;
@@ -565,8 +639,8 @@ void PCHStmtWriter::VisitInitListExpr(InitListExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumInits());
for (unsigned I = 0, N = E->getNumInits(); I != N; ++I)
- Writer.WriteSubStmt(E->getInit(I));
- Writer.WriteSubStmt(E->getSyntacticForm());
+ Writer.AddStmt(E->getInit(I));
+ Writer.AddStmt(E->getSyntacticForm());
Writer.AddSourceLocation(E->getLBraceLoc(), Record);
Writer.AddSourceLocation(E->getRBraceLoc(), Record);
Writer.AddDeclRef(E->getInitializedFieldInUnion(), Record);
@@ -578,7 +652,7 @@ void PCHStmtWriter::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumSubExprs());
for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I)
- Writer.WriteSubStmt(E->getSubExpr(I));
+ Writer.AddStmt(E->getSubExpr(I));
Writer.AddSourceLocation(E->getEqualOrColonLoc(), Record);
Record.push_back(E->usesGNUSyntax());
for (DesignatedInitExpr::designators_iterator D = E->designators_begin(),
@@ -618,7 +692,7 @@ void PCHStmtWriter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
void PCHStmtWriter::VisitVAArgExpr(VAArgExpr *E) {
VisitExpr(E);
- Writer.WriteSubStmt(E->getSubExpr());
+ Writer.AddStmt(E->getSubExpr());
Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
Writer.AddSourceLocation(E->getRParenLoc(), Record);
Code = pch::EXPR_VA_ARG;
@@ -634,7 +708,7 @@ void PCHStmtWriter::VisitAddrLabelExpr(AddrLabelExpr *E) {
void PCHStmtWriter::VisitStmtExpr(StmtExpr *E) {
VisitExpr(E);
- Writer.WriteSubStmt(E->getSubStmt());
+ Writer.AddStmt(E->getSubStmt());
Writer.AddSourceLocation(E->getLParenLoc(), Record);
Writer.AddSourceLocation(E->getRParenLoc(), Record);
Code = pch::EXPR_STMT;
@@ -651,9 +725,9 @@ void PCHStmtWriter::VisitTypesCompatibleExpr(TypesCompatibleExpr *E) {
void PCHStmtWriter::VisitChooseExpr(ChooseExpr *E) {
VisitExpr(E);
- Writer.WriteSubStmt(E->getCond());
- Writer.WriteSubStmt(E->getLHS());
- Writer.WriteSubStmt(E->getRHS());
+ Writer.AddStmt(E->getCond());
+ Writer.AddStmt(E->getLHS());
+ Writer.AddStmt(E->getRHS());
Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
Writer.AddSourceLocation(E->getRParenLoc(), Record);
Code = pch::EXPR_CHOOSE;
@@ -669,7 +743,7 @@ void PCHStmtWriter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumSubExprs());
for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I)
- Writer.WriteSubStmt(E->getExpr(I));
+ Writer.AddStmt(E->getExpr(I));
Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
Writer.AddSourceLocation(E->getRParenLoc(), Record);
Code = pch::EXPR_SHUFFLE_VECTOR;
@@ -688,6 +762,7 @@ void PCHStmtWriter::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
Writer.AddSourceLocation(E->getLocation(), Record);
Record.push_back(E->isByRef());
Record.push_back(E->isConstQualAdded());
+ Writer.AddStmt(E->getCopyConstructorExpr());
Code = pch::EXPR_BLOCK_DECL_REF;
}
@@ -697,7 +772,7 @@ void PCHStmtWriter::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
void PCHStmtWriter::VisitObjCStringLiteral(ObjCStringLiteral *E) {
VisitExpr(E);
- Writer.WriteSubStmt(E->getString());
+ Writer.AddStmt(E->getString());
Writer.AddSourceLocation(E->getAtLoc(), Record);
Code = pch::EXPR_OBJC_STRING_LITERAL;
}
@@ -730,7 +805,7 @@ void PCHStmtWriter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
VisitExpr(E);
Writer.AddDeclRef(E->getDecl(), Record);
Writer.AddSourceLocation(E->getLocation(), Record);
- Writer.WriteSubStmt(E->getBase());
+ Writer.AddStmt(E->getBase());
Record.push_back(E->isArrow());
Record.push_back(E->isFreeIvar());
Code = pch::EXPR_OBJC_IVAR_REF_EXPR;
@@ -740,7 +815,7 @@ void PCHStmtWriter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
VisitExpr(E);
Writer.AddDeclRef(E->getProperty(), Record);
Writer.AddSourceLocation(E->getLocation(), Record);
- Writer.WriteSubStmt(E->getBase());
+ Writer.AddStmt(E->getBase());
Code = pch::EXPR_OBJC_PROPERTY_REF_EXPR;
}
@@ -752,7 +827,7 @@ void PCHStmtWriter::VisitObjCImplicitSetterGetterRefExpr(
// NOTE: InterfaceDecl and Base are mutually exclusive.
Writer.AddDeclRef(E->getInterfaceDecl(), Record);
- Writer.WriteSubStmt(E->getBase());
+ Writer.AddStmt(E->getBase());
Writer.AddSourceLocation(E->getLocation(), Record);
Writer.AddSourceLocation(E->getClassLoc(), Record);
Code = pch::EXPR_OBJC_KVC_REF_EXPR;
@@ -764,7 +839,7 @@ void PCHStmtWriter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
Record.push_back((unsigned)E->getReceiverKind()); // FIXME: stable encoding
switch (E->getReceiverKind()) {
case ObjCMessageExpr::Instance:
- Writer.WriteSubStmt(E->getInstanceReceiver());
+ Writer.AddStmt(E->getInstanceReceiver());
break;
case ObjCMessageExpr::Class:
@@ -791,7 +866,7 @@ void PCHStmtWriter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
Arg != ArgEnd; ++Arg)
- Writer.WriteSubStmt(*Arg);
+ Writer.AddStmt(*Arg);
Code = pch::EXPR_OBJC_MESSAGE_EXPR;
}
@@ -803,16 +878,16 @@ void PCHStmtWriter::VisitObjCSuperExpr(ObjCSuperExpr *E) {
void PCHStmtWriter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
VisitStmt(S);
- Writer.WriteSubStmt(S->getElement());
- Writer.WriteSubStmt(S->getCollection());
- Writer.WriteSubStmt(S->getBody());
+ Writer.AddStmt(S->getElement());
+ Writer.AddStmt(S->getCollection());
+ Writer.AddStmt(S->getBody());
Writer.AddSourceLocation(S->getForLoc(), Record);
Writer.AddSourceLocation(S->getRParenLoc(), Record);
Code = pch::STMT_OBJC_FOR_COLLECTION;
}
void PCHStmtWriter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
- Writer.WriteSubStmt(S->getCatchBody());
+ Writer.AddStmt(S->getCatchBody());
Writer.AddDeclRef(S->getCatchParamDecl(), Record);
Writer.AddSourceLocation(S->getAtCatchLoc(), Record);
Writer.AddSourceLocation(S->getRParenLoc(), Record);
@@ -820,7 +895,7 @@ void PCHStmtWriter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
}
void PCHStmtWriter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
- Writer.WriteSubStmt(S->getFinallyBody());
+ Writer.AddStmt(S->getFinallyBody());
Writer.AddSourceLocation(S->getAtFinallyLoc(), Record);
Code = pch::STMT_OBJC_FINALLY;
}
@@ -828,24 +903,24 @@ void PCHStmtWriter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
void PCHStmtWriter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
Record.push_back(S->getNumCatchStmts());
Record.push_back(S->getFinallyStmt() != 0);
- Writer.WriteSubStmt(S->getTryBody());
+ Writer.AddStmt(S->getTryBody());
for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I)
- Writer.WriteSubStmt(S->getCatchStmt(I));
+ Writer.AddStmt(S->getCatchStmt(I));
if (S->getFinallyStmt())
- Writer.WriteSubStmt(S->getFinallyStmt());
+ Writer.AddStmt(S->getFinallyStmt());
Writer.AddSourceLocation(S->getAtTryLoc(), Record);
Code = pch::STMT_OBJC_AT_TRY;
}
void PCHStmtWriter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
- Writer.WriteSubStmt(S->getSynchExpr());
- Writer.WriteSubStmt(S->getSynchBody());
+ Writer.AddStmt(S->getSynchExpr());
+ Writer.AddStmt(S->getSynchBody());
Writer.AddSourceLocation(S->getAtSynchronizedLoc(), Record);
Code = pch::STMT_OBJC_AT_SYNCHRONIZED;
}
void PCHStmtWriter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
- Writer.WriteSubStmt(S->getThrowExpr());
+ Writer.AddStmt(S->getThrowExpr());
Writer.AddSourceLocation(S->getThrowLoc(), Record);
Code = pch::STMT_OBJC_AT_THROW;
}
@@ -867,17 +942,24 @@ void PCHStmtWriter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
void PCHStmtWriter::VisitCXXConstructExpr(CXXConstructExpr *E) {
VisitExpr(E);
+ Record.push_back(E->getNumArgs());
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ Writer.AddStmt(E->getArg(I));
Writer.AddDeclRef(E->getConstructor(), Record);
Writer.AddSourceLocation(E->getLocation(), Record);
Record.push_back(E->isElidable());
Record.push_back(E->requiresZeroInitialization());
- Record.push_back(E->getNumArgs());
- for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
- Writer.WriteSubStmt(E->getArg(I));
Record.push_back(E->getConstructionKind()); // FIXME: stable encoding
Code = pch::EXPR_CXX_CONSTRUCT;
}
+void PCHStmtWriter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
+ VisitCXXConstructExpr(E);
+ Writer.AddSourceLocation(E->getTypeBeginLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = pch::EXPR_CXX_TEMPORARY_OBJECT;
+}
+
void PCHStmtWriter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
VisitExplicitCastExpr(E);
Writer.AddSourceLocation(E->getOperatorLoc(), Record);
@@ -930,7 +1012,7 @@ void PCHStmtWriter::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
Writer.AddTypeSourceInfo(E->getTypeOperandSourceInfo(), Record);
Code = pch::EXPR_CXX_TYPEID_TYPE;
} else {
- Writer.WriteSubStmt(E->getExprOperand());
+ Writer.AddStmt(E->getExprOperand());
Code = pch::EXPR_CXX_TYPEID_EXPR;
}
}
@@ -945,19 +1027,20 @@ void PCHStmtWriter::VisitCXXThisExpr(CXXThisExpr *E) {
void PCHStmtWriter::VisitCXXThrowExpr(CXXThrowExpr *E) {
VisitExpr(E);
Writer.AddSourceLocation(E->getThrowLoc(), Record);
- Writer.WriteSubStmt(E->getSubExpr());
+ Writer.AddStmt(E->getSubExpr());
Code = pch::EXPR_CXX_THROW;
}
void PCHStmtWriter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
VisitExpr(E);
+
+ bool HasOtherExprStored = E->Param.getInt();
+ // Store these first, the reader reads them before creation.
+ Record.push_back(HasOtherExprStored);
+ if (HasOtherExprStored)
+ Writer.AddStmt(E->getExpr());
+ Writer.AddDeclRef(E->getParam(), Record);
Writer.AddSourceLocation(E->getUsedLocation(), Record);
- if (E->isExprStored()) {
- Record.push_back(1);
- Writer.WriteSubStmt(E->getExpr());
- } else {
- Record.push_back(0);
- }
Code = pch::EXPR_CXX_DEFAULT_ARG;
}
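
Same ordering rule as in the expressions above: HasOtherExprStored changes the node's layout, so it is stored first, where the reader sees it before creating the expression. A small sketch of a reader obeying that ordering, with invented field names:

#include <cstddef>
#include <cstdint>
#include <vector>

struct DefaultArg {
  bool HasStoredExpr;
  uint64_t StoredExpr;  // present only when HasStoredExpr is set
  uint64_t ParamID;
};

DefaultArg readDefaultArg(const std::vector<uint64_t> &Rec, size_t &Idx) {
  DefaultArg D{};
  D.HasStoredExpr = Rec[Idx++] != 0;  // read before "creating" the node
  if (D.HasStoredExpr)
    D.StoredExpr = Rec[Idx++];
  D.ParamID = Rec[Idx++];
  return D;
}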
@@ -965,21 +1048,28 @@ void PCHStmtWriter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
void PCHStmtWriter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
VisitExpr(E);
Writer.AddCXXTemporary(E->getTemporary(), Record);
- Writer.WriteSubStmt(E->getSubExpr());
+ Writer.AddStmt(E->getSubExpr());
Code = pch::EXPR_CXX_BIND_TEMPORARY;
}
-void PCHStmtWriter::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+void PCHStmtWriter::VisitCXXBindReferenceExpr(CXXBindReferenceExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubExpr());
+ Record.push_back(E->extendsLifetime());
+ Record.push_back(E->requiresTemporaryCopy());
+ Code = pch::EXPR_CXX_BIND_REFERENCE;
+}
+
+void PCHStmtWriter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
VisitExpr(E);
Writer.AddSourceLocation(E->getTypeBeginLoc(), Record);
Writer.AddSourceLocation(E->getRParenLoc(), Record);
- Code = pch::EXPR_CXX_ZERO_INIT_VALUE;
+ Code = pch::EXPR_CXX_SCALAR_VALUE_INIT;
}
void PCHStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) {
VisitExpr(E);
Record.push_back(E->isGlobalNew());
- Record.push_back(E->isParenTypeId());
Record.push_back(E->hasInitializer());
Record.push_back(E->isArray());
Record.push_back(E->getNumPlacementArgs());
@@ -987,15 +1077,48 @@ void PCHStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) {
Writer.AddDeclRef(E->getOperatorNew(), Record);
Writer.AddDeclRef(E->getOperatorDelete(), Record);
Writer.AddDeclRef(E->getConstructor(), Record);
+ Writer.AddSourceRange(E->getTypeIdParens(), Record);
Writer.AddSourceLocation(E->getStartLoc(), Record);
Writer.AddSourceLocation(E->getEndLoc(), Record);
for (CXXNewExpr::arg_iterator I = E->raw_arg_begin(), e = E->raw_arg_end();
I != e; ++I)
- Writer.WriteSubStmt(*I);
+ Writer.AddStmt(*I);
Code = pch::EXPR_CXX_NEW;
}
+void PCHStmtWriter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->isGlobalDelete());
+ Record.push_back(E->isArrayForm());
+ Writer.AddDeclRef(E->getOperatorDelete(), Record);
+ Writer.AddStmt(E->getArgument());
+ Writer.AddSourceLocation(E->getSourceRange().getBegin(), Record);
+
+ Code = pch::EXPR_CXX_DELETE;
+}
+
+void PCHStmtWriter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
+ VisitExpr(E);
+
+ Writer.AddStmt(E->getBase());
+ Record.push_back(E->isArrow());
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddNestedNameSpecifier(E->getQualifier(), Record);
+ Writer.AddSourceRange(E->getQualifierRange(), Record);
+ Writer.AddTypeSourceInfo(E->getScopeTypeInfo(), Record);
+ Writer.AddSourceLocation(E->getColonColonLoc(), Record);
+ Writer.AddSourceLocation(E->getTildeLoc(), Record);
+
+ // PseudoDestructorTypeStorage.
+ Writer.AddIdentifierRef(E->getDestroyedTypeIdentifier(), Record);
+ if (E->getDestroyedTypeIdentifier())
+ Writer.AddSourceLocation(E->getDestroyedTypeLoc(), Record);
+ else
+ Writer.AddTypeSourceInfo(E->getDestroyedTypeInfo(), Record);
+
+ Code = pch::EXPR_CXX_PSEUDO_DESTRUCTOR;
+}
void PCHStmtWriter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
VisitExpr(E);
@@ -1003,10 +1126,132 @@ void PCHStmtWriter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
for (unsigned i = 0, e = E->getNumTemporaries(); i != e; ++i)
Writer.AddCXXTemporary(E->getTemporary(i), Record);
- Writer.WriteSubStmt(E->getSubExpr());
+ Writer.AddStmt(E->getSubExpr());
Code = pch::EXPR_CXX_EXPR_WITH_TEMPORARIES;
}
+void
+PCHStmtWriter::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
+ VisitExpr(E);
+
+ // Don't emit anything here; NumTemplateArgs must be emitted first.
+
+ if (E->hasExplicitTemplateArgs()) {
+ const ExplicitTemplateArgumentList &Args
+ = *E->getExplicitTemplateArgumentList();
+ assert(Args.NumTemplateArgs &&
+ "Num of template args was zero! PCH reading will mess up!");
+ Record.push_back(Args.NumTemplateArgs);
+ AddExplicitTemplateArgumentList(Args);
+ } else {
+ Record.push_back(0);
+ }
+
+ if (!E->isImplicitAccess())
+ Writer.AddStmt(E->getBase());
+ else
+ Writer.AddStmt(0);
+ Writer.AddTypeRef(E->getBaseType(), Record);
+ Record.push_back(E->isArrow());
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddNestedNameSpecifier(E->getQualifier(), Record);
+ Writer.AddSourceRange(E->getQualifierRange(), Record);
+ Writer.AddDeclRef(E->getFirstQualifierFoundInScope(), Record);
+ Writer.AddDeclarationName(E->getMember(), Record);
+ Writer.AddSourceLocation(E->getMemberLoc(), Record);
+ Code = pch::EXPR_CXX_DEPENDENT_SCOPE_MEMBER;
+}
+
+void
+PCHStmtWriter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
+ VisitExpr(E);
+
+ // Don't emit anything here; NumTemplateArgs must be emitted first.
+
+ if (E->hasExplicitTemplateArgs()) {
+ const ExplicitTemplateArgumentList &Args = E->getExplicitTemplateArgs();
+ assert(Args.NumTemplateArgs &&
+ "Num of template args was zero! PCH reading will mess up!");
+ Record.push_back(Args.NumTemplateArgs);
+ AddExplicitTemplateArgumentList(Args);
+ } else {
+ Record.push_back(0);
+ }
+
+ Writer.AddDeclarationName(E->getDeclName(), Record);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddSourceRange(E->getQualifierRange(), Record);
+ Writer.AddNestedNameSpecifier(E->getQualifier(), Record);
+ Code = pch::EXPR_CXX_DEPENDENT_SCOPE_DECL_REF;
+}
+
+void
+PCHStmtWriter::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->arg_size());
+ for (CXXUnresolvedConstructExpr::arg_iterator
+ ArgI = E->arg_begin(), ArgE = E->arg_end(); ArgI != ArgE; ++ArgI)
+ Writer.AddStmt(*ArgI);
+ Writer.AddSourceLocation(E->getTypeBeginLoc(), Record);
+ Writer.AddTypeRef(E->getTypeAsWritten(), Record);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = pch::EXPR_CXX_UNRESOLVED_CONSTRUCT;
+}
+
+void PCHStmtWriter::VisitOverloadExpr(OverloadExpr *E) {
+ VisitExpr(E);
+
+ // Don't emit anything here; NumTemplateArgs must be emitted first.
+
+ if (E->hasExplicitTemplateArgs()) {
+ const ExplicitTemplateArgumentList &Args = E->getExplicitTemplateArgs();
+ assert(Args.NumTemplateArgs &&
+ "Num of template args was zero! PCH reading will mess up!");
+ Record.push_back(Args.NumTemplateArgs);
+ AddExplicitTemplateArgumentList(Args);
+ } else {
+ Record.push_back(0);
+ }
+
+ Record.push_back(E->getNumDecls());
+ for (OverloadExpr::decls_iterator
+ OvI = E->decls_begin(), OvE = E->decls_end(); OvI != OvE; ++OvI) {
+ Writer.AddDeclRef(OvI.getDecl(), Record);
+ Record.push_back(OvI.getAccess());
+ }
+
+ Writer.AddDeclarationName(E->getName(), Record);
+ Writer.AddNestedNameSpecifier(E->getQualifier(), Record);
+ Writer.AddSourceRange(E->getQualifierRange(), Record);
+ Writer.AddSourceLocation(E->getNameLoc(), Record);
+}
+
+void PCHStmtWriter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
+ VisitOverloadExpr(E);
+ Record.push_back(E->isArrow());
+ Record.push_back(E->hasUnresolvedUsing());
+ Writer.AddStmt(!E->isImplicitAccess() ? E->getBase() : 0);
+ Writer.AddTypeRef(E->getBaseType(), Record);
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Code = pch::EXPR_CXX_UNRESOLVED_MEMBER;
+}
+
+void PCHStmtWriter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
+ VisitOverloadExpr(E);
+ Record.push_back(E->requiresADL());
+ Record.push_back(E->isOverloaded());
+ Writer.AddDeclRef(E->getNamingClass(), Record);
+ Code = pch::EXPR_CXX_UNRESOLVED_LOOKUP;
+}
+
+void PCHStmtWriter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getTrait());
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Writer.AddTypeRef(E->getQueriedType(), Record);
+ Code = pch::EXPR_CXX_UNARY_TYPE_TRAIT;
+}
//===----------------------------------------------------------------------===//
// PCHWriter Implementation
@@ -1044,21 +1289,38 @@ void PCHWriter::WriteSubStmt(Stmt *S) {
RecordData Record;
PCHStmtWriter Writer(*this, Record);
++NumStatements;
-
+
if (!S) {
Stream.EmitRecord(pch::STMT_NULL_PTR, Record);
return;
}
+ // Redirect PCHWriter::AddStmt to collect sub stmts.
+ llvm::SmallVector<Stmt *, 16> SubStmts;
+ CollectedStmts = &SubStmts;
+
Writer.Code = pch::STMT_NULL_PTR;
Writer.Visit(S);
#ifndef NDEBUG
if (Writer.Code == pch::STMT_NULL_PTR) {
- S->dump();
+ SourceManager &SrcMgr
+ = DeclIDs.begin()->first->getASTContext().getSourceManager();
+ S->dump(SrcMgr);
assert(0 && "Unhandled sub statement writing PCH file");
}
#endif
+
+ // Revert PCHWriter::AddStmt.
+ CollectedStmts = &StmtsToEmit;
+
+ // Write the sub stmts in reverse order, last to first. When reading them
+ // back we will read them in the correct order by popping them off the
+ // Stmts stack. This simplifies reading and allows storing a variable
+ // number of sub stmts without knowing the count in advance.
+ while (!SubStmts.empty())
+ WriteSubStmt(SubStmts.pop_back_val());
+
Stream.EmitRecord(Writer.Code, Record);
}
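
The reversed emission order is what makes one-pass reading possible: a parent's children were written last-to-first, so a sequential reader keeps finished statements on a stack and each parent pops its children off in source order. A self-contained sketch of both sides, with a toy Node type rather than Clang's Stmt:

#include <memory>
#include <stack>
#include <utility>
#include <vector>

struct Node {
  std::vector<Node *> Kids;
  int Payload = 0;
};

// Writer: children last-to-first (recursively), then the node itself,
// tagged with its child count.
void write(std::vector<std::pair<int, unsigned>> &Stream, Node *N) {
  for (auto I = N->Kids.rbegin(), E = N->Kids.rend(); I != E; ++I)
    write(Stream, *I);
  Stream.push_back({N->Payload, (unsigned)N->Kids.size()});
}

// Reader: one pass and a stack rebuild the tree; children pop off in
// source order precisely because they were written reversed.
Node *read(const std::vector<std::pair<int, unsigned>> &Stream,
           std::vector<std::unique_ptr<Node>> &Arena) {
  std::stack<Node *> S;
  for (const auto &Rec : Stream) {
    Arena.push_back(std::make_unique<Node>());
    Node *N = Arena.back().get();
    N->Payload = Rec.first;
    N->Kids.resize(Rec.second);
    for (unsigned i = 0; i != Rec.second; ++i) {
      N->Kids[i] = S.top();
      S.pop();
    }
    S.push(N);
  }
  return S.top();
}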
@@ -1066,34 +1328,16 @@ void PCHWriter::WriteSubStmt(Stmt *S) {
/// queue via AddStmt().
void PCHWriter::FlushStmts() {
RecordData Record;
- PCHStmtWriter Writer(*this, Record);
for (unsigned I = 0, N = StmtsToEmit.size(); I != N; ++I) {
- ++NumStatements;
- Stmt *S = StmtsToEmit[I];
-
- if (!S) {
- Stream.EmitRecord(pch::STMT_NULL_PTR, Record);
- continue;
- }
-
- Writer.Code = pch::STMT_NULL_PTR;
- Writer.Visit(S);
-#ifndef NDEBUG
- if (Writer.Code == pch::STMT_NULL_PTR) {
- S->dump();
- assert(0 && "Unhandled expression writing PCH file");
- }
-#endif
- Stream.EmitRecord(Writer.Code, Record);
-
+ WriteSubStmt(StmtsToEmit[I]);
+
assert(N == StmtsToEmit.size() &&
"Substatement writen via AddStmt rather than WriteSubStmt!");
// Note that we are at the end of a full expression. Any
// expression records that follow this one are part of a different
// expression.
- Record.clear();
Stream.EmitRecord(pch::STMT_STOP, Record);
}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PrintParserCallbacks.cpp b/contrib/llvm/tools/clang/lib/Frontend/PrintParserCallbacks.cpp
index b032233..9220677 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PrintParserCallbacks.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PrintParserCallbacks.cpp
@@ -819,7 +819,8 @@ namespace {
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
- bool ParenTypeId, Declarator &D,
+ SourceRange TypeIdParens,
+ Declarator &D,
SourceLocation ConstructorLParen,
MultiExprArg ConstructorArgs,
SourceLocation ConstructorRParen) {
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index b6c18b7..73bca9a 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -23,6 +23,7 @@
#include "clang/Lex/TokenConcatenation.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Config/config.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
@@ -117,7 +118,7 @@ public:
virtual void Ident(SourceLocation Loc, const std::string &str);
virtual void PragmaComment(SourceLocation Loc, const IdentifierInfo *Kind,
const std::string &Str);
-
+ virtual void PragmaMessage(SourceLocation Loc, llvm::StringRef Str);
bool HandleFirstTokOnLine(Token &Tok);
bool MoveToLine(SourceLocation Loc) {
@@ -174,20 +175,6 @@ void PrintPPOutputPPCallbacks::WriteLineInfo(unsigned LineNo,
/// #line directive. This returns false if already at the specified line, true
/// if some newlines were emitted.
bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo) {
- if (DisableLineMarkers) {
- if (LineNo == CurLine) return false;
-
- CurLine = LineNo;
-
- if (!EmittedTokensOnThisLine && !EmittedMacroOnThisLine)
- return true;
-
- OS << '\n';
- EmittedTokensOnThisLine = false;
- EmittedMacroOnThisLine = false;
- return true;
- }
-
// If this line is "close enough" to the original line, just print newlines,
// otherwise print a #line directive.
if (LineNo-CurLine <= 8) {
@@ -199,8 +186,17 @@ bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo) {
const char *NewLines = "\n\n\n\n\n\n\n\n";
OS.write(NewLines, LineNo-CurLine);
}
- } else {
+ } else if (!DisableLineMarkers) {
+ // Emit a #line or line marker.
WriteLineInfo(LineNo, 0, 0);
+ } else {
+ // Okay, we're in -P mode, which turns off line markers. However, we still
+ // need to emit a newline between tokens on different lines.
+ if (EmittedTokensOnThisLine || EmittedMacroOnThisLine) {
+ OS << '\n';
+ EmittedTokensOnThisLine = false;
+ EmittedMacroOnThisLine = false;
+ }
}
CurLine = LineNo;
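
The restructured MoveToLine keeps a single heuristic across all modes: a gap of up to eight lines is padded with raw newlines, a larger jump emits a line marker, and in -P mode (markers disabled) a lone newline still separates tokens from different lines. A simplified standalone sketch of that decision; the marker format below is only illustrative:

#include <string>

std::string moveToLine(unsigned &CurLine, unsigned LineNo,
                       bool DisableLineMarkers, bool EmittedTokens) {
  std::string Out;
  if (LineNo == CurLine)
    return Out;                                  // already there
  if (LineNo > CurLine && LineNo - CurLine <= 8)
    Out.assign(LineNo - CurLine, '\n');          // "close enough": pad
  else if (!DisableLineMarkers)
    Out = "# " + std::to_string(LineNo) + " \"file\"\n";  // line marker
  else if (EmittedTokens)
    Out = "\n";                                  // -P mode separator
  CurLine = LineNo;
  return Out;
}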
@@ -311,6 +307,29 @@ void PrintPPOutputPPCallbacks::PragmaComment(SourceLocation Loc,
EmittedTokensOnThisLine = true;
}
+void PrintPPOutputPPCallbacks::PragmaMessage(SourceLocation Loc,
+ llvm::StringRef Str) {
+ MoveToLine(Loc);
+ OS << "#pragma message(";
+
+ OS << '"';
+
+ for (unsigned i = 0, e = Str.size(); i != e; ++i) {
+ unsigned char Char = Str[i];
+ if (isprint(Char) && Char != '\\' && Char != '"')
+ OS << (char)Char;
+ else // Output anything hard as an octal escape.
+ OS << '\\'
+ << (char)('0'+ ((Char >> 6) & 7))
+ << (char)('0'+ ((Char >> 3) & 7))
+ << (char)('0'+ ((Char >> 0) & 7));
+ }
+ OS << '"';
+
+ OS << ')';
+ EmittedTokensOnThisLine = true;
+}
+
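The escaping loop guarantees the reprinted pragma is still a valid string literal: printable characters other than backslash and double quote pass through, and everything else becomes a three-digit octal escape. The same loop, lifted into a standalone helper for illustration:

#include <cctype>
#include <string>

std::string escapeForPragma(const std::string &Str) {
  std::string Out = "\"";
  for (unsigned char C : Str) {
    if (std::isprint(C) && C != '\\' && C != '"')
      Out += (char)C;
    else {                         // octal escape, always three digits
      Out += '\\';
      Out += (char)('0' + ((C >> 6) & 7));
      Out += (char)('0' + ((C >> 3) & 7));
      Out += (char)('0' + ((C >> 0) & 7));
    }
  }
  return Out + "\"";
}

For example, an embedded double quote round-trips as \042 and a newline as \012, so re-lexing the printed output reproduces the original message.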
/// HandleFirstTokOnLine - When emitting a preprocessed file in -E mode, this
/// is called for the first token on each new line. If this really is the start
@@ -372,7 +391,7 @@ struct UnknownPragmaHandler : public PragmaHandler {
PrintPPOutputPPCallbacks *Callbacks;
UnknownPragmaHandler(const char *prefix, PrintPPOutputPPCallbacks *callbacks)
- : PragmaHandler(0), Prefix(prefix), Callbacks(callbacks) {}
+ : Prefix(prefix), Callbacks(callbacks) {}
virtual void HandlePragma(Preprocessor &PP, Token &PragmaTok) {
// Figure out what line we went to and insert the appropriate number of
// newline characters.
@@ -397,8 +416,9 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
PrintPPOutputPPCallbacks *Callbacks,
llvm::raw_ostream &OS) {
char Buffer[256];
- Token PrevPrevTok;
- Token PrevTok;
+ Token PrevPrevTok, PrevTok;
+ PrevPrevTok.startToken();
+ PrevTok.startToken();
while (1) {
// If this token is at the start of a line, emit newlines if needed.
@@ -454,6 +474,9 @@ static int MacroIDCompare(const void* a, const void* b) {
}
static void DoPrintMacros(Preprocessor &PP, llvm::raw_ostream *OS) {
+ // Ignore unknown pragmas.
+ PP.AddPragmaHandler(new EmptyPragmaHandler());
+
// -dM mode just scans and ignores all tokens in the files, then dumps out
// the macro table at the end.
PP.EnterMainSourceFile();
@@ -494,7 +517,7 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, llvm::raw_ostream *OS,
PrintPPOutputPPCallbacks *Callbacks =
new PrintPPOutputPPCallbacks(PP, *OS, !Opts.ShowLineMarkers,
Opts.ShowMacros);
- PP.AddPragmaHandler(0, new UnknownPragmaHandler("#pragma", Callbacks));
+ PP.AddPragmaHandler(new UnknownPragmaHandler("#pragma", Callbacks));
PP.AddPragmaHandler("GCC", new UnknownPragmaHandler("#pragma GCC",
Callbacks));
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
index 6ccf4f1..1b5b7e2 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -70,7 +70,7 @@ PrintIncludeStack(SourceLocation Loc, const SourceManager &SM) {
/// HighlightRange - Given a SourceRange and a line number, highlight (with ~'s)
/// any characters in LineNo that intersect the SourceRange.
-void TextDiagnosticPrinter::HighlightRange(const SourceRange &R,
+void TextDiagnosticPrinter::HighlightRange(const CharSourceRange &R,
const SourceManager &SM,
unsigned LineNo, FileID FID,
std::string &CaretLine,
@@ -112,8 +112,10 @@ void TextDiagnosticPrinter::HighlightRange(const SourceRange &R,
if (EndColNo) {
--EndColNo; // Zero base the col #.
- // Add in the length of the token, so that we cover multi-char tokens.
- EndColNo += Lexer::MeasureTokenLength(End, SM, *LangOpts);
+ // Add in the length of the token, so that we cover multi-char tokens if
+ // this is a token range.
+ if (R.isTokenRange())
+ EndColNo += Lexer::MeasureTokenLength(End, SM, *LangOpts);
} else {
EndColNo = CaretLine.size();
}
@@ -121,21 +123,24 @@ void TextDiagnosticPrinter::HighlightRange(const SourceRange &R,
assert(StartColNo <= EndColNo && "Invalid range!");
- // Pick the first non-whitespace column.
- while (StartColNo < SourceLine.size() &&
- (SourceLine[StartColNo] == ' ' || SourceLine[StartColNo] == '\t'))
- ++StartColNo;
-
- // Pick the last non-whitespace column.
- if (EndColNo > SourceLine.size())
- EndColNo = SourceLine.size();
- while (EndColNo-1 &&
- (SourceLine[EndColNo-1] == ' ' || SourceLine[EndColNo-1] == '\t'))
- --EndColNo;
-
- // If the start/end passed each other, then we are trying to highlight a range
- // that just exists in whitespace, which must be some sort of other bug.
- assert(StartColNo <= EndColNo && "Trying to highlight whitespace??");
+ // Check that a token range does not highlight only whitespace.
+ if (R.isTokenRange()) {
+ // Pick the first non-whitespace column.
+ while (StartColNo < SourceLine.size() &&
+ (SourceLine[StartColNo] == ' ' || SourceLine[StartColNo] == '\t'))
+ ++StartColNo;
+
+ // Pick the last non-whitespace column.
+ if (EndColNo > SourceLine.size())
+ EndColNo = SourceLine.size();
+ while (EndColNo-1 &&
+ (SourceLine[EndColNo-1] == ' ' || SourceLine[EndColNo-1] == '\t'))
+ --EndColNo;
+
+ // If the start/end passed each other, then we are trying to highlight a range
+ // that just exists in whitespace, which must be some sort of other bug.
+ assert(StartColNo <= EndColNo && "Trying to highlight whitespace??");
+ }
// Fill the range with ~'s.
for (unsigned i = StartColNo; i < EndColNo; ++i)
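
The distinction being threaded through this file: a token range's end column must be widened by the final token's length, while a character range's end is already exact. A hypothetical helper, not Clang API, showing how the tilde fill consumes that flag:

#include <string>

std::string underline(const std::string &Line, unsigned Start, unsigned End,
                      unsigned TokenLen, bool IsTokenRange) {
  if (IsTokenRange)
    End += TokenLen;           // cover the whole final token
  if (End > Line.size())
    End = (unsigned)Line.size();
  std::string Caret(Line.size(), ' ');
  for (unsigned i = Start; i < End; ++i)
    Caret[i] = '~';
  return Caret;
}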
@@ -281,7 +286,7 @@ static void SelectInterestingSourceRegion(std::string &SourceLine,
}
void TextDiagnosticPrinter::EmitCaretDiagnostic(SourceLocation Loc,
- SourceRange *Ranges,
+ CharSourceRange *Ranges,
unsigned NumRanges,
const SourceManager &SM,
const FixItHint *Hints,
@@ -312,10 +317,12 @@ void TextDiagnosticPrinter::EmitCaretDiagnostic(SourceLocation Loc,
// Map the ranges.
for (unsigned i = 0; i != NumRanges; ++i) {
- SourceLocation S = Ranges[i].getBegin(), E = Ranges[i].getEnd();
- if (S.isMacroID()) S = SM.getImmediateSpellingLoc(S);
- if (E.isMacroID()) E = SM.getImmediateSpellingLoc(E);
- Ranges[i] = SourceRange(S, E);
+ CharSourceRange &R = Ranges[i];
+ SourceLocation S = R.getBegin(), E = R.getEnd();
+ if (S.isMacroID())
+ R.setBegin(SM.getImmediateSpellingLoc(S));
+ if (E.isMacroID())
+ R.setEnd(SM.getImmediateSpellingLoc(E));
}
if (!Suppressed) {
@@ -777,7 +784,9 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
continue;
// Add in the length of the token, so that we cover multi-char tokens.
- unsigned TokSize = Lexer::MeasureTokenLength(E, SM, *LangOpts);
+ unsigned TokSize = 0;
+ if (Info.getRange(i).isTokenRange())
+ TokSize = Lexer::MeasureTokenLength(E, SM, *LangOpts);
OS << '{' << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
<< SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
@@ -904,15 +913,15 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
LastCaretDiagnosticWasNote = (Level == Diagnostic::Note);
// Get the ranges into a local array we can hack on.
- SourceRange Ranges[20];
+ CharSourceRange Ranges[20];
unsigned NumRanges = Info.getNumRanges();
assert(NumRanges < 20 && "Out of space");
for (unsigned i = 0; i != NumRanges; ++i)
Ranges[i] = Info.getRange(i);
unsigned NumHints = Info.getNumFixItHints();
- for (unsigned idx = 0; idx < NumHints; ++idx) {
- const FixItHint &Hint = Info.getFixItHint(idx);
+ for (unsigned i = 0; i != NumHints; ++i) {
+ const FixItHint &Hint = Info.getFixItHint(i);
if (Hint.RemoveRange.isValid()) {
assert(NumRanges < 20 && "Out of space");
Ranges[NumRanges++] = Hint.RemoveRange;
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp b/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
index 84c4f5d..8cc5616 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
+++ b/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
@@ -35,6 +35,8 @@ void clang::ProcessWarningOptions(Diagnostic &Diags,
const DiagnosticOptions &Opts) {
Diags.setSuppressSystemWarnings(true); // Default to -Wno-system-headers
Diags.setIgnoreAllWarnings(Opts.IgnoreWarnings);
+ Diags.setShowOverloads(
+ static_cast<Diagnostic::OverloadsShown>(Opts.ShowOverloads));
// Handle -ferror-limit
if (Opts.ErrorLimit)
diff --git a/contrib/llvm/tools/clang/lib/Headers/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Headers/CMakeLists.txt
index 047fdb3..97a99d6 100644
--- a/contrib/llvm/tools/clang/lib/Headers/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/lib/Headers/CMakeLists.txt
@@ -1,6 +1,5 @@
set(files
altivec.h
- arm_neon.h
emmintrin.h
float.h
iso646.h
@@ -22,6 +21,14 @@ else ()
set(output_dir ${LLVM_BINARY_DIR}/lib/clang/${CLANG_VERSION}/include)
endif ()
+# Generate arm_neon.h
+set(LLVM_TARGET_DEFINITIONS ${CLANG_SOURCE_DIR}/include/clang/Basic/arm_neon.td)
+tablegen(arm_neon.h.inc -gen-arm-neon)
+
+add_custom_command(OUTPUT ${output_dir}/arm_neon.h
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h.inc
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h.inc ${output_dir}/arm_neon.h
+ COMMENT "Copying clang's arm_neon.h...")
foreach( f ${files} )
set( src ${CMAKE_CURRENT_SOURCE_DIR}/${f} )
@@ -33,8 +40,8 @@ foreach( f ${files} )
endforeach( f )
add_custom_target(clang-headers ALL
- DEPENDS ${files})
+ DEPENDS ${files} ${output_dir}/arm_neon.h)
-install(FILES ${files}
+install(FILES ${files} ${output_dir}/arm_neon.h
PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
DESTINATION lib${LLVM_LIBDIR_SUFFIX}/clang/${CLANG_VERSION}/include)
diff --git a/contrib/llvm/tools/clang/lib/Headers/Makefile b/contrib/llvm/tools/clang/lib/Headers/Makefile
index cb36e84..ebb8384 100644
--- a/contrib/llvm/tools/clang/lib/Headers/Makefile
+++ b/contrib/llvm/tools/clang/lib/Headers/Makefile
@@ -7,10 +7,15 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-include $(LEVEL)/Makefile.common
+CLANG_LEVEL := ../..
-CLANG_VERSION := $(shell cat $(PROJ_SRC_DIR)/../../VER)
+BUILT_SOURCES = arm_neon.h.inc
+TABLEGEN_INC_FILES_COMMON = 1
+
+include $(CLANG_LEVEL)/Makefile
+
+CLANG_VERSION := $(word 3,$(shell grep "CLANG_VERSION " \
+ $(PROJ_OBJ_DIR)/$(CLANG_LEVEL)/include/clang/Basic/Version.inc))
HeaderDir := $(PROJ_OBJ_ROOT)/$(BuildMode)/lib/clang/$(CLANG_VERSION)/include
@@ -19,7 +24,11 @@ HEADERS := $(notdir $(wildcard $(PROJ_SRC_DIR)/*.h))
OBJHEADERS := $(addprefix $(HeaderDir)/, $(HEADERS))
-$(OBJHEADERS): $(HeaderDir)/%.h: $(PROJ_SRC_DIR)/%.h $(HeaderDir)/.dir
+$(OBJHEADERS): $(HeaderDir)/%.h: $(PROJ_SRC_DIR)/%.h $(HeaderDir)/.dir $(HeaderDir)/arm_neon.h
+ $(Verb) cp $< $@
+ $(Echo) Copying $(notdir $<) to build dir
+
+$(HeaderDir)/arm_neon.h: $(BUILT_SOURCES) $(HeaderDir)/.dir
$(Verb) cp $< $@
$(Echo) Copying $(notdir $<) to build dir
@@ -38,3 +47,7 @@ $(INSTHEADERS): $(PROJ_headers)/%.h: $(HeaderDir)/%.h | $(PROJ_headers)
$(Echo) Installing compiler include file: $(notdir $<)
install-local:: $(INSTHEADERS)
+
+$(ObjDir)/arm_neon.h.inc.tmp : $(CLANG_LEVEL)/include/clang/Basic/arm_neon.td $(TBLGEN) $(ObjDir)/.dir
+ $(Echo) "Building Clang arm_neon.h.inc with tblgen"
+ $(Verb) $(TableGen) -gen-arm-neon -o $(call SYSPATH, $@) $<
diff --git a/contrib/llvm/tools/clang/lib/Headers/altivec.h b/contrib/llvm/tools/clang/lib/Headers/altivec.h
index 1cd0db8..d3d5ad9 100644
--- a/contrib/llvm/tools/clang/lib/Headers/altivec.h
+++ b/contrib/llvm/tools/clang/lib/Headers/altivec.h
@@ -20,6 +20,9 @@
*
\*===----------------------------------------------------------------------===*/
+// TODO: Add functions for 'vector bool ...' and 'vector pixel' argument types,
+// per the 'AltiVec Technology Programming Interface Manual'.
+
#ifndef __ALTIVEC_H
#define __ALTIVEC_H
@@ -34,534 +37,629 @@
#define __CR6_LT 2
#define __CR6_LT_REV 3
-#define _ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+
+static vector signed char __ATTRS_o_ai
+vec_perm(vector signed char a, vector signed char b, vector unsigned char c);
+
+static vector unsigned char __ATTRS_o_ai
+vec_perm(vector unsigned char a, vector unsigned char b, vector unsigned char c);
+
+static vector short __ATTRS_o_ai
+vec_perm(vector short a, vector short b, vector unsigned char c);
+
+static vector unsigned short __ATTRS_o_ai
+vec_perm(vector unsigned short a, vector unsigned short b, vector unsigned char c);
+
+static vector int __ATTRS_o_ai
+vec_perm(vector int a, vector int b, vector unsigned char c);
+
+static vector unsigned int __ATTRS_o_ai
+vec_perm(vector unsigned int a, vector unsigned int b, vector unsigned char c);
+
+static vector float __ATTRS_o_ai
+vec_perm(vector float a, vector float b, vector unsigned char c);
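These forward declarations let the rest of the header implement the merge operations in terms of vec_perm. A minimal sketch of its semantics, with hypothetical values: each byte of the third operand selects, by its low five bits, one byte of the 32-byte concatenation of the first two operands.

    vector unsigned char a = (vector unsigned char)(0);    // sixteen 0x00 bytes
    vector unsigned char b = (vector unsigned char)(0xFF); // sixteen 0xFF bytes
    vector unsigned char c = (vector unsigned char)
      (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
       0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17);
    vector unsigned char r = vec_perm(a, b, c); // 00 FF 00 FF ... interleaved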
/* vec_abs */
-#define __builtin_vec_abs vec_abs
#define __builtin_altivec_abs_v16qi vec_abs
#define __builtin_altivec_abs_v8hi vec_abs
#define __builtin_altivec_abs_v4si vec_abs
-static vector signed char _ATTRS_o_ai
+static vector signed char __ATTRS_o_ai
vec_abs(vector signed char a)
{
return __builtin_altivec_vmaxsb(a, -a);
}
-static vector signed short _ATTRS_o_ai
+static vector signed short __ATTRS_o_ai
vec_abs(vector signed short a)
{
return __builtin_altivec_vmaxsh(a, -a);
}
-static vector signed int _ATTRS_o_ai
+static vector signed int __ATTRS_o_ai
vec_abs(vector signed int a)
{
return __builtin_altivec_vmaxsw(a, -a);
}
-static vector float _ATTRS_o_ai
+static vector float __ATTRS_o_ai
vec_abs(vector float a)
{
- vector unsigned int res = (vector unsigned int)a &
- (vector unsigned int)(0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF);
+ vector unsigned int res = (vector unsigned int)a & (vector unsigned int)(0x7FFFFFFF);
return (vector float)res;
}
/* vec_abss */
-#define __builtin_vec_abss vec_abss
#define __builtin_altivec_abss_v16qi vec_abss
#define __builtin_altivec_abss_v8hi vec_abss
#define __builtin_altivec_abss_v4si vec_abss
-static vector signed char _ATTRS_o_ai
+static vector signed char __ATTRS_o_ai
vec_abss(vector signed char a)
{
- return __builtin_altivec_vmaxsb(a, __builtin_altivec_vsubsbs(
- (vector signed char)(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), a));
+ return __builtin_altivec_vmaxsb(a, __builtin_altivec_vsubsbs((vector signed char)(0), a));
}
-static vector signed short _ATTRS_o_ai
+static vector signed short __ATTRS_o_ai
vec_abss(vector signed short a)
{
- return __builtin_altivec_vmaxsh(a, __builtin_altivec_vsubshs(
- (vector signed short)(0, 0, 0, 0, 0, 0, 0, 0), a));
+ return __builtin_altivec_vmaxsh(a, __builtin_altivec_vsubshs((vector signed short)(0), a));
}
-static vector signed int _ATTRS_o_ai
+static vector signed int __ATTRS_o_ai
vec_abss(vector signed int a)
{
- return __builtin_altivec_vmaxsw(a, __builtin_altivec_vsubsws(
- (vector signed int)(0, 0, 0, 0), a));
+ return __builtin_altivec_vmaxsw(a, __builtin_altivec_vsubsws((vector signed int)(0), a));
}
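A short usage sketch: integer vec_abs is max(a, -a), and vec_abss uses the saturating subtract so the most-negative lane clamps instead of wrapping; the float overload simply clears the sign bit with the 0x7FFFFFFF mask above.

    vector signed int x = (vector signed int)(-5);
    vector signed int y = vec_abs(x);  // every lane becomes 5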
/* vec_add */
-#define __builtin_altivec_vaddubm vec_add
-#define __builtin_altivec_vadduhm vec_add
-#define __builtin_altivec_vadduwm vec_add
-#define __builtin_altivec_vaddfp vec_add
-#define __builtin_vec_vaddubm vec_add
-#define __builtin_vec_vadduhm vec_add
-#define __builtin_vec_vadduwm vec_add
-#define __builtin_vec_vaddfp vec_add
-#define vec_vaddubm vec_add
-#define vec_vadduhm vec_add
-#define vec_vadduwm vec_add
-#define vec_vaddfp vec_add
-
-static vector signed char _ATTRS_o_ai
+static vector signed char __ATTRS_o_ai
vec_add(vector signed char a, vector signed char b)
{
return a + b;
}
-static vector unsigned char _ATTRS_o_ai
+static vector unsigned char __ATTRS_o_ai
vec_add(vector unsigned char a, vector unsigned char b)
{
return a + b;
}
-static vector short _ATTRS_o_ai
+static vector short __ATTRS_o_ai
vec_add(vector short a, vector short b)
{
return a + b;
}
-static vector unsigned short _ATTRS_o_ai
+static vector unsigned short __ATTRS_o_ai
vec_add(vector unsigned short a, vector unsigned short b)
{
return a + b;
}
-static vector int _ATTRS_o_ai
+static vector int __ATTRS_o_ai
vec_add(vector int a, vector int b)
{
return a + b;
}
-static vector unsigned int _ATTRS_o_ai
+static vector unsigned int __ATTRS_o_ai
vec_add(vector unsigned int a, vector unsigned int b)
{
return a + b;
}
-static vector float _ATTRS_o_ai
+static vector float __ATTRS_o_ai
vec_add(vector float a, vector float b)
{
return a + b;
}
+/* vec_vaddubm */
+
+#define __builtin_altivec_vaddubm vec_vaddubm
+
+static vector signed char __ATTRS_o_ai
+vec_vaddubm(vector signed char a, vector signed char b)
+{
+ return a + b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vaddubm(vector unsigned char a, vector unsigned char b)
+{
+ return a + b;
+}
+
+/* vec_vadduhm */
+
+#define __builtin_altivec_vadduhm vec_vadduhm
+
+static vector short __ATTRS_o_ai
+vec_vadduhm(vector short a, vector short b)
+{
+ return a + b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhm(vector unsigned short a, vector unsigned short b)
+{
+ return a + b;
+}
+
+/* vec_vadduwm */
+
+#define __builtin_altivec_vadduwm vec_vadduwm
+
+static vector int __ATTRS_o_ai
+vec_vadduwm(vector int a, vector int b)
+{
+ return a + b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vadduwm(vector unsigned int a, vector unsigned int b)
+{
+ return a + b;
+}
+
+/* vec_vaddfp */
+
+#define __builtin_altivec_vaddfp vec_vaddfp
+
+static vector float __attribute__((__always_inline__))
+vec_vaddfp(vector float a, vector float b)
+{
+ return a + b;
+}
+
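The pattern here, repeated throughout the rest of this diff, replaces the old #define aliases with real overloaded inline functions, so every call is type-checked and the single-type names (vec_vaddubm, vec_vaddfp, ...) become proper functions rather than macro spellings of vec_add. A small sketch of the resulting overload resolution:

    vector float       f = vec_add((vector float)(1.0f), (vector float)(2.0f)); // vaddfp
    vector signed char c = vec_add((vector signed char)(1),
                                   (vector signed char)(2));                    // vaddubm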
/* vec_addc */
-#define __builtin_vec_addc __builtin_altivec_vaddcuw
-#define vec_vaddcuw __builtin_altivec_vaddcuw
-#define vec_addc __builtin_altivec_vaddcuw
+static vector unsigned int __attribute__((__always_inline__))
+vec_addc(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vaddcuw(a, b);
+}
+
+/* vec_vaddcuw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vaddcuw(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vaddcuw(a, b);
+}
/* vec_adds */
-#define __builtin_vec_vaddsbs __builtin_altivec_vaddsbs
-#define __builtin_vec_vaddubs __builtin_altivec_vaddubs
-#define __builtin_vec_vaddshs __builtin_altivec_vaddshs
-#define __builtin_vec_vadduhs __builtin_altivec_vadduhs
-#define __builtin_vec_vaddsws __builtin_altivec_vaddsws
-#define __builtin_vec_vadduws __builtin_altivec_vadduws
-#define vec_vaddsbs __builtin_altivec_vaddsbs
-#define vec_vaddubs __builtin_altivec_vaddubs
-#define vec_vaddshs __builtin_altivec_vaddshs
-#define vec_vadduhs __builtin_altivec_vadduhs
-#define vec_vaddsws __builtin_altivec_vaddsws
-#define vec_vadduws __builtin_altivec_vadduws
-
-static vector signed char _ATTRS_o_ai
+static vector signed char __ATTRS_o_ai
vec_adds(vector signed char a, vector signed char b)
{
return __builtin_altivec_vaddsbs(a, b);
}
-static vector unsigned char _ATTRS_o_ai
+static vector unsigned char __ATTRS_o_ai
vec_adds(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vaddubs(a, b);
}
-static vector short _ATTRS_o_ai
+static vector short __ATTRS_o_ai
vec_adds(vector short a, vector short b)
{
return __builtin_altivec_vaddshs(a, b);
}
-static vector unsigned short _ATTRS_o_ai
+static vector unsigned short __ATTRS_o_ai
vec_adds(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vadduhs(a, b);
}
-static vector int _ATTRS_o_ai
+static vector int __ATTRS_o_ai
vec_adds(vector int a, vector int b)
{
return __builtin_altivec_vaddsws(a, b);
}
-static vector unsigned int _ATTRS_o_ai
+static vector unsigned int __ATTRS_o_ai
vec_adds(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vadduws(a, b);
}
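Sketch of the adds/add distinction: vec_adds saturates where vec_add wraps.

    vector signed char m = (vector signed char)(127);
    vector signed char s = vec_adds(m, m); // every lane clamps to 127
    vector signed char w = vec_add(m, m);  // every lane wraps to -2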
-/* vec_sub */
+/* vec_vaddsbs */
-#define __builtin_altivec_vsububm vec_sub
-#define __builtin_altivec_vsubuhm vec_sub
-#define __builtin_altivec_vsubuwm vec_sub
-#define __builtin_altivec_vsubfp vec_sub
-#define __builtin_vec_vsububm vec_sub
-#define __builtin_vec_vsubuhm vec_sub
-#define __builtin_vec_vsubuwm vec_sub
-#define __builtin_vec_vsubfp vec_sub
-#define vec_vsububm vec_sub
-#define vec_vsubuhm vec_sub
-#define vec_vsubuwm vec_sub
-#define vec_vsubfp vec_sub
-
-static vector signed char _ATTRS_o_ai
-vec_sub(vector signed char a, vector signed char b)
+static vector signed char __attribute__((__always_inline__))
+vec_vaddsbs(vector signed char a, vector signed char b)
{
- return a - b;
+ return __builtin_altivec_vaddsbs(a, b);
}
-static vector unsigned char _ATTRS_o_ai
-vec_sub(vector unsigned char a, vector unsigned char b)
+/* vec_vaddubs */
+
+static vector unsigned char __attribute__((__always_inline__))
+vec_vaddubs(vector unsigned char a, vector unsigned char b)
{
- return a - b;
+ return __builtin_altivec_vaddubs(a, b);
}
-static vector short _ATTRS_o_ai
-vec_sub(vector short a, vector short b)
+/* vec_vaddshs */
+
+static vector short __attribute__((__always_inline__))
+vec_vaddshs(vector short a, vector short b)
{
- return a - b;
+ return __builtin_altivec_vaddshs(a, b);
}
-static vector unsigned short _ATTRS_o_ai
-vec_sub(vector unsigned short a, vector unsigned short b)
+/* vec_vadduhs */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vadduhs(vector unsigned short a, vector unsigned short b)
{
- return a - b;
+ return __builtin_altivec_vadduhs(a, b);
}
-static vector int _ATTRS_o_ai
-vec_sub(vector int a, vector int b)
+/* vec_vaddsws */
+
+static vector int __attribute__((__always_inline__))
+vec_vaddsws(vector int a, vector int b)
{
- return a - b;
+ return __builtin_altivec_vaddsws(a, b);
}
-static vector unsigned int _ATTRS_o_ai
-vec_sub(vector unsigned int a, vector unsigned int b)
+/* vec_vadduws */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vadduws(vector unsigned int a, vector unsigned int b)
{
- return a - b;
+ return __builtin_altivec_vadduws(a, b);
}
-static vector float _ATTRS_o_ai
-vec_sub(vector float a, vector float b)
+/* vec_and */
+
+#define __builtin_altivec_vand vec_and
+
+static vector signed char __ATTRS_o_ai
+vec_and(vector signed char a, vector signed char b)
{
- return a - b;
+ return a & b;
}
-/* vec_subs */
+static vector unsigned char __ATTRS_o_ai
+vec_and(vector unsigned char a, vector unsigned char b)
+{
+ return a & b;
+}
-#define __builtin_vec_vsubsbs __builtin_altivec_vsubsbs
-#define __builtin_vec_vsububs __builtin_altivec_vsububs
-#define __builtin_vec_vsubshs __builtin_altivec_vsubshs
-#define __builtin_vec_vsubuhs __builtin_altivec_vsubuhs
-#define __builtin_vec_vsubsws __builtin_altivec_vsubsws
-#define __builtin_vec_vsubuws __builtin_altivec_vsubuws
-#define vec_vsubsbs __builtin_altivec_vsubsbs
-#define vec_vsububs __builtin_altivec_vsububs
-#define vec_vsubshs __builtin_altivec_vsubshs
-#define vec_vsubuhs __builtin_altivec_vsubuhs
-#define vec_vsubsws __builtin_altivec_vsubsws
-#define vec_vsubuws __builtin_altivec_vsubuws
-
-static vector signed char _ATTRS_o_ai
-vec_subs(vector signed char a, vector signed char b)
+static vector short __ATTRS_o_ai
+vec_and(vector short a, vector short b)
{
- return __builtin_altivec_vsubsbs(a, b);
+ return a & b;
}
-static vector unsigned char _ATTRS_o_ai
-vec_subs(vector unsigned char a, vector unsigned char b)
+static vector unsigned short __ATTRS_o_ai
+vec_and(vector unsigned short a, vector unsigned short b)
{
- return __builtin_altivec_vsububs(a, b);
+ return a & b;
}
-static vector short _ATTRS_o_ai
-vec_subs(vector short a, vector short b)
+static vector int __ATTRS_o_ai
+vec_and(vector int a, vector int b)
{
- return __builtin_altivec_vsubshs(a, b);
+ return a & b;
}
-static vector unsigned short _ATTRS_o_ai
-vec_subs(vector unsigned short a, vector unsigned short b)
+static vector unsigned int __ATTRS_o_ai
+vec_and(vector unsigned int a, vector unsigned int b)
{
- return __builtin_altivec_vsubuhs(a, b);
+ return a & b;
}
-static vector int _ATTRS_o_ai
-vec_subs(vector int a, vector int b)
+static vector float __ATTRS_o_ai
+vec_and(vector float a, vector float b)
{
- return __builtin_altivec_vsubsws(a, b);
+ vector unsigned int res = (vector unsigned int)a & (vector unsigned int)b;
+ return (vector float)res;
}
-static vector unsigned int _ATTRS_o_ai
-vec_subs(vector unsigned int a, vector unsigned int b)
+/* vec_vand */
+
+static vector signed char __ATTRS_o_ai
+vec_vand(vector signed char a, vector signed char b)
{
- return __builtin_altivec_vsubuws(a, b);
+ return a & b;
}
-/* vec_avg */
+static vector unsigned char __ATTRS_o_ai
+vec_vand(vector unsigned char a, vector unsigned char b)
+{
+ return a & b;
+}
-#define __builtin_vec_vavgsb __builtin_altivec_vavgsb
-#define __builtin_vec_vavgub __builtin_altivec_vavgub
-#define __builtin_vec_vavgsh __builtin_altivec_vavgsh
-#define __builtin_vec_vavguh __builtin_altivec_vavguh
-#define __builtin_vec_vavgsw __builtin_altivec_vavgsw
-#define __builtin_vec_vavguw __builtin_altivec_vavguw
-#define vec_vavgsb __builtin_altivec_vavgsb
-#define vec_vavgub __builtin_altivec_vavgub
-#define vec_vavgsh __builtin_altivec_vavgsh
-#define vec_vavguh __builtin_altivec_vavguh
-#define vec_vavgsw __builtin_altivec_vavgsw
-#define vec_vavguw __builtin_altivec_vavguw
-
-static vector signed char _ATTRS_o_ai
-vec_avg(vector signed char a, vector signed char b)
+static vector short __ATTRS_o_ai
+vec_vand(vector short a, vector short b)
{
- return __builtin_altivec_vavgsb(a, b);
+ return a & b;
}
-static vector unsigned char _ATTRS_o_ai
-vec_avg(vector unsigned char a, vector unsigned char b)
+static vector unsigned short __ATTRS_o_ai
+vec_vand(vector unsigned short a, vector unsigned short b)
{
- return __builtin_altivec_vavgub(a, b);
+ return a & b;
}
-static vector short _ATTRS_o_ai
-vec_avg(vector short a, vector short b)
+static vector int __ATTRS_o_ai
+vec_vand(vector int a, vector int b)
{
- return __builtin_altivec_vavgsh(a, b);
+ return a & b;
}
-static vector unsigned short _ATTRS_o_ai
-vec_avg(vector unsigned short a, vector unsigned short b)
+static vector unsigned int __ATTRS_o_ai
+vec_vand(vector unsigned int a, vector unsigned int b)
{
- return __builtin_altivec_vavguh(a, b);
+ return a & b;
}
-static vector int _ATTRS_o_ai
-vec_avg(vector int a, vector int b)
+static vector float __ATTRS_o_ai
+vec_vand(vector float a, vector float b)
{
- return __builtin_altivec_vavgsw(a, b);
+ vector unsigned int res = (vector unsigned int)a & (vector unsigned int)b;
+ return (vector float)res;
}
-static vector unsigned int _ATTRS_o_ai
-vec_avg(vector unsigned int a, vector unsigned int b)
+/* vec_andc */
+
+#define __builtin_altivec_vandc vec_andc
+
+static vector signed char __ATTRS_o_ai
+vec_andc(vector signed char a, vector signed char b)
{
- return __builtin_altivec_vavguw(a, b);
+ return a & ~b;
}
-/* vec_st */
+static vector unsigned char __ATTRS_o_ai
+vec_andc(vector unsigned char a, vector unsigned char b)
+{
+ return a & ~b;
+}
-#define __builtin_vec_st vec_st
-#define vec_stvx vec_st
+static vector short __ATTRS_o_ai
+vec_andc(vector short a, vector short b)
+{
+ return a & ~b;
+}
-static void _ATTRS_o_ai
-vec_st(vector signed char a, int b, vector signed char *c)
+static vector unsigned short __ATTRS_o_ai
+vec_andc(vector unsigned short a, vector unsigned short b)
{
- __builtin_altivec_stvx((vector int)a, b, (void *)c);
+ return a & ~b;
}
-static void _ATTRS_o_ai
-vec_st(vector unsigned char a, int b, vector unsigned char *c)
+static vector int __ATTRS_o_ai
+vec_andc(vector int a, vector int b)
{
- __builtin_altivec_stvx((vector int)a, b, (void *)c);
+ return a & ~b;
}
-static void _ATTRS_o_ai
-vec_st(vector short a, int b, vector short *c)
+static vector unsigned int __ATTRS_o_ai
+vec_andc(vector unsigned int a, vector unsigned int b)
{
- __builtin_altivec_stvx((vector int)a, b, (void *)c);
+ return a & ~b;
}
-static void _ATTRS_o_ai
-vec_st(vector unsigned short a, int b, vector unsigned short *c)
+static vector float __ATTRS_o_ai
+vec_andc(vector float a, vector float b)
{
- __builtin_altivec_stvx((vector int)a, b, (void *)c);
+ vector unsigned int res = (vector unsigned int)a & ~(vector unsigned int)b;
+ return (vector float)res;
}
-static void _ATTRS_o_ai
-vec_st(vector int a, int b, vector int *c)
+/* vec_vandc */
+
+static vector signed char __ATTRS_o_ai
+vec_vandc(vector signed char a, vector signed char b)
{
- __builtin_altivec_stvx(a, b, (void *)c);
+ return a & ~b;
}
-static void _ATTRS_o_ai
-vec_st(vector unsigned int a, int b, vector unsigned int *c)
+static vector unsigned char __ATTRS_o_ai
+vec_vandc(vector unsigned char a, vector unsigned char b)
{
- __builtin_altivec_stvx((vector int)a, b, (void *)c);
+ return a & ~b;
}
-static void _ATTRS_o_ai
-vec_st(vector float a, int b, vector float *c)
+static vector short __ATTRS_o_ai
+vec_vandc(vector short a, vector short b)
{
- __builtin_altivec_stvx((vector int)a, b, (void *)c);
+ return a & ~b;
}
-/* vec_stl */
+static vector unsigned short __ATTRS_o_ai
+vec_vandc(vector unsigned short a, vector unsigned short b)
+{
+ return a & ~b;
+}
-#define __builtin_vec_stl vec_stl
-#define vec_stvxl vec_stl
+static vector int __ATTRS_o_ai
+vec_vandc(vector int a, vector int b)
+{
+ return a & ~b;
+}
-static void _ATTRS_o_ai
-vec_stl(vector signed char a, int b, vector signed char *c)
+static vector unsigned int __ATTRS_o_ai
+vec_vandc(vector unsigned int a, vector unsigned int b)
{
- __builtin_altivec_stvxl((vector int)a, b, (void *)c);
+ return a & ~b;
}
-static void _ATTRS_o_ai
-vec_stl(vector unsigned char a, int b, vector unsigned char *c)
+static vector float __ATTRS_o_ai
+vec_vandc(vector float a, vector float b)
{
- __builtin_altivec_stvxl((vector int)a, b, (void *)c);
+ vector unsigned int res = (vector unsigned int)a & ~(vector unsigned int)b;
+ return (vector float)res;
}
-static void _ATTRS_o_ai
-vec_stl(vector short a, int b, vector short *c)
+/* vec_avg */
+
+static vector signed char __ATTRS_o_ai
+vec_avg(vector signed char a, vector signed char b)
{
- __builtin_altivec_stvxl((vector int)a, b, (void *)c);
+ return __builtin_altivec_vavgsb(a, b);
}
-static void _ATTRS_o_ai
-vec_stl(vector unsigned short a, int b, vector unsigned short *c)
+static vector unsigned char __ATTRS_o_ai
+vec_avg(vector unsigned char a, vector unsigned char b)
{
- __builtin_altivec_stvxl((vector int)a, b, (void *)c);
+ return __builtin_altivec_vavgub(a, b);
}
-static void _ATTRS_o_ai
-vec_stl(vector int a, int b, vector int *c)
+static vector short __ATTRS_o_ai
+vec_avg(vector short a, vector short b)
{
- __builtin_altivec_stvxl(a, b, (void *)c);
+ return __builtin_altivec_vavgsh(a, b);
}
-static void _ATTRS_o_ai
-vec_stl(vector unsigned int a, int b, vector unsigned int *c)
+static vector unsigned short __ATTRS_o_ai
+vec_avg(vector unsigned short a, vector unsigned short b)
{
- __builtin_altivec_stvxl((vector int)a, b, (void *)c);
+ return __builtin_altivec_vavguh(a, b);
}
-static void _ATTRS_o_ai
-vec_stl(vector float a, int b, vector float *c)
+static vector int __ATTRS_o_ai
+vec_avg(vector int a, vector int b)
{
- __builtin_altivec_stvxl((vector int)a, b, (void *)c);
+ return __builtin_altivec_vavgsw(a, b);
}
-/* vec_ste */
+static vector unsigned int __ATTRS_o_ai
+vec_avg(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vavguw(a, b);
+}
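Sketch of the semantics: vec_avg is the rounded average, (a + b + 1) >> 1 per lane, computed without intermediate overflow.

    vector unsigned char r = vec_avg((vector unsigned char)(1),
                                     (vector unsigned char)(2)); // all lanes 2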
-#define __builtin_vec_stvebx __builtin_altivec_stvebx
-#define __builtin_vec_stvehx __builtin_altivec_stvehx
-#define __builtin_vec_stvewx __builtin_altivec_stvewx
-#define vec_stvebx __builtin_altivec_stvebx
-#define vec_stvehx __builtin_altivec_stvehx
-#define vec_stvewx __builtin_altivec_stvewx
+/* vec_vavgsb */
-static void _ATTRS_o_ai
-vec_ste(vector signed char a, int b, vector signed char *c)
+static vector signed char __attribute__((__always_inline__))
+vec_vavgsb(vector signed char a, vector signed char b)
{
- __builtin_altivec_stvebx((vector char)a, b, (void *)c);
+ return __builtin_altivec_vavgsb(a, b);
}
-static void _ATTRS_o_ai
-vec_ste(vector unsigned char a, int b, vector unsigned char *c)
+/* vec_vavgub */
+
+static vector unsigned char __attribute__((__always_inline__))
+vec_vavgub(vector unsigned char a, vector unsigned char b)
{
- __builtin_altivec_stvebx((vector char)a, b, (void *)c);
+ return __builtin_altivec_vavgub(a, b);
}
-static void _ATTRS_o_ai
-vec_ste(vector short a, int b, vector short *c)
+/* vec_vavgsh */
+
+static vector short __attribute__((__always_inline__))
+vec_vavgsh(vector short a, vector short b)
{
- __builtin_altivec_stvehx(a, b, (void *)c);
+ return __builtin_altivec_vavgsh(a, b);
}
-static void _ATTRS_o_ai
-vec_ste(vector unsigned short a, int b, vector unsigned short *c)
+/* vec_vavguh */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vavguh(vector unsigned short a, vector unsigned short b)
{
- __builtin_altivec_stvehx((vector short)a, b, (void *)c);
+ return __builtin_altivec_vavguh(a, b);
}
-static void _ATTRS_o_ai
-vec_ste(vector int a, int b, vector int *c)
+/* vec_vavgsw */
+
+static vector int __attribute__((__always_inline__))
+vec_vavgsw(vector int a, vector int b)
+{
+ return __builtin_altivec_vavgsw(a, b);
+}
+
+/* vec_vavguw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vavguw(vector unsigned int a, vector unsigned int b)
{
- __builtin_altivec_stvewx(a, b, (void *)c);
+ return __builtin_altivec_vavguw(a, b);
}
-static void _ATTRS_o_ai
-vec_ste(vector unsigned int a, int b, vector unsigned int *c)
+/* vec_ceil */
+
+static vector float __attribute__((__always_inline__))
+vec_ceil(vector float a)
{
- __builtin_altivec_stvewx((vector int)a, b, (void *)c);
+ return __builtin_altivec_vrfip(a);
}
-static void _ATTRS_o_ai
-vec_ste(vector float a, int b, vector float *c)
+/* vec_vrfip */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfip(vector float a)
{
- __builtin_altivec_stvewx((vector int)a, b, (void *)c);
+ return __builtin_altivec_vrfip(a);
}
/* vec_cmpb */
-#define vec_cmpb __builtin_altivec_vcmpbfp
-#define vec_vcmpbfp __builtin_altivec_vcmpbfp
-#define __builtin_vec_cmpb __builtin_altivec_vcmpbfp
+static vector int __attribute__((__always_inline__))
+vec_cmpb(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpbfp(a, b);
+}
+
+/* vec_vcmpbfp */
-/* vec_cmpeq */
+static vector int __attribute__((__always_inline__))
+vec_vcmpbfp(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpbfp(a, b);
+}
-#define __builtin_vec_cmpeq vec_cmpeq
+/* vec_cmpeq */
-static vector /*bool*/ char _ATTRS_o_ai
+static vector /*bool*/ char __ATTRS_o_ai
vec_cmpeq(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpequb((vector char)a, (vector char)b);
}
-static vector /*bool*/ char _ATTRS_o_ai
+static vector /*bool*/ char __ATTRS_o_ai
vec_cmpeq(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpequb((vector char)a, (vector char)b);
}
-static vector /*bool*/ short _ATTRS_o_ai
+static vector /*bool*/ short __ATTRS_o_ai
vec_cmpeq(vector short a, vector short b)
{
return __builtin_altivec_vcmpequh(a, b);
}
-static vector /*bool*/ short _ATTRS_o_ai
+static vector /*bool*/ short __ATTRS_o_ai
vec_cmpeq(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpequh((vector short)a, (vector short)b);
}
-static vector /*bool*/ int _ATTRS_o_ai
+static vector /*bool*/ int __ATTRS_o_ai
vec_cmpeq(vector int a, vector int b)
{
return __builtin_altivec_vcmpequw(a, b);
}
-static vector /*bool*/ int _ATTRS_o_ai
+static vector /*bool*/ int __ATTRS_o_ai
vec_cmpeq(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpequw((vector int)a, (vector int)b);
}
-static vector /*bool*/ int _ATTRS_o_ai
+static vector /*bool*/ int __ATTRS_o_ai
vec_cmpeq(vector float a, vector float b)
{
return __builtin_altivec_vcmpeqfp(a, b);
@@ -569,72 +667,121 @@ vec_cmpeq(vector float a, vector float b)
/* vec_cmpge */
-#define vec_cmpge __builtin_altivec_vcmpgefp
-#define vec_vcmpgefp __builtin_altivec_vcmpgefp
-#define __builtin_vec_cmpge __builtin_altivec_vcmpgefp
+static vector /*bool*/ int __attribute__((__always_inline__))
+vec_cmpge(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgefp(a, b);
+}
+
+/* vec_vcmpgefp */
+
+static vector /*bool*/ int __attribute__((__always_inline__))
+vec_vcmpgefp(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgefp(a, b);
+}
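All of the compare functions return per-lane masks: all ones where the predicate holds, all zeros where it does not. Assuming vec_sel from the full AltiVec API (not shown in this hunk) and hypothetical vector floats a and b, a branch-free maximum looks like:

    vector /*bool*/ int m  = vec_cmpge(a, b);
    vector float     mx = vec_sel(b, a, (vector unsigned int)m); // a where m is set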
/* vec_cmpgt */
-#define vec_vcmpgtsb __builtin_altivec_vcmpgtsb
-#define vec_vcmpgtub __builtin_altivec_vcmpgtub
-#define vec_vcmpgtsh __builtin_altivec_vcmpgtsh
-#define vec_vcmpgtuh __builtin_altivec_vcmpgtuh
-#define vec_vcmpgtsw __builtin_altivec_vcmpgtsw
-#define vec_vcmpgtuw __builtin_altivec_vcmpgtuw
-#define vec_vcmpgtfp __builtin_altivec_vcmpgtfp
-#define __builtin_vec_vcmpgtsb __builtin_altivec_vcmpgtsb
-#define __builtin_vec_vcmpgtub __builtin_altivec_vcmpgtub
-#define __builtin_vec_vcmpgtsh __builtin_altivec_vcmpgtsh
-#define __builtin_vec_vcmpgtuh __builtin_altivec_vcmpgtuh
-#define __builtin_vec_vcmpgtsw __builtin_altivec_vcmpgtsw
-#define __builtin_vec_vcmpgtuw __builtin_altivec_vcmpgtuw
-#define __builtin_vec_vcmpgtfp __builtin_altivec_vcmpgtfp
-
-static vector /*bool*/ char _ATTRS_o_ai
+static vector /*bool*/ char __ATTRS_o_ai
vec_cmpgt(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpgtsb(a, b);
}
-static vector /*bool*/ char _ATTRS_o_ai
+static vector /*bool*/ char __ATTRS_o_ai
vec_cmpgt(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpgtub(a, b);
}
-static vector /*bool*/ short _ATTRS_o_ai
+static vector /*bool*/ short __ATTRS_o_ai
vec_cmpgt(vector short a, vector short b)
{
return __builtin_altivec_vcmpgtsh(a, b);
}
-static vector /*bool*/ short _ATTRS_o_ai
+static vector /*bool*/ short __ATTRS_o_ai
vec_cmpgt(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpgtuh(a, b);
}
-static vector /*bool*/ int _ATTRS_o_ai
+static vector /*bool*/ int __ATTRS_o_ai
vec_cmpgt(vector int a, vector int b)
{
return __builtin_altivec_vcmpgtsw(a, b);
}
-static vector /*bool*/ int _ATTRS_o_ai
+static vector /*bool*/ int __ATTRS_o_ai
vec_cmpgt(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpgtuw(a, b);
}
-static vector /*bool*/ int _ATTRS_o_ai
+static vector /*bool*/ int __ATTRS_o_ai
vec_cmpgt(vector float a, vector float b)
{
return __builtin_altivec_vcmpgtfp(a, b);
}
-/* vec_cmple */
+/* vec_vcmpgtsb */
-#define __builtin_vec_cmple vec_cmple
+static vector /*bool*/ char __attribute__((__always_inline__))
+vec_vcmpgtsb(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtsb(a, b);
+}
+
+/* vec_vcmpgtub */
+
+static vector /*bool*/ char __attribute__((__always_inline__))
+vec_vcmpgtub(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub(a, b);
+}
+
+/* vec_vcmpgtsh */
+
+static vector /*bool*/ short __attribute__((__always_inline__))
+vec_vcmpgtsh(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtsh(a, b);
+}
+
+/* vec_vcmpgtuh */
+
+static vector /*bool*/ short __attribute__((__always_inline__))
+vec_vcmpgtuh(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh(a, b);
+}
+
+/* vec_vcmpgtsw */
+
+static vector /*bool*/ int __attribute__((__always_inline__))
+vec_vcmpgtsw(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtsw(a, b);
+}
+
+/* vec_vcmpgtuw */
+
+static vector /*bool*/ int __attribute__((__always_inline__))
+vec_vcmpgtuw(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw(a, b);
+}
+
+/* vec_vcmpgtfp */
+
+static vector /*bool*/ int __attribute__((__always_inline__))
+vec_vcmpgtfp(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgtfp(a, b);
+}
+
+/* vec_cmple */
static vector /*bool*/ int __attribute__((__always_inline__))
vec_cmple(vector float a, vector float b)
@@ -644,239 +791,4533 @@ vec_cmple(vector float a, vector float b)
/* vec_cmplt */
-#define __builtin_vec_cmplt vec_cmplt
-
-static vector /*bool*/ char _ATTRS_o_ai
+static vector /*bool*/ char __ATTRS_o_ai
vec_cmplt(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpgtsb(b, a);
}
-static vector /*bool*/ char _ATTRS_o_ai
+static vector /*bool*/ char __ATTRS_o_ai
vec_cmplt(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpgtub(b, a);
}
-static vector /*bool*/ short _ATTRS_o_ai
+static vector /*bool*/ short __ATTRS_o_ai
vec_cmplt(vector short a, vector short b)
{
return __builtin_altivec_vcmpgtsh(b, a);
}
-static vector /*bool*/ short _ATTRS_o_ai
+static vector /*bool*/ short __ATTRS_o_ai
vec_cmplt(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpgtuh(b, a);
}
-static vector /*bool*/ int _ATTRS_o_ai
+static vector /*bool*/ int __ATTRS_o_ai
vec_cmplt(vector int a, vector int b)
{
return __builtin_altivec_vcmpgtsw(b, a);
}
-static vector /*bool*/ int _ATTRS_o_ai
+static vector /*bool*/ int __ATTRS_o_ai
vec_cmplt(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpgtuw(b, a);
}
-static vector /*bool*/ int _ATTRS_o_ai
+static vector /*bool*/ int __ATTRS_o_ai
vec_cmplt(vector float a, vector float b)
{
return __builtin_altivec_vcmpgtfp(b, a);
}
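As the bodies above show, there is no dedicated less-than instruction; vec_cmplt(a, b) is simply the greater-than compare with its operands swapped:

    vector /*bool*/ int lt = vec_cmplt(a, b); // identical to vec_cmpgt(b, a)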
+/* vec_ctf */
+
+static vector float __ATTRS_o_ai
+vec_ctf(vector int a, int b)
+{
+ return __builtin_altivec_vcfsx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_ctf(vector unsigned int a, int b)
+{
+ return __builtin_altivec_vcfux((vector int)a, b);
+}
+
+/* vec_vcfsx */
+
+static vector float __attribute__((__always_inline__))
+vec_vcfsx(vector int a, int b)
+{
+ return __builtin_altivec_vcfsx(a, b);
+}
+
+/* vec_vcfux */
+
+static vector float __attribute__((__always_inline__))
+vec_vcfux(vector unsigned int a, int b)
+{
+ return __builtin_altivec_vcfux((vector int)a, b);
+}
+
+/* vec_cts */
+
+static vector int __attribute__((__always_inline__))
+vec_cts(vector float a, int b)
+{
+ return __builtin_altivec_vctsxs(a, b);
+}
+
+/* vec_vctsxs */
+
+static vector int __attribute__((__always_inline__))
+vec_vctsxs(vector float a, int b)
+{
+ return __builtin_altivec_vctsxs(a, b);
+}
+
+/* vec_ctu */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_ctu(vector float a, int b)
+{
+ return __builtin_altivec_vctuxs(a, b);
+}
+
+/* vec_vctuxs */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vctuxs(vector float a, int b)
+{
+ return __builtin_altivec_vctuxs(a, b);
+}
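Sketch of the fixed-point conversions: the second operand is a 5-bit compile-time scale, so vec_ctf computes a / 2^b while vec_cts and vec_ctu compute a * 2^b truncated with saturation.

    vector float one = vec_ctf((vector int)(256), 8); // every lane becomes 1.0f
    vector int   back = vec_cts(one, 8);              // every lane back to 256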
+
+/* vec_dss */
+
+static void __attribute__((__always_inline__))
+vec_dss(int a)
+{
+ __builtin_altivec_dss(a);
+}
+
+/* vec_dssall */
+
+static void __attribute__((__always_inline__))
+vec_dssall(void)
+{
+ __builtin_altivec_dssall();
+}
+
+/* vec_dst */
+
+static void __attribute__((__always_inline__))
+vec_dst(void *a, int b, int c)
+{
+ __builtin_altivec_dst(a, b, c);
+}
+
+/* vec_dstst */
+
+static void __attribute__((__always_inline__))
+vec_dstst(void *a, int b, int c)
+{
+ __builtin_altivec_dstst(a, b, c);
+}
+
+/* vec_dststt */
+
+static void __attribute__((__always_inline__))
+vec_dststt(void *a, int b, int c)
+{
+ __builtin_altivec_dststt(a, b, c);
+}
+
+/* vec_dstt */
+
+static void __attribute__((__always_inline__))
+vec_dstt(void *a, int b, int c)
+{
+ __builtin_altivec_dstt(a, b, c);
+}
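A hedged usage sketch for the data-stream prefetch functions. The control word packs block size, block count, and stride; the encoding macro below is a common convention, not part of this header, and the values are illustrative only.

    #define DST_CTRL(size, count, stride) (((size) << 24) | ((count) << 16) | (stride))
    vec_dst(p, DST_CTRL(4, 8, 64), 0); // p: hypothetical buffer; start stream 0
    /* ... consume the prefetched data ... */
    vec_dss(0);                        // stop stream 0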
+
+/* vec_expte */
+
+static vector float __attribute__((__always_inline__))
+vec_expte(vector float a)
+{
+ return __builtin_altivec_vexptefp(a);
+}
+
+/* vec_vexptefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vexptefp(vector float a)
+{
+ return __builtin_altivec_vexptefp(a);
+}
+
+/* vec_floor */
+
+static vector float __attribute__((__always_inline__))
+vec_floor(vector float a)
+{
+ return __builtin_altivec_vrfim(a);
+}
+
+/* vec_vrfim */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfim(vector float a)
+{
+ return __builtin_altivec_vrfim(a);
+}
+
+/* vec_ld */
+
+static vector signed char __ATTRS_o_ai
+vec_ld(int a, vector signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvx(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_ld(int a, signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_ld(int a, vector unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_ld(int a, unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvx(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_ld(int a, vector short *b)
+{
+ return (vector short)__builtin_altivec_lvx(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_ld(int a, short *b)
+{
+ return (vector short)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_ld(int a, vector unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_ld(int a, unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvx(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_ld(int a, vector int *b)
+{
+ return (vector int)__builtin_altivec_lvx(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_ld(int a, int *b)
+{
+ return (vector int)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_ld(int a, vector unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_ld(int a, unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_ld(int a, vector float *b)
+{
+ return (vector float)__builtin_altivec_lvx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_ld(int a, float *b)
+{
+ return (vector float)__builtin_altivec_lvx(a, b);
+}
+
+/* vec_lvx */
+
+static vector signed char __ATTRS_o_ai
+vec_lvx(int a, vector signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvx(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvx(int a, signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvx(int a, vector unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvx(int a, unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvx(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_lvx(int a, vector short *b)
+{
+ return (vector short)__builtin_altivec_lvx(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_lvx(int a, short *b)
+{
+ return (vector short)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvx(int a, vector unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvx(int a, unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvx(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_lvx(int a, vector int *b)
+{
+ return (vector int)__builtin_altivec_lvx(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_lvx(int a, int *b)
+{
+ return (vector int)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvx(int a, vector unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvx(int a, unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_lvx(int a, vector float *b)
+{
+ return (vector float)__builtin_altivec_lvx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_lvx(int a, float *b)
+{
+ return (vector float)__builtin_altivec_lvx(a, b);
+}
+
+/* vec_lde */
+
+static vector signed char __ATTRS_o_ai
+vec_lde(int a, vector signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvebx(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lde(int a, vector unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvebx(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_lde(int a, vector short *b)
+{
+ return (vector short)__builtin_altivec_lvehx(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lde(int a, vector unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvehx(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_lde(int a, vector int *b)
+{
+ return (vector int)__builtin_altivec_lvewx(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lde(int a, vector unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvewx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_lde(int a, vector float *b)
+{
+ return (vector float)__builtin_altivec_lvewx(a, b);
+}
+
+/* vec_lvebx */
+
+static vector signed char __ATTRS_o_ai
+vec_lvebx(int a, vector signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvebx(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvebx(int a, vector unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvebx(a, b);
+}
+
+/* vec_lvehx */
+
+static vector short __ATTRS_o_ai
+vec_lvehx(int a, vector short *b)
+{
+ return (vector short)__builtin_altivec_lvehx(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvehx(int a, vector unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvehx(a, b);
+}
+
+/* vec_lvewx */
+
+static vector int __ATTRS_o_ai
+vec_lvewx(int a, vector int *b)
+{
+ return (vector int)__builtin_altivec_lvewx(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvewx(int a, vector unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvewx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_lvewx(int a, vector float *b)
+{
+ return (vector float)__builtin_altivec_lvewx(a, b);
+}
+
+/* vec_ldl */
+
+static vector signed char __ATTRS_o_ai
+vec_ldl(int a, vector signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_ldl(int a, signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_ldl(int a, vector unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_ldl(int a, unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_ldl(int a, vector short *b)
+{
+ return (vector short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_ldl(int a, short *b)
+{
+ return (vector short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_ldl(int a, vector unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_ldl(int a, unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_ldl(int a, vector int *b)
+{
+ return (vector int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_ldl(int a, int *b)
+{
+ return (vector int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_ldl(int a, vector unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_ldl(int a, unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_ldl(int a, vector float *b)
+{
+ return (vector float)__builtin_altivec_lvxl(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_ldl(int a, float *b)
+{
+ return (vector float)__builtin_altivec_lvxl(a, b);
+}
+
+/* vec_lvxl */
+
+static vector signed char __ATTRS_o_ai
+vec_lvxl(int a, vector signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvxl(int a, signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvxl(int a, vector unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvxl(int a, unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_lvxl(int a, vector short *b)
+{
+ return (vector short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_lvxl(int a, short *b)
+{
+ return (vector short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvxl(int a, vector unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvxl(int a, unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_lvxl(int a, vector int *b)
+{
+ return (vector int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_lvxl(int a, int *b)
+{
+ return (vector int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvxl(int a, vector unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvxl(int a, unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_lvxl(int a, vector float *b)
+{
+ return (vector float)__builtin_altivec_lvxl(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_lvxl(int a, float *b)
+{
+ return (vector float)__builtin_altivec_lvxl(a, b);
+}
+
+/* vec_loge */
+
+static vector float __attribute__((__always_inline__))
+vec_loge(vector float a)
+{
+ return __builtin_altivec_vlogefp(a);
+}
+
+/* vec_vlogefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vlogefp(vector float a)
+{
+ return __builtin_altivec_vlogefp(a);
+}
+
+/* vec_lvsl */
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, signed char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, short *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, unsigned short *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, int *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, unsigned int *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, float *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+/* vec_lvsr */
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, signed char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, short *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, unsigned short *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, int *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, unsigned int *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, float *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
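vec_lvsl and vec_lvsr exist mainly to support the classic misaligned-load idiom, sketched below for a hypothetical float pointer p (vec_ld itself truncates the effective address to 16-byte alignment):

    vector unsigned char perm = vec_lvsl(0, p);
    vector float lo = vec_ld(0, p);
    vector float hi = vec_ld(15, p);
    vector float v  = vec_perm(lo, hi, perm); // the 16 bytes starting at p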
+
+/* vec_madd */
+
+static vector float __attribute__((__always_inline__))
+vec_madd(vector float a, vector float b, vector float c)
+{
+ return __builtin_altivec_vmaddfp(a, b, c);
+}
+
+/* vec_vmaddfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vmaddfp(vector float a, vector float b, vector float c)
+{
+ return __builtin_altivec_vmaddfp(a, b, c);
+}
+
+/* vec_madds */
+
+static vector signed short __attribute__((__always_inline__))
+vec_madds(vector signed short a, vector signed short b, vector signed short c)
+{
+ return __builtin_altivec_vmhaddshs(a, b, c);
+}
+
+/* vec_vmhaddshs */
+static vector signed short __attribute__((__always_inline__))
+vec_vmhaddshs(vector signed short a, vector signed short b, vector signed short c)
+{
+ return __builtin_altivec_vmhaddshs(a, b, c);
+}
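Sketch: vec_madd is a per-lane fused multiply-add, a * b + c. Since AltiVec has no plain float multiply, a product is conventionally spelled as a madd with a negative-zero addend (x and y are hypothetical vector floats):

    vector float prod = vec_madd(x, y, (vector float)(-0.0f)); // x * y per lane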
+
/* vec_max */
-#define __builtin_vec_vmaxsb __builtin_altivec_vmaxsb
-#define __builtin_vec_vmaxub __builtin_altivec_vmaxub
-#define __builtin_vec_vmaxsh __builtin_altivec_vmaxsh
-#define __builtin_vec_vmaxuh __builtin_altivec_vmaxuh
-#define __builtin_vec_vmaxsw __builtin_altivec_vmaxsw
-#define __builtin_vec_vmaxuw __builtin_altivec_vmaxuw
-#define __builtin_vec_vmaxfp __builtin_altivec_vmaxfp
-#define vec_vmaxsb __builtin_altivec_vmaxsb
-#define vec_vmaxub __builtin_altivec_vmaxub
-#define vec_vmaxsh __builtin_altivec_vmaxsh
-#define vec_vmaxuh __builtin_altivec_vmaxuh
-#define vec_vmaxsw __builtin_altivec_vmaxsw
-#define vec_vmaxuw __builtin_altivec_vmaxuw
-#define vec_vmaxfp __builtin_altivec_vmaxfp
-#define __builtin_vec_max vec_max
-
-static vector signed char _ATTRS_o_ai
+static vector signed char __ATTRS_o_ai
vec_max(vector signed char a, vector signed char b)
{
return __builtin_altivec_vmaxsb(a, b);
}
-static vector unsigned char _ATTRS_o_ai
+static vector unsigned char __ATTRS_o_ai
vec_max(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vmaxub(a, b);
}
-static vector short _ATTRS_o_ai
+static vector short __ATTRS_o_ai
vec_max(vector short a, vector short b)
{
return __builtin_altivec_vmaxsh(a, b);
}
-static vector unsigned short _ATTRS_o_ai
+static vector unsigned short __ATTRS_o_ai
vec_max(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vmaxuh(a, b);
}
-static vector int _ATTRS_o_ai
+static vector int __ATTRS_o_ai
vec_max(vector int a, vector int b)
{
return __builtin_altivec_vmaxsw(a, b);
}
-static vector unsigned int _ATTRS_o_ai
+static vector unsigned int __ATTRS_o_ai
vec_max(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vmaxuw(a, b);
}
-static vector float _ATTRS_o_ai
+static vector float __ATTRS_o_ai
vec_max(vector float a, vector float b)
{
return __builtin_altivec_vmaxfp(a, b);
}
+/* vec_vmaxsb */
+
+static vector signed char __attribute__((__always_inline__))
+vec_vmaxsb(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vmaxsb(a, b);
+}
+
+/* vec_vmaxub */
+
+static vector unsigned char __attribute__((__always_inline__))
+vec_vmaxub(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmaxub(a, b);
+}
+
+/* vec_vmaxsh */
+
+static vector short __attribute__((__always_inline__))
+vec_vmaxsh(vector short a, vector short b)
+{
+ return __builtin_altivec_vmaxsh(a, b);
+}
+
+/* vec_vmaxuh */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vmaxuh(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmaxuh(a, b);
+}
+
+/* vec_vmaxsw */
+
+static vector int __attribute__((__always_inline__))
+vec_vmaxsw(vector int a, vector int b)
+{
+ return __builtin_altivec_vmaxsw(a, b);
+}
+
+/* vec_vmaxuw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmaxuw(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vmaxuw(a, b);
+}
+
+/* vec_vmaxfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vmaxfp(vector float a, vector float b)
+{
+ return __builtin_altivec_vmaxfp(a, b);
+}
+
+/* vec_mergeh */
+
+static vector signed char __ATTRS_o_ai
+vec_mergeh(vector signed char a, vector signed char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
+ 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_mergeh(vector unsigned char a, vector unsigned char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
+ 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector short __ATTRS_o_ai
+vec_mergeh(vector short a, vector short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13,
+ 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mergeh(vector unsigned short a, vector unsigned short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13,
+ 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector int __ATTRS_o_ai
+vec_mergeh(vector int a, vector int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_mergeh(vector unsigned int a, vector unsigned int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector float __ATTRS_o_ai
+vec_mergeh(vector float a, vector float b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
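Sketch of the merge semantics: vec_mergeh interleaves the low-indexed (high) halves of its operands, and vec_mergel the high-indexed halves.

    vector short a = (vector short)(0, 1, 2, 3, 4, 5, 6, 7);
    vector short b = (vector short)(8, 9, 10, 11, 12, 13, 14, 15);
    vector short r = vec_mergeh(a, b); // (0, 8, 1, 9, 2, 10, 3, 11)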
+
+/* vec_vmrghb */
+
+#define __builtin_altivec_vmrghb vec_vmrghb
+
+static vector signed char __ATTRS_o_ai
+vec_vmrghb(vector signed char a, vector signed char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
+ 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vmrghb(vector unsigned char a, vector unsigned char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
+ 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17));
+}
+
+/* vec_vmrghh */
+
+#define __builtin_altivec_vmrghh vec_vmrghh
+
+static vector short __ATTRS_o_ai
+vec_vmrghh(vector short a, vector short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13,
+ 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmrghh(vector unsigned short a, vector unsigned short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13,
+ 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17));
+}
+
+/* vec_vmrghw */
+
+#define __builtin_altivec_vmrghw vec_vmrghw
+
+static vector int __ATTRS_o_ai
+vec_vmrghw(vector int a, vector int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vmrghw(vector unsigned int a, vector unsigned int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector float __ATTRS_o_ai
+vec_vmrghw(vector float a, vector float b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+/* vec_mergel */
+
+static vector signed char __ATTRS_o_ai
+vec_mergel(vector signed char a, vector signed char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B,
+ 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_mergel(vector unsigned char a, vector unsigned char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B,
+ 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector short __ATTRS_o_ai
+vec_mergel(vector short a, vector short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mergel(vector unsigned short a, vector unsigned short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector int __ATTRS_o_ai
+vec_mergel(vector int a, vector int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_mergel(vector unsigned int a, vector unsigned int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector float __ATTRS_o_ai
+vec_mergel(vector float a, vector float b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+/* vec_vmrglb */
+
+#define __builtin_altivec_vmrglb vec_vmrglb
+
+static vector signed char __ATTRS_o_ai
+vec_vmrglb(vector signed char a, vector signed char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B,
+ 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vmrglb(vector unsigned char a, vector unsigned char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B,
+ 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+/* vec_vmrglh */
+
+#define __builtin_altivec_vmrglh vec_vmrglh
+
+static vector short __ATTRS_o_ai
+vec_vmrglh(vector short a, vector short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmrglh(vector unsigned short a, vector unsigned short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+/* vec_vmrglw */
+
+#define __builtin_altivec_vmrglw vec_vmrglw
+
+static vector int __ATTRS_o_ai
+vec_vmrglw(vector int a, vector int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vmrglw(vector unsigned int a, vector unsigned int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector float __ATTRS_o_ai
+vec_vmrglw(vector float a, vector float b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
/* vec_mfvscr */
-#define __builtin_vec_mfvscr __builtin_altivec_mfvscr
-#define vec_mfvscr __builtin_altivec_mfvscr
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_mfvscr(void)
+{
+ return __builtin_altivec_mfvscr();
+}

/* vec_min */
-#define __builtin_vec_vminsb __builtin_altivec_vminsb
-#define __builtin_vec_vminub __builtin_altivec_vminub
-#define __builtin_vec_vminsh __builtin_altivec_vminsh
-#define __builtin_vec_vminuh __builtin_altivec_vminuh
-#define __builtin_vec_vminsw __builtin_altivec_vminsw
-#define __builtin_vec_vminuw __builtin_altivec_vminuw
-#define __builtin_vec_vminfp __builtin_altivec_vminfp
-#define vec_vminsb __builtin_altivec_vminsb
-#define vec_vminub __builtin_altivec_vminub
-#define vec_vminsh __builtin_altivec_vminsh
-#define vec_vminuh __builtin_altivec_vminuh
-#define vec_vminsw __builtin_altivec_vminsw
-#define vec_vminuw __builtin_altivec_vminuw
-#define vec_vminfp __builtin_altivec_vminfp
-#define __builtin_vec_min vec_min
-
-static vector signed char _ATTRS_o_ai
+
+static vector signed char __ATTRS_o_ai
vec_min(vector signed char a, vector signed char b)
{
return __builtin_altivec_vminsb(a, b);
}

-static vector unsigned char _ATTRS_o_ai
+static vector unsigned char __ATTRS_o_ai
vec_min(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vminub(a, b);
}

-static vector short _ATTRS_o_ai
+static vector short __ATTRS_o_ai
vec_min(vector short a, vector short b)
{
return __builtin_altivec_vminsh(a, b);
}

-static vector unsigned short _ATTRS_o_ai
+static vector unsigned short __ATTRS_o_ai
vec_min(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vminuh(a, b);
}

-static vector int _ATTRS_o_ai
+static vector int __ATTRS_o_ai
vec_min(vector int a, vector int b)
{
return __builtin_altivec_vminsw(a, b);
}

-static vector unsigned int _ATTRS_o_ai
+static vector unsigned int __ATTRS_o_ai
vec_min(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vminuw(a, b);
}

-static vector float _ATTRS_o_ai
+static vector float __ATTRS_o_ai
vec_min(vector float a, vector float b)
{
return __builtin_altivec_vminfp(a, b);
}

+/* vec_vminsb */
+
+static vector signed char __attribute__((__always_inline__))
+vec_vminsb(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vminsb(a, b);
+}
+
+/* vec_vminub */
+
+static vector unsigned char __attribute__((__always_inline__))
+vec_vminub(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vminub(a, b);
+}
+
+/* vec_vminsh */
+
+static vector short __attribute__((__always_inline__))
+vec_vminsh(vector short a, vector short b)
+{
+ return __builtin_altivec_vminsh(a, b);
+}
+
+/* vec_vminuh */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vminuh(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vminuh(a, b);
+}
+
+/* vec_vminsw */
+
+static vector int __attribute__((__always_inline__))
+vec_vminsw(vector int a, vector int b)
+{
+ return __builtin_altivec_vminsw(a, b);
+}
+
+/* vec_vminuw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vminuw(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vminuw(a, b);
+}
+
+/* vec_vminfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vminfp(vector float a, vector float b)
+{
+ return __builtin_altivec_vminfp(a, b);
+}
+
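+/* Note: vec_min and the vec_vmin* forms are the elementwise minimum.
+ * A minimal illustrative sketch (hypothetical values):
+ *
+ *   vector int x = (vector int)(1, -2, 3, -4);
+ *   vector int y = (vector int)(0, 0, 0, 0);
+ *   vector int m = vec_min(x, y);   // (0, -2, 0, -4)
+ */
+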
+/* vec_mladd */
+
+#define __builtin_altivec_vmladduhm vec_mladd
+
+static vector short __ATTRS_o_ai
+vec_mladd(vector short a, vector short b, vector short c)
+{
+ return a * b + c;
+}
+
+static vector short __ATTRS_o_ai
+vec_mladd(vector short a, vector unsigned short b, vector unsigned short c)
+{
+ return a * (vector short)b + (vector short)c;
+}
+
+static vector short __ATTRS_o_ai
+vec_mladd(vector unsigned short a, vector short b, vector short c)
+{
+ return (vector short)a * b + c;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mladd(vector unsigned short a, vector unsigned short b, vector unsigned short c)
+{
+ return a * b + c;
+}
+
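+/* Note: vec_mladd computes a*b+c in each 16-bit lane, with the product
+ * and sum truncated modulo 2^16 (vmladduhm keeps only the low halfword
+ * of each product). Illustrative sketch with hypothetical values:
+ *
+ *   vector short a = (vector short)(2, 2, 2, 2, 2, 2, 2, 2);
+ *   vector short b = (vector short)(3, 3, 3, 3, 3, 3, 3, 3);
+ *   vector short c = (vector short)(1, 1, 1, 1, 1, 1, 1, 1);
+ *   vector short r = vec_mladd(a, b, c);   // every lane == 7
+ */
+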
+/* vec_vmladduhm */
+
+static vector short __ATTRS_o_ai
+vec_vmladduhm(vector short a, vector short b, vector short c)
+{
+ return a * b + c;
+}
+
+static vector short __ATTRS_o_ai
+vec_vmladduhm(vector short a, vector unsigned short b, vector unsigned short c)
+{
+ return a * (vector short)b + (vector short)c;
+}
+
+static vector short __ATTRS_o_ai
+vec_vmladduhm(vector unsigned short a, vector short b, vector short c)
+{
+ return (vector short)a * b + c;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmladduhm(vector unsigned short a, vector unsigned short b, vector unsigned short c)
+{
+ return a * b + c;
+}
+
+/* vec_mradds */
+
+static vector short __attribute__((__always_inline__))
+vec_mradds(vector short a, vector short b, vector short c)
+{
+ return __builtin_altivec_vmhraddshs(a, b, c);
+}
+
+/* vec_vmhraddshs */
+
+static vector short __attribute__((__always_inline__))
+vec_vmhraddshs(vector short a, vector short b, vector short c)
+{
+ return __builtin_altivec_vmhraddshs(a, b, c);
+}
+
+/* vec_msum */
+
+static vector int __ATTRS_o_ai
+vec_msum(vector signed char a, vector unsigned char b, vector int c)
+{
+ return __builtin_altivec_vmsummbm(a, b, c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_msum(vector unsigned char a, vector unsigned char b, vector unsigned int c)
+{
+ return __builtin_altivec_vmsumubm(a, b, c);
+}
+
+static vector int __ATTRS_o_ai
+vec_msum(vector short a, vector short b, vector int c)
+{
+ return __builtin_altivec_vmsumshm(a, b, c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_msum(vector unsigned short a, vector unsigned short b, vector unsigned int c)
+{
+ return __builtin_altivec_vmsumuhm(a, b, c);
+}
+
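+/* Note: vec_msum is a multiply-sum: for each 32-bit lane i it multiplies
+ * the corresponding sub-elements of a and b, adds the products within
+ * that lane, then adds c[i]. For the unsigned char form,
+ * r[i] = c[i] + a[4i]*b[4i] + ... + a[4i+3]*b[4i+3]. A hypothetical
+ * sketch (vec_splat_u8 is defined further down in this header):
+ *
+ *   vector unsigned char a = vec_splat_u8(1);
+ *   vector unsigned char b = vec_splat_u8(2);
+ *   vector unsigned int  c = (vector unsigned int)(0, 0, 0, 0);
+ *   vector unsigned int  r = vec_msum(a, b, c);   // every word == 8
+ */
+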
+/* vec_vmsummbm */
+
+static vector int __attribute__((__always_inline__))
+vec_vmsummbm(vector signed char a, vector unsigned char b, vector int c)
+{
+ return __builtin_altivec_vmsummbm(a, b, c);
+}
+
+/* vec_vmsumubm */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmsumubm(vector unsigned char a, vector unsigned char b, vector unsigned int c)
+{
+ return __builtin_altivec_vmsumubm(a, b, c);
+}
+
+/* vec_vmsumshm */
+
+static vector int __attribute__((__always_inline__))
+vec_vmsumshm(vector short a, vector short b, vector int c)
+{
+ return __builtin_altivec_vmsumshm(a, b, c);
+}
+
+/* vec_vmsumuhm */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmsumuhm(vector unsigned short a, vector unsigned short b, vector unsigned int c)
+{
+ return __builtin_altivec_vmsumuhm(a, b, c);
+}
+
+/* vec_msums */
+
+static vector int __ATTRS_o_ai
+vec_msums(vector short a, vector short b, vector int c)
+{
+ return __builtin_altivec_vmsumshs(a, b, c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_msums(vector unsigned short a, vector unsigned short b, vector unsigned int c)
+{
+ return __builtin_altivec_vmsumuhs(a, b, c);
+}
+
+/* vec_vmsumshs */
+
+static vector int __attribute__((__always_inline__))
+vec_vmsumshs(vector short a, vector short b, vector int c)
+{
+ return __builtin_altivec_vmsumshs(a, b, c);
+}
+
+/* vec_vmsumuhs */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmsumuhs(vector unsigned short a, vector unsigned short b, vector unsigned int c)
+{
+ return __builtin_altivec_vmsumuhs(a, b, c);
+}
+
/* vec_mtvscr */
-#define __builtin_vec_mtvscr __builtin_altivec_mtvscr
-#define vec_mtvscr __builtin_altivec_mtvscr
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector signed char a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}

-/* ------------------------------ predicates ------------------------------------ */
+static void __ATTRS_o_ai
+vec_mtvscr(vector unsigned char a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}

-static int __attribute__((__always_inline__))
-__builtin_vec_vcmpeq_p(char CR6_param, vector float a, vector float b)
+static void __ATTRS_o_ai
+vec_mtvscr(vector short a)
{
- return __builtin_altivec_vcmpeqfp_p(CR6_param, a, b);
+ __builtin_altivec_mtvscr((vector int)a);
}

-static int __attribute__((__always_inline__))
-__builtin_vec_vcmpge_p(char CR6_param, vector float a, vector float b)
+static void __ATTRS_o_ai
+vec_mtvscr(vector unsigned short a)
{
- return __builtin_altivec_vcmpgefp_p(CR6_param, a, b);
+ __builtin_altivec_mtvscr((vector int)a);
}

-static int __attribute__((__always_inline__))
-__builtin_vec_vcmpgt_p(char CR6_param, vector float a, vector float b)
+static void __ATTRS_o_ai
+vec_mtvscr(vector int a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector unsigned int a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector float a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+/* vec_mule */
+
+static vector short __ATTRS_o_ai
+vec_mule(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vmulesb(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mule(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmuleub(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_mule(vector short a, vector short b)
+{
+ return __builtin_altivec_vmulesh(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_mule(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmuleuh(a, b);
+}
+
+/* vec_vmulesb */
+
+static vector short __attribute__((__always_inline__))
+vec_vmulesb(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vmulesb(a, b);
+}
+
+/* vec_vmuleub */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vmuleub(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmuleub(a, b);
+}
+
+/* vec_vmulesh */
+
+static vector int __attribute__((__always_inline__))
+vec_vmulesh(vector short a, vector short b)
+{
+ return __builtin_altivec_vmulesh(a, b);
+}
+
+/* vec_vmuleuh */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmuleuh(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmuleuh(a, b);
+}
+
+/* vec_mulo */
+
+static vector short __ATTRS_o_ai
+vec_mulo(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vmulosb(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mulo(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmuloub(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_mulo(vector short a, vector short b)
+{
+ return __builtin_altivec_vmulosh(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_mulo(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmulouh(a, b);
+}
+
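+/* Note: vec_mule multiplies the even-numbered elements of a and b
+ * (element 0 is leftmost in AltiVec's big-endian numbering) and
+ * vec_mulo the odd-numbered ones; each product is kept at double width,
+ * so half as many lanes come out. Illustrative sketch:
+ *
+ *   vector short a = (vector short)(1, 2, 3, 4, 5, 6, 7, 8);
+ *   vector int   e = vec_mule(a, a);   // (1, 9, 25, 49)
+ *   vector int   o = vec_mulo(a, a);   // (4, 16, 36, 64)
+ */
+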
+/* vec_vmulosb */
+
+static vector short __attribute__((__always_inline__))
+vec_vmulosb(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vmulosb(a, b);
+}
+
+/* vec_vmuloub */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vmuloub(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmuloub(a, b);
+}
+
+/* vec_vmulosh */
+
+static vector int __attribute__((__always_inline__))
+vec_vmulosh(vector short a, vector short b)
+{
+ return __builtin_altivec_vmulosh(a, b);
+}
+
+/* vec_vmulouh */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmulouh(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmulouh(a, b);
+}
+
+/* vec_nmsub */
+
+static vector float __attribute__((__always_inline__))
+vec_nmsub(vector float a, vector float b, vector float c)
+{
+ return __builtin_altivec_vnmsubfp(a, b, c);
+}
+
+/* vec_vnmsubfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vnmsubfp(vector float a, vector float b, vector float c)
+{
+ return __builtin_altivec_vnmsubfp(a, b, c);
+}
+
+/* vec_nor */
+
+#define __builtin_altivec_vnor vec_nor
+
+static vector signed char __ATTRS_o_ai
+vec_nor(vector signed char a, vector signed char b)
+{
+ return ~(a | b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_nor(vector unsigned char a, vector unsigned char b)
+{
+ return ~(a | b);
+}
+
+static vector short __ATTRS_o_ai
+vec_nor(vector short a, vector short b)
+{
+ return ~(a | b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_nor(vector unsigned short a, vector unsigned short b)
+{
+ return ~(a | b);
+}
+
+static vector int __ATTRS_o_ai
+vec_nor(vector int a, vector int b)
+{
+ return ~(a | b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_nor(vector unsigned int a, vector unsigned int b)
+{
+ return ~(a | b);
+}
+
+static vector float __ATTRS_o_ai
+vec_nor(vector float a, vector float b)
+{
+ vector unsigned int res = ~((vector unsigned int)a | (vector unsigned int)b);
+ return (vector float)res;
+}
+
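+/* Note: the float overload of vec_nor casts through vector unsigned int
+ * because C's bitwise operators are not defined on floating-point
+ * vectors; the result is the NOR of the raw bit patterns. vec_nor also
+ * gives a one-instruction bitwise NOT (x hypothetical):
+ *
+ *   vector unsigned int nx = vec_nor(x, x);   // ~x
+ */
+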
+/* vec_vnor */
+
+static vector signed char __ATTRS_o_ai
+vec_vnor(vector signed char a, vector signed char b)
+{
+ return ~(a | b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vnor(vector unsigned char a, vector unsigned char b)
+{
+ return ~(a | b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vnor(vector short a, vector short b)
+{
+ return ~(a | b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vnor(vector unsigned short a, vector unsigned short b)
+{
+ return ~(a | b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vnor(vector int a, vector int b)
+{
+ return ~(a | b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vnor(vector unsigned int a, vector unsigned int b)
+{
+ return ~(a | b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vnor(vector float a, vector float b)
+{
+ vector unsigned int res = ~((vector unsigned int)a | (vector unsigned int)b);
+ return (vector float)res;
+}
+
+/* vec_or */
+
+#define __builtin_altivec_vor vec_or
+
+static vector signed char __ATTRS_o_ai
+vec_or(vector signed char a, vector signed char b)
+{
+ return a | b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_or(vector unsigned char a, vector unsigned char b)
+{
+ return a | b;
+}
+
+static vector short __ATTRS_o_ai
+vec_or(vector short a, vector short b)
+{
+ return a | b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_or(vector unsigned short a, vector unsigned short b)
+{
+ return a | b;
+}
+
+static vector int __ATTRS_o_ai
+vec_or(vector int a, vector int b)
+{
+ return a | b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_or(vector unsigned int a, vector unsigned int b)
+{
+ return a | b;
+}
+
+static vector float __ATTRS_o_ai
+vec_or(vector float a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a | (vector unsigned int)b;
+ return (vector float)res;
+}
+
+/* vec_vor */
+
+static vector signed char __ATTRS_o_ai
+vec_vor(vector signed char a, vector signed char b)
+{
+ return a | b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vor(vector unsigned char a, vector unsigned char b)
+{
+ return a | b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vor(vector short a, vector short b)
+{
+ return a | b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vor(vector unsigned short a, vector unsigned short b)
+{
+ return a | b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vor(vector int a, vector int b)
+{
+ return a | b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vor(vector unsigned int a, vector unsigned int b)
+{
+ return a | b;
+}
+
+static vector float __ATTRS_o_ai
+vec_vor(vector float a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a | (vector unsigned int)b;
+ return (vector float)res;
+}
+
+/* vec_pack */
+
+static vector signed char __ATTRS_o_ai
+vec_pack(vector signed short a, vector signed short b)
+{
+ return (vector signed char)vec_perm(a, b, (vector unsigned char)
+ (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_pack(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned char)vec_perm(a, b, (vector unsigned char)
+ (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+}
+
+static vector short __ATTRS_o_ai
+vec_pack(vector int a, vector int b)
+{
+ return (vector short)vec_perm(a, b, (vector unsigned char)
+ (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_pack(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned short)vec_perm(a, b, (vector unsigned char)
+ (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+}
+
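+/* Note: vec_pack narrows each element to half width by keeping its
+ * low-order half; the permute indices above (0x01, 0x03, ... for the
+ * short forms) select the low byte of each big-endian halfword.
+ * Truncation is silent; vec_packs/vec_packsu below saturate instead.
+ * Illustrative sketch:
+ *
+ *   vector unsigned short x = (vector unsigned short)
+ *     (0x1234, 0x1234, 0x1234, 0x1234, 0x1234, 0x1234, 0x1234, 0x1234);
+ *   vector unsigned char p = vec_pack(x, x);   // every byte == 0x34
+ */
+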
+/* vec_vpkuhum */
+
+#define __builtin_altivec_vpkuhum vec_vpkuhum
+
+static vector signed char __ATTRS_o_ai
+vec_vpkuhum(vector signed short a, vector signed short b)
+{
+ return (vector signed char)vec_perm(a, b, (vector unsigned char)
+ (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vpkuhum(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned char)vec_perm(a, b, (vector unsigned char)
+ (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+}
+
+/* vec_vpkuwum */
+
+#define __builtin_altivec_vpkuwum vec_vpkuwum
+
+static vector short __ATTRS_o_ai
+vec_vpkuwum(vector int a, vector int b)
+{
+ return (vector short)vec_perm(a, b, (vector unsigned char)
+ (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vpkuwum(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned short)vec_perm(a, b, (vector unsigned char)
+ (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+}
+
+/* vec_packpx */
+
+static vector pixel __attribute__((__always_inline__))
+vec_packpx(vector unsigned int a, vector unsigned int b)
+{
+ return (vector pixel)__builtin_altivec_vpkpx(a, b);
+}
+
+/* vec_vpkpx */
+
+static vector pixel __attribute__((__always_inline__))
+vec_vpkpx(vector unsigned int a, vector unsigned int b)
+{
+ return (vector pixel)__builtin_altivec_vpkpx(a, b);
+}
+
+/* vec_packs */
+
+static vector signed char __ATTRS_o_ai
+vec_packs(vector short a, vector short b)
+{
+ return __builtin_altivec_vpkshss(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_packs(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vpkuhus(a, b);
+}
+
+static vector signed short __ATTRS_o_ai
+vec_packs(vector int a, vector int b)
+{
+ return __builtin_altivec_vpkswss(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_packs(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vpkuwus(a, b);
+}
+
+/* vec_vpkshss */
+
+static vector signed char __attribute__((__always_inline__))
+vec_vpkshss(vector short a, vector short b)
+{
+ return __builtin_altivec_vpkshss(a, b);
+}
+
+/* vec_vpkuhus */
+
+static vector unsigned char __attribute__((__always_inline__))
+vec_vpkuhus(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vpkuhus(a, b);
+}
+
+/* vec_vpkswss */
+
+static vector signed short __attribute__((__always_inline__))
+vec_vpkswss(vector int a, vector int b)
+{
+ return __builtin_altivec_vpkswss(a, b);
+}
+
+/* vec_vpkuwus */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vpkuwus(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vpkuwus(a, b);
+}
+
+/* vec_packsu */
+
+static vector unsigned char __ATTRS_o_ai
+vec_packsu(vector short a, vector short b)
+{
+ return __builtin_altivec_vpkshus(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_packsu(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vpkuhus(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_packsu(vector int a, vector int b)
+{
+ return __builtin_altivec_vpkswus(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_packsu(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vpkuwus(a, b);
+}
+
+/* vec_vpkshus */
+
+static vector unsigned char __ATTRS_o_ai
+vec_vpkshus(vector short a, vector short b)
+{
+ return __builtin_altivec_vpkshus(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vpkshus(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vpkuhus(a, b);
+}
+
+/* vec_vpkswus */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vpkswus(vector int a, vector int b)
+{
+ return __builtin_altivec_vpkswus(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vpkswus(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vpkuwus(a, b);
+}
+
+/* vec_perm */
+
+static vector signed char __ATTRS_o_ai
+vec_perm(vector signed char a, vector signed char b, vector unsigned char c)
+{
+ return (vector signed char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_perm(vector unsigned char a, vector unsigned char b, vector unsigned char c)
+{
+ return (vector unsigned char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+static vector short __ATTRS_o_ai
+vec_perm(vector short a, vector short b, vector unsigned char c)
+{
+ return (vector short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_perm(vector unsigned short a, vector unsigned short b, vector unsigned char c)
+{
+ return (vector unsigned short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+static vector int __ATTRS_o_ai
+vec_perm(vector int a, vector int b, vector unsigned char c)
+{
+ return (vector int)__builtin_altivec_vperm_4si(a, b, c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_perm(vector unsigned int a, vector unsigned int b, vector unsigned char c)
+{
+ return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+static vector float __ATTRS_o_ai
+vec_perm(vector float a, vector float b, vector unsigned char c)
+{
+ return (vector float)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
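+/* Note: vec_perm is the generic byte shuffle behind most of the
+ * merge/pack/splat routines in this header: each byte of c selects,
+ * via its low five bits, one byte of the 32-byte concatenation a:b
+ * (0x00-0x0F index a, 0x10-0x1F index b). Illustrative sketch that
+ * reverses a vector's bytes:
+ *
+ *   vector unsigned char x = (vector unsigned char)
+ *     (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ *   vector unsigned char rev = vec_perm(x, x, (vector unsigned char)
+ *     (0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
+ *      0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00));
+ *   // rev == (15, 14, ..., 1, 0)
+ */
+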
+/* vec_vperm */
+
+static vector signed char __ATTRS_o_ai
+vec_vperm(vector signed char a, vector signed char b, vector unsigned char c)
+{
+ return (vector signed char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vperm(vector unsigned char a, vector unsigned char b, vector unsigned char c)
+{
+ return (vector unsigned char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+static vector short __ATTRS_o_ai
+vec_vperm(vector short a, vector short b, vector unsigned char c)
+{
+ return (vector short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vperm(vector unsigned short a, vector unsigned short b, vector unsigned char c)
+{
+ return (vector unsigned short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+static vector int __ATTRS_o_ai
+vec_vperm(vector int a, vector int b, vector unsigned char c)
+{
+ return (vector int)__builtin_altivec_vperm_4si(a, b, c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vperm(vector unsigned int a, vector unsigned int b, vector unsigned char c)
+{
+ return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+static vector float __ATTRS_o_ai
+vec_vperm(vector float a, vector float b, vector unsigned char c)
+{
+ return (vector float)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+/* vec_re */
+
+static vector float __attribute__((__always_inline__))
+vec_re(vector float a)
+{
+ return __builtin_altivec_vrefp(a);
+}
+
+/* vec_vrefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vrefp(vector float a)
+{
+ return __builtin_altivec_vrefp(a);
+}
+
+/* vec_rl */
+
+static vector signed char __ATTRS_o_ai
+vec_rl(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vrlb((vector char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_rl(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vrlb((vector char)a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_rl(vector short a, vector unsigned short b)
+{
+ return __builtin_altivec_vrlh(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_rl(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)__builtin_altivec_vrlh((vector short)a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_rl(vector int a, vector unsigned int b)
+{
+ return __builtin_altivec_vrlw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_rl(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)__builtin_altivec_vrlw((vector int)a, b);
+}
+
+/* vec_vrlb */
+
+static vector signed char __ATTRS_o_ai
+vec_vrlb(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vrlb((vector char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vrlb(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vrlb((vector char)a, b);
+}
+
+/* vec_vrlh */
+
+static vector short __ATTRS_o_ai
+vec_vrlh(vector short a, vector unsigned short b)
+{
+ return __builtin_altivec_vrlh(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vrlh(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)__builtin_altivec_vrlh((vector short)a, b);
+}
+
+/* vec_vrlw */
+
+static vector int __ATTRS_o_ai
+vec_vrlw(vector int a, vector unsigned int b)
+{
+ return __builtin_altivec_vrlw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vrlw(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)__builtin_altivec_vrlw((vector int)a, b);
+}
+
+/* vec_round */
+
+static vector float __attribute__((__always_inline__))
+vec_round(vector float a)
+{
+ return __builtin_altivec_vrfin(a);
+}
+
+/* vec_vrfin */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfin(vector float a)
+{
+ return __builtin_altivec_vrfin(a);
+}
+
+/* vec_rsqrte */
+
+static vector float __attribute__((__always_inline__))
+vec_rsqrte(vector float a)
+{
+ return __builtin_altivec_vrsqrtefp(a);
+}
+
+/* vec_vrsqrtefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vrsqrtefp(vector float a)
+{
+ return __builtin_altivec_vrsqrtefp(a);
+}
+
+/* vec_sel */
+
+#define __builtin_altivec_vsel_4si vec_sel
+
+static vector signed char __ATTRS_o_ai
+vec_sel(vector signed char a, vector signed char b, vector unsigned char c)
+{
+ return (a & ~(vector signed char)c) | (b & (vector signed char)c);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sel(vector unsigned char a, vector unsigned char b, vector unsigned char c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector short __ATTRS_o_ai
+vec_sel(vector short a, vector short b, vector unsigned short c)
+{
+ return (a & ~(vector short)c) | (b & (vector short)c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sel(vector unsigned short a, vector unsigned short b, vector unsigned short c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector int __ATTRS_o_ai
+vec_sel(vector int a, vector int b, vector unsigned int c)
+{
+ return (a & ~(vector int)c) | (b & (vector int)c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sel(vector unsigned int a, vector unsigned int b, vector unsigned int c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector float __ATTRS_o_ai
+vec_sel(vector float a, vector float b, vector unsigned int c)
+{
+ vector int res = ((vector int)a & ~(vector int)c) | ((vector int)b & (vector int)c);
+ return (vector float)res;
+}
+
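+/* Note: vec_sel is a bitwise select, (a & ~c) | (b & c): each result
+ * bit comes from b where the corresponding bit of c is set and from a
+ * where it is clear. With a mask from a vector compare this gives a
+ * branch-free conditional; a hypothetical sketch, assuming this
+ * header's vec_cmpgt:
+ *
+ *   vector int x = (vector int)(1, 5, 3, 7);
+ *   vector int y = (vector int)(4, 2, 6, 0);
+ *   vector unsigned int m = (vector unsigned int)vec_cmpgt(x, y);
+ *   vector int r = vec_sel(y, x, m);   // (4, 5, 6, 7) -- per-lane max
+ */
+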
+/* vec_vsel */
+
+static vector signed char __ATTRS_o_ai
+vec_vsel(vector signed char a, vector signed char b, vector unsigned char c)
+{
+ return (a & ~(vector signed char)c) | (b & (vector signed char)c);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsel(vector unsigned char a, vector unsigned char b, vector unsigned char c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsel(vector short a, vector short b, vector unsigned short c)
+{
+ return (a & ~(vector short)c) | (b & (vector short)c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsel(vector unsigned short a, vector unsigned short b, vector unsigned short c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsel(vector int a, vector int b, vector unsigned int c)
+{
+ return (a & ~(vector int)c) | (b & (vector int)c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsel(vector unsigned int a, vector unsigned int b, vector unsigned int c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector float __ATTRS_o_ai
+vec_vsel(vector float a, vector float b, vector unsigned int c)
+{
+ vector int res = ((vector int)a & ~(vector int)c) | ((vector int)b & (vector int)c);
+ return (vector float)res;
+}
+
+/* vec_sl */
+
+static vector signed char __ATTRS_o_ai
+vec_sl(vector signed char a, vector unsigned char b)
+{
+ return a << (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sl(vector unsigned char a, vector unsigned char b)
+{
+ return a << b;
+}
+
+static vector short __ATTRS_o_ai
+vec_sl(vector short a, vector unsigned short b)
+{
+ return a << (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sl(vector unsigned short a, vector unsigned short b)
+{
+ return a << b;
+}
+
+static vector int __ATTRS_o_ai
+vec_sl(vector int a, vector unsigned int b)
+{
+ return a << (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sl(vector unsigned int a, vector unsigned int b)
+{
+ return a << b;
+}
+
+/* vec_vslb */
+
+#define __builtin_altivec_vslb vec_vslb
+
+static vector signed char __ATTRS_o_ai
+vec_vslb(vector signed char a, vector unsigned char b)
+{
+ return vec_sl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vslb(vector unsigned char a, vector unsigned char b)
+{
+ return vec_sl(a, b);
+}
+
+/* vec_vslh */
+
+#define __builtin_altivec_vslh vec_vslh
+
+static vector short __ATTRS_o_ai
+vec_vslh(vector short a, vector unsigned short b)
+{
+ return vec_sl(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vslh(vector unsigned short a, vector unsigned short b)
+{
+ return vec_sl(a, b);
+}
+
+/* vec_vslw */
+
+#define __builtin_altivec_vslw vec_vslw
+
+static vector int __ATTRS_o_ai
+vec_vslw(vector int a, vector unsigned int b)
+{
+ return vec_sl(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vslw(vector unsigned int a, vector unsigned int b)
+{
+ return vec_sl(a, b);
+}
+
+/* vec_sld */
+
+#define __builtin_altivec_vsldoi_4si vec_sld
+
+static vector signed char __ATTRS_o_ai
+vec_sld(vector signed char a, vector signed char b, unsigned char c)
+{
+ return (vector signed char)vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sld(vector unsigned char a, vector unsigned char b, unsigned char c)
+{
+ return (vector unsigned char)vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector short __ATTRS_o_ai
+vec_sld(vector short a, vector short b, unsigned char c)
+{
+ return (vector short)vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sld(vector unsigned short a, vector unsigned short b, unsigned char c)
+{
+ return (vector unsigned short)vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector int __ATTRS_o_ai
+vec_sld(vector int a, vector int b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sld(vector unsigned int a, vector unsigned int b, unsigned char c)
+{
+ return (vector unsigned int)vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector float __ATTRS_o_ai
+vec_sld(vector float a, vector float b, unsigned char c)
+{
+ return (vector float)vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
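+/* Note: vec_sld shifts the 32-byte concatenation a:b left by c bytes
+ * and returns the high 16 bytes, i.e. bytes c .. c+15; only c in the
+ * range 0-15 is meaningful. Illustrative sketch:
+ *
+ *   vector int a = (vector int)(0, 1, 2, 3);
+ *   vector int b = (vector int)(4, 5, 6, 7);
+ *   vector int r = vec_sld(a, b, 4);   // (1, 2, 3, 4)
+ */
+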
+/* vec_vsldoi */
+
+static vector signed char __ATTRS_o_ai
+vec_vsldoi(vector signed char a, vector signed char b, unsigned char c)
+{
+ return (vector signed char)vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsldoi(vector unsigned char a, vector unsigned char b, unsigned char c)
+{
+ return (vector unsigned char)vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector short __ATTRS_o_ai
+vec_vsldoi(vector short a, vector short b, unsigned char c)
+{
+ return (vector short)vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsldoi(vector unsigned short a, vector unsigned short b, unsigned char c)
+{
+ return (vector unsigned short)vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector int __ATTRS_o_ai
+vec_vsldoi(vector int a, vector int b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsldoi(vector unsigned int a, vector unsigned int b, unsigned char c)
+{
+ return (vector unsigned int)vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector float __ATTRS_o_ai
+vec_vsldoi(vector float a, vector float b, unsigned char c)
+{
+ return (vector float)vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+/* vec_sll */
+
+static vector signed char __ATTRS_o_ai
+vec_sll(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_sll(vector signed char a, vector unsigned short b)
+{
+ return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_sll(vector signed char a, vector unsigned int b)
+{
+ return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sll(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sll(vector unsigned char a, vector unsigned short b)
+{
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sll(vector unsigned char a, vector unsigned int b)
+{
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_sll(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_sll(vector short a, vector unsigned short b)
+{
+ return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_sll(vector short a, vector unsigned int b)
+{
+ return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sll(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sll(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sll(vector unsigned short a, vector unsigned int b)
+{
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sll(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vsl(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sll(vector int a, vector unsigned short b)
+{
+ return (vector int)__builtin_altivec_vsl(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sll(vector int a, vector unsigned int b)
+{
+ return (vector int)__builtin_altivec_vsl(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sll(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sll(vector unsigned int a, vector unsigned short b)
+{
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sll(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+/* vec_vsl */
+
+static vector signed char __ATTRS_o_ai
+vec_vsl(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsl(vector signed char a, vector unsigned short b)
+{
+ return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsl(vector signed char a, vector unsigned int b)
+{
+ return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsl(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsl(vector unsigned char a, vector unsigned short b)
+{
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsl(vector unsigned char a, vector unsigned int b)
+{
+ return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsl(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsl(vector short a, vector unsigned short b)
+{
+ return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsl(vector short a, vector unsigned int b)
+{
+ return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsl(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsl(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsl(vector unsigned short a, vector unsigned int b)
+{
+ return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsl(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vsl(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsl(vector int a, vector unsigned short b)
+{
+ return (vector int)__builtin_altivec_vsl(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsl(vector int a, vector unsigned int b)
+{
+ return (vector int)__builtin_altivec_vsl(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsl(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsl(vector unsigned int a, vector unsigned short b)
+{
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsl(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+/* vec_slo */
+
+static vector signed char __ATTRS_o_ai
+vec_slo(vector signed char a, vector signed char b)
+{
+ return (vector signed char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_slo(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_slo(vector unsigned char a, vector signed char b)
+{
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_slo(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_slo(vector short a, vector signed char b)
+{
+ return (vector short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_slo(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_slo(vector unsigned short a, vector signed char b)
+{
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_slo(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_slo(vector int a, vector signed char b)
+{
+ return (vector int)__builtin_altivec_vslo(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_slo(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vslo(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_slo(vector unsigned int a, vector signed char b)
+{
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_slo(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_slo(vector float a, vector signed char b)
+{
+ return (vector float)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_slo(vector float a, vector unsigned char b)
+{
+ return (vector float)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+/* vec_vslo */
+
+static vector signed char __ATTRS_o_ai
+vec_vslo(vector signed char a, vector signed char b)
+{
+ return (vector signed char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vslo(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vslo(vector unsigned char a, vector signed char b)
+{
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vslo(vector unsigned char a, vector unsigned char b)
{
- return __builtin_altivec_vcmpgtfp_p(CR6_param, a, b);
+ return (vector unsigned char)__builtin_altivec_vslo((vector int)a, (vector int)b);
}
+
+static vector short __ATTRS_o_ai
+vec_vslo(vector short a, vector signed char b)
+{
+ return (vector short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vslo(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vslo(vector unsigned short a, vector signed char b)
+{
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vslo(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vslo(vector int a, vector signed char b)
+{
+ return (vector int)__builtin_altivec_vslo(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vslo(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vslo(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vslo(vector unsigned int a, vector signed char b)
+{
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vslo(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vslo(vector float a, vector signed char b)
+{
+ return (vector float)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vslo(vector float a, vector unsigned char b)
+{
+ return (vector float)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+/* vec_splat */
+
+static vector signed char __ATTRS_o_ai
+vec_splat(vector signed char a, unsigned char b)
+{
+ return (vector signed char)vec_perm(a, a, (vector unsigned char)(b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_splat(vector unsigned char a, unsigned char b)
+{
+ return (vector unsigned char)vec_perm(a, a, (vector unsigned char)(b));
+}
+
+static vector short __ATTRS_o_ai
+vec_splat(vector short a, unsigned char b)
+{
+ b *= 2;
+ return (vector short)vec_perm(a, a, (vector unsigned char)
+ (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_splat(vector unsigned short a, unsigned char b)
+{
+ b *= 2;
+ return (vector unsigned short)vec_perm(a, a, (vector unsigned char)
+ (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1));
+}
+
+static vector int __ATTRS_o_ai
+vec_splat(vector int a, unsigned char b)
+{
+ b *= 4;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_splat(vector unsigned int a, unsigned char b)
+{
+ b *= 4;
+ return (vector unsigned int)vec_perm(a, a, (vector unsigned char)
+ (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+}
+
+static vector float __ATTRS_o_ai
+vec_splat(vector float a, unsigned char b)
+{
+ b *= 4;
+ return (vector float)vec_perm(a, a, (vector unsigned char)
+ (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+}
+
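+/* Note: vec_splat broadcasts element b of a to every lane; the b *= 2
+ * and b *= 4 scalings above turn an element index into the byte index
+ * that vec_perm needs. Illustrative sketch:
+ *
+ *   vector int x = (vector int)(7, 8, 9, 10);
+ *   vector int s = vec_splat(x, 2);   // (9, 9, 9, 9)
+ */
+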
+/* vec_vspltb */
+
+#define __builtin_altivec_vspltb vec_vspltb
+
+static vector signed char __ATTRS_o_ai
+vec_vspltb(vector signed char a, unsigned char b)
+{
+ return (vector signed char)vec_perm(a, a, (vector unsigned char)(b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vspltb(vector unsigned char a, unsigned char b)
+{
+ return (vector unsigned char)vec_perm(a, a, (vector unsigned char)(b));
+}
+
+/* vec_vsplth */
+
+#define __builtin_altivec_vsplth vec_vsplth
+
+static vector short __ATTRS_o_ai
+vec_vsplth(vector short a, unsigned char b)
+{
+ b *= 2;
+ return (vector short)vec_perm(a, a, (vector unsigned char)
+ (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsplth(vector unsigned short a, unsigned char b)
+{
+ b *= 2;
+ return (vector unsigned short)vec_perm(a, a, (vector unsigned char)
+ (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1));
+}
+
+/* vec_vspltw */
+
+#define __builtin_altivec_vspltw vec_vspltw
+
+static vector int __ATTRS_o_ai
+vec_vspltw(vector int a, unsigned char b)
+{
+ b *= 4;
+ return (vector int)vec_perm(a, a, (vector unsigned char)
+ (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vspltw(vector unsigned int a, unsigned char b)
+{
+ b *= 4;
+ return (vector unsigned int)vec_perm(a, a, (vector unsigned char)
+ (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+}
+
+static vector float __ATTRS_o_ai
+vec_vspltw(vector float a, unsigned char b)
+{
+ b *= 4;
+ return (vector float)vec_perm(a, a, (vector unsigned char)
+ (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3));
+}
+
+/* vec_splat_s8 */
+
+#define __builtin_altivec_vspltisb vec_splat_s8
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector signed char __ATTRS_o_ai
+vec_splat_s8(signed char a)
+{
+ return (vector signed char)(a);
+}
+
+/* vec_vspltisb */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector signed char __ATTRS_o_ai
+vec_vspltisb(signed char a)
+{
+ return (vector signed char)(a);
+}
+
+/* vec_splat_s16 */
+
+#define __builtin_altivec_vspltish vec_splat_s16
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector short __ATTRS_o_ai
+vec_splat_s16(signed char a)
+{
+ return (vector short)(a);
+}
+
+/* vec_vspltish */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector short __ATTRS_o_ai
+vec_vspltish(signed char a)
+{
+ return (vector short)(a);
+}
+
+/* vec_splat_s32 */
+
+#define __builtin_altivec_vspltisw vec_splat_s32
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector int __ATTRS_o_ai
+vec_splat_s32(signed char a)
+{
+ return (vector int)(a);
+}
+
+/* vec_vspltisw */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector int __ATTRS_o_ai
+vec_vspltisw(signed char a)
+{
+ return (vector int)(a);
+}
+
+/* vec_splat_u8 */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector unsigned char __ATTRS_o_ai
+vec_splat_u8(unsigned char a)
+{
+ return (vector unsigned char)(a);
+}
+
+/* vec_splat_u16 */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector unsigned short __ATTRS_o_ai
+vec_splat_u16(signed char a)
+{
+ return (vector unsigned short)(a);
+}
+
+/* vec_splat_u32 */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector unsigned int __ATTRS_o_ai
+vec_splat_u32(signed char a)
+{
+ return (vector unsigned int)(a);
+}
+
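+/* Note on the FIXMEs above: the underlying vspltisb/vspltish/vspltisw
+ * instructions take a 5-bit signed immediate, so only the values
+ * -16 .. 15 are representable; these helpers do not yet enforce that
+ * range on their parameter. Illustrative sketch:
+ *
+ *   vector signed char ones = vec_splat_s8(-1);   // 16 bytes of 0xFF
+ */
+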
+/* vec_sr */
+
+static vector signed char __ATTRS_o_ai
+vec_sr(vector signed char a, vector unsigned char b)
+{
+ return a >> (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sr(vector unsigned char a, vector unsigned char b)
+{
+ return a >> b;
+}
+
+static vector short __ATTRS_o_ai
+vec_sr(vector short a, vector unsigned short b)
+{
+ return a >> (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sr(vector unsigned short a, vector unsigned short b)
+{
+ return a >> b;
+}
+
+static vector int __ATTRS_o_ai
+vec_sr(vector int a, vector unsigned int b)
+{
+ return a >> (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sr(vector unsigned int a, vector unsigned int b)
+{
+ return a >> b;
+}
+
+/* vec_vsrb */
+
+#define __builtin_altivec_vsrb vec_vsrb
+
+static vector signed char __ATTRS_o_ai
+vec_vsrb(vector signed char a, vector unsigned char b)
+{
+ return a >> (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsrb(vector unsigned char a, vector unsigned char b)
+{
+ return a >> b;
+}
+
+/* vec_vsrh */
+
+#define __builtin_altivec_vsrh vec_vsrh
+
+static vector short __ATTRS_o_ai
+vec_vsrh(vector short a, vector unsigned short b)
+{
+ return a >> (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsrh(vector unsigned short a, vector unsigned short b)
+{
+ return a >> b;
+}
+
+/* vec_vsrw */
+
+#define __builtin_altivec_vsrw vec_vsrw
+
+static vector int __ATTRS_o_ai
+vec_vsrw(vector int a, vector unsigned int b)
+{
+ return a >> (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsrw(vector unsigned int a, vector unsigned int b)
+{
+ return a >> b;
+}
+
+/* vec_sra */
+
+static vector signed char __ATTRS_o_ai
+vec_sra(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vsrab((vector char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sra(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vsrab((vector char)a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_sra(vector short a, vector unsigned short b)
+{
+ return __builtin_altivec_vsrah(a, (vector unsigned short)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sra(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)__builtin_altivec_vsrah((vector short)a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sra(vector int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsraw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sra(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)__builtin_altivec_vsraw((vector int)a, b);
+}
+
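+/* Note: vec_sra is the algebraic (sign-extending) shift right, unlike
+ * the logical vec_sr above; shift counts are taken modulo the element
+ * width. Illustrative sketch:
+ *
+ *   vector int x = (vector int)(-8, -8, -8, -8);
+ *   vector unsigned int n = (vector unsigned int)(1, 1, 1, 1);
+ *   vector int r = vec_sra(x, n);   // (-4, -4, -4, -4)
+ */
+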
+/* vec_vsrab */
+
+static vector signed char __ATTRS_o_ai
+vec_vsrab(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vsrab((vector char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsrab(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vsrab((vector char)a, b);
+}
+
+/* vec_vsrah */
+
+static vector short __ATTRS_o_ai
+vec_vsrah(vector short a, vector unsigned short b)
+{
+ return __builtin_altivec_vsrah(a, (vector unsigned short)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsrah(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)__builtin_altivec_vsrah((vector short)a, b);
+}
+
+/* vec_vsraw */
+
+static vector int __ATTRS_o_ai
+vec_vsraw(vector int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsraw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsraw(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)__builtin_altivec_vsraw((vector int)a, b);
+}
+
+/* vec_srl */
+
+static vector signed char __ATTRS_o_ai
+vec_srl(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_srl(vector signed char a, vector unsigned short b)
+{
+ return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_srl(vector signed char a, vector unsigned int b)
+{
+ return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_srl(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_srl(vector unsigned char a, vector unsigned short b)
+{
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_srl(vector unsigned char a, vector unsigned int b)
+{
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_srl(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_srl(vector short a, vector unsigned short b)
+{
+ return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_srl(vector short a, vector unsigned int b)
+{
+ return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_srl(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_srl(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_srl(vector unsigned short a, vector unsigned int b)
+{
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_srl(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vsr(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_srl(vector int a, vector unsigned short b)
+{
+ return (vector int)__builtin_altivec_vsr(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_srl(vector int a, vector unsigned int b)
+{
+ return (vector int)__builtin_altivec_vsr(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_srl(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_srl(vector unsigned int a, vector unsigned short b)
+{
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_srl(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+/* vec_vsr */
+
+static vector signed char __ATTRS_o_ai
+vec_vsr(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsr(vector signed char a, vector unsigned short b)
+{
+ return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsr(vector signed char a, vector unsigned int b)
+{
+ return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsr(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsr(vector unsigned char a, vector unsigned short b)
+{
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsr(vector unsigned char a, vector unsigned int b)
+{
+ return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsr(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsr(vector short a, vector unsigned short b)
+{
+ return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsr(vector short a, vector unsigned int b)
+{
+ return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsr(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsr(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsr(vector unsigned short a, vector unsigned int b)
+{
+ return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsr(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vsr(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsr(vector int a, vector unsigned short b)
+{
+ return (vector int)__builtin_altivec_vsr(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsr(vector int a, vector unsigned int b)
+{
+ return (vector int)__builtin_altivec_vsr(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsr(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsr(vector unsigned int a, vector unsigned short b)
+{
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsr(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+/* vec_sro */
+
+static vector signed char __ATTRS_o_ai
+vec_sro(vector signed char a, vector signed char b)
+{
+ return (vector signed char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_sro(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sro(vector unsigned char a, vector signed char b)
+{
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sro(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_sro(vector short a, vector signed char b)
+{
+ return (vector short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_sro(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sro(vector unsigned short a, vector signed char b)
+{
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sro(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sro(vector int a, vector signed char b)
+{
+ return (vector int)__builtin_altivec_vsro(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sro(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vsro(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sro(vector unsigned int a, vector signed char b)
+{
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sro(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_sro(vector float a, vector signed char b)
+{
+ return (vector float)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_sro(vector float a, vector unsigned char b)
+{
+ return (vector float)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+/* vec_vsro */
+
+static vector signed char __ATTRS_o_ai
+vec_vsro(vector signed char a, vector signed char b)
+{
+ return (vector signed char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsro(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsro(vector unsigned char a, vector signed char b)
+{
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsro(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsro(vector short a, vector signed char b)
+{
+ return (vector short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsro(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsro(vector unsigned short a, vector signed char b)
+{
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsro(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsro(vector int a, vector signed char b)
+{
+ return (vector int)__builtin_altivec_vsro(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsro(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vsro(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsro(vector unsigned int a, vector signed char b)
+{
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsro(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vsro(vector float a, vector signed char b)
+{
+ return (vector float)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vsro(vector float a, vector unsigned char b)
+{
+ return (vector float)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+/* vec_st */
+
+static void __ATTRS_o_ai
+vec_st(vector signed char a, int b, vector signed char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector signed char a, int b, signed char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector unsigned char a, int b, vector unsigned char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector unsigned char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector short a, int b, vector short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector short a, int b, short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector unsigned short a, int b, vector unsigned short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector unsigned short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector int a, int b, vector int *c)
+{
+ __builtin_altivec_stvx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector int a, int b, int *c)
+{
+ __builtin_altivec_stvx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector unsigned int a, int b, vector unsigned int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector unsigned int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector float a, int b, vector float *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector float a, int b, float *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+/* vec_stvx */
+
+static void __ATTRS_o_ai
+vec_stvx(vector signed char a, int b, vector signed char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector signed char a, int b, signed char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector unsigned char a, int b, vector unsigned char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector unsigned char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector short a, int b, vector short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector short a, int b, short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector unsigned short a, int b, vector unsigned short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector unsigned short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector int a, int b, vector int *c)
+{
+ __builtin_altivec_stvx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector int a, int b, int *c)
+{
+ __builtin_altivec_stvx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector unsigned int a, int b, vector unsigned int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector unsigned int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector float a, int b, vector float *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector float a, int b, float *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+/* vec_ste */
+
+static void __ATTRS_o_ai
+vec_ste(vector signed char a, int b, signed char *c)
+{
+ __builtin_altivec_stvebx((vector char)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector unsigned char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvebx((vector char)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector short a, int b, short *c)
+{
+ __builtin_altivec_stvehx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector unsigned short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvehx((vector short)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector int a, int b, int *c)
+{
+ __builtin_altivec_stvewx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector unsigned int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvewx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector float a, int b, float *c)
+{
+ __builtin_altivec_stvewx((vector int)a, b, c);
+}
+
+/* vec_stvebx */
+
+static void __ATTRS_o_ai
+vec_stvebx(vector signed char a, int b, signed char *c)
+{
+ __builtin_altivec_stvebx((vector char)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvebx(vector unsigned char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvebx((vector char)a, b, c);
+}
+
+/* vec_stvehx */
+
+static void __ATTRS_o_ai
+vec_stvehx(vector short a, int b, short *c)
+{
+ __builtin_altivec_stvehx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvehx(vector unsigned short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvehx((vector short)a, b, c);
+}
+
+/* vec_stvewx */
+
+static void __ATTRS_o_ai
+vec_stvewx(vector int a, int b, int *c)
+{
+ __builtin_altivec_stvewx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvewx(vector unsigned int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvewx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvewx(vector float a, int b, float *c)
+{
+ __builtin_altivec_stvewx((vector int)a, b, c);
+}
+
+/* vec_stl */
+
+static void __ATTRS_o_ai
+vec_stl(vector signed char a, int b, vector signed char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector signed char a, int b, signed char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector unsigned char a, int b, vector unsigned char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector unsigned char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector short a, int b, vector short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector short a, int b, short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector unsigned short a, int b, vector unsigned short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector unsigned short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector int a, int b, vector int *c)
+{
+ __builtin_altivec_stvxl(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector int a, int b, int *c)
+{
+ __builtin_altivec_stvxl(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector unsigned int a, int b, vector unsigned int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector unsigned int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector float a, int b, vector float *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector float a, int b, float *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+/* vec_stvxl */
+
+static void __ATTRS_o_ai
+vec_stvxl(vector signed char a, int b, vector signed char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector signed char a, int b, signed char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector unsigned char a, int b, vector unsigned char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector unsigned char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector short a, int b, vector short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector short a, int b, short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector unsigned short a, int b, vector unsigned short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector unsigned short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector int a, int b, vector int *c)
+{
+ __builtin_altivec_stvxl(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector int a, int b, int *c)
+{
+ __builtin_altivec_stvxl(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector unsigned int a, int b, vector unsigned int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector unsigned int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector float a, int b, vector float *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector float a, int b, float *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+/* vec_sub */
+
+static vector signed char __ATTRS_o_ai
+vec_sub(vector signed char a, vector signed char b)
+{
+ return a - b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sub(vector unsigned char a, vector unsigned char b)
+{
+ return a - b;
+}
+
+static vector short __ATTRS_o_ai
+vec_sub(vector short a, vector short b)
+{
+ return a - b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sub(vector unsigned short a, vector unsigned short b)
+{
+ return a - b;
+}
+
+static vector int __ATTRS_o_ai
+vec_sub(vector int a, vector int b)
+{
+ return a - b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sub(vector unsigned int a, vector unsigned int b)
+{
+ return a - b;
+}
+
+static vector float __ATTRS_o_ai
+vec_sub(vector float a, vector float b)
+{
+ return a - b;
+}
+
+/* vec_vsububm */
+
+#define __builtin_altivec_vsububm vec_vsububm
+
+static vector signed char __ATTRS_o_ai
+vec_vsububm(vector signed char a, vector signed char b)
+{
+ return a - b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsububm(vector unsigned char a, vector unsigned char b)
+{
+ return a - b;
+}
+
+/* vec_vsubuhm */
+
+#define __builtin_altivec_vsubuhm vec_vsubuhm
+
+static vector short __ATTRS_o_ai
+vec_vsubuhm(vector short a, vector short b)
+{
+ return a - b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhm(vector unsigned short a, vector unsigned short b)
+{
+ return a - b;
+}
+
+/* vec_vsubuwm */
+
+#define __builtin_altivec_vsubuwm vec_vsubuwm
+
+static vector int __ATTRS_o_ai
+vec_vsubuwm(vector int a, vector int b)
+{
+ return a - b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsubuwm(vector unsigned int a, vector unsigned int b)
+{
+ return a - b;
+}
+
+/* vec_vsubfp */
+
+#define __builtin_altivec_vsubfp vec_vsubfp
+
+static vector float __attribute__((__always_inline__))
+vec_vsubfp(vector float a, vector float b)
+{
+ return a - b;
+}
+
+/* vec_subc */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_subc(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsubcuw(a, b);
+}
+
+/* vec_vsubcuw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vsubcuw(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsubcuw(a, b);
+}
+
+/* vec_subs */
+
+static vector signed char __ATTRS_o_ai
+vec_subs(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vsubsbs(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_subs(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vsububs(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_subs(vector short a, vector short b)
+{
+ return __builtin_altivec_vsubshs(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_subs(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vsubuhs(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_subs(vector int a, vector int b)
+{
+ return __builtin_altivec_vsubsws(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_subs(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsubuws(a, b);
+}
+
+/* vec_vsubsbs */
+
+static vector signed char __attribute__((__always_inline__))
+vec_vsubsbs(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vsubsbs(a, b);
+}
+
+/* vec_vsububs */
+
+static vector unsigned char __attribute__((__always_inline__))
+vec_vsububs(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vsububs(a, b);
+}
+
+/* vec_vsubshs */
+
+static vector short __attribute__((__always_inline__))
+vec_vsubshs(vector short a, vector short b)
+{
+ return __builtin_altivec_vsubshs(a, b);
+}
+
+/* vec_vsubuhs */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vsubuhs(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vsubuhs(a, b);
+}
+
+/* vec_vsubsws */
+
+static vector int __attribute__((__always_inline__))
+vec_vsubsws(vector int a, vector int b)
+{
+ return __builtin_altivec_vsubsws(a, b);
+}
+
+/* vec_vsubuws */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vsubuws(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsubuws(a, b);
+}
+
+/* vec_sum4s */
+
+static vector int __ATTRS_o_ai
+vec_sum4s(vector signed char a, vector int b)
+{
+ return __builtin_altivec_vsum4sbs(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sum4s(vector unsigned char a, vector unsigned int b)
+{
+ return __builtin_altivec_vsum4ubs(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sum4s(vector signed short a, vector int b)
+{
+ return __builtin_altivec_vsum4shs(a, b);
+}
+
+/* vec_vsum4sbs */
+
+static vector int __attribute__((__always_inline__))
+vec_vsum4sbs(vector signed char a, vector int b)
+{
+ return __builtin_altivec_vsum4sbs(a, b);
+}
+
+/* vec_vsum4ubs */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vsum4ubs(vector unsigned char a, vector unsigned int b)
+{
+ return __builtin_altivec_vsum4ubs(a, b);
+}
+
+/* vec_vsum4shs */
+
+static vector int __attribute__((__always_inline__))
+vec_vsum4shs(vector signed short a, vector int b)
+{
+ return __builtin_altivec_vsum4shs(a, b);
+}
+
+/* vec_sum2s */
+
+static vector signed int __attribute__((__always_inline__))
+vec_sum2s(vector int a, vector int b)
+{
+ return __builtin_altivec_vsum2sws(a, b);
+}
+
+/* vec_vsum2sws */
+
+static vector signed int __attribute__((__always_inline__))
+vec_vsum2sws(vector int a, vector int b)
+{
+ return __builtin_altivec_vsum2sws(a, b);
+}
+
+/* vec_sums */
+
+static vector signed int __attribute__((__always_inline__))
+vec_sums(vector signed int a, vector signed int b)
+{
+ return __builtin_altivec_vsumsws(a, b);
+}
+
+/* vec_vsumsws */
+
+static vector signed int __attribute__((__always_inline__))
+vec_vsumsws(vector signed int a, vector signed int b)
+{
+ return __builtin_altivec_vsumsws(a, b);
+}
+
+/* vec_trunc */
+
+static vector float __attribute__((__always_inline__))
+vec_trunc(vector float a)
+{
+ return __builtin_altivec_vrfiz(a);
+}
+
+/* vec_vrfiz */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfiz(vector float a)
+{
+ return __builtin_altivec_vrfiz(a);
+}
+
+/* vec_unpackh */
+
+static vector short __ATTRS_o_ai
+vec_unpackh(vector signed char a)
+{
+ return __builtin_altivec_vupkhsb((vector char)a);
+}
+
+static vector int __ATTRS_o_ai
+vec_unpackh(vector short a)
+{
+ return __builtin_altivec_vupkhsh(a);
+}
+
+/* vec_vupkhsb */
+
+static vector short __attribute__((__always_inline__))
+vec_vupkhsb(vector signed char a)
+{
+ return __builtin_altivec_vupkhsb((vector char)a);
+}
+
+/* vec_vupkhsh */
+
+static vector int __attribute__((__always_inline__))
+vec_vupkhsh(vector short a)
+{
+ return __builtin_altivec_vupkhsh(a);
+}
+
+/* vec_unpackl */
+
+static vector short __ATTRS_o_ai
+vec_unpackl(vector signed char a)
+{
+ return __builtin_altivec_vupklsb((vector char)a);
+}
+
+static vector int __ATTRS_o_ai
+vec_unpackl(vector short a)
+{
+ return __builtin_altivec_vupklsh(a);
+}
+
+/* vec_vupklsb */
+
+static vector short __attribute__((__always_inline__))
+vec_vupklsb(vector signed char a)
+{
+ return __builtin_altivec_vupklsb((vector char)a);
+}
+
+/* vec_vupklsh */
+
+static vector int __attribute__((__always_inline__))
+vec_vupklsh(vector short a)
+{
+ return __builtin_altivec_vupklsh(a);
+}
+
+/* vec_xor */
+
+#define __builtin_altivec_vxor vec_xor
+
+static vector signed char __ATTRS_o_ai
+vec_xor(vector signed char a, vector signed char b)
+{
+ return a ^ b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_xor(vector unsigned char a, vector unsigned char b)
+{
+ return a ^ b;
+}
+
+static vector short __ATTRS_o_ai
+vec_xor(vector short a, vector short b)
+{
+ return a ^ b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_xor(vector unsigned short a, vector unsigned short b)
+{
+ return a ^ b;
+}
+
+static vector int __ATTRS_o_ai
+vec_xor(vector int a, vector int b)
+{
+ return a ^ b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_xor(vector unsigned int a, vector unsigned int b)
+{
+ return a ^ b;
+}
+
+static vector float __ATTRS_o_ai
+vec_xor(vector float a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a ^ (vector unsigned int)b;
+ return (vector float)res;
+}
+
+/* vec_vxor */
+
+static vector signed char __ATTRS_o_ai
+vec_vxor(vector signed char a, vector signed char b)
+{
+ return a ^ b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vxor(vector unsigned char a, vector unsigned char b)
+{
+ return a ^ b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vxor(vector short a, vector short b)
+{
+ return a ^ b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vxor(vector unsigned short a, vector unsigned short b)
+{
+ return a ^ b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vxor(vector int a, vector int b)
+{
+ return a ^ b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vxor(vector unsigned int a, vector unsigned int b)
+{
+ return a ^ b;
+}
+
+static vector float __ATTRS_o_ai
+vec_vxor(vector float a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a ^ (vector unsigned int)b;
+ return (vector float)res;
+}
+
+/* ------------------------------ predicates ------------------------------------ */
+
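+/* The *_p forms test the CR6 field set by the recording compare: __CR6_LT is
+   assumed to mean "all elements true", __CR6_EQ "all elements false", and the
+   _REV variants their negations, per the __CR6_* defines earlier in this
+   header. */
+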
/* vec_all_eq */
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_eq(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)a, (vector char)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_eq(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)a, (vector char)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_eq(vector short a, vector short b)
{
return __builtin_altivec_vcmpequh_p(__CR6_LT, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_eq(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_eq(vector int a, vector int b)
{
return __builtin_altivec_vcmpequw_p(__CR6_LT, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_eq(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)a, (vector int)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_eq(vector float a, vector float b)
{
return __builtin_altivec_vcmpeqfp_p(__CR6_LT, a, b);
@@ -884,87 +5325,87 @@ vec_all_eq(vector float a, vector float b)
/* vec_all_ge */
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ge(vector signed char a, vector signed char b)
{
- return __builtin_altivec_vcmpgtsb_p(__CR6_LT, b, a);
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ge(vector unsigned char a, vector unsigned char b)
{
- return __builtin_altivec_vcmpgtub_p(__CR6_LT, b, a);
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ge(vector short a, vector short b)
{
- return __builtin_altivec_vcmpgtsh_p(__CR6_LT, b, a);
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ge(vector unsigned short a, vector unsigned short b)
{
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT, b, a);
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ge(vector int a, vector int b)
{
- return __builtin_altivec_vcmpgtsw_p(__CR6_LT, b, a);
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ge(vector unsigned int a, vector unsigned int b)
{
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT, b, a);
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ge(vector float a, vector float b)
{
- return __builtin_altivec_vcmpgtfp_p(__CR6_LT, b, a);
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT, a, b);
}
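+/* Floating-point note: with NaN operands !(b > a) is not equivalent to
+   a >= b, which is presumably why the dedicated vcmpgefp compare replaces
+   the inverted vcmpgtfp here. */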
/* vec_all_gt */
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_gt(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpgtsb_p(__CR6_LT, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_gt(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpgtub_p(__CR6_LT, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_gt(vector short a, vector short b)
{
return __builtin_altivec_vcmpgtsh_p(__CR6_LT, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_gt(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpgtuh_p(__CR6_LT, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_gt(vector int a, vector int b)
{
return __builtin_altivec_vcmpgtsw_p(__CR6_LT, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_gt(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpgtuw_p(__CR6_LT, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_gt(vector float a, vector float b)
{
return __builtin_altivec_vcmpgtfp_p(__CR6_LT, a, b);
@@ -980,87 +5421,87 @@ vec_all_in(vector float a, vector float b)
/* vec_all_le */
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_le(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_le(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpgtub_p(__CR6_EQ, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_le(vector short a, vector short b)
{
return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_le(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_le(vector int a, vector int b)
{
return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_le(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_le(vector float a, vector float b)
{
- return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, a, b);
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT, b, a);
}
/* vec_all_lt */
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_lt(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpgtsb_p(__CR6_LT, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_lt(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpgtub_p(__CR6_LT, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_lt(vector short a, vector short b)
{
return __builtin_altivec_vcmpgtsh_p(__CR6_LT, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_lt(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpgtuh_p(__CR6_LT, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_lt(vector int a, vector int b)
{
return __builtin_altivec_vcmpgtsw_p(__CR6_LT, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_lt(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpgtuw_p(__CR6_LT, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_lt(vector float a, vector float b)
{
return __builtin_altivec_vcmpgtfp_p(__CR6_LT, b, a);
@@ -1076,43 +5517,43 @@ vec_all_nan(vector float a)
/* vec_all_ne */
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ne(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)a, (vector char)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ne(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)a, (vector char)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ne(vector short a, vector short b)
{
return __builtin_altivec_vcmpequh_p(__CR6_EQ, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ne(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ne(vector int a, vector int b)
{
return __builtin_altivec_vcmpequw_p(__CR6_EQ, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ne(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)a, (vector int)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_all_ne(vector float a, vector float b)
{
return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, a, b);
@@ -1160,43 +5601,43 @@ vec_all_numeric(vector float a)
/* vec_any_eq */
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_eq(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_eq(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_eq(vector short a, vector short b)
{
return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_eq(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)a, (vector short)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_eq(vector int a, vector int b)
{
return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_eq(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_eq(vector float a, vector float b)
{
return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, a, b);
@@ -1204,87 +5645,87 @@ vec_any_eq(vector float a, vector float b)
/* vec_any_ge */
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ge(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ge(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ge(vector short a, vector short b)
{
return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ge(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ge(vector int a, vector int b)
{
return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ge(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ge(vector float a, vector float b)
{
- return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, b, a);
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, a, b);
}
/* vec_any_gt */
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_gt(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_gt(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_gt(vector short a, vector short b)
{
return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_gt(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_gt(vector int a, vector int b)
{
return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_gt(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_gt(vector float a, vector float b)
{
return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, a, b);
@@ -1292,87 +5733,87 @@ vec_any_gt(vector float a, vector float b)
/* vec_any_le */
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_le(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_le(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_le(vector short a, vector short b)
{
return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_le(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_le(vector int a, vector int b)
{
return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_le(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_le(vector float a, vector float b)
{
- return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, a, b);
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, b, a);
}
/* vec_any_lt */
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_lt(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_lt(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_lt(vector short a, vector short b)
{
return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_lt(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_lt(vector int a, vector int b)
{
return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_lt(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, b, a);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_lt(vector float a, vector float b)
{
return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, b, a);
@@ -1388,43 +5829,43 @@ vec_any_nan(vector float a)
/* vec_any_ne */
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ne(vector signed char a, vector signed char b)
{
return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ne(vector unsigned char a, vector unsigned char b)
{
return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ne(vector short a, vector short b)
{
return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ne(vector unsigned short a, vector unsigned short b)
{
return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)a, (vector short)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ne(vector int a, vector int b)
{
return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, a, b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ne(vector unsigned int a, vector unsigned int b)
{
return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
}
-static int _ATTRS_o_ai
+static int __ATTRS_o_ai
vec_any_ne(vector float a, vector float b)
{
return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, a, b);
@@ -1478,6 +5919,6 @@ vec_any_out(vector float a, vector float b)
return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, a, b);
}
-#undef _ATTRS_o_ai
+#undef __ATTRS_o_ai
#endif /* __ALTIVEC_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/arm_neon.td b/contrib/llvm/tools/clang/lib/Headers/arm_neon.td
new file mode 100644
index 0000000..7ffbfb4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/arm_neon.td
@@ -0,0 +1,341 @@
+//===--- arm_neon.td - ARM NEON compiler interface ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TableGen definitions from which the ARM NEON header
+// file will be generated. See ARM document DUI0348B.
+//
+//===----------------------------------------------------------------------===//
+
+class Op;
+
+def OP_NONE : Op;
+def OP_ADD : Op;
+def OP_SUB : Op;
+def OP_MUL : Op;
+def OP_MLA : Op;
+def OP_MLS : Op;
+def OP_MUL_N : Op;
+def OP_MLA_N : Op;
+def OP_MLS_N : Op;
+def OP_EQ : Op;
+def OP_GE : Op;
+def OP_LE : Op;
+def OP_GT : Op;
+def OP_LT : Op;
+def OP_NEG : Op;
+def OP_NOT : Op;
+def OP_AND : Op;
+def OP_OR : Op;
+def OP_XOR : Op;
+def OP_ANDN : Op;
+def OP_ORN : Op;
+def OP_CAST : Op;
+def OP_HI : Op;
+def OP_LO : Op;
+def OP_CONC : Op;
+def OP_DUP : Op;
+def OP_SEL : Op;
+def OP_REV64 : Op;
+def OP_REV32 : Op;
+def OP_REV16 : Op;
+
+class Inst <string p, string t, Op o> {
+ string Prototype = p;
+ string Types = t;
+ Op Operand = o;
+ bit isShift = 0;
+}
+
+// Used to generate Builtins.def
+class SInst<string p, string t> : Inst<p, t, OP_NONE> {}
+class IInst<string p, string t> : Inst<p, t, OP_NONE> {}
+class WInst<string p, string t> : Inst<p, t, OP_NONE> {}
+
+// prototype: return (arg, arg, ...)
+// v: void
+// t: best-fit integer (int/poly args)
+// x: signed integer (int/float args)
+// u: unsigned integer (int/float args)
+// f: float (int args)
+// d: default
+// w: double width elements, same num elts
+// n: double width elements, half num elts
+// h: half width elements, double num elts
+// e: half width elements, double num elts, unsigned
+// i: constant int
+// l: constant uint64
+// s: scalar of element type
+// a: scalar of element type (splat to vector type)
+// k: default elt width, double num elts
+// #: array of default vectors
+// p: pointer type
+// c: const pointer type
+
+// sizes:
+// c: char
+// s: short
+// i: int
+// l: long
+// f: float
+// h: half-float
+
+// size modifiers:
+// U: unsigned
+// Q: 128b
+// P: polynomial
+
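+// Worked example (illustrative, not generator input): VADDL below is
+// SInst<"wdd", "csiUcUsUi">; with type code 's' (short, non-Q), "wdd" should
+// expand to a double-width result from two default vectors, i.e.
+// int32x4_t vaddl_s16(int16x4_t a, int16x4_t b).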
+////////////////////////////////////////////////////////////////////////////////
+// E.3.1 Addition
+def VADD : Inst<"ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_ADD>;
+def VADDL : SInst<"wdd", "csiUcUsUi">;
+def VADDW : SInst<"wwd", "csiUcUsUi">;
+def VHADD : SInst<"ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VRHADD : SInst<"ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VQADD : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VADDHN : IInst<"dww", "csiUcUsUi">;
+def VRADDHN : IInst<"dww", "csiUcUsUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.2 Multiplication
+def VMUL : Inst<"ddd", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_MUL>;
+def VMLA : Inst<"dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLA>;
+def VMLAL : SInst<"wwdd", "csiUcUsUi">;
+def VMLS : Inst<"dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLS>;
+def VMLSL : SInst<"wwdd", "csiUcUsUi">;
+def VQDMULH : SInst<"ddd", "siQsQi">;
+def VQRDMULH : SInst<"ddd", "siQsQi">;
+def VQDMLAL : SInst<"wwdd", "si">;
+def VQDMLSL : SInst<"wwdd", "si">;
+def VMULL : SInst<"wdd", "csiUcUsUiPc">;
+def VQDMULL : SInst<"wdd", "si">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.3 Subtraction
+def VSUB : Inst<"ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_SUB>;
+def VSUBL : SInst<"wdd", "csiUcUsUi">;
+def VSUBW : SInst<"wwd", "csiUcUsUi">;
+def VQSUB : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VHSUB : SInst<"ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VSUBHN : IInst<"dww", "csiUcUsUi">;
+def VRSUBHN : IInst<"dww", "csiUcUsUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.4 Comparison
+def VCEQ : Inst<"udd", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_EQ>;
+def VCGE : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GE>;
+def VCLE : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LE>;
+def VCGT : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GT>;
+def VCLT : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LT>;
+def VCAGE : IInst<"udd", "fQf">;
+def VCALE : IInst<"udd", "fQf">;
+def VCAGT : IInst<"udd", "fQf">;
+def VCALT : IInst<"udd", "fQf">;
+def VTST : WInst<"udd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.5 Absolute Difference
+def VABD : SInst<"ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
+def VABDL : SInst<"wdd", "csiUcUsUi">;
+def VABA : SInst<"dddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VABAL : SInst<"wwdd", "csiUcUsUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.6 Max/Min
+def VMAX : SInst<"ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
+def VMIN : SInst<"ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.7 Pairwise Addition
+def VPADD : IInst<"ddd", "csiUcUsUif">;
+def VPADDL : SInst<"nd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VPADAL : SInst<"nnd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.8-9 Folding Max/Min
+def VPMAX : SInst<"ddd", "csiUcUsUif">;
+def VPMIN : SInst<"ddd", "csiUcUsUif">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.10 Reciprocal/Sqrt
+def VRECPS : IInst<"ddd", "fQf">;
+def VRSQRTS : IInst<"ddd", "fQf">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.11 Shifts by signed variable
+def VSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VRSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQRSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.12 Shifts by constant
+let isShift = 1 in {
+def VSHR_N : SInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VSHL_N : IInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VRSHR_N : SInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VSRA_N : SInst<"dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VRSRA_N : SInst<"dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQSHL_N : SInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQSHLU_N : SInst<"udi", "csilQcQsQiQl">;
+def VSHRN_N : IInst<"hki", "silUsUiUl">;
+def VQSHRUN_N : SInst<"eki", "sil">;
+def VQRSHRUN_N : SInst<"eki", "sil">;
+def VQSHRN_N : SInst<"hki", "silUsUiUl">;
+def VRSHRN_N : IInst<"hki", "silUsUiUl">;
+def VQRSHRN_N : SInst<"hki", "silUsUiUl">;
+def VSHLL_N : SInst<"wdi", "csiUcUsUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.13 Shifts with insert
+def VSRI_N : WInst<"dddi", "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
+def VSLI_N : WInst<"dddi", "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.14 Loads and stores of a single vector
+def VLD1 : WInst<"dc", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD1_LANE : WInst<"dci", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD1_DUP : WInst<"dc", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST1 : WInst<"vpd", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST1_LANE : WInst<"vpdi", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.15 Loads and stores of an N-element structure
+def VLD2 : WInst<"2c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD3 : WInst<"3c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD4 : WInst<"4c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD2_DUP : WInst<"2c", "UcUsUiUlcsilhfPcPs">;
+def VLD3_DUP : WInst<"3c", "UcUsUiUlcsilhfPcPs">;
+def VLD4_DUP : WInst<"4c", "UcUsUiUlcsilhfPcPs">;
+def VLD2_LANE : WInst<"2ci", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VLD3_LANE : WInst<"3ci", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VLD4_LANE : WInst<"4ci", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VST2 : WInst<"vp2", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST3 : WInst<"vp3", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST4 : WInst<"vp4", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST2_LANE : WInst<"vp2i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VST3_LANE : WInst<"vp3i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VST4_LANE : WInst<"vp4i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.16 Extract lanes from a vector
+def VGET_LANE : IInst<"sdi", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.17 Set lanes within a vector
+def VSET_LANE : IInst<"dsdi", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.18 Initialize a vector from bit pattern
+def VCREATE: Inst<"dl", "csihfUcUsUiUlPcPsl", OP_CAST>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.19 Set all lanes to same value
+def VDUP_N : Inst<"ds", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", OP_DUP>;
+def VMOV_N : Inst<"ds", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", OP_DUP>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.20 Combining vectors
+def VCOMBINE : Inst<"kdd", "csilhfUcUsUiUlPcPs", OP_CONC>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.21 Splitting vectors
+def VGET_HIGH : Inst<"dk", "csilhfUcUsUiUlPcPs", OP_HI>;
+def VGET_LOW : Inst<"dk", "csilhfUcUsUiUlPcPs", OP_LO>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.22 Converting vectors
+def VCVT_S32 : SInst<"xd", "fQf">;
+def VCVT_U32 : SInst<"ud", "fQf">;
+def VCVT_F16 : SInst<"hk", "f">;
+def VCVT_N_S32 : SInst<"xdi", "fQf">;
+def VCVT_N_U32 : SInst<"udi", "fQf">;
+def VCVT_F32 : SInst<"fd", "iUiQiQUi">;
+def VCVT_F32_F16 : SInst<"kh", "f">;
+def VCVT_N_F32 : SInst<"fdi", "iUiQiQUi">;
+def VMOVN : IInst<"hk", "silUsUiUl">;
+def VMOVL : SInst<"wd", "csiUcUsUi">;
+def VQMOVN : SInst<"hk", "silUsUiUl">;
+def VQMOVUN : SInst<"ek", "sil">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.23-24 Table lookup, Extended table lookup
+def VTBL1 : WInst<"ddt", "UccPc">;
+def VTBL2 : WInst<"d2t", "UccPc">;
+def VTBL3 : WInst<"d3t", "UccPc">;
+def VTBL4 : WInst<"d4t", "UccPc">;
+def VTBX1 : WInst<"dddt", "UccPc">;
+def VTBX2 : WInst<"dd2t", "UccPc">;
+def VTBX3 : WInst<"dd3t", "UccPc">;
+def VTBX4 : WInst<"dd4t", "UccPc">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.25 Operations with a scalar value
+def VMLA_LANE : IInst<"ddddi", "siUsUifQsQiQUsQUiQf">;
+def VMLAL_LANE : SInst<"wwddi", "siUsUi">;
+def VQDMLAL_LANE : SInst<"wwddi", "si">;
+def VMLS_LANE : IInst<"ddddi", "siUsUifQsQiQUsQUiQf">;
+def VMLSL_LANE : SInst<"wwddi", "siUsUi">;
+def VQDMLSL_LANE : SInst<"wwddi", "si">;
+def VMUL_N : Inst<"dds", "sifUsUiQsQiQfQUsQUi", OP_MUL_N>;
+def VMULL_N : SInst<"wda", "siUsUi">;
+def VMULL_LANE : SInst<"wddi", "siUsUi">;
+def VQDMULL_N : SInst<"wda", "si">;
+def VQDMULL_LANE : SInst<"wddi", "si">;
+def VQDMULH_N : SInst<"dda", "siQsQi">;
+def VQDMULH_LANE : SInst<"dddi", "siQsQi">;
+def VQRDMULH_N : SInst<"dda", "siQsQi">;
+def VQRDMULH_LANE : SInst<"dddi", "siQsQi">;
+def VMLA_N : Inst<"ddda", "siUsUifQsQiQUsQUiQf", OP_MLA_N>;
+def VMLAL_N : SInst<"wwda", "siUsUi">;
+def VQDMLAL_N : SInst<"wwda", "si">;
+def VMLS_N : Inst<"ddds", "siUsUifQsQiQUsQUiQf", OP_MLS_N>;
+def VMLSL_N : SInst<"wwda", "siUsUi">;
+def VQDMLSL_N : SInst<"wwda", "si">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.26 Vector Extract
+def VEXT : WInst<"dddi", "cUcPcsUsPsiUilUlQcQUcQPcQsQUsQPsQiQUiQlQUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.27 Reverse vector elements (swap endianness)
+def VREV64 : Inst<"dd", "csiUcUsUiPcPsfQcQsQiQUcQUsQUiQPcQPsQf", OP_REV64>;
+def VREV32 : Inst<"dd", "csUcUsPcQcQsQUcQUsQPc", OP_REV32>;
+def VREV16 : Inst<"dd", "cUcPcQcQUcQPc", OP_REV16>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.28 Other single operand arithmetic
+def VABS : SInst<"dd", "csifQcQsQiQf">;
+def VQABS : SInst<"dd", "csiQcQsQi">;
+def VNEG : Inst<"dd", "csifQcQsQiQf", OP_NEG>;
+def VQNEG : SInst<"dd", "csiQcQsQi">;
+def VCLS : SInst<"dd", "csiQcQsQi">;
+def VCLZ : IInst<"dd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VCNT : WInst<"dd", "UccPcQUcQcQPc">;
+def VRECPE : SInst<"dd", "fUiQfQUi">;
+def VRSQRTE : SInst<"dd", "fUiQfQUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.29 Logical operations
+def VMVN : Inst<"dd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc", OP_NOT>;
+def VAND : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_AND>;
+def VORR : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_OR>;
+def VEOR : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_XOR>;
+def VBIC : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ANDN>;
+def VORN : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ORN>;
+def VBSL : Inst<"dudd", "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs", OP_SEL>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.30 Transposition operations
+def VTRN: WInst<"2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
+def VZIP: WInst<"2dd", "csUcUsfPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
+def VUZP: WInst<"2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.31 Vector reinterpret cast operations
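These WInst/SInst/IInst records drive clang's NeonEmitter TableGen backend, which generates the <arm_neon.h> intrinsics. As a rough sketch of what the E.3.16/E.3.18 records above surface as on the user side (the intrinsic names follow ARM's standard NEON naming and are assumptions, since the generated header is not shown in this hunk):

    #include <arm_neon.h>   // generated from these records; ARM target with NEON assumed

    int32_t low_lane() {
      // vcreate_s32 builds a 64-bit vector from a bit pattern (E.3.18);
      // vget_lane_s32 extracts one lane (E.3.16, lane index must be constant).
      int32x2_t v = vcreate_s32(0x0000000300000004ULL);
      return vget_lane_s32(v, 0);   // 4 on a little-endian target: lane 0 is the low word
    }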
diff --git a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
index 8afbe76..f297f36 100644
--- a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
@@ -1222,9 +1222,10 @@ _mm_movemask_epi8(__m128i a)
4, 5, 6, 7))
#define _mm_shufflehi_epi16(a, imm) \
((__m128i)__builtin_shufflevector((__v8hi)(a), (__v8hi) {0}, 0, 1, 2, 3, \
- 4 + ((imm) & 0x3), 4 + ((imm) & 0xc) >> 2, \
- 4 + ((imm) & 0x30) >> 4, \
- 4 + ((imm) & 0xc0) >> 6))
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6)))
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_epi8(__m128i a, __m128i b)
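The _mm_shufflehi_epi16 hunk above fixes an operator-precedence bug: in C, binary + binds tighter than >>, so 4 + ((imm) & 0xc) >> 2 parses as (4 + ((imm) & 0xc)) >> 2. A minimal standalone sketch of the difference (the helper functions are illustrative, not part of the header):

    // '+' binds tighter than '>>', so the old form shifted the sum.
    int old_index(int imm) { return 4 + ((imm) & 0x0c) >> 2; }   // == (4 + (imm & 0x0c)) >> 2
    int new_index(int imm) { return 4 + (((imm) & 0x0c) >> 2); }

    // For the identity shuffle imm == 0xE4: imm & 0x0c == 4, so
    //   old_index(0xE4) == (4 + 4) >> 2 == 2   (wrong source lane)
    //   new_index(0xE4) == 4 + (4 >> 2) == 5   (the intended lane)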
diff --git a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
index e271f99..4b0d9e7 100644
--- a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
@@ -183,13 +183,13 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
#define _mm_extract_ps(X, N) (__extension__ \
({ union { int i; float f; } __t; \
- __v4sf __a = (__v4sf)X; \
+ __v4sf __a = (__v4sf)(X); \
__t.f = __a[N]; \
__t.i;}))
/* Miscellaneous insert and extract macros. */
/* Extract a single-precision float from X at index N into D. */
-#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)X; \
+#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
(D) = __a[N]; }))
/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
@@ -201,25 +201,25 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
_MM_MK_INSERTPS_NDX((N), 0, 0x0e))
/* Insert int into packed integer array at index. */
-#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)X; \
+#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
__a[N] = I; \
__a;}))
-#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)X; \
+#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
__a[N] = I; \
__a;}))
#ifdef __x86_64__
-#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)X; \
+#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
__a[N] = I; \
__a;}))
#endif /* __x86_64__ */
/* Extract int from packed integer array at index. */
-#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)X; \
+#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
__a[N];}))
-#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)X; \
+#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
__a[N];}))
#ifdef __x86_64__
-#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)X; \
+#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
__a[N];}))
#endif /* __x86_64 */
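Every smmintrin.h change above is the same fix: parenthesize the macro parameter before casting it, because a C-style cast binds tighter than most binary operators. A reduced sketch using GNU statement expressions (the macro names here are illustrative, not the real intrinsics):

    typedef int __v4si __attribute__((__vector_size__(16)));

    #define EXTRACT_BAD(X, N)  (__extension__ ({ __v4si __a = (__v4si)X;   __a[N]; }))
    #define EXTRACT_GOOD(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); __a[N]; }))

    // With a compound argument X == a | b:
    //   EXTRACT_BAD(a | b, 0)  expands to __a = (__v4si)a | b;   // casts only 'a'
    //   EXTRACT_GOOD(a | b, 0) expands to __a = (__v4si)(a | b); // casts the whole expression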
diff --git a/contrib/llvm/tools/clang/lib/Headers/stddef.h b/contrib/llvm/tools/clang/lib/Headers/stddef.h
index 6868ad3..b1d0d52 100644
--- a/contrib/llvm/tools/clang/lib/Headers/stddef.h
+++ b/contrib/llvm/tools/clang/lib/Headers/stddef.h
@@ -45,6 +45,13 @@ typedef __typeof__(*L"") wchar_t;
#define NULL ((void*)0)
#endif
+// Some C libraries expect to see a wint_t here. Others (notably MinGW) will use
+// __WINT_TYPE__ directly; accommodate both by requiring __need_wint_t.
+#if defined(__need_wint_t) && !defined(_WINT_T)
+#define _WINT_T
+typedef __WINT_TYPE__ wint_t;
+#endif
+
#define offsetof(t, d) __builtin_offsetof(t, d)
#endif /* __STDDEF_H */
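The wint_t block follows the glibc/GCC __need_* protocol: a C library header that wants only this one type defines the request macro before including stddef.h, and the _WINT_T guard keeps repeat definitions out. A hedged sketch of the expected consumer side (the variable is illustrative only):

    #define __need_wint_t
    #include <stddef.h>            // now provides wint_t, guarded by _WINT_T

    wint_t sentinel = (wint_t)-1;  // e.g. a WEOF-style end-of-stream value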
diff --git a/contrib/llvm/tools/clang/lib/Headers/stdint.h b/contrib/llvm/tools/clang/lib/Headers/stdint.h
index 1785f31..9498ed5 100644
--- a/contrib/llvm/tools/clang/lib/Headers/stdint.h
+++ b/contrib/llvm/tools/clang/lib/Headers/stdint.h
@@ -233,8 +233,8 @@ typedef __uintn_t(__INTPTR_WIDTH__) uintptr_t;
/* C99 7.18.1.5 Greatest-width integer types.
*/
-typedef __intn_t(__INTMAX_WIDTH__) intmax_t;
-typedef __uintn_t(__INTMAX_WIDTH__) uintmax_t;
+typedef __INTMAX_TYPE__ intmax_t;
+typedef __UINTMAX_TYPE__ uintmax_t;
/* C99 7.18.4 Macros for minimum-width integer constants.
*
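Using the compiler-predefined __INTMAX_TYPE__/__UINTMAX_TYPE__ avoids reconstructing the widest type from its bit width. On a typical LP64 target the expansions below are what clang emits (check with clang -dM -E; the concrete types are an assumption about the target, not part of this diff):

    // Assumed predefines on a common LP64 target:
    //   #define __INTMAX_TYPE__  long long int
    //   #define __UINTMAX_TYPE__ long long unsigned int
    // so the new typedefs resolve to:
    typedef long long int          intmax_t;
    typedef unsigned long long int uintmax_t;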
diff --git a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
index 3e82e28..75e06b5 100644
--- a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
+++ b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
@@ -607,10 +607,10 @@ _mm_storer_ps(float *p, __m128 a)
#define _MM_HINT_T2 3
#define _MM_HINT_NTA 0
-/* FIXME: We have to #define this because "sel" must be a constant integer, and
+/* FIXME: We have to #define this because "sel" must be a constant integer, and
Sema doesn't do any form of constant propagation yet. */
-#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)a, 0, sel))
+#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, sel))
static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_pi(__m64 *p, __m64 a)
@@ -723,7 +723,7 @@ _mm_setcsr(unsigned int i)
}
#define _mm_shuffle_ps(a, b, mask) \
- (__builtin_shufflevector((__v4sf)a, (__v4sf)b, \
+ (__builtin_shufflevector((__v4sf)(a), (__v4sf)(b), \
(mask) & 0x3, ((mask) & 0xc) >> 2, \
(((mask) & 0x30) >> 4) + 4, \
(((mask) & 0xc0) >> 6) + 4))
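Both xmmintrin.h hunks are the same parenthesization fix. For _mm_prefetch the cast scope matters even for pointer arithmetic, since (void *)a with a == p + 16 casts p before the addition. A small sketch (the helper function is illustrative):

    // With float *p and argument p + 16:
    //   old: (void *)p + 16    // casts first; '+ 16' is then byte arithmetic
    //                          // on void* (a GNU extension): 16 bytes ahead
    //   new: (void *)(p + 16)  // float arithmetic first: 64 bytes ahead
    void warm(float *p) {
      __builtin_prefetch((void *)(p + 16), 0, 3);   // read, high temporal locality
    }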
diff --git a/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp b/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp
index 6403319..dedcc0e 100644
--- a/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp
@@ -55,7 +55,7 @@ void CGBuilder::VisitCallExpr(CallExpr *CE) {
}
}
-CallGraph::CallGraph() : Root(0) {
+CallGraph::CallGraph(Program &P) : Prog(P), Root(0) {
ExternalCallingNode = getOrInsertFunction(Entity());
}
diff --git a/contrib/llvm/tools/clang/lib/Index/Entity.cpp b/contrib/llvm/tools/clang/lib/Index/Entity.cpp
index cd9d277..7a24719 100644
--- a/contrib/llvm/tools/clang/lib/Index/Entity.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/Entity.cpp
@@ -42,14 +42,48 @@ public:
EntityGetter(Program &prog, ProgramImpl &progImpl)
: Prog(prog), ProgImpl(progImpl) { }
+ // Get or create the uniqued Entity for the given key.
+ Entity getEntity(Entity Parent, DeclarationName Name,
+ unsigned IdNS, bool isObjCInstanceMethod);
+
+ // Get an Entity associated with the name in the global namespace.
+ Entity getGlobalEntity(llvm::StringRef Name);
+
Entity VisitNamedDecl(NamedDecl *D);
Entity VisitVarDecl(VarDecl *D);
+ Entity VisitFieldDecl(FieldDecl *D);
Entity VisitFunctionDecl(FunctionDecl *D);
+ Entity VisitTypeDecl(TypeDecl *D);
};
}
}
+Entity EntityGetter::getEntity(Entity Parent, DeclarationName Name,
+ unsigned IdNS, bool isObjCInstanceMethod) {
+ llvm::FoldingSetNodeID ID;
+ EntityImpl::Profile(ID, Parent, Name, IdNS, isObjCInstanceMethod);
+
+ ProgramImpl::EntitySetTy &Entities = ProgImpl.getEntities();
+ void *InsertPos = 0;
+ if (EntityImpl *Ent = Entities.FindNodeOrInsertPos(ID, InsertPos))
+ return Entity(Ent);
+
+ void *Buf = ProgImpl.Allocate(sizeof(EntityImpl));
+ EntityImpl *New =
+ new (Buf) EntityImpl(Parent, Name, IdNS, isObjCInstanceMethod);
+ Entities.InsertNode(New, InsertPos);
+
+ return Entity(New);
+}
+
+Entity EntityGetter::getGlobalEntity(llvm::StringRef Name) {
+ IdentifierInfo *II = &ProgImpl.getIdents().get(Name);
+ DeclarationName GlobName(II);
+ unsigned IdNS = Decl::IDNS_Ordinary;
+ return getEntity(Entity(), GlobName, IdNS, false);
+}
+
Entity EntityGetter::VisitNamedDecl(NamedDecl *D) {
Entity Parent;
if (!D->getDeclContext()->isTranslationUnit()) {
@@ -91,24 +125,14 @@ Entity EntityGetter::VisitNamedDecl(NamedDecl *D) {
ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D);
bool isObjCInstanceMethod = MD && MD->isInstanceMethod();
-
- llvm::FoldingSetNodeID ID;
- EntityImpl::Profile(ID, Parent, GlobName, IdNS, isObjCInstanceMethod);
-
- ProgramImpl::EntitySetTy &Entities = ProgImpl.getEntities();
- void *InsertPos = 0;
- if (EntityImpl *Ent = Entities.FindNodeOrInsertPos(ID, InsertPos))
- return Entity(Ent);
-
- void *Buf = ProgImpl.Allocate(sizeof(EntityImpl));
- EntityImpl *New =
- new (Buf) EntityImpl(Parent, GlobName, IdNS, isObjCInstanceMethod);
- Entities.InsertNode(New, InsertPos);
-
- return Entity(New);
+ return getEntity(Parent, GlobName, IdNS, isObjCInstanceMethod);
}
Entity EntityGetter::VisitVarDecl(VarDecl *D) {
+ // Local variables have no linkage; return an invalid Entity for them.
+ if (D->hasLocalStorage())
+ return Entity();
+
// If it's static it cannot be referred to by another translation unit.
if (D->getStorageClass() == VarDecl::Static)
return Entity(D);
@@ -124,6 +148,18 @@ Entity EntityGetter::VisitFunctionDecl(FunctionDecl *D) {
return VisitNamedDecl(D);
}
+Entity EntityGetter::VisitFieldDecl(FieldDecl *D) {
+ // Make FieldDecl an invalid Entity since it has no linkage.
+ return Entity();
+}
+
+Entity EntityGetter::VisitTypeDecl(TypeDecl *D) {
+ // Although in C++ a class name has external linkage, the definition of the
+ // class is usually available in the same translation unit when it is needed,
+ // so we make all of them invalid Entities.
+ return Entity();
+}
+
//===----------------------------------------------------------------------===//
// EntityImpl Implementation
//===----------------------------------------------------------------------===//
@@ -172,6 +208,12 @@ Entity EntityImpl::get(Decl *D, Program &Prog, ProgramImpl &ProgImpl) {
return EntityGetter(Prog, ProgImpl).Visit(D);
}
+/// \brief Get an Entity associated with a global name.
+Entity EntityImpl::get(llvm::StringRef Name, Program &Prog,
+ ProgramImpl &ProgImpl) {
+ return EntityGetter(Prog, ProgImpl).getGlobalEntity(Name);
+}
+
std::string EntityImpl::getPrintableName() {
return Name.getAsString();
}
@@ -217,6 +259,11 @@ Entity Entity::get(Decl *D, Program &Prog) {
return EntityImpl::get(D, Prog, ProgImpl);
}
+Entity Entity::get(llvm::StringRef Name, Program &Prog) {
+ ProgramImpl &ProgImpl = *static_cast<ProgramImpl*>(Prog.Impl);
+ return EntityImpl::get(Name, Prog, ProgImpl);
+}
+
unsigned
llvm::DenseMapInfo<Entity>::getHashValue(Entity E) {
return DenseMapInfo<void*>::getHashValue(E.getAsOpaquePtr());
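The new by-name overloads let a client resolve a bare global identifier to the uniqued Entity without having a Decl in hand. A hedged sketch of the intended call pattern (the name "main" and the helper are illustrative; Program setup is elided):

    using namespace clang::idx;

    Entity lookupMain(Program &Prog) {
      // Finds or creates the uniqued EntityImpl for the ordinary-namespace
      // global identifier "main" via the FoldingSet path factored out above.
      return Entity::get(llvm::StringRef("main"), Prog);
    }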
diff --git a/contrib/llvm/tools/clang/lib/Index/EntityImpl.h b/contrib/llvm/tools/clang/lib/Index/EntityImpl.h
index cbce934..da52ccf 100644
--- a/contrib/llvm/tools/clang/lib/Index/EntityImpl.h
+++ b/contrib/llvm/tools/clang/lib/Index/EntityImpl.h
@@ -47,6 +47,7 @@ public:
/// \brief Get an Entity associated with the given Decl.
/// \returns Null if an Entity cannot refer to this Decl.
static Entity get(Decl *D, Program &Prog, ProgramImpl &ProgImpl);
+ static Entity get(llvm::StringRef Name, Program &Prog, ProgramImpl &ProgImpl);
std::string getPrintableName();
diff --git a/contrib/llvm/tools/clang/lib/Index/Indexer.cpp b/contrib/llvm/tools/clang/lib/Index/Indexer.cpp
index 57bfc5b..7f21c4f 100644
--- a/contrib/llvm/tools/clang/lib/Index/Indexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Index/Indexer.cpp
@@ -25,14 +25,22 @@ namespace {
class EntityIndexer : public EntityHandler {
TranslationUnit *TU;
Indexer::MapTy &Map;
+ Indexer::DefMapTy &DefMap;
public:
- EntityIndexer(TranslationUnit *tu, Indexer::MapTy &map) : TU(tu), Map(map) { }
+ EntityIndexer(TranslationUnit *tu, Indexer::MapTy &map,
+ Indexer::DefMapTy &defmap)
+ : TU(tu), Map(map), DefMap(defmap) { }
virtual void Handle(Entity Ent) {
if (Ent.isInternalToTU())
return;
Map[Ent].insert(TU);
+
+ Decl *D = Ent.getDecl(TU->getASTContext());
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isThisDeclarationADefinition())
+ DefMap[Ent] = std::make_pair(FD, TU);
}
};
@@ -62,7 +70,7 @@ void Indexer::IndexAST(TranslationUnit *TU) {
assert(TU && "Passed null TranslationUnit");
ASTContext &Ctx = TU->getASTContext();
CtxTUMap[&Ctx] = TU;
- EntityIndexer Idx(TU, Map);
+ EntityIndexer Idx(TU, Map, DefMap);
Prog.FindEntities(Ctx, Idx);
SelectorIndexer SelIdx(Prog, TU, SelMap);
@@ -102,3 +110,12 @@ void Indexer::GetTranslationUnitsFor(GlobalSelector Sel,
for (TUSetTy::iterator I = Set.begin(), E = Set.end(); I != E; ++I)
Handler.Handle(*I);
}
+
+std::pair<FunctionDecl *, TranslationUnit *>
+Indexer::getDefinitionFor(Entity Ent) {
+ DefMapTy::iterator I = DefMap.find(Ent);
+ if (I == DefMap.end())
+ return std::make_pair((FunctionDecl *)0, (TranslationUnit *)0);
+ else
+ return I->second;
+}
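A consumer-side sketch of the new definition map: getDefinitionFor returns a null pair when no indexed translation unit defines the function (names other than the API itself are illustrative):

    using namespace clang;
    using namespace clang::idx;

    void reportDefinition(Indexer &Idxr, Entity Ent) {
      std::pair<FunctionDecl *, TranslationUnit *> D = Idxr.getDefinitionFor(Ent);
      if (D.first)   // null when no indexed TU contains a definition
        llvm::errs() << Ent.getPrintableName() << " has a known definition\n";
    }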
diff --git a/contrib/llvm/tools/clang/lib/Index/Makefile b/contrib/llvm/tools/clang/lib/Index/Makefile
index 4d86713..e87e638 100644
--- a/contrib/llvm/tools/clang/lib/Index/Makefile
+++ b/contrib/llvm/tools/clang/lib/Index/Makefile
@@ -11,17 +11,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-include $(LEVEL)/Makefile.config
-
+CLANG_LEVEL := ../..
LIBRARYNAME := clangIndex
BUILD_ARCHIVE = 1
-ifeq ($(ARCH),PowerPC)
-CXX.Flags += -maltivec
-endif
-
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
index cd153e1..91b14f6 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
@@ -752,19 +752,21 @@ void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide) {
char C = getAndAdvanceChar(CurPtr, Result);
while (C != '"') {
- // Skip escaped characters.
- if (C == '\\') {
- // Skip the escaped character.
+ // Skip escaped characters. Escaped newlines will already be processed by
+ // getAndAdvanceChar.
+ if (C == '\\')
C = getAndAdvanceChar(CurPtr, Result);
- } else if (C == '\n' || C == '\r' || // Newline.
- (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
+
+ if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
if (!isLexingRawMode() && !Features.AsmPreprocessor)
Diag(BufferPtr, diag::err_unterminated_string);
FormTokenWithChars(Result, CurPtr-1, tok::unknown);
return;
- } else if (C == 0) {
- NulCharacter = CurPtr-1;
}
+
+ if (C == 0)
+ NulCharacter = CurPtr-1;
C = getAndAdvanceChar(CurPtr, Result);
}
@@ -818,41 +820,33 @@ void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
void Lexer::LexCharConstant(Token &Result, const char *CurPtr) {
const char *NulCharacter = 0; // Does this character contain the \0 character?
- // Handle the common case of 'x' and '\y' efficiently.
char C = getAndAdvanceChar(CurPtr, Result);
if (C == '\'') {
if (!isLexingRawMode() && !Features.AsmPreprocessor)
Diag(BufferPtr, diag::err_empty_character);
FormTokenWithChars(Result, CurPtr, tok::unknown);
return;
- } else if (C == '\\') {
- // Skip the escaped character.
- // FIXME: UCN's.
- C = getAndAdvanceChar(CurPtr, Result);
}
- if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') {
- ++CurPtr;
- } else {
- // Fall back on generic code for embedded nulls, newlines, wide chars.
- do {
- // Skip escaped characters.
- if (C == '\\') {
- // Skip the escaped character.
- C = getAndAdvanceChar(CurPtr, Result);
- } else if (C == '\n' || C == '\r' || // Newline.
- (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
- if (!isLexingRawMode() && !Features.AsmPreprocessor)
- Diag(BufferPtr, diag::err_unterminated_char);
- FormTokenWithChars(Result, CurPtr-1, tok::unknown);
- return;
- } else if (C == 0) {
- NulCharacter = CurPtr-1;
- }
+ while (C != '\'') {
+ // Skip escaped characters.
+ if (C == '\\') {
+ // Skip the escaped character.
+ // FIXME: UCN's
C = getAndAdvanceChar(CurPtr, Result);
- } while (C != '\'');
+ } else if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
+ if (!isLexingRawMode() && !Features.AsmPreprocessor)
+ Diag(BufferPtr, diag::err_unterminated_char);
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ return;
+ } else if (C == 0) {
+ NulCharacter = CurPtr-1;
+ }
+ C = getAndAdvanceChar(CurPtr, Result);
}
+ // If a nul character existed in the character constant, warn about it.
if (NulCharacter && !isLexingRawMode())
Diag(NulCharacter, diag::null_in_char);
diff --git a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
index b73f236..b8fd3ce 100644
--- a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
@@ -169,9 +169,8 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf,
/// we will likely rework our support for UCN's.
static void ProcessUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
char *&ResultBuf, bool &HadError,
- SourceLocation Loc, bool IsWide, Preprocessor &PP,
- bool Complain)
-{
+ SourceLocation Loc, Preprocessor &PP,
+ bool Complain) {
// FIXME: Add a warning - UCN's are only valid in C++ & C99.
// FIXME: Handle wide strings.
@@ -835,11 +834,8 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
// TODO: Input character set mapping support.
// Skip L marker for wide strings.
- bool ThisIsWide = false;
- if (ThisTokBuf[0] == 'L') {
+ if (ThisTokBuf[0] == 'L')
++ThisTokBuf;
- ThisIsWide = true;
- }
assert(ThisTokBuf[0] == '"' && "Expected quote, lexer broken?");
++ThisTokBuf;
@@ -884,14 +880,13 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
// Is this a Universal Character Name escape?
if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U') {
ProcessUCNEscape(ThisTokBuf, ThisTokEnd, ResultPtr,
- hadError, StringToks[i].getLocation(), ThisIsWide, PP,
- Complain);
+ hadError, StringToks[i].getLocation(), PP, Complain);
continue;
}
// Otherwise, this is a non-UCN escape character. Process it.
unsigned ResultChar = ProcessCharEscape(ThisTokBuf, ThisTokEnd, hadError,
StringToks[i].getLocation(),
- ThisIsWide, PP, Complain);
+ AnyWide, PP, Complain);
// Note: our internal rep of wide char tokens is always little-endian.
*ResultPtr++ = ResultChar & 0xFF;
@@ -905,6 +900,8 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
if (Pascal) {
ResultBuf[0] = ResultPtr-&ResultBuf[0]-1;
+ if (AnyWide)
+ ResultBuf[0] /= wchar_tByteWidth;
// Verify that pascal strings aren't too large.
if (GetStringLength() > 256 && Complain) {
diff --git a/contrib/llvm/tools/clang/lib/Lex/Makefile b/contrib/llvm/tools/clang/lib/Lex/Makefile
index bd3c7a8..938b8d5 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Makefile
+++ b/contrib/llvm/tools/clang/lib/Lex/Makefile
@@ -11,8 +11,8 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-include $(LEVEL)/Makefile.config
+CLANG_LEVEL := ../..
+include $(CLANG_LEVEL)/../../Makefile.config
LIBRARYNAME := clangLex
BUILD_ARCHIVE = 1
@@ -21,7 +21,5 @@ ifeq ($(ARCH),PowerPC)
CXX.Flags += -maltivec
endif
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp b/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp
index 6aeb6fa..3310659 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp
@@ -45,6 +45,9 @@ void Preprocessor::Backtrack() {
}
void Preprocessor::CachingLex(Token &Result) {
+ if (!InCachingLexMode())
+ return;
+
if (CachedLexPos < CachedTokens.size()) {
Result = CachedTokens[CachedLexPos++];
return;
@@ -60,13 +63,10 @@ void Preprocessor::CachingLex(Token &Result) {
return;
}
- // We should cache the lexed token.
-
+ // Cache the lexed token.
EnterCachingLexMode();
- if (Result.isNot(tok::eof)) {
- CachedTokens.push_back(Result);
- ++CachedLexPos;
- }
+ CachedTokens.push_back(Result);
+ ++CachedLexPos;
}
void Preprocessor::EnterCachingLexMode() {
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
index 71bb4fc..ebf606e 100644
--- a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
@@ -17,6 +17,7 @@
#include "clang/Lex/MacroInfo.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/LexDiagnostic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
@@ -510,6 +511,7 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
//.Case("cxx_nullptr", false)
//.Case("cxx_rvalue_references", false)
//.Case("cxx_variadic_templates", false)
+ .Case("tls", PP.getTargetInfo().isTLSSupported())
.Default(false);
}
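With the new "tls" entry, code can probe thread-local-storage support through clang's __has_feature extension. A minimal usage sketch (the #ifndef shim is the usual portability idiom, since other compilers do not define __has_feature):

    #ifndef __has_feature
    #define __has_feature(x) 0     // keep non-clang compilers compiling
    #endif

    #if __has_feature(tls)
    static __thread int per_thread_counter;   // __thread: GNU TLS extension
    #else
    static int per_thread_counter;            // fallback: one shared counter
    #endif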
diff --git a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
index 92332a0..7bf4094 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
@@ -27,41 +27,47 @@ PragmaHandler::~PragmaHandler() {
}
//===----------------------------------------------------------------------===//
+// EmptyPragmaHandler Implementation.
+//===----------------------------------------------------------------------===//
+
+EmptyPragmaHandler::EmptyPragmaHandler() {}
+
+void EmptyPragmaHandler::HandlePragma(Preprocessor &PP, Token &FirstToken) {}
+
+//===----------------------------------------------------------------------===//
// PragmaNamespace Implementation.
//===----------------------------------------------------------------------===//
PragmaNamespace::~PragmaNamespace() {
- for (unsigned i = 0, e = Handlers.size(); i != e; ++i)
- delete Handlers[i];
+ for (llvm::StringMap<PragmaHandler*>::iterator
+ I = Handlers.begin(), E = Handlers.end(); I != E; ++I)
+ delete I->second;
}
/// FindHandler - Check to see if there is already a handler for the
/// specified name. If not, return the handler for the null identifier if it
/// exists, otherwise return null. If IgnoreNull is true (the default) then
/// the null handler isn't returned on failure to match.
-PragmaHandler *PragmaNamespace::FindHandler(const IdentifierInfo *Name,
+PragmaHandler *PragmaNamespace::FindHandler(llvm::StringRef Name,
bool IgnoreNull) const {
- PragmaHandler *NullHandler = 0;
- for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
- if (Handlers[i]->getName() == Name)
- return Handlers[i];
+ if (PragmaHandler *Handler = Handlers.lookup(Name))
+ return Handler;
+ return IgnoreNull ? 0 : Handlers.lookup(llvm::StringRef());
+}
- if (Handlers[i]->getName() == 0)
- NullHandler = Handlers[i];
- }
- return IgnoreNull ? 0 : NullHandler;
+void PragmaNamespace::AddPragma(PragmaHandler *Handler) {
+ assert(!Handlers.lookup(Handler->getName()) &&
+ "A handler with this name is already registered in this namespace");
+ llvm::StringMapEntry<PragmaHandler *> &Entry =
+ Handlers.GetOrCreateValue(Handler->getName());
+ Entry.setValue(Handler);
}
void PragmaNamespace::RemovePragmaHandler(PragmaHandler *Handler) {
- for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
- if (Handlers[i] == Handler) {
- Handlers[i] = Handlers.back();
- Handlers.pop_back();
- return;
- }
- }
- assert(0 && "Handler not registered in this namespace");
+ assert(Handlers.lookup(Handler->getName()) &&
+ "Handler not registered in this namespace");
+ Handlers.erase(Handler->getName());
}
void PragmaNamespace::HandlePragma(Preprocessor &PP, Token &Tok) {
@@ -70,7 +76,10 @@ void PragmaNamespace::HandlePragma(Preprocessor &PP, Token &Tok) {
PP.LexUnexpandedToken(Tok);
// Get the handler for this token. If there is no handler, ignore the pragma.
- PragmaHandler *Handler = FindHandler(Tok.getIdentifierInfo(), false);
+ PragmaHandler *Handler
+ = FindHandler(Tok.getIdentifierInfo() ? Tok.getIdentifierInfo()->getName()
+ : llvm::StringRef(),
+ /*IgnoreNull=*/false);
if (Handler == 0) {
PP.Diag(Tok, diag::warn_pragma_ignored);
return;
@@ -411,31 +420,90 @@ void Preprocessor::HandlePragmaComment(Token &Tok) {
Callbacks->PragmaComment(CommentLoc, II, ArgumentString);
}
+/// HandlePragmaMessage - Handle the Microsoft #pragma message extension. The
+/// syntax is:
+/// #pragma message(messagestring)
+/// messagestring is a string, which is fully macro expanded, and permits string
+/// concatenation, embedded escape characters etc. See MSDN for more details.
+void Preprocessor::HandlePragmaMessage(Token &Tok) {
+ SourceLocation MessageLoc = Tok.getLocation();
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(MessageLoc, diag::err_pragma_message_malformed);
+ return;
+ }
+
+ // Read the string.
+ Lex(Tok);
+
+ // We need at least one string.
+ if (Tok.isNot(tok::string_literal)) {
+ Diag(Tok.getLocation(), diag::err_pragma_message_malformed);
+ return;
+ }
+
+ // String concatenation allows multiple strings, which can even come from
+ // macro expansion.
+ // "foo " "bar" "Baz"
+ llvm::SmallVector<Token, 4> StrToks;
+ while (Tok.is(tok::string_literal)) {
+ StrToks.push_back(Tok);
+ Lex(Tok);
+ }
+
+ // Concatenate and parse the strings.
+ StringLiteralParser Literal(&StrToks[0], StrToks.size(), *this);
+ assert(!Literal.AnyWide && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ return;
+ if (Literal.Pascal) {
+ Diag(StrToks[0].getLocation(), diag::err_pragma_message_malformed);
+ return;
+ }
+
+ llvm::StringRef MessageString(Literal.GetString(), Literal.GetStringLength());
+
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok.getLocation(), diag::err_pragma_message_malformed);
+ return;
+ }
+ Lex(Tok); // eat the r_paren.
+
+ if (Tok.isNot(tok::eom)) {
+ Diag(Tok.getLocation(), diag::err_pragma_message_malformed);
+ return;
+ }
+
+ // Output the message.
+ Diag(MessageLoc, diag::warn_pragma_message) << MessageString;
+ // If the pragma is lexically sound, notify any interested PPCallbacks.
+ if (Callbacks)
+ Callbacks->PragmaMessage(MessageLoc, MessageString);
+}
/// AddPragmaHandler - Add the specified pragma handler to the preprocessor.
/// If 'Namespace' is non-null, then it is a token required to exist on the
/// pragma line before the pragma string starts, e.g. "STDC" or "GCC".
-void Preprocessor::AddPragmaHandler(const char *Namespace,
+void Preprocessor::AddPragmaHandler(llvm::StringRef Namespace,
PragmaHandler *Handler) {
PragmaNamespace *InsertNS = PragmaHandlers;
// If this is specified to be in a namespace, step down into it.
- if (Namespace) {
- IdentifierInfo *NSID = getIdentifierInfo(Namespace);
-
+ if (!Namespace.empty()) {
// If there is already a pragma handler with the name of this namespace,
// we either have an error (directive with the same name as a namespace) or
// we already have the namespace to insert into.
- if (PragmaHandler *Existing = PragmaHandlers->FindHandler(NSID)) {
+ if (PragmaHandler *Existing = PragmaHandlers->FindHandler(Namespace)) {
InsertNS = Existing->getIfNamespace();
assert(InsertNS != 0 && "Cannot have a pragma namespace and pragma"
" handler with the same name!");
} else {
// Otherwise, this namespace doesn't exist yet, create and insert the
// handler for it.
- InsertNS = new PragmaNamespace(NSID);
+ InsertNS = new PragmaNamespace(Namespace);
PragmaHandlers->AddPragma(InsertNS);
}
}
@@ -450,14 +518,13 @@ void Preprocessor::AddPragmaHandler(const char *Namespace,
/// preprocessor. If \arg Namespace is non-null, then it should be the
/// namespace that \arg Handler was added to. It is an error to remove
/// a handler that has not been registered.
-void Preprocessor::RemovePragmaHandler(const char *Namespace,
+void Preprocessor::RemovePragmaHandler(llvm::StringRef Namespace,
PragmaHandler *Handler) {
PragmaNamespace *NS = PragmaHandlers;
// If this is specified to be in a namespace, step down into it.
- if (Namespace) {
- IdentifierInfo *NSID = getIdentifierInfo(Namespace);
- PragmaHandler *Existing = PragmaHandlers->FindHandler(NSID);
+ if (!Namespace.empty()) {
+ PragmaHandler *Existing = PragmaHandlers->FindHandler(Namespace);
assert(Existing && "Namespace containing handler does not exist!");
NS = Existing->getIfNamespace();
@@ -475,7 +542,7 @@ void Preprocessor::RemovePragmaHandler(const char *Namespace,
namespace {
/// PragmaOnceHandler - "#pragma once" marks the file as atomically included.
struct PragmaOnceHandler : public PragmaHandler {
- PragmaOnceHandler(const IdentifierInfo *OnceID) : PragmaHandler(OnceID) {}
+ PragmaOnceHandler() : PragmaHandler("once") {}
virtual void HandlePragma(Preprocessor &PP, Token &OnceTok) {
PP.CheckEndOfDirective("pragma once");
PP.HandlePragmaOnce(OnceTok);
@@ -485,7 +552,7 @@ struct PragmaOnceHandler : public PragmaHandler {
/// PragmaMarkHandler - "#pragma mark ..." is ignored by the compiler, and the
/// rest of the line is not lexed.
struct PragmaMarkHandler : public PragmaHandler {
- PragmaMarkHandler(const IdentifierInfo *MarkID) : PragmaHandler(MarkID) {}
+ PragmaMarkHandler() : PragmaHandler("mark") {}
virtual void HandlePragma(Preprocessor &PP, Token &MarkTok) {
PP.HandlePragmaMark();
}
@@ -493,7 +560,7 @@ struct PragmaMarkHandler : public PragmaHandler {
/// PragmaPoisonHandler - "#pragma poison x" marks x as not usable.
struct PragmaPoisonHandler : public PragmaHandler {
- PragmaPoisonHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ PragmaPoisonHandler() : PragmaHandler("poison") {}
virtual void HandlePragma(Preprocessor &PP, Token &PoisonTok) {
PP.HandlePragmaPoison(PoisonTok);
}
@@ -502,14 +569,14 @@ struct PragmaPoisonHandler : public PragmaHandler {
/// PragmaSystemHeaderHandler - "#pragma system_header" marks the current file
/// as a system header, which silences warnings in it.
struct PragmaSystemHeaderHandler : public PragmaHandler {
- PragmaSystemHeaderHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ PragmaSystemHeaderHandler() : PragmaHandler("system_header") {}
virtual void HandlePragma(Preprocessor &PP, Token &SHToken) {
PP.HandlePragmaSystemHeader(SHToken);
PP.CheckEndOfDirective("pragma");
}
};
struct PragmaDependencyHandler : public PragmaHandler {
- PragmaDependencyHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ PragmaDependencyHandler() : PragmaHandler("dependency") {}
virtual void HandlePragma(Preprocessor &PP, Token &DepToken) {
PP.HandlePragmaDependency(DepToken);
}
@@ -523,9 +590,9 @@ struct PragmaDiagnosticHandler : public PragmaHandler {
private:
const bool ClangMode;
public:
- PragmaDiagnosticHandler(const IdentifierInfo *ID,
- const bool clangMode) : PragmaHandler(ID),
- ClangMode(clangMode) {}
+ explicit PragmaDiagnosticHandler(const bool clangMode)
+ : PragmaHandler("diagnostic"), ClangMode(clangMode) {}
+
virtual void HandlePragma(Preprocessor &PP, Token &DiagToken) {
Token Tok;
PP.LexUnexpandedToken(Tok);
@@ -618,12 +685,20 @@ public:
/// PragmaCommentHandler - "#pragma comment ...".
struct PragmaCommentHandler : public PragmaHandler {
- PragmaCommentHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ PragmaCommentHandler() : PragmaHandler("comment") {}
virtual void HandlePragma(Preprocessor &PP, Token &CommentTok) {
PP.HandlePragmaComment(CommentTok);
}
};
+/// PragmaMessageHandler - "#pragma message("...")".
+struct PragmaMessageHandler : public PragmaHandler {
+ PragmaMessageHandler() : PragmaHandler("message") {}
+ virtual void HandlePragma(Preprocessor &PP, Token &CommentTok) {
+ PP.HandlePragmaMessage(CommentTok);
+ }
+};
+
// Pragma STDC implementations.
enum STDCSetting {
@@ -660,7 +735,7 @@ static STDCSetting LexOnOffSwitch(Preprocessor &PP) {
/// PragmaSTDC_FP_CONTRACTHandler - "#pragma STDC FP_CONTRACT ...".
struct PragmaSTDC_FP_CONTRACTHandler : public PragmaHandler {
- PragmaSTDC_FP_CONTRACTHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ PragmaSTDC_FP_CONTRACTHandler() : PragmaHandler("FP_CONTRACT") {}
virtual void HandlePragma(Preprocessor &PP, Token &Tok) {
// We just ignore the setting of FP_CONTRACT. Since we don't do contractions
// at all, our default is OFF and setting it to ON is an optimization hint
@@ -672,7 +747,7 @@ struct PragmaSTDC_FP_CONTRACTHandler : public PragmaHandler {
/// PragmaSTDC_FENV_ACCESSHandler - "#pragma STDC FENV_ACCESS ...".
struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
- PragmaSTDC_FENV_ACCESSHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ PragmaSTDC_FENV_ACCESSHandler() : PragmaHandler("FENV_ACCESS") {}
virtual void HandlePragma(Preprocessor &PP, Token &Tok) {
if (LexOnOffSwitch(PP) == STDC_ON)
PP.Diag(Tok, diag::warn_stdc_fenv_access_not_supported);
@@ -681,8 +756,8 @@ struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
/// PragmaSTDC_CX_LIMITED_RANGEHandler - "#pragma STDC CX_LIMITED_RANGE ...".
struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
- PragmaSTDC_CX_LIMITED_RANGEHandler(const IdentifierInfo *ID)
- : PragmaHandler(ID) {}
+ PragmaSTDC_CX_LIMITED_RANGEHandler()
+ : PragmaHandler("CX_LIMITED_RANGE") {}
virtual void HandlePragma(Preprocessor &PP, Token &Tok) {
LexOnOffSwitch(PP);
}
@@ -690,7 +765,7 @@ struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
/// PragmaSTDC_UnknownHandler - "#pragma STDC ...".
struct PragmaSTDC_UnknownHandler : public PragmaHandler {
- PragmaSTDC_UnknownHandler() : PragmaHandler(0) {}
+ PragmaSTDC_UnknownHandler() {}
virtual void HandlePragma(Preprocessor &PP, Token &UnknownTok) {
// C99 6.10.6p2, unknown forms are not allowed.
PP.Diag(UnknownTok, diag::ext_stdc_pragma_ignored);
@@ -703,38 +778,28 @@ struct PragmaSTDC_UnknownHandler : public PragmaHandler {
/// RegisterBuiltinPragmas - Install the standard preprocessor pragmas:
/// #pragma GCC poison/system_header/dependency and #pragma once.
void Preprocessor::RegisterBuiltinPragmas() {
- AddPragmaHandler(0, new PragmaOnceHandler(getIdentifierInfo("once")));
- AddPragmaHandler(0, new PragmaMarkHandler(getIdentifierInfo("mark")));
+ AddPragmaHandler(new PragmaOnceHandler());
+ AddPragmaHandler(new PragmaMarkHandler());
// #pragma GCC ...
- AddPragmaHandler("GCC", new PragmaPoisonHandler(getIdentifierInfo("poison")));
- AddPragmaHandler("GCC", new PragmaSystemHeaderHandler(
- getIdentifierInfo("system_header")));
- AddPragmaHandler("GCC", new PragmaDependencyHandler(
- getIdentifierInfo("dependency")));
- AddPragmaHandler("GCC", new PragmaDiagnosticHandler(
- getIdentifierInfo("diagnostic"),
- false));
+ AddPragmaHandler("GCC", new PragmaPoisonHandler());
+ AddPragmaHandler("GCC", new PragmaSystemHeaderHandler());
+ AddPragmaHandler("GCC", new PragmaDependencyHandler());
+ AddPragmaHandler("GCC", new PragmaDiagnosticHandler(false));
// #pragma clang ...
- AddPragmaHandler("clang", new PragmaPoisonHandler(
- getIdentifierInfo("poison")));
- AddPragmaHandler("clang", new PragmaSystemHeaderHandler(
- getIdentifierInfo("system_header")));
- AddPragmaHandler("clang", new PragmaDependencyHandler(
- getIdentifierInfo("dependency")));
- AddPragmaHandler("clang", new PragmaDiagnosticHandler(
- getIdentifierInfo("diagnostic"),
- true));
-
- AddPragmaHandler("STDC", new PragmaSTDC_FP_CONTRACTHandler(
- getIdentifierInfo("FP_CONTRACT")));
- AddPragmaHandler("STDC", new PragmaSTDC_FENV_ACCESSHandler(
- getIdentifierInfo("FENV_ACCESS")));
- AddPragmaHandler("STDC", new PragmaSTDC_CX_LIMITED_RANGEHandler(
- getIdentifierInfo("CX_LIMITED_RANGE")));
+ AddPragmaHandler("clang", new PragmaPoisonHandler());
+ AddPragmaHandler("clang", new PragmaSystemHeaderHandler());
+ AddPragmaHandler("clang", new PragmaDependencyHandler());
+ AddPragmaHandler("clang", new PragmaDiagnosticHandler(true));
+
+ AddPragmaHandler("STDC", new PragmaSTDC_FP_CONTRACTHandler());
+ AddPragmaHandler("STDC", new PragmaSTDC_FENV_ACCESSHandler());
+ AddPragmaHandler("STDC", new PragmaSTDC_CX_LIMITED_RANGEHandler());
AddPragmaHandler("STDC", new PragmaSTDC_UnknownHandler());
// MS extensions.
- if (Features.Microsoft)
- AddPragmaHandler(0, new PragmaCommentHandler(getIdentifierInfo("comment")));
+ if (Features.Microsoft) {
+ AddPragmaHandler(new PragmaCommentHandler());
+ AddPragmaHandler(new PragmaMessageHandler());
+ }
}
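Two user-visible effects of this file's changes: handlers now register under plain string names rather than IdentifierInfo pointers, and Microsoft mode gains #pragma message. A hedged sketch of both (the "demo" handler and "myproject" namespace are hypothetical; the diagnostic reuse is a placeholder):

    // Registering a custom handler against the StringRef-based API:
    struct PragmaDemoHandler : public PragmaHandler {
      PragmaDemoHandler() : PragmaHandler("demo") {}
      virtual void HandlePragma(Preprocessor &PP, Token &Tok) {
        PP.Diag(Tok, diag::warn_pragma_ignored);  // placeholder action
      }
    };

    void registerDemoPragma(Preprocessor &PP) {
      PP.AddPragmaHandler("myproject", new PragmaDemoHandler()); // #pragma myproject demo ...
    }

    // And the MS extension the new code accepts, including the string
    // concatenation HandlePragmaMessage permits:
    //   #pragma message("TODO: " "tighten this " "code path")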
diff --git a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
index ce6d9ab..51f7293 100644
--- a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
@@ -87,7 +87,7 @@ Preprocessor::Preprocessor(Diagnostic &diags, const LangOptions &opts,
(Ident__VA_ARGS__ = getIdentifierInfo("__VA_ARGS__"))->setIsPoisoned();
// Initialize the pragma handlers.
- PragmaHandlers = new PragmaNamespace(0);
+ PragmaHandlers = new PragmaNamespace(llvm::StringRef());
RegisterBuiltinPragmas();
// Initialize builtin macros like __LINE__ and friends.
@@ -113,6 +113,14 @@ Preprocessor::~Preprocessor() {
I->second->Destroy(BP);
I->first->setHasMacroDefinition(false);
}
+ for (std::vector<MacroInfo*>::iterator I = MICache.begin(),
+ E = MICache.end(); I != E; ++I) {
+ // We don't need to free the MacroInfo objects directly. These
+ // will be released when the BumpPtrAllocator 'BP' object gets
+ // destroyed. We still need to run the dtor, however, to free
+ // memory allocated by MacroInfo.
+ (*I)->Destroy(BP);
+ }
// Free any cached macro expanders.
for (unsigned i = 0, e = NumCachedTokenLexers; i != e; ++i)
diff --git a/contrib/llvm/tools/clang/lib/Makefile b/contrib/llvm/tools/clang/lib/Makefile
index 538bf43..4fca624 100755
--- a/contrib/llvm/tools/clang/lib/Makefile
+++ b/contrib/llvm/tools/clang/lib/Makefile
@@ -6,10 +6,10 @@
# License. See LICENSE.TXT for details.
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../..
+CLANG_LEVEL := ..
-PARALLEL_DIRS = Headers Runtime Basic Lex Parse AST Sema CodeGen Analysis \
+PARALLEL_DIRS = Headers Basic Lex Parse AST Sema CodeGen Analysis \
Checker Rewrite Frontend Index Driver
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/Parse/AttributeList.cpp b/contrib/llvm/tools/clang/lib/Parse/AttributeList.cpp
index 1ebff22..98d5d07 100644
--- a/contrib/llvm/tools/clang/lib/Parse/AttributeList.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/AttributeList.cpp
@@ -119,6 +119,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
.Case("cf_returns_not_retained", AT_cf_returns_not_retained)
.Case("cf_returns_retained", AT_cf_returns_retained)
.Case("reqd_work_group_size", AT_reqd_wg_size)
+ .Case("init_priority", AT_init_priority)
.Case("no_instrument_function", AT_no_instrument_function)
.Case("thiscall", AT_thiscall)
.Case("__cdecl", AT_cdecl)
diff --git a/contrib/llvm/tools/clang/lib/Parse/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Parse/CMakeLists.txt
index bec1c6e..fafcf77 100644
--- a/contrib/llvm/tools/clang/lib/Parse/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/lib/Parse/CMakeLists.txt
@@ -18,4 +18,4 @@ add_clang_library(clangParse
Parser.cpp
)
-add_dependencies(clangParse ClangDiagnosticParse)
+add_dependencies(clangParse ClangAttrList ClangDiagnosticParse)
diff --git a/contrib/llvm/tools/clang/lib/Parse/DeclSpec.cpp b/contrib/llvm/tools/clang/lib/Parse/DeclSpec.cpp
index 5dc08b3..d2cd744 100644
--- a/contrib/llvm/tools/clang/lib/Parse/DeclSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/DeclSpec.cpp
@@ -253,7 +253,8 @@ bool DeclSpec::SetTypeSpecWidth(TSW W, SourceLocation Loc,
return BadSpecifier(W, (TSW)TypeSpecWidth, PrevSpec, DiagID);
TypeSpecWidth = W;
TSWLoc = Loc;
- if (TypeAltiVecVector && ((TypeSpecWidth == TSW_long) || (TypeSpecWidth == TSW_longlong))) {
+ if (TypeAltiVecVector && !TypeAltiVecBool &&
+ ((TypeSpecWidth == TSW_long) || (TypeSpecWidth == TSW_longlong))) {
PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
DiagID = diag::warn_vector_long_decl_spec_combination;
return true;
@@ -290,13 +291,18 @@ bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
DiagID = diag::err_invalid_decl_spec_combination;
return true;
}
+ if (TypeAltiVecVector && (T == TST_bool) && !TypeAltiVecBool) {
+ TypeAltiVecBool = true;
+ TSTLoc = Loc;
+ return false;
+ }
TypeSpecType = T;
TypeRep = Rep;
TSTLoc = Loc;
TypeSpecOwned = Owned;
- if (TypeAltiVecVector && (TypeSpecType == TST_double)) {
+ if (TypeAltiVecVector && !TypeAltiVecBool && (TypeSpecType == TST_double)) {
PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
- DiagID = diag::err_invalid_vector_double_decl_spec_combination;
+ DiagID = diag::err_invalid_vector_decl_spec;
return true;
}
return false;
@@ -316,14 +322,12 @@ bool DeclSpec::SetTypeAltiVecVector(bool isAltiVecVector, SourceLocation Loc,
bool DeclSpec::SetTypeAltiVecPixel(bool isAltiVecPixel, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID) {
- if (!TypeAltiVecVector || (TypeSpecType != TST_unspecified)) {
+ if (!TypeAltiVecVector || TypeAltiVecPixel ||
+ (TypeSpecType != TST_unspecified)) {
PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
DiagID = diag::err_invalid_pixel_decl_spec_combination;
return true;
}
- TypeSpecType = TST_int;
- TypeSpecSign = TSS_unsigned;
- TypeSpecWidth = TSW_short;
TypeAltiVecPixel = isAltiVecPixel;
TSTLoc = Loc;
return false;
@@ -438,6 +442,42 @@ void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) {
// Check the type specifier components first.
SourceManager &SrcMgr = PP.getSourceManager();
+ // Validate and finalize AltiVec vector declspec.
+ if (TypeAltiVecVector) {
+ if (TypeAltiVecBool) {
+ // Sign specifiers are not allowed with vector bool. (PIM 2.1)
+ if (TypeSpecSign != TSS_unspecified) {
+ Diag(D, TSSLoc, SrcMgr, diag::err_invalid_vector_bool_decl_spec)
+ << getSpecifierName((TSS)TypeSpecSign);
+ }
+
+ // Only char/int are valid with vector bool. (PIM 2.1)
+ if (((TypeSpecType != TST_unspecified) && (TypeSpecType != TST_char) &&
+ (TypeSpecType != TST_int)) || TypeAltiVecPixel) {
+ Diag(D, TSTLoc, SrcMgr, diag::err_invalid_vector_bool_decl_spec)
+ << (TypeAltiVecPixel ? "__pixel" :
+ getSpecifierName((TST)TypeSpecType));
+ }
+
+ // Only 'short' is valid with vector bool. (PIM 2.1)
+ if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short))
+ Diag(D, TSWLoc, SrcMgr, diag::err_invalid_vector_bool_decl_spec)
+ << getSpecifierName((TSW)TypeSpecWidth);
+
+ // Elements of vector bool are interpreted as unsigned. (PIM 2.1)
+ if ((TypeSpecType == TST_char) || (TypeSpecType == TST_int) ||
+ (TypeSpecWidth != TSW_unspecified))
+ TypeSpecSign = TSS_unsigned;
+ }
+
+ if (TypeAltiVecPixel) {
+ // TODO: perform validation.
+ TypeSpecType = TST_int;
+ TypeSpecSign = TSS_unsigned;
+ TypeSpecWidth = TSW_short;
+ }
+ }
+
// signed/unsigned are only valid with int/char/wchar_t.
if (TypeSpecSign != TSS_unspecified) {
if (TypeSpecType == TST_unspecified)
@@ -513,7 +553,6 @@ void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) {
ClearStorageClassSpecs();
}
-
// Okay, now we can infer the real type.
// TODO: return "auto function" and other bad things based on the real type.
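What the new AltiVec checks accept and reject, per PIM 2.1; a sketch assuming an AltiVec-enabled build (e.g. -maltivec), with the rejected forms left commented:

    __vector bool char  bc;    // OK: char is one of the permitted base types
    __vector bool short bs;    // OK: 'short' is the only permitted width
    // __vector bool float bf;        // rejected: err_invalid_vector_bool_decl_spec
    // __vector signed bool int bi;   // rejected: sign specifiers are not allowed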
diff --git a/contrib/llvm/tools/clang/lib/Parse/Makefile b/contrib/llvm/tools/clang/lib/Parse/Makefile
index 6a5540f..238e02d 100644
--- a/contrib/llvm/tools/clang/lib/Parse/Makefile
+++ b/contrib/llvm/tools/clang/lib/Parse/Makefile
@@ -11,11 +11,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangParse
BUILD_ARCHIVE = 1
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
index 5405c0c..62a7ecd 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -35,10 +35,10 @@ Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, Declarator &D,
DeclPtrTy FnD;
if (D.getDeclSpec().isFriendSpecified())
// FIXME: Friend templates
- FnD = Actions.ActOnFriendFunctionDecl(CurScope, D, true,
+ FnD = Actions.ActOnFriendFunctionDecl(getCurScope(), D, true,
move(TemplateParams));
else // FIXME: pass template information through
- FnD = Actions.ActOnCXXMemberDeclarator(CurScope, AS, D,
+ FnD = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS, D,
move(TemplateParams), 0, 0,
/*IsDefinition*/true);
@@ -48,7 +48,7 @@ Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, Declarator &D,
getCurrentClass().MethodDefs.push_back(LexedMethod(FnD));
getCurrentClass().MethodDefs.back().TemplateScope
- = CurScope->isTemplateParamScope();
+ = getCurScope()->isTemplateParamScope();
CachedTokens &Toks = getCurrentClass().MethodDefs.back().Toks;
tok::TokenKind kind = Tok.getKind();
@@ -95,7 +95,7 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
ParseScope TemplateScope(this, Scope::TemplateParamScope, HasTemplateScope);
if (HasTemplateScope)
- Actions.ActOnReenterTemplateScope(CurScope, Class.TagOrTemplate);
+ Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
// The current scope is still active if we're the top-level class.
// Otherwise we'll need to push and enter a new scope.
@@ -103,7 +103,7 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
HasClassScope);
if (HasClassScope)
- Actions.ActOnStartDelayedMemberDeclarations(CurScope, Class.TagOrTemplate);
+ Actions.ActOnStartDelayedMemberDeclarations(getCurScope(), Class.TagOrTemplate);
for (; !Class.MethodDecls.empty(); Class.MethodDecls.pop_front()) {
LateParsedMethodDeclaration &LM = Class.MethodDecls.front();
@@ -111,10 +111,10 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
// If this is a member template, introduce the template parameter scope.
ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
if (LM.TemplateScope)
- Actions.ActOnReenterTemplateScope(CurScope, LM.Method);
+ Actions.ActOnReenterTemplateScope(getCurScope(), LM.Method);
// Start the delayed C++ method declaration
- Actions.ActOnStartDelayedCXXMethodDeclaration(CurScope, LM.Method);
+ Actions.ActOnStartDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
// Introduce the parameters into scope and parse their default
// arguments.
@@ -122,7 +122,7 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
Scope::FunctionPrototypeScope|Scope::DeclScope);
for (unsigned I = 0, N = LM.DefaultArgs.size(); I != N; ++I) {
// Introduce the parameter into scope.
- Actions.ActOnDelayedCXXMethodParameter(CurScope, LM.DefaultArgs[I].Param);
+ Actions.ActOnDelayedCXXMethodParameter(getCurScope(), LM.DefaultArgs[I].Param);
if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) {
// Save the current token position.
@@ -151,7 +151,7 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
"ParseAssignmentExpression went over the default arg tokens!");
// There could be leftover tokens (e.g. because of an error).
// Skip through until we reach the original token position.
- while (Tok.getLocation() != origLoc)
+ while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
ConsumeAnyToken();
delete Toks;
@@ -161,14 +161,14 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
PrototypeScope.Exit();
// Finish the delayed C++ method declaration.
- Actions.ActOnFinishDelayedCXXMethodDeclaration(CurScope, LM.Method);
+ Actions.ActOnFinishDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
}
for (unsigned I = 0, N = Class.NestedClasses.size(); I != N; ++I)
ParseLexedMethodDeclarations(*Class.NestedClasses[I]);
if (HasClassScope)
- Actions.ActOnFinishDelayedMemberDeclarations(CurScope, Class.TagOrTemplate);
+ Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(), Class.TagOrTemplate);
}
/// ParseLexedMethodDefs - We finished parsing the member specification of a top
@@ -178,7 +178,7 @@ void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
ParseScope TemplateScope(this, Scope::TemplateParamScope, HasTemplateScope);
if (HasTemplateScope)
- Actions.ActOnReenterTemplateScope(CurScope, Class.TagOrTemplate);
+ Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
bool HasClassScope = !Class.TopLevelClass;
ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
@@ -190,7 +190,7 @@ void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
// If this is a member template, introduce the template parameter scope.
ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
if (LM.TemplateScope)
- Actions.ActOnReenterTemplateScope(CurScope, LM.D);
+ Actions.ActOnReenterTemplateScope(getCurScope(), LM.D);
// Save the current token position.
SourceLocation origLoc = Tok.getLocation();
@@ -209,15 +209,17 @@ void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
// Parse the method body. Function body parsing code is similar enough
// to be re-used for method bodies as well.
ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope);
- Actions.ActOnStartOfFunctionDef(CurScope, LM.D);
+ Actions.ActOnStartOfFunctionDef(getCurScope(), LM.D);
if (Tok.is(tok::kw_try)) {
ParseFunctionTryBlock(LM.D);
assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
Tok.getLocation()) &&
"ParseFunctionTryBlock went over the cached tokens!");
- assert(Tok.getLocation() == origLoc &&
- "ParseFunctionTryBlock left tokens in the token stream!");
+ // There could be leftover tokens (e.g. because of an error).
+ // Skip through until we reach the original token position.
+ while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
+ ConsumeAnyToken();
continue;
}
if (Tok.is(tok::colon)) {
@@ -232,11 +234,19 @@ void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
Actions.ActOnDefaultCtorInitializers(LM.D);
ParseFunctionStatementBody(LM.D);
- assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
- Tok.getLocation()) &&
- "We consumed more than the cached tokens!");
- assert(Tok.getLocation() == origLoc &&
- "Tokens were left in the token stream!");
+
+ if (Tok.getLocation() != origLoc) {
+ // Due to parsing error, we either went over the cached tokens or
+ // there are still cached tokens left. If it's the latter case skip the
+ // leftover tokens.
+ // Since this is an uncommon situation that should be avoided, use the
+ // expensive isBeforeInTranslationUnit call.
+ if (PP.getSourceManager().isBeforeInTranslationUnit(Tok.getLocation(),
+ origLoc))
+ while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ }
}
for (unsigned I = 0, N = Class.NestedClasses.size(); I != N; ++I)
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
index 3e7d4a1..62ef3ec 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
@@ -42,7 +42,7 @@ Action::TypeResult Parser::ParseTypeName(SourceRange *Range) {
if (DeclaratorInfo.isInvalidType())
return true;
- return Actions.ActOnTypeName(CurScope, DeclaratorInfo);
+ return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
}
/// ParseGNUAttributes - Parse a non-empty attributes list.
@@ -309,6 +309,8 @@ AttributeList* Parser::ParseMicrosoftTypeAttributes(AttributeList *CurrAttr) {
Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
SourceLocation &DeclEnd,
CXX0XAttributeList Attr) {
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
DeclPtrTy SingleDecl;
switch (Tok.getKind()) {
case tok::kw_template:
@@ -364,7 +366,7 @@ Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(unsigned Context,
// declaration-specifiers init-declarator-list[opt] ';'
if (Tok.is(tok::semi)) {
if (RequireSemi) ConsumeToken();
- DeclPtrTy TheDecl = Actions.ParsedFreeStandingDeclSpec(CurScope, AS_none,
+ DeclPtrTy TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
DS);
DS.complete(TheDecl);
return Actions.ConvertDeclToDeclGroup(TheDecl);
@@ -393,12 +395,14 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
return DeclGroupPtrTy();
}
- if (AllowFunctionDefinitions && D.isFunctionDeclarator()) {
- if (isDeclarationAfterDeclarator()) {
- // Fall though. We have to check this first, though, because
- // __attribute__ might be the start of a function definition in
- // (extended) K&R C.
- } else if (isStartOfFunctionDefinition()) {
+ // Check to see if we have a function *definition* which must have a body.
+ if (AllowFunctionDefinitions && D.isFunctionDeclarator() &&
+ // Look at the next token to make sure that this isn't a function
+ // declaration. We have to check this because __attribute__ might be the
+ // start of a function definition in GCC-extended K&R C.
+ !isDeclarationAfterDeclarator()) {
+
+ if (isStartOfFunctionDefinition(D)) {
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
Diag(Tok, diag::err_function_declared_typedef);
@@ -408,6 +412,14 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
DeclPtrTy TheDecl = ParseFunctionDefinition(D);
return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ if (isDeclarationSpecifier()) {
+ // If there is an invalid declaration specifier right after the function
+ // prototype, then we must be in a missing semicolon case where this isn't
+ // actually a body. Just fall through into the code that handles it as a
+ // prototype, and let the top-level code handle the erroneous declspec
+ // where it would otherwise expect a comma or semicolon.
} else {
Diag(Tok, diag::err_expected_fn_body);
SkipUntil(tok::semi);
@@ -459,12 +471,17 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
Context == Declarator::FileContext
? diag::err_invalid_token_after_toplevel_declarator
: diag::err_expected_semi_declaration)) {
- SkipUntil(tok::r_brace, true, true);
- if (Tok.is(tok::semi))
- ConsumeToken();
+ // Okay, there was no semicolon and one was expected. If we see a
+ // declaration specifier, just assume it was missing and continue parsing.
+ // Otherwise things are very confused and we skip to recover.
+ if (!isDeclarationSpecifier()) {
+ SkipUntil(tok::r_brace, true, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ }
}
- return Actions.FinalizeDeclaratorGroup(CurScope, DS,
+ return Actions.FinalizeDeclaratorGroup(getCurScope(), DS,
DeclsInGroup.data(),
DeclsInGroup.size());
}
@@ -516,12 +533,12 @@ Parser::DeclPtrTy Parser::ParseDeclarationAfterDeclarator(Declarator &D,
DeclPtrTy ThisDecl;
switch (TemplateInfo.Kind) {
case ParsedTemplateInfo::NonTemplate:
- ThisDecl = Actions.ActOnDeclarator(CurScope, D);
+ ThisDecl = Actions.ActOnDeclarator(getCurScope(), D);
break;
case ParsedTemplateInfo::Template:
case ParsedTemplateInfo::ExplicitSpecialization:
- ThisDecl = Actions.ActOnTemplateDeclarator(CurScope,
+ ThisDecl = Actions.ActOnTemplateDeclarator(getCurScope(),
Action::MultiTemplateParamsArg(Actions,
TemplateInfo.TemplateParams->data(),
TemplateInfo.TemplateParams->size()),
@@ -530,7 +547,7 @@ Parser::DeclPtrTy Parser::ParseDeclarationAfterDeclarator(Declarator &D,
case ParsedTemplateInfo::ExplicitInstantiation: {
Action::DeclResult ThisRes
- = Actions.ActOnExplicitInstantiation(CurScope,
+ = Actions.ActOnExplicitInstantiation(getCurScope(),
TemplateInfo.ExternLoc,
TemplateInfo.TemplateLoc,
D);
@@ -553,13 +570,20 @@ Parser::DeclPtrTy Parser::ParseDeclarationAfterDeclarator(Declarator &D,
} else {
if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) {
EnterScope(0);
- Actions.ActOnCXXEnterDeclInitializer(CurScope, ThisDecl);
+ Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl);
}
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteInitializer(getCurScope(), ThisDecl);
+ ConsumeCodeCompletionToken();
+ SkipUntil(tok::comma, true, true);
+ return ThisDecl;
+ }
+
OwningExprResult Init(ParseInitializer());
if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) {
- Actions.ActOnCXXExitDeclInitializer(CurScope, ThisDecl);
+ Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
ExitScope();
}
@@ -577,14 +601,14 @@ Parser::DeclPtrTy Parser::ParseDeclarationAfterDeclarator(Declarator &D,
if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) {
EnterScope(0);
- Actions.ActOnCXXEnterDeclInitializer(CurScope, ThisDecl);
+ Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl);
}
if (ParseExpressionList(Exprs, CommaLocs)) {
SkipUntil(tok::r_paren);
if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) {
- Actions.ActOnCXXExitDeclInitializer(CurScope, ThisDecl);
+ Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
ExitScope();
}
} else {
@@ -595,7 +619,7 @@ Parser::DeclPtrTy Parser::ParseDeclarationAfterDeclarator(Declarator &D,
"Unexpected number of commas!");
if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) {
- Actions.ActOnCXXExitDeclInitializer(CurScope, ThisDecl);
+ Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
ExitScope();
}
@@ -723,7 +747,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const char *TagName = 0;
tok::TokenKind TagKind = tok::unknown;
- switch (Actions.isTagName(*Tok.getIdentifierInfo(), CurScope)) {
+ switch (Actions.isTagName(*Tok.getIdentifierInfo(), getCurScope())) {
default: break;
case DeclSpec::TST_enum: TagName="enum" ;TagKind=tok::kw_enum ;break;
case DeclSpec::TST_union: TagName="union" ;TagKind=tok::kw_union ;break;
@@ -749,7 +773,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
// diagnostic and attempt to recover.
Action::TypeTy *T = 0;
if (Actions.DiagnoseUnknownTypeName(*Tok.getIdentifierInfo(), Loc,
- CurScope, SS, T)) {
+ getCurScope(), SS, T)) {
// The action emitted a diagnostic, so we don't have to.
if (T) {
// The action has suggested that the type T could be used. Set that as
@@ -838,7 +862,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
else if (ObjCImpDecl)
CCC = Action::CCC_ObjCImplementation;
- Actions.CodeCompleteOrdinaryName(CurScope, CCC);
+ Actions.CodeCompleteOrdinaryName(getCurScope(), CCC);
ConsumeCodeCompletionToken();
}
@@ -908,7 +932,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if ((DSContext == DSC_top_level ||
(DSContext == DSC_class && DS.isFriendSpecified())) &&
TemplateId->Name &&
- Actions.isCurrentClassName(*TemplateId->Name, CurScope, &SS)) {
+ Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS)) {
if (isConstructorDeclarator()) {
// The user meant this to be an out-of-line constructor
// definition, but template arguments are not allowed
@@ -954,7 +978,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// check whether this is a constructor declaration.
if ((DSContext == DSC_top_level ||
(DSContext == DSC_class && DS.isFriendSpecified())) &&
- Actions.isCurrentClassName(*Next.getIdentifierInfo(), CurScope,
+ Actions.isCurrentClassName(*Next.getIdentifierInfo(), getCurScope(),
&SS)) {
if (isConstructorDeclarator())
goto DoneWithDeclSpec;
@@ -970,7 +994,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
}
TypeTy *TypeRep = Actions.getTypeName(*Next.getIdentifierInfo(),
- Next.getLocation(), CurScope, &SS);
+ Next.getLocation(), getCurScope(), &SS);
// If the referenced identifier is not a type, then this declspec is
// erroneous: We already checked above that it has no type specifier, and
@@ -1054,7 +1078,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// It has to be available as a typedef too!
TypeTy *TypeRep = Actions.getTypeName(*Tok.getIdentifierInfo(),
- Tok.getLocation(), CurScope);
+ Tok.getLocation(), getCurScope());
// If this is not a typedef name, don't parse it as part of the declspec,
// it must be an implicit int or an error.
@@ -1066,7 +1090,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// If we're in a context where the identifier could be a class name,
// check whether this is a constructor declaration.
if (getLang().CPlusPlus && DSContext == DSC_class &&
- Actions.isCurrentClassName(*Tok.getIdentifierInfo(), CurScope) &&
+ Actions.isCurrentClassName(*Tok.getIdentifierInfo(), getCurScope()) &&
isConstructorDeclarator())
goto DoneWithDeclSpec;
@@ -1114,7 +1138,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// constructor name or specialization, check whether this is a
// constructor declaration.
if (getLang().CPlusPlus && DSContext == DSC_class &&
- Actions.isCurrentClassName(*TemplateId->Name, CurScope) &&
+ Actions.isCurrentClassName(*TemplateId->Name, getCurScope()) &&
isConstructorDeclarator())
goto DoneWithDeclSpec;
@@ -1677,7 +1701,7 @@ ParseStructDeclaration(DeclSpec &DS, FieldCallback &Fields) {
// If there are no declarators, this is a free-standing declaration
// specifier. Let the actions module cope with it.
if (Tok.is(tok::semi)) {
- Actions.ParsedFreeStandingDeclSpec(CurScope, AS_none, DS);
+ Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none, DS);
return;
}
@@ -1753,7 +1777,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
SourceLocation LBraceLoc = ConsumeBrace();
ParseScope StructScope(this, Scope::ClassScope|Scope::DeclScope);
- Actions.ActOnTagStartDefinition(CurScope, TagDecl);
+ Actions.ActOnTagStartDefinition(getCurScope(), TagDecl);
// Empty structs are an extension in C (C99 6.7.2.1p7), but are allowed in
// C++.
@@ -1770,6 +1794,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
// Check for extraneous top-level semicolon.
if (Tok.is(tok::semi)) {
Diag(Tok, diag::ext_extra_struct_semi)
+ << DeclSpec::getSpecifierName((DeclSpec::TST)TagType)
<< FixItHint::CreateRemoval(Tok.getLocation());
ConsumeToken();
continue;
@@ -1790,7 +1815,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
virtual DeclPtrTy invoke(FieldDeclarator &FD) {
// Install the declarator into the current TagDecl.
- DeclPtrTy Field = P.Actions.ActOnField(P.CurScope, TagDecl,
+ DeclPtrTy Field = P.Actions.ActOnField(P.getCurScope(), TagDecl,
FD.D.getDeclSpec().getSourceRange().getBegin(),
FD.D, FD.BitfieldSize);
FieldDecls.push_back(Field);
@@ -1814,7 +1839,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
continue;
}
llvm::SmallVector<DeclPtrTy, 16> Fields;
- Actions.ActOnDefs(CurScope, TagDecl, Tok.getLocation(),
+ Actions.ActOnDefs(getCurScope(), TagDecl, Tok.getLocation(),
Tok.getIdentifierInfo(), Fields);
FieldDecls.insert(FieldDecls.end(), Fields.begin(), Fields.end());
ConsumeToken();
@@ -1842,12 +1867,12 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
if (Tok.is(tok::kw___attribute))
AttrList.reset(ParseGNUAttributes());
- Actions.ActOnFields(CurScope,
+ Actions.ActOnFields(getCurScope(),
RecordLoc, TagDecl, FieldDecls.data(), FieldDecls.size(),
LBraceLoc, RBraceLoc,
AttrList.get());
StructScope.Exit();
- Actions.ActOnTagFinishDefinition(CurScope, TagDecl, RBraceLoc);
+ Actions.ActOnTagFinishDefinition(getCurScope(), TagDecl, RBraceLoc);
}
@@ -1869,7 +1894,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// Parse the tag portion of this.
if (Tok.is(tok::code_completion)) {
// Code completion for an enum name.
- Actions.CodeCompleteTag(CurScope, DeclSpec::TST_enum);
+ Actions.CodeCompleteTag(getCurScope(), DeclSpec::TST_enum);
ConsumeCodeCompletionToken();
}
@@ -1943,7 +1968,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
SourceLocation TSTLoc = NameLoc.isValid()? NameLoc : StartLoc;
const char *PrevSpec = 0;
unsigned DiagID;
- DeclPtrTy TagDecl = Actions.ActOnTag(CurScope, DeclSpec::TST_enum, TUK,
+ DeclPtrTy TagDecl = Actions.ActOnTag(getCurScope(), DeclSpec::TST_enum, TUK,
StartLoc, SS, Name, NameLoc, Attr.get(),
AS,
Action::MultiTemplateParamsArg(Actions),
@@ -1957,7 +1982,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
return;
}
- TypeResult Type = Actions.ActOnDependentTag(CurScope, DeclSpec::TST_enum,
+ TypeResult Type = Actions.ActOnDependentTag(getCurScope(), DeclSpec::TST_enum,
TUK, SS, Name, StartLoc,
NameLoc);
if (Type.isInvalid()) {
@@ -2007,13 +2032,13 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
void Parser::ParseEnumBody(SourceLocation StartLoc, DeclPtrTy EnumDecl) {
// Enter the scope of the enum body and start the definition.
ParseScope EnumScope(this, Scope::DeclScope);
- Actions.ActOnTagStartDefinition(CurScope, EnumDecl);
+ Actions.ActOnTagStartDefinition(getCurScope(), EnumDecl);
SourceLocation LBraceLoc = ConsumeBrace();
// C does not allow an empty enumerator-list, C++ does [dcl.enum].
if (Tok.is(tok::r_brace) && !getLang().CPlusPlus)
- Diag(Tok, diag::ext_empty_struct_union_enum) << "enum";
+ Diag(Tok, diag::error_empty_enum);
llvm::SmallVector<DeclPtrTy, 32> EnumConstantDecls;
@@ -2034,7 +2059,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, DeclPtrTy EnumDecl) {
}
// Install the enumerator constant into EnumDecl.
- DeclPtrTy EnumConstDecl = Actions.ActOnEnumConstant(CurScope, EnumDecl,
+ DeclPtrTy EnumConstDecl = Actions.ActOnEnumConstant(getCurScope(), EnumDecl,
LastEnumConstDecl,
IdentLoc, Ident,
EqualLoc,
@@ -2063,10 +2088,10 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, DeclPtrTy EnumDecl) {
Actions.ActOnEnumBody(StartLoc, LBraceLoc, RBraceLoc, EnumDecl,
EnumConstantDecls.data(), EnumConstantDecls.size(),
- CurScope, Attr.get());
+ getCurScope(), Attr.get());
EnumScope.Exit();
- Actions.ActOnTagFinishDefinition(CurScope, EnumDecl, RBraceLoc);
+ Actions.ActOnTagFinishDefinition(getCurScope(), EnumDecl, RBraceLoc);
}
/// isTypeSpecifierQualifier - Return true if the current token could be the
@@ -2351,7 +2376,7 @@ bool Parser::isConstructorDeclarator() {
// If we need to, enter the specified scope.
DeclaratorScopeObj DeclScopeObj(*this, SS);
- if (SS.isSet() && Actions.ShouldEnterDeclaratorScope(CurScope, SS))
+ if (SS.isSet() && Actions.ShouldEnterDeclaratorScope(getCurScope(), SS))
DeclScopeObj.EnterDeclaratorScope();
// Check whether the next token(s) are part of a declaration
@@ -2640,7 +2665,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
}
if (D.getCXXScopeSpec().isValid()) {
- if (Actions.ShouldEnterDeclaratorScope(CurScope, D.getCXXScopeSpec()))
+ if (Actions.ShouldEnterDeclaratorScope(getCurScope(), D.getCXXScopeSpec()))
// Change the declaration context for name lookup, until this function
// is exited (and the declarator has been parsed).
DeclScopeObj.EnterDeclaratorScope();
@@ -2699,7 +2724,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// scope when parsing the parenthesized declarator, then exited
// the scope already. Re-enter the scope, if we need to.
if (D.getCXXScopeSpec().isSet()) {
- if (Actions.ShouldEnterDeclaratorScope(CurScope, D.getCXXScopeSpec()))
+ if (Actions.ShouldEnterDeclaratorScope(getCurScope(), D.getCXXScopeSpec()))
// Change the declaration context for name lookup, until this function
// is exited (and the declarator has been parsed).
DeclScopeObj.EnterDeclaratorScope();
@@ -3036,7 +3061,7 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
// Inform the actions module about the parameter declarator, so it gets
// added to the current scope.
- DeclPtrTy Param = Actions.ActOnParamDeclarator(CurScope, ParmDecl);
+ DeclPtrTy Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl);
// Parse the default argument, if any. We parse the default
// arguments in all dialects; the semantic analysis in
@@ -3194,7 +3219,7 @@ void Parser::ParseFunctionDeclaratorIdentifierList(SourceLocation LParenLoc,
IdentifierInfo *ParmII = Tok.getIdentifierInfo();
// Reject 'typedef int y; int test(x, y)', but continue parsing.
- if (Actions.getTypeName(*ParmII, Tok.getLocation(), CurScope))
+ if (Actions.getTypeName(*ParmII, Tok.getLocation(), getCurScope()))
Diag(Tok, diag::err_unexpected_typedef_ident) << ParmII;
// Verify that the argument identifier has not already been mentioned.
@@ -3458,7 +3483,7 @@ bool Parser::TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
default:
break;
}
- } else if (Tok.getIdentifierInfo() == Ident_pixel &&
+ } else if ((Tok.getIdentifierInfo() == Ident_pixel) &&
DS.isTypeAltiVecVector()) {
isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID);
return true;
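
The bulk of the ParseDecl.cpp hunks above are one mechanical refactor: every direct read of the parser's CurScope member becomes a call through a getCurScope() accessor, so the current scope has a single owner. A minimal sketch of that ownership move, using invented class shapes rather than Clang's actual declarations:

// Sketch only: Scope, Action and Parser are simplified stand-ins, assuming
// the semantic-analysis object is the one canonical holder of the scope.
class Scope {
  Scope *Parent;
public:
  explicit Scope(Scope *P = nullptr) : Parent(P) {}
  Scope *getParent() const { return Parent; }
};

class Action {
  Scope *CurScope = nullptr;   // the single authoritative copy
public:
  Scope *getCurScope() const { return CurScope; }
  void setCurScope(Scope *S) { CurScope = S; }
};

class Parser {
  Action &Actions;
public:
  explicit Parser(Action &A) : Actions(A) {}
  // Forwarding accessor: the parser keeps no cached CurScope of its own,
  // so parser and sema can never disagree about which scope is current.
  Scope *getCurScope() const { return Actions.getCurScope(); }
};

Under that shape, every Actions.Foo(CurScope, ...) call site in the hunks becomes Actions.Foo(getCurScope(), ...) with no behavioral change.
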
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
index 479c04c..590ba6c 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
@@ -49,7 +49,7 @@ Parser::DeclPtrTy Parser::ParseNamespace(unsigned Context,
SourceLocation NamespaceLoc = ConsumeToken(); // eat the 'namespace'.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteNamespaceDecl(CurScope);
+ Actions.CodeCompleteNamespaceDecl(getCurScope());
ConsumeCodeCompletionToken();
}
@@ -87,9 +87,9 @@ Parser::DeclPtrTy Parser::ParseNamespace(unsigned Context,
SourceLocation LBrace = ConsumeBrace();
- if (CurScope->isClassScope() || CurScope->isTemplateParamScope() ||
- CurScope->isInObjcMethodScope() || CurScope->getBlockParent() ||
- CurScope->getFnParent()) {
+ if (getCurScope()->isClassScope() || getCurScope()->isTemplateParamScope() ||
+ getCurScope()->isInObjcMethodScope() || getCurScope()->getBlockParent() ||
+ getCurScope()->getFnParent()) {
Diag(LBrace, diag::err_namespace_nonnamespace_scope);
SkipUntil(tok::r_brace, false);
return DeclPtrTy();
@@ -99,7 +99,7 @@ Parser::DeclPtrTy Parser::ParseNamespace(unsigned Context,
ParseScope NamespaceScope(this, Scope::DeclScope);
DeclPtrTy NamespcDecl =
- Actions.ActOnStartNamespaceDef(CurScope, IdentLoc, Ident, LBrace,
+ Actions.ActOnStartNamespaceDef(getCurScope(), IdentLoc, Ident, LBrace,
AttrList.get());
PrettyStackTraceActionsDecl CrashInfo(NamespcDecl, NamespaceLoc, Actions,
@@ -135,7 +135,7 @@ Parser::DeclPtrTy Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
ConsumeToken(); // eat the '='.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteNamespaceAliasDecl(CurScope);
+ Actions.CodeCompleteNamespaceAliasDecl(getCurScope());
ConsumeCodeCompletionToken();
}
@@ -159,7 +159,7 @@ Parser::DeclPtrTy Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
ExpectAndConsume(tok::semi, diag::err_expected_semi_after_namespace_name,
"", tok::semi);
- return Actions.ActOnNamespaceAliasDef(CurScope, NamespaceLoc, AliasLoc, Alias,
+ return Actions.ActOnNamespaceAliasDef(getCurScope(), NamespaceLoc, AliasLoc, Alias,
SS, IdentLoc, Ident);
}
@@ -184,7 +184,7 @@ Parser::DeclPtrTy Parser::ParseLinkage(ParsingDeclSpec &DS,
ParseScope LinkageScope(this, Scope::DeclScope);
DeclPtrTy LinkageSpec
- = Actions.ActOnStartLinkageSpecification(CurScope,
+ = Actions.ActOnStartLinkageSpecification(getCurScope(),
/*FIXME: */SourceLocation(),
Loc, Lang,
Tok.is(tok::l_brace)? Tok.getLocation()
@@ -197,7 +197,7 @@ Parser::DeclPtrTy Parser::ParseLinkage(ParsingDeclSpec &DS,
if (Tok.isNot(tok::l_brace)) {
ParseDeclarationOrFunctionDefinition(DS, Attr.AttrList);
- return Actions.ActOnFinishLinkageSpecification(CurScope, LinkageSpec,
+ return Actions.ActOnFinishLinkageSpecification(getCurScope(), LinkageSpec,
SourceLocation());
}
@@ -216,7 +216,7 @@ Parser::DeclPtrTy Parser::ParseLinkage(ParsingDeclSpec &DS,
}
SourceLocation RBrace = MatchRHSPunctuation(tok::r_brace, LBrace);
- return Actions.ActOnFinishLinkageSpecification(CurScope, LinkageSpec, RBrace);
+ return Actions.ActOnFinishLinkageSpecification(getCurScope(), LinkageSpec, RBrace);
}
/// ParseUsingDirectiveOrDeclaration - Parse C++ using using-declaration or
@@ -230,7 +230,7 @@ Parser::DeclPtrTy Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
SourceLocation UsingLoc = ConsumeToken();
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteUsing(CurScope);
+ Actions.CodeCompleteUsing(getCurScope());
ConsumeCodeCompletionToken();
}
@@ -267,7 +267,7 @@ Parser::DeclPtrTy Parser::ParseUsingDirective(unsigned Context,
SourceLocation NamespcLoc = ConsumeToken();
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteUsingDirective(CurScope);
+ Actions.CodeCompleteUsingDirective(getCurScope());
ConsumeCodeCompletionToken();
}
@@ -304,7 +304,7 @@ Parser::DeclPtrTy Parser::ParseUsingDirective(unsigned Context,
GNUAttr ? diag::err_expected_semi_after_attribute_list :
diag::err_expected_semi_after_namespace_name, "", tok::semi);
- return Actions.ActOnUsingDirective(CurScope, UsingLoc, NamespcLoc, SS,
+ return Actions.ActOnUsingDirective(getCurScope(), UsingLoc, NamespcLoc, SS,
IdentLoc, NamespcName, Attr);
}
@@ -368,7 +368,7 @@ Parser::DeclPtrTy Parser::ParseUsingDeclaration(unsigned Context,
AttrList ? "attributes list" : "using declaration",
tok::semi);
- return Actions.ActOnUsingDeclaration(CurScope, AS, true, UsingLoc, SS, Name,
+ return Actions.ActOnUsingDeclaration(getCurScope(), AS, true, UsingLoc, SS, Name,
AttrList.get(), IsTypeName, TypenameLoc);
}
@@ -508,7 +508,7 @@ Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation,
// template-name was wrong. Try to fix that.
TemplateNameKind TNK = TNK_Type_template;
TemplateTy Template;
- if (!Actions.DiagnoseUnknownTemplateName(*Id, IdLoc, CurScope,
+ if (!Actions.DiagnoseUnknownTemplateName(*Id, IdLoc, getCurScope(),
SS, Template, TNK)) {
Diag(IdLoc, diag::err_unknown_template_name)
<< Id;
@@ -542,7 +542,7 @@ Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation,
}
// We have an identifier; check whether it is actually a type.
- TypeTy *Type = Actions.getTypeName(*Id, IdLoc, CurScope, SS, true);
+ TypeTy *Type = Actions.getTypeName(*Id, IdLoc, getCurScope(), SS, true);
if (!Type) {
Diag(IdLoc, diag::err_expected_class_name);
return true;
@@ -609,10 +609,24 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (Tok.is(tok::code_completion)) {
// Code completion for a struct, class, or union name.
- Actions.CodeCompleteTag(CurScope, TagType);
+ Actions.CodeCompleteTag(getCurScope(), TagType);
ConsumeCodeCompletionToken();
}
+ // C++03 [temp.explicit] 14.7.2/8:
+ // The usual access checking rules do not apply to names used to specify
+ // explicit instantiations.
+ //
+ // As an extension we do not perform access checking on the names used to
+ // specify explicit specializations either. This is important to allow
+ // specializing traits classes for private types.
+ bool SuppressingAccessChecks = false;
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization) {
+ Actions.ActOnStartSuppressingAccessChecks();
+ SuppressingAccessChecks = true;
+ }
+
AttributeList *AttrList = 0;
// If attributes exist after tag, parse them.
if (Tok.is(tok::kw___attribute))
@@ -670,7 +684,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
Name = Tok.getIdentifierInfo();
NameLoc = ConsumeToken();
- if (Tok.is(tok::less)) {
+ if (Tok.is(tok::less) && getLang().CPlusPlus) {
// The name was supposed to refer to a template, but didn't.
// Eat the template argument list and try to continue parsing this as
// a class (or template thereof).
@@ -713,8 +727,6 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
const_cast<ParsedTemplateInfo&>(TemplateInfo).ExternLoc
= SourceLocation();
}
-
-
}
} else if (Tok.is(tok::annot_template_id)) {
TemplateId = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
@@ -734,10 +746,18 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
DS.SetTypeSpecError();
SkipUntil(tok::semi, false, true);
TemplateId->Destroy();
+ if (SuppressingAccessChecks)
+ Actions.ActOnStopSuppressingAccessChecks();
+
return;
}
}
+ // As soon as we're finished parsing the class's template-id, turn access
+ // checking back on.
+ if (SuppressingAccessChecks)
+ Actions.ActOnStopSuppressingAccessChecks();
+
// There are four options here. If we have 'struct foo;', then this
// is either a forward declaration or a friend declaration, which
// have to be treated differently. If we have 'struct foo {...' or
@@ -799,7 +819,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TUK == Action::TUK_Declaration) {
// This is an explicit instantiation of a class template.
TagOrTempResult
- = Actions.ActOnExplicitInstantiation(CurScope,
+ = Actions.ActOnExplicitInstantiation(getCurScope(),
TemplateInfo.ExternLoc,
TemplateInfo.TemplateLoc,
TagType,
@@ -865,7 +885,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Build the class template specialization.
TagOrTempResult
- = Actions.ActOnClassTemplateSpecialization(CurScope, TagType, TUK,
+ = Actions.ActOnClassTemplateSpecialization(getCurScope(), TagType, TUK,
StartLoc, SS,
TemplateTy::make(TemplateId->Template),
TemplateId->TemplateNameLoc,
@@ -886,7 +906,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// template struct Outer<int>::Inner;
//
TagOrTempResult
- = Actions.ActOnExplicitInstantiation(CurScope,
+ = Actions.ActOnExplicitInstantiation(getCurScope(),
TemplateInfo.ExternLoc,
TemplateInfo.TemplateLoc,
TagType, StartLoc, SS, Name,
@@ -900,7 +920,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
bool IsDependent = false;
// Declaration or definition of a class type
- TagOrTempResult = Actions.ActOnTag(CurScope, TagType, TUK, StartLoc, SS,
+ TagOrTempResult = Actions.ActOnTag(getCurScope(), TagType, TUK, StartLoc, SS,
Name, NameLoc, AttrList, AS,
Action::MultiTemplateParamsArg(Actions,
TemplateParams? &(*TemplateParams)[0] : 0,
@@ -910,7 +930,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// If ActOnTag said the type was dependent, try again with the
// less common call.
if (IsDependent)
- TypeResult = Actions.ActOnDependentTag(CurScope, TagType, TUK,
+ TypeResult = Actions.ActOnDependentTag(getCurScope(), TagType, TUK,
SS, Name, StartLoc, NameLoc);
}
@@ -1152,7 +1172,7 @@ void Parser::HandleMemberFunctionDefaultArgs(Declarator& DeclaratorInfo,
getCurrentClass().MethodDecls.push_back(
LateParsedMethodDeclaration(ThisDecl));
LateMethod = &getCurrentClass().MethodDecls.back();
- LateMethod->TemplateScope = CurScope->isTemplateParamScope();
+ LateMethod->TemplateScope = getCurScope()->isTemplateParamScope();
// Add all of the parameters prior to this one (they don't
// have default arguments).
@@ -1229,7 +1249,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
tok::semi))
return;
- Actions.ActOnUsingDeclaration(CurScope, AS,
+ Actions.ActOnUsingDeclaration(getCurScope(), AS,
false, SourceLocation(),
SS, Name,
/* AttrList */ 0,
@@ -1307,7 +1327,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (Tok.is(tok::semi)) {
ConsumeToken();
- Actions.ParsedFreeStandingDeclSpec(CurScope, AS, DS);
+ Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS);
return;
}
@@ -1375,7 +1395,6 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// declarator pure-specifier[opt]
// declarator constant-initializer[opt]
// identifier[opt] ':' constant-expression
-
if (Tok.is(tok::colon)) {
ConsumeToken();
BitfieldSize = ParseConstantExpression();
@@ -1392,7 +1411,6 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// defaulted/deleted function-definition:
// '=' 'default' [TODO]
// '=' 'delete'
-
if (Tok.is(tok::equal)) {
ConsumeToken();
if (getLang().CPlusPlus0x && Tok.is(tok::kw_delete)) {
@@ -1405,6 +1423,17 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
}
+ // If a simple-asm-expr is present, parse it.
+ if (Tok.is(tok::kw_asm)) {
+ SourceLocation Loc;
+ OwningExprResult AsmLabel(ParseSimpleAsm(&Loc));
+ if (AsmLabel.isInvalid())
+ SkipUntil(tok::comma, true, true);
+
+ DeclaratorInfo.setAsmLabel(AsmLabel.release());
+ DeclaratorInfo.SetRangeEnd(Loc);
+ }
+
// If attributes exist after the declarator, parse them.
if (Tok.is(tok::kw___attribute)) {
SourceLocation Loc;
@@ -1419,11 +1448,11 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
DeclPtrTy ThisDecl;
if (DS.isFriendSpecified()) {
// TODO: handle initializers, bitfields, 'delete'
- ThisDecl = Actions.ActOnFriendFunctionDecl(CurScope, DeclaratorInfo,
+ ThisDecl = Actions.ActOnFriendFunctionDecl(getCurScope(), DeclaratorInfo,
/*IsDefinition*/ false,
move(TemplateParams));
} else {
- ThisDecl = Actions.ActOnCXXMemberDeclarator(CurScope, AS,
+ ThisDecl = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS,
DeclaratorInfo,
move(TemplateParams),
BitfieldSize.release(),
@@ -1475,7 +1504,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
return;
}
- Actions.FinalizeDeclaratorGroup(CurScope, DS, DeclsInGroup.data(),
+ Actions.FinalizeDeclaratorGroup(getCurScope(), DS, DeclsInGroup.data(),
DeclsInGroup.size());
}
@@ -1499,7 +1528,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// classes are *not* considered to be nested classes.
bool NonNestedClass = true;
if (!ClassStack.empty()) {
- for (const Scope *S = CurScope; S; S = S->getParent()) {
+ for (const Scope *S = getCurScope(); S; S = S->getParent()) {
if (S->isClassScope()) {
// We're inside a class scope, so this is a nested class.
NonNestedClass = false;
@@ -1526,7 +1555,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
ParsingClassDefinition ParsingDef(*this, TagDecl, NonNestedClass);
if (TagDecl)
- Actions.ActOnTagStartDefinition(CurScope, TagDecl);
+ Actions.ActOnTagStartDefinition(getCurScope(), TagDecl);
if (Tok.is(tok::colon)) {
ParseBaseClause(TagDecl);
@@ -1535,7 +1564,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
Diag(Tok, diag::err_expected_lbrace_after_base_specifiers);
if (TagDecl)
- Actions.ActOnTagDefinitionError(CurScope, TagDecl);
+ Actions.ActOnTagDefinitionError(getCurScope(), TagDecl);
return;
}
}
@@ -1544,12 +1573,8 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
SourceLocation LBraceLoc = ConsumeBrace();
- if (!TagDecl) {
- SkipUntil(tok::r_brace, false, false);
- return;
- }
-
- Actions.ActOnStartCXXMemberDeclarations(CurScope, TagDecl, LBraceLoc);
+ if (TagDecl)
+ Actions.ActOnStartCXXMemberDeclarations(getCurScope(), TagDecl, LBraceLoc);
// C++ 11p3: Members of a class defined with the keyword class are private
// by default. Members of a class defined with the keywords struct or union
@@ -1560,43 +1585,55 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
else
CurAS = AS_public;
- // While we still have something to read, read the member-declarations.
- while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
- // Each iteration of this loop reads one member-declaration.
+ SourceLocation RBraceLoc;
+ if (TagDecl) {
+ // While we still have something to read, read the member-declarations.
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ // Each iteration of this loop reads one member-declaration.
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ Diag(Tok, diag::ext_extra_struct_semi)
+ << DeclSpec::getSpecifierName((DeclSpec::TST)TagType)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeToken();
+ continue;
+ }
- // Check for extraneous top-level semicolon.
- if (Tok.is(tok::semi)) {
- Diag(Tok, diag::ext_extra_struct_semi)
- << FixItHint::CreateRemoval(Tok.getLocation());
- ConsumeToken();
- continue;
- }
+ AccessSpecifier AS = getAccessSpecifierIfPresent();
+ if (AS != AS_none) {
+ // Current token is a C++ access specifier.
+ CurAS = AS;
+ SourceLocation ASLoc = Tok.getLocation();
+ ConsumeToken();
+ if (Tok.is(tok::colon))
+ Actions.ActOnAccessSpecifier(AS, ASLoc, Tok.getLocation());
+ else
+ Diag(Tok, diag::err_expected_colon);
+ ConsumeToken();
+ continue;
+ }
- AccessSpecifier AS = getAccessSpecifierIfPresent();
- if (AS != AS_none) {
- // Current token is a C++ access specifier.
- CurAS = AS;
- ConsumeToken();
- ExpectAndConsume(tok::colon, diag::err_expected_colon);
- continue;
- }
+ // FIXME: Make sure we don't have a template here.
- // FIXME: Make sure we don't have a template here.
+ // Parse all the comma separated declarators.
+ ParseCXXClassMemberDeclaration(CurAS);
+ }
- // Parse all the comma separated declarators.
- ParseCXXClassMemberDeclaration(CurAS);
+ RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBraceLoc);
+ } else {
+ SkipUntil(tok::r_brace, false, false);
}
- SourceLocation RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBraceLoc);
-
// If attributes exist after class contents, parse them.
llvm::OwningPtr<AttributeList> AttrList;
if (Tok.is(tok::kw___attribute))
AttrList.reset(ParseGNUAttributes());
- Actions.ActOnFinishCXXMemberSpecification(CurScope, RecordLoc, TagDecl,
- LBraceLoc, RBraceLoc,
- AttrList.get());
+ if (TagDecl)
+ Actions.ActOnFinishCXXMemberSpecification(getCurScope(), RecordLoc, TagDecl,
+ LBraceLoc, RBraceLoc,
+ AttrList.get());
// C++ 9.2p2: Within the class member-specification, the class is regarded as
// complete within function bodies, default arguments,
@@ -1605,15 +1642,18 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
//
// FIXME: Only function bodies and constructor ctor-initializers are
// parsed correctly, fix the rest.
- if (NonNestedClass) {
+ if (TagDecl && NonNestedClass) {
// We are not inside a nested class. This class and its nested classes
// are complete and we can parse the delayed portions of method
// declarations and the lexed inline method definitions.
+ SourceLocation SavedPrevTokLocation = PrevTokLocation;
ParseLexedMethodDeclarations(getCurrentClass());
ParseLexedMethodDefs(getCurrentClass());
+ PrevTokLocation = SavedPrevTokLocation;
}
- Actions.ActOnTagFinishDefinition(CurScope, TagDecl, RBraceLoc);
+ if (TagDecl)
+ Actions.ActOnTagFinishDefinition(getCurScope(), TagDecl, RBraceLoc);
// Leave the class scope.
ParsingDef.Pop();
@@ -1726,7 +1766,7 @@ Parser::MemInitResult Parser::ParseMemInitializer(DeclPtrTy ConstructorDecl) {
SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
- return Actions.ActOnMemInitializer(ConstructorDecl, CurScope, SS, II,
+ return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
TemplateTypeTy, IdLoc,
LParenLoc, ArgExprs.take(),
ArgExprs.size(), CommaLocs.data(),
@@ -1840,9 +1880,9 @@ void Parser::PopParsingClass() {
// This nested class has some members that will need to be processed
// after the top-level class is completely defined. Therefore, add
// it to the list of nested classes within its parent.
- assert(CurScope->isClassScope() && "Nested class outside of class scope?");
+ assert(getCurScope()->isClassScope() && "Nested class outside of class scope?");
ClassStack.top()->NestedClasses.push_back(Victim);
- Victim->TemplateScope = CurScope->getParent()->isTemplateParamScope();
+ Victim->TemplateScope = getCurScope()->getParent()->isTemplateParamScope();
}
/// ParseCXX0XAttributes - Parse a C++0x attribute-specifier. Currently only
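
The ParseClassSpecifier hunk in this file brackets template-id parsing with ActOnStartSuppressingAccessChecks / ActOnStopSuppressingAccessChecks for explicit instantiations and specializations. The use case its comment cites, sketched here with invented names (this spelling is accepted only where that extension, or a compiler implementing the later C++11 rule, applies):

class Widget {
  struct Impl {};                        // private nested type
};

template <typename T>
struct IsWidgetImpl { static const bool value = false; };

// Specializing the trait requires naming Widget::Impl right here. Under
// strict C++03 access rules the name is inaccessible; suppressing access
// checks on the names in an explicit specialization is what lets traits
// be specialized for private types.
template <>
struct IsWidgetImpl<Widget::Impl> { static const bool value = true; };
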
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
index b036e56..e7973f7 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
@@ -210,7 +210,7 @@ Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
if (LHS.isInvalid()) return move(LHS);
}
- LHS = Actions.ActOnUnaryOp(CurScope, ExtLoc, tok::kw___extension__,
+ LHS = Actions.ActOnUnaryOp(getCurScope(), ExtLoc, tok::kw___extension__,
move(LHS));
if (LHS.isInvalid()) return move(LHS);
@@ -221,7 +221,7 @@ Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
///
Parser::OwningExprResult Parser::ParseAssignmentExpression() {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_Expression);
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_Expression);
ConsumeCodeCompletionToken();
}
@@ -343,6 +343,14 @@ Parser::ParseRHSOfBinaryExpression(OwningExprResult LHS, prec::Level MinPrec) {
}
}
+ // Code completion for the right-hand side of an assignment expression
+ // goes through a special hook that takes the left-hand side into account.
+ if (Tok.is(tok::code_completion) && NextTokPrec == prec::Assignment) {
+ Actions.CodeCompleteAssignmentRHS(getCurScope(), LHS.get());
+ ConsumeCodeCompletionToken();
+ return ExprError();
+ }
+
// Parse another leaf here for the RHS of the operator.
// ParseCastExpression works here because all RHS expressions in C have it
// as a prefix, at least. However, in C++, an assignment-expression could
@@ -399,7 +407,7 @@ Parser::ParseRHSOfBinaryExpression(OwningExprResult LHS, prec::Level MinPrec) {
SourceRange(Actions.getExprRange(LHS.get()).getBegin(),
Actions.getExprRange(RHS.get()).getEnd()));
- LHS = Actions.ActOnBinOp(CurScope, OpToken.getLocation(),
+ LHS = Actions.ActOnBinOp(getCurScope(), OpToken.getLocation(),
OpToken.getKind(), move(LHS), move(RHS));
} else
LHS = Actions.ActOnConditionalOp(OpToken.getLocation(), ColonLoc,
@@ -572,7 +580,8 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Res = ParseParenExpression(ParenExprType, false/*stopIfCastExr*/,
TypeOfCast, CastTy, RParenLoc);
- if (Res.isInvalid()) return move(Res);
+ if (Res.isInvalid())
+ return move(Res);
}
switch (ParenExprType) {
@@ -638,9 +647,9 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// Support 'Class.property' and 'super.property' notation.
if (getLang().ObjC1 && Tok.is(tok::period) &&
- (Actions.getTypeName(II, ILoc, CurScope) ||
+ (Actions.getTypeName(II, ILoc, getCurScope()) ||
// Allow the base to be 'super' if in an objc-method.
- (&II == Ident_super && CurScope->isInObjcMethodScope()))) {
+ (&II == Ident_super && getCurScope()->isInObjcMethodScope()))) {
SourceLocation DotLoc = ConsumeToken();
if (Tok.isNot(tok::identifier)) {
@@ -662,8 +671,9 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
UnqualifiedId Name;
CXXScopeSpec ScopeSpec;
Name.setIdentifier(&II, ILoc);
- Res = Actions.ActOnIdExpression(CurScope, ScopeSpec, Name,
+ Res = Actions.ActOnIdExpression(getCurScope(), ScopeSpec, Name,
Tok.is(tok::l_paren), false);
+
// These can be followed by postfix-expr pieces.
return ParsePostfixExpressionSuffix(move(Res));
}
@@ -698,7 +708,7 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
SourceLocation SavedLoc = ConsumeToken();
Res = ParseCastExpression(true);
if (!Res.isInvalid())
- Res = Actions.ActOnUnaryOp(CurScope, SavedLoc, SavedKind, move(Res));
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, move(Res));
return move(Res);
}
case tok::amp: { // unary-expression: '&' cast-expression
@@ -706,7 +716,7 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
SourceLocation SavedLoc = ConsumeToken();
Res = ParseCastExpression(false, true);
if (!Res.isInvalid())
- Res = Actions.ActOnUnaryOp(CurScope, SavedLoc, SavedKind, move(Res));
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, move(Res));
return move(Res);
}
@@ -720,7 +730,7 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
SourceLocation SavedLoc = ConsumeToken();
Res = ParseCastExpression(false);
if (!Res.isInvalid())
- Res = Actions.ActOnUnaryOp(CurScope, SavedLoc, SavedKind, move(Res));
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, move(Res));
return move(Res);
}
@@ -730,7 +740,7 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
SourceLocation SavedLoc = ConsumeToken();
Res = ParseCastExpression(false);
if (!Res.isInvalid())
- Res = Actions.ActOnUnaryOp(CurScope, SavedLoc, SavedKind, move(Res));
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, move(Res));
return move(Res);
}
case tok::kw_sizeof: // unary-expression: 'sizeof' unary-expression
@@ -905,7 +915,7 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
case tok::caret:
return ParsePostfixExpressionSuffix(ParseBlockLiteralExpression());
case tok::code_completion:
- Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_Expression);
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_Expression);
ConsumeCodeCompletionToken();
return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
NotCastExpr, TypeOfCast);
@@ -951,13 +961,23 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) {
default: // Not a postfix-expression suffix.
return move(LHS);
case tok::l_square: { // postfix-expression: p-e '[' expression ']'
+ // If we have a array postfix expression that starts on a new line and
+ // Objective-C is enabled, it is highly likely that the user forgot a
+ // semicolon after the base expression and that the array postfix-expr is
+ // actually another message send. In this case, do some look-ahead to see
+ // if the contents of the square brackets are obviously not a valid
+ // expression and recover by pretending there is no suffix.
+ if (getLang().ObjC1 && Tok.isAtStartOfLine() &&
+ isSimpleObjCMessageExpression())
+ return move(LHS);
+
Loc = ConsumeBracket();
OwningExprResult Idx(ParseExpression());
SourceLocation RLoc = Tok.getLocation();
if (!LHS.isInvalid() && !Idx.isInvalid() && Tok.is(tok::r_square)) {
- LHS = Actions.ActOnArraySubscriptExpr(CurScope, move(LHS), Loc,
+ LHS = Actions.ActOnArraySubscriptExpr(getCurScope(), move(LHS), Loc,
move(Idx), RLoc);
} else
LHS = ExprError();
@@ -973,8 +993,13 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) {
Loc = ConsumeParen();
+ if (LHS.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteCall(CurScope, LHS.get(), 0, 0);
+ Actions.CodeCompleteCall(getCurScope(), LHS.get(), 0, 0);
ConsumeCodeCompletionToken();
}
@@ -995,7 +1020,7 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) {
if (!LHS.isInvalid()) {
assert((ArgExprs.size() == 0 || ArgExprs.size()-1 == CommaLocs.size())&&
"Unexpected number of commas!");
- LHS = Actions.ActOnCallExpr(CurScope, move(LHS), Loc,
+ LHS = Actions.ActOnCallExpr(getCurScope(), move(LHS), Loc,
move_arg(ArgExprs), CommaLocs.data(),
Tok.getLocation());
}
@@ -1014,7 +1039,7 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) {
Action::TypeTy *ObjectType = 0;
bool MayBePseudoDestructor = false;
if (getLang().CPlusPlus && !LHS.isInvalid()) {
- LHS = Actions.ActOnStartCXXMemberReference(CurScope, move(LHS),
+ LHS = Actions.ActOnStartCXXMemberReference(getCurScope(), move(LHS),
OpLoc, OpKind, ObjectType,
MayBePseudoDestructor);
if (LHS.isInvalid())
@@ -1022,11 +1047,13 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) {
ParseOptionalCXXScopeSpecifier(SS, ObjectType, false,
&MayBePseudoDestructor);
+ if (SS.isNotEmpty())
+ ObjectType = 0;
}
if (Tok.is(tok::code_completion)) {
// Code completion for a member access expression.
- Actions.CodeCompleteMemberReferenceExpr(CurScope, LHS.get(),
+ Actions.CodeCompleteMemberReferenceExpr(getCurScope(), LHS.get(),
OpLoc, OpKind == tok::arrow);
ConsumeCodeCompletionToken();
@@ -1053,7 +1080,7 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) {
return ExprError();
if (!LHS.isInvalid())
- LHS = Actions.ActOnMemberAccessExpr(CurScope, move(LHS), OpLoc,
+ LHS = Actions.ActOnMemberAccessExpr(getCurScope(), move(LHS), OpLoc,
OpKind, SS, Name, ObjCImpDecl,
Tok.is(tok::l_paren));
break;
@@ -1061,7 +1088,7 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) {
case tok::plusplus: // postfix-expression: postfix-expression '++'
case tok::minusminus: // postfix-expression: postfix-expression '--'
if (!LHS.isInvalid()) {
- LHS = Actions.ActOnPostfixUnaryOp(CurScope, Tok.getLocation(),
+ LHS = Actions.ActOnPostfixUnaryOp(getCurScope(), Tok.getLocation(),
Tok.getKind(), move(LHS));
}
ConsumeToken();
@@ -1309,7 +1336,7 @@ Parser::OwningExprResult Parser::ParseBuiltinPrimaryExpression() {
} else if (Ty.isInvalid()) {
Res = ExprError();
} else {
- Res = Actions.ActOnBuiltinOffsetOf(CurScope, StartLoc, TypeLoc,
+ Res = Actions.ActOnBuiltinOffsetOf(getCurScope(), StartLoc, TypeLoc,
Ty.get(), &Comps[0],
Comps.size(), ConsumeParen());
}
@@ -1451,7 +1478,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// Reject the cast of super idiom in ObjC.
if (Tok.is(tok::identifier) && getLang().ObjC1 &&
Tok.getIdentifierInfo() == Ident_super &&
- CurScope->isInObjcMethodScope() &&
+ getCurScope()->isInObjcMethodScope() &&
GetLookAheadToken(1).isNot(tok::period)) {
Diag(Tok.getLocation(), diag::err_illegal_super_cast)
<< SourceRange(OpenLoc, RParenLoc);
@@ -1462,7 +1489,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// TODO: For cast expression with CastTy.
Result = ParseCastExpression(false, false, CastTy);
if (!Result.isInvalid())
- Result = Actions.ActOnCastExpr(CurScope, OpenLoc, CastTy, RParenLoc,
+ Result = Actions.ActOnCastExpr(getCurScope(), OpenLoc, CastTy, RParenLoc,
move(Result));
return move(Result);
}
@@ -1561,7 +1588,7 @@ bool Parser::ParseExpressionList(ExprListTy &Exprs, CommaLocsTy &CommaLocs,
while (1) {
if (Tok.is(tok::code_completion)) {
if (Completer)
- (Actions.*Completer)(CurScope, Data, Exprs.data(), Exprs.size());
+ (Actions.*Completer)(getCurScope(), Data, Exprs.data(), Exprs.size());
ConsumeCodeCompletionToken();
}
@@ -1603,7 +1630,7 @@ void Parser::ParseBlockId() {
}
// Inform sema that we are starting a block.
- Actions.ActOnBlockArguments(DeclaratorInfo, CurScope);
+ Actions.ActOnBlockArguments(DeclaratorInfo, getCurScope());
}
/// ParseBlockLiteralExpression - Parse a block literal, which roughly looks
@@ -1631,7 +1658,7 @@ Parser::OwningExprResult Parser::ParseBlockLiteralExpression() {
Scope::DeclScope);
// Inform sema that we are starting a block.
- Actions.ActOnBlockStart(CaretLoc, CurScope);
+ Actions.ActOnBlockStart(CaretLoc, getCurScope());
// Parse the return type if present.
DeclSpec DS;
@@ -1654,7 +1681,7 @@ Parser::OwningExprResult Parser::ParseBlockLiteralExpression() {
// If there was an error parsing the arguments, they may have
// tried to use ^(x+y) which requires an argument list. Just
// skip the whole block literal.
- Actions.ActOnBlockError(CaretLoc, CurScope);
+ Actions.ActOnBlockError(CaretLoc, getCurScope());
return ExprError();
}
@@ -1665,7 +1692,7 @@ Parser::OwningExprResult Parser::ParseBlockLiteralExpression() {
}
// Inform sema that we are starting a block.
- Actions.ActOnBlockArguments(ParamInfo, CurScope);
+ Actions.ActOnBlockArguments(ParamInfo, getCurScope());
} else if (!Tok.is(tok::l_brace)) {
ParseBlockId();
} else {
@@ -1686,7 +1713,7 @@ Parser::OwningExprResult Parser::ParseBlockLiteralExpression() {
}
// Inform sema that we are starting a block.
- Actions.ActOnBlockArguments(ParamInfo, CurScope);
+ Actions.ActOnBlockArguments(ParamInfo, getCurScope());
}
@@ -1694,14 +1721,14 @@ Parser::OwningExprResult Parser::ParseBlockLiteralExpression() {
if (!Tok.is(tok::l_brace)) {
// Saw something like: ^expr
Diag(Tok, diag::err_expected_expression);
- Actions.ActOnBlockError(CaretLoc, CurScope);
+ Actions.ActOnBlockError(CaretLoc, getCurScope());
return ExprError();
}
OwningStmtResult Stmt(ParseCompoundStatementBody());
if (!Stmt.isInvalid())
- Result = Actions.ActOnBlockStmtExpr(CaretLoc, move(Stmt), CurScope);
+ Result = Actions.ActOnBlockStmtExpr(CaretLoc, move(Stmt), getCurScope());
else
- Actions.ActOnBlockError(CaretLoc, CurScope);
+ Actions.ActOnBlockError(CaretLoc, getCurScope());
return move(Result);
}
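
The assignment-RHS code-completion hook added to ParseRHSOfBinaryExpression above sits inside a precedence-climbing loop. A stripped-down, self-contained sketch of that loop (invented names and token model, not Clang's code) shows where such a hook naturally lands:

#include <map>
#include <string>

// Token model: a value token, or a binary operator; a trailing token with
// isOp == false acts as the end-of-input sentinel.
struct Tok { bool isOp; std::string op; long value; };

long parsePrimary(const Tok *&t) { long v = t->value; ++t; return v; }

int precOf(const std::string &op) {
  static const std::map<std::string, int> P{{"=", 1}, {"+", 3}, {"*", 4}};
  auto it = P.find(op);
  return it == P.end() ? -1 : it->second;
}

long apply(const std::string &op, long l, long r) {
  if (op == "+") return l + r;
  if (op == "*") return l * r;
  return r;   // '=' sketched as yielding its right-hand side
}

// Precedence climbing: consume operators at or above minPrec, recursing
// for tighter-binding right-hand sides. Right-associativity is ignored
// here to keep the sketch short.
long parseRHS(const Tok *&t, long lhs, int minPrec) {
  while (t->isOp && precOf(t->op) >= minPrec) {
    std::string op = t->op;
    ++t;
    // This is the point where the hunk above inserts its special case:
    // with a code-completion token next and an assignment operator just
    // consumed, Clang calls CodeCompleteAssignmentRHS with the LHS.
    long rhs = parsePrimary(t);
    while (t->isOp && precOf(t->op) > precOf(op))
      rhs = parseRHS(t, rhs, precOf(t->op));
    lhs = apply(op, lhs, rhs);
  }
  return lhs;
}

// Usage: for tokens "1 + 2 * 3" plus a sentinel,
// parseRHS(p, parsePrimary(p), 0) yields 7.
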
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
index 46f1d94..579d3bd 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
@@ -81,7 +81,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// '::' - Global scope qualifier.
SourceLocation CCLoc = ConsumeToken();
SS.setBeginLoc(CCLoc);
- SS.setScopeRep(Actions.ActOnCXXGlobalScopeSpecifier(CurScope, CCLoc));
+ SS.setScopeRep(Actions.ActOnCXXGlobalScopeSpecifier(getCurScope(), CCLoc));
SS.setEndLoc(CCLoc);
HasScopeSpecifier = true;
}
@@ -109,7 +109,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
if (Tok.is(tok::code_completion)) {
// Code completion for a nested-name-specifier, where the code
// completion token follows the '::'.
- Actions.CodeCompleteQualifiedId(CurScope, SS, EnteringContext);
+ Actions.CodeCompleteQualifiedId(getCurScope(), SS, EnteringContext);
ConsumeCodeCompletionToken();
}
}
@@ -164,13 +164,18 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// Commit to parsing the template-id.
TPA.Commit();
- TemplateTy Template
- = Actions.ActOnDependentTemplateName(TemplateKWLoc, SS, TemplateName,
- ObjectType, EnteringContext);
- if (!Template)
- return true;
- if (AnnotateTemplateIdToken(Template, TNK_Dependent_template_name,
- &SS, TemplateName, TemplateKWLoc, false))
+ TemplateTy Template;
+ if (TemplateNameKind TNK = Actions.ActOnDependentTemplateName(getCurScope(),
+ TemplateKWLoc,
+ SS,
+ TemplateName,
+ ObjectType,
+ EnteringContext,
+ Template)) {
+ if (AnnotateTemplateIdToken(Template, TNK, &SS, TemplateName,
+ TemplateKWLoc, false))
+ return true;
+ } else
return true;
continue;
@@ -209,7 +214,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
if (TypeToken.getAnnotationValue())
SS.setScopeRep(
- Actions.ActOnCXXNestedNameSpecifier(CurScope, SS,
+ Actions.ActOnCXXNestedNameSpecifier(getCurScope(), SS,
TypeToken.getAnnotationValue(),
TypeToken.getAnnotationRange(),
CCLoc));
@@ -239,7 +244,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// If we get foo:bar, this is almost certainly a typo for foo::bar. Recover
// and emit a fixit hint for it.
if (Next.is(tok::colon) && !ColonIsSacred) {
- if (Actions.IsInvalidUnlessNestedName(CurScope, SS, II, ObjectType,
+ if (Actions.IsInvalidUnlessNestedName(getCurScope(), SS, II, ObjectType,
EnteringContext) &&
// If the token after the colon isn't an identifier, it's still an
// error, but they probably meant something else strange so don't
@@ -255,7 +260,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
if (Next.is(tok::coloncolon)) {
if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde) &&
- !Actions.isNonTypeNestedNameSpecifier(CurScope, SS, Tok.getLocation(),
+ !Actions.isNonTypeNestedNameSpecifier(getCurScope(), SS, Tok.getLocation(),
II, ObjectType)) {
*MayBePseudoDestructor = true;
return false;
@@ -273,12 +278,10 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
HasScopeSpecifier = true;
}
- if (SS.isInvalid())
- continue;
-
- SS.setScopeRep(
- Actions.ActOnCXXNestedNameSpecifier(CurScope, SS, IdLoc, CCLoc, II,
- ObjectType, EnteringContext));
+ if (!SS.isInvalid())
+ SS.setScopeRep(
+ Actions.ActOnCXXNestedNameSpecifier(getCurScope(), SS, IdLoc, CCLoc, II,
+ ObjectType, EnteringContext));
SS.setEndLoc(CCLoc);
continue;
}
@@ -290,7 +293,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
UnqualifiedId TemplateName;
TemplateName.setIdentifier(&II, Tok.getLocation());
bool MemberOfUnknownSpecialization;
- if (TemplateNameKind TNK = Actions.isTemplateName(CurScope, SS,
+ if (TemplateNameKind TNK = Actions.isTemplateName(getCurScope(), SS,
TemplateName,
ObjectType,
EnteringContext,
@@ -319,18 +322,20 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
<< II.getName()
<< FixItHint::CreateInsertion(Tok.getLocation(), "template ");
- Template = Actions.ActOnDependentTemplateName(Tok.getLocation(), SS,
- TemplateName, ObjectType,
- EnteringContext);
- if (!Template.get())
+ if (TemplateNameKind TNK
+ = Actions.ActOnDependentTemplateName(getCurScope(),
+ Tok.getLocation(), SS,
+ TemplateName, ObjectType,
+ EnteringContext, Template)) {
+ // Consume the identifier.
+ ConsumeToken();
+ if (AnnotateTemplateIdToken(Template, TNK, &SS, TemplateName,
+ SourceLocation(), false))
+ return true;
+ }
+ else
return true;
-
- // Consume the identifier.
- ConsumeToken();
- if (AnnotateTemplateIdToken(Template, TNK_Dependent_template_name, &SS,
- TemplateName, SourceLocation(), false))
- return true;
-
+
continue;
}
}
@@ -426,7 +431,7 @@ Parser::OwningExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
}
}
- return Actions.ActOnIdExpression(CurScope, SS, Name, Tok.is(tok::l_paren),
+ return Actions.ActOnIdExpression(getCurScope(), SS, Name, Tok.is(tok::l_paren),
isAddressOfOperand);
}
@@ -607,7 +612,7 @@ Parser::ParseCXXPseudoDestructor(ExprArg Base, SourceLocation OpLoc,
/*TemplateKWLoc*/SourceLocation()))
return ExprError();
- return Actions.ActOnPseudoDestructorExpr(CurScope, move(Base), OpLoc, OpKind,
+ return Actions.ActOnPseudoDestructorExpr(getCurScope(), move(Base), OpLoc, OpKind,
SS, FirstTypeName, CCLoc,
TildeLoc, SecondTypeName,
Tok.is(tok::l_paren));
@@ -673,7 +678,7 @@ Parser::OwningExprResult Parser::ParseCXXThis() {
Parser::OwningExprResult
Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
- TypeTy *TypeRep = Actions.ActOnTypeName(CurScope, DeclaratorInfo).get();
+ TypeTy *TypeRep = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
assert(Tok.is(tok::l_paren) && "Expected '('!");
SourceLocation LParenLoc = ConsumeParen();
@@ -728,7 +733,7 @@ bool Parser::ParseCXXCondition(OwningExprResult &ExprResult,
SourceLocation Loc,
bool ConvertToBoolean) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_Condition);
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_Condition);
ConsumeCodeCompletionToken();
}
@@ -742,7 +747,7 @@ bool Parser::ParseCXXCondition(OwningExprResult &ExprResult,
// If required, convert to a boolean value.
if (ConvertToBoolean)
ExprResult
- = Actions.ActOnBooleanCondition(CurScope, Loc, move(ExprResult));
+ = Actions.ActOnBooleanCondition(getCurScope(), Loc, move(ExprResult));
return ExprResult.isInvalid();
}
@@ -774,7 +779,7 @@ bool Parser::ParseCXXCondition(OwningExprResult &ExprResult,
}
// Type-check the declaration itself.
- Action::DeclResult Dcl = Actions.ActOnCXXConditionDeclaration(CurScope,
+ Action::DeclResult Dcl = Actions.ActOnCXXConditionDeclaration(getCurScope(),
DeclaratorInfo);
DeclResult = Dcl.get();
ExprResult = ExprError();
@@ -1011,15 +1016,14 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
case UnqualifiedId::IK_OperatorFunctionId:
case UnqualifiedId::IK_LiteralOperatorId:
if (AssumeTemplateId) {
- Template = Actions.ActOnDependentTemplateName(TemplateKWLoc, SS,
- Id, ObjectType,
- EnteringContext);
- TNK = TNK_Dependent_template_name;
- if (!Template.get())
- return true;
+ TNK = Actions.ActOnDependentTemplateName(getCurScope(), TemplateKWLoc, SS,
+ Id, ObjectType, EnteringContext,
+ Template);
+ if (TNK == TNK_Non_template)
+ return true;
} else {
bool MemberOfUnknownSpecialization;
- TNK = Actions.isTemplateName(CurScope, SS, Id, ObjectType,
+ TNK = Actions.isTemplateName(getCurScope(), SS, Id, ObjectType,
EnteringContext, Template,
MemberOfUnknownSpecialization);
@@ -1042,11 +1046,10 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
Diag(Id.StartLocation, diag::err_missing_dependent_template_keyword)
<< Name
<< FixItHint::CreateInsertion(Id.StartLocation, "template ");
- Template = Actions.ActOnDependentTemplateName(TemplateKWLoc, SS,
- Id, ObjectType,
- EnteringContext);
- TNK = TNK_Dependent_template_name;
- if (!Template.get())
+ TNK = Actions.ActOnDependentTemplateName(getCurScope(), TemplateKWLoc,
+ SS, Id, ObjectType,
+ EnteringContext, Template);
+ if (TNK == TNK_Non_template)
return true;
}
}
@@ -1056,7 +1059,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
UnqualifiedId TemplateName;
bool MemberOfUnknownSpecialization;
TemplateName.setIdentifier(Name, NameLoc);
- TNK = Actions.isTemplateName(CurScope, SS, TemplateName, ObjectType,
+ TNK = Actions.isTemplateName(getCurScope(), SS, TemplateName, ObjectType,
EnteringContext, Template,
MemberOfUnknownSpecialization);
break;
@@ -1067,14 +1070,13 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
bool MemberOfUnknownSpecialization;
TemplateName.setIdentifier(Name, NameLoc);
if (ObjectType) {
- Template = Actions.ActOnDependentTemplateName(TemplateKWLoc, SS,
- TemplateName, ObjectType,
- EnteringContext);
- TNK = TNK_Dependent_template_name;
- if (!Template.get())
+ TNK = Actions.ActOnDependentTemplateName(getCurScope(), TemplateKWLoc, SS,
+ TemplateName, ObjectType,
+ EnteringContext, Template);
+ if (TNK == TNK_Non_template)
return true;
} else {
- TNK = Actions.isTemplateName(CurScope, SS, TemplateName, ObjectType,
+ TNK = Actions.isTemplateName(getCurScope(), SS, TemplateName, ObjectType,
EnteringContext, Template,
MemberOfUnknownSpecialization);
@@ -1271,7 +1273,7 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
case tok::code_completion: {
// Code completion for the operator name.
- Actions.CodeCompleteOperatorName(CurScope);
+ Actions.CodeCompleteOperatorName(getCurScope());
// Consume the operator token.
ConsumeCodeCompletionToken();
@@ -1332,7 +1334,7 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParseDeclaratorInternal(D, /*DirectDeclParser=*/0);
// Finish up the type.
- Action::TypeResult Ty = Actions.ActOnTypeName(CurScope, D);
+ Action::TypeResult Ty = Actions.ActOnTypeName(getCurScope(), D);
if (Ty.isInvalid())
return true;
@@ -1404,9 +1406,9 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
}
if (AllowConstructorName &&
- Actions.isCurrentClassName(*Id, CurScope, &SS)) {
+ Actions.isCurrentClassName(*Id, getCurScope(), &SS)) {
// We have parsed a constructor name.
- Result.setConstructorName(Actions.getTypeName(*Id, IdLoc, CurScope,
+ Result.setConstructorName(Actions.getTypeName(*Id, IdLoc, getCurScope(),
&SS, false),
IdLoc, IdLoc);
} else {
@@ -1431,7 +1433,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
// If the template-name names the current class, then this is a constructor
if (AllowConstructorName && TemplateId->Name &&
- Actions.isCurrentClassName(*TemplateId->Name, CurScope, &SS)) {
+ Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS)) {
if (SS.isSet()) {
// C++ [class.qual]p2 specifies that a qualified template-name
// is taken as the constructor name where a constructor can be
@@ -1444,7 +1446,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc));
Result.setConstructorName(Actions.getTypeName(*TemplateId->Name,
TemplateId->TemplateNameLoc,
- CurScope,
+ getCurScope(),
&SS, false),
TemplateId->TemplateNameLoc,
TemplateId->RAngleLoc);
@@ -1517,7 +1519,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
// Note that this is a destructor name.
Action::TypeTy *Ty = Actions.getDestructorName(TildeLoc, *ClassName,
- ClassNameLoc, CurScope,
+ ClassNameLoc, getCurScope(),
SS, ObjectType,
EnteringContext);
if (!Ty)
@@ -1570,7 +1572,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
ExprVector PlacementArgs(Actions);
SourceLocation PlacementLParen, PlacementRParen;
- bool ParenTypeId;
+ SourceRange TypeIdParens;
DeclSpec DS;
Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
if (Tok.is(tok::l_paren)) {
@@ -1589,17 +1591,17 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
if (PlacementArgs.empty()) {
// Reset the placement locations. There was no placement.
+ TypeIdParens = SourceRange(PlacementLParen, PlacementRParen);
PlacementLParen = PlacementRParen = SourceLocation();
- ParenTypeId = true;
} else {
// We still need the type.
if (Tok.is(tok::l_paren)) {
- SourceLocation LParen = ConsumeParen();
+ TypeIdParens.setBegin(ConsumeParen());
ParseSpecifierQualifierList(DS);
DeclaratorInfo.SetSourceRange(DS.getSourceRange());
ParseDeclarator(DeclaratorInfo);
- MatchRHSPunctuation(tok::r_paren, LParen);
- ParenTypeId = true;
+ TypeIdParens.setEnd(MatchRHSPunctuation(tok::r_paren,
+ TypeIdParens.getBegin()));
} else {
if (ParseCXXTypeSpecifierSeq(DS))
DeclaratorInfo.setInvalidType(true);
@@ -1608,7 +1610,6 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
ParseDeclaratorInternal(DeclaratorInfo,
&Parser::ParseDirectNewDeclarator);
}
- ParenTypeId = false;
}
}
} else {
@@ -1621,7 +1622,6 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
ParseDeclaratorInternal(DeclaratorInfo,
&Parser::ParseDirectNewDeclarator);
}
- ParenTypeId = false;
}
if (DeclaratorInfo.isInvalidType()) {
SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true);
@@ -1649,7 +1649,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
return Actions.ActOnCXXNew(Start, UseGlobal, PlacementLParen,
move_arg(PlacementArgs), PlacementRParen,
- ParenTypeId, DeclaratorInfo, ConstructorLParen,
+ TypeIdParens, DeclaratorInfo, ConstructorLParen,
move_arg(ConstructorArgs), ConstructorRParen);
}
@@ -1851,7 +1851,7 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
// will be consumed.
Result = ParseCastExpression(false/*isUnaryExpression*/,
false/*isAddressofOperand*/,
- NotCastExpr, false);
+ NotCastExpr, 0/*TypeOfCast*/);
}
// If we parsed a cast-expression, it's really a type-id, otherwise it's
@@ -1893,7 +1893,7 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
// Result is what ParseCastExpression returned earlier.
if (!Result.isInvalid())
- Result = Actions.ActOnCastExpr(CurScope, LParenLoc, CastTy, RParenLoc,
+ Result = Actions.ActOnCastExpr(getCurScope(), LParenLoc, CastTy, RParenLoc,
move(Result));
return move(Result);
}
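
The new-expression hunks above replace the ParenTypeId flag with a TypeIdParens source range, so one object both answers "was the type-id parenthesized?" and remembers where the parentheses were. A minimal sketch of that pattern, using invented stand-in types (Loc, Range) rather than clang's SourceLocation/SourceRange:

#include <cassert>

// Hypothetical stand-ins for clang::SourceLocation / clang::SourceRange.
struct Loc {
  unsigned Offset = 0;                       // 0 plays the "invalid" role
  bool isValid() const { return Offset != 0; }
};

struct Range {
  Loc Begin, End;
  bool isValid() const { return Begin.isValid() && End.isValid(); }
};

int main() {
  Range TypeIdParens;                        // default-constructed: invalid,
  assert(!TypeIdParens.isValid());           // i.e. the old ParenTypeId == false
  TypeIdParens.Begin.Offset = 10;            // saw '(' here
  TypeIdParens.End.Offset = 20;              // matched ')' here
  assert(TypeIdParens.isValid());            // the old ParenTypeId == true, but
                                             // the locations are now kept too
}
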
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
index a382a9a..8451aeb 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
@@ -146,7 +146,7 @@ Parser::OwningExprResult Parser::ParseInitializerWithPotentialDesignator() {
if (getLang().ObjC1 && getLang().CPlusPlus) {
// Send to 'super'.
if (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_super &&
- NextToken().isNot(tok::period) && CurScope->isInObjcMethodScope()) {
+ NextToken().isNot(tok::period) && getCurScope()->isInObjcMethodScope()) {
CheckArrayDesignatorSyntax(*this, StartLoc, Desig);
return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
ConsumeToken(), 0,
@@ -184,7 +184,7 @@ Parser::OwningExprResult Parser::ParseInitializerWithPotentialDesignator() {
// This is a message send to super: [super foo]
// This is a message sent to an expr: [super.bar foo]
switch (Action::ObjCMessageKind Kind
- = Actions.getObjCMessageKind(CurScope, II, IILoc,
+ = Actions.getObjCMessageKind(getCurScope(), II, IILoc,
II == Ident_super,
NextToken().is(tok::period),
ReceiverType)) {
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
index 9cfe734..68473a5 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
@@ -31,7 +31,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtDirectives() {
SourceLocation AtLoc = ConsumeToken(); // the "@"
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCAtDirective(CurScope, ObjCImpDecl, false);
+ Actions.CodeCompleteObjCAtDirective(getCurScope(), ObjCImpDecl, false);
ConsumeCodeCompletionToken();
}
@@ -130,7 +130,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtInterfaceDeclaration(
// Code completion after '@interface'.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCInterfaceDecl(CurScope);
+ Actions.CodeCompleteObjCInterfaceDecl(getCurScope());
ConsumeCodeCompletionToken();
}
@@ -148,7 +148,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtInterfaceDeclaration(
SourceLocation categoryLoc, rparenLoc;
IdentifierInfo *categoryId = 0;
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCInterfaceCategory(CurScope, nameId, nameLoc);
+ Actions.CodeCompleteObjCInterfaceCategory(getCurScope(), nameId, nameLoc);
ConsumeCodeCompletionToken();
}
@@ -203,7 +203,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtInterfaceDeclaration(
// Code completion of superclass names.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCSuperclass(CurScope, nameId, nameLoc);
+ Actions.CodeCompleteObjCSuperclass(getCurScope(), nameId, nameLoc);
ConsumeCodeCompletionToken();
}
@@ -283,7 +283,7 @@ struct Parser::ObjCPropertyCallback : FieldCallback {
FD.D.getIdentifier());
bool isOverridingProperty = false;
DeclPtrTy Property =
- P.Actions.ActOnProperty(P.CurScope, AtLoc, FD, OCDS,
+ P.Actions.ActOnProperty(P.getCurScope(), AtLoc, FD, OCDS,
GetterSel, SetterSel, IDecl,
&isOverridingProperty,
MethodImplKind);
@@ -347,7 +347,7 @@ void Parser::ParseObjCInterfaceDeclList(DeclPtrTy interfaceDecl,
// Code completion within an Objective-C interface.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteOrdinaryName(CurScope,
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
ObjCImpDecl? Action::CCC_ObjCImplementation
: Action::CCC_ObjCInterface);
ConsumeCodeCompletionToken();
@@ -370,7 +370,7 @@ void Parser::ParseObjCInterfaceDeclList(DeclPtrTy interfaceDecl,
// Otherwise, we have an @ directive, eat the @.
SourceLocation AtLoc = ConsumeToken(); // the "@"
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCAtDirective(CurScope, ObjCImpDecl, true);
+ Actions.CodeCompleteObjCAtDirective(getCurScope(), ObjCImpDecl, true);
ConsumeCodeCompletionToken();
break;
}
@@ -437,7 +437,7 @@ void Parser::ParseObjCInterfaceDeclList(DeclPtrTy interfaceDecl,
// We break out of the big loop in two cases: when we see @end or when we see
// EOF. In the former case, eat the @end. In the latter case, emit an error.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCAtDirective(CurScope, ObjCImpDecl, true);
+ Actions.CodeCompleteObjCAtDirective(getCurScope(), ObjCImpDecl, true);
ConsumeCodeCompletionToken();
} else if (Tok.isObjCAtKeyword(tok::objc_end))
ConsumeToken(); // the "end" identifier
@@ -446,7 +446,7 @@ void Parser::ParseObjCInterfaceDeclList(DeclPtrTy interfaceDecl,
// Insert collected methods declarations into the @interface object.
// This passes in an invalid SourceLocation for AtEndLoc when EOF is hit.
- Actions.ActOnAtEnd(CurScope, AtEnd, interfaceDecl,
+ Actions.ActOnAtEnd(getCurScope(), AtEnd, interfaceDecl,
allMethods.data(), allMethods.size(),
allProperties.data(), allProperties.size(),
allTUVariables.data(), allTUVariables.size());
@@ -476,7 +476,7 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS, DeclPtrTy ClassDecl,
while (1) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCPropertyFlags(CurScope, DS);
+ Actions.CodeCompleteObjCPropertyFlags(getCurScope(), DS);
ConsumeCodeCompletionToken();
}
const IdentifierInfo *II = Tok.getIdentifierInfo();
@@ -509,10 +509,10 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS, DeclPtrTy ClassDecl,
if (Tok.is(tok::code_completion)) {
if (II->getNameStart()[0] == 's')
- Actions.CodeCompleteObjCPropertySetter(CurScope, ClassDecl,
+ Actions.CodeCompleteObjCPropertySetter(getCurScope(), ClassDecl,
Methods, NumMethods);
else
- Actions.CodeCompleteObjCPropertyGetter(CurScope, ClassDecl,
+ Actions.CodeCompleteObjCPropertyGetter(getCurScope(), ClassDecl,
Methods, NumMethods);
ConsumeCodeCompletionToken();
}
@@ -780,7 +780,7 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc,
ParsingDeclRAIIObject PD(*this);
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCMethodDecl(CurScope, mType == tok::minus,
+ Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
/*ReturnType=*/0, IDecl);
ConsumeCodeCompletionToken();
}
@@ -797,7 +797,7 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc,
MethodAttrs.reset(ParseGNUAttributes());
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCMethodDecl(CurScope, mType == tok::minus,
+ Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
ReturnType, IDecl);
ConsumeCodeCompletionToken();
}
@@ -856,6 +856,20 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc,
if (getLang().ObjC2 && Tok.is(tok::kw___attribute))
ArgInfo.ArgAttrs = ParseGNUAttributes();
+ // Code completion for the next piece of the selector.
+ if (Tok.is(tok::code_completion)) {
+ ConsumeCodeCompletionToken();
+ KeyIdents.push_back(SelIdent);
+ Actions.CodeCompleteObjCMethodDeclSelector(getCurScope(),
+ mType == tok::minus,
+ /*AtParameterName=*/true,
+ ReturnType,
+ KeyIdents.data(),
+ KeyIdents.size());
+ KeyIdents.pop_back();
+ break;
+ }
+
if (Tok.isNot(tok::identifier)) {
Diag(Tok, diag::err_expected_ident); // missing argument name.
break;
@@ -868,6 +882,18 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc,
ArgInfos.push_back(ArgInfo);
KeyIdents.push_back(SelIdent);
+ // Code completion for the next piece of the selector.
+ if (Tok.is(tok::code_completion)) {
+ ConsumeCodeCompletionToken();
+ Actions.CodeCompleteObjCMethodDeclSelector(getCurScope(),
+ mType == tok::minus,
+ /*AtParameterName=*/false,
+ ReturnType,
+ KeyIdents.data(),
+ KeyIdents.size());
+ break;
+ }
+
// Check for another keyword selector.
SourceLocation Loc;
SelIdent = ParseObjCSelectorPiece(Loc);
@@ -892,7 +918,7 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc,
Declarator ParmDecl(DS, Declarator::PrototypeContext);
ParseDeclarator(ParmDecl);
IdentifierInfo *ParmII = ParmDecl.getIdentifier();
- DeclPtrTy Param = Actions.ActOnParamDeclarator(CurScope, ParmDecl);
+ DeclPtrTy Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl);
CParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
ParmDecl.getIdentifierLoc(),
Param,
@@ -1014,7 +1040,7 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl,
// Check for extraneous top-level semicolon.
if (Tok.is(tok::semi)) {
- Diag(Tok, diag::ext_extra_struct_semi)
+ Diag(Tok, diag::ext_extra_ivar_semi)
<< FixItHint::CreateRemoval(Tok.getLocation());
ConsumeToken();
continue;
@@ -1025,7 +1051,7 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl,
ConsumeToken(); // eat the @ sign
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCAtVisibility(CurScope);
+ Actions.CodeCompleteObjCAtVisibility(getCurScope());
ConsumeCodeCompletionToken();
}
@@ -1044,7 +1070,7 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl,
}
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteOrdinaryName(CurScope,
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
Action::CCC_ObjCInstanceVariableList);
ConsumeCodeCompletionToken();
}
@@ -1063,7 +1089,7 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl,
DeclPtrTy invoke(FieldDeclarator &FD) {
// Install the declarator into the interface decl.
DeclPtrTy Field
- = P.Actions.ActOnIvar(P.CurScope,
+ = P.Actions.ActOnIvar(P.getCurScope(),
FD.D.getDeclSpec().getSourceRange().getBegin(),
IDecl, FD.D, FD.BitfieldSize, visibility);
if (Field)
@@ -1087,7 +1113,7 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl,
SourceLocation RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBraceLoc);
// Call ActOnFields() even if we don't have any decls. This is useful
// for code rewriting tools that need to be aware of the empty list.
- Actions.ActOnFields(CurScope, atLoc, interfaceDecl,
+ Actions.ActOnFields(getCurScope(), atLoc, interfaceDecl,
AllIvarDecls.data(), AllIvarDecls.size(),
LBraceLoc, RBraceLoc, 0);
return;
@@ -1116,7 +1142,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
ConsumeToken(); // the "protocol" identifier
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCProtocolDecl(CurScope);
+ Actions.CodeCompleteObjCProtocolDecl(getCurScope());
ConsumeCodeCompletionToken();
}
@@ -1202,7 +1228,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtImplementationDeclaration(
// Code completion after '@implementation'.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCImplementationDecl(CurScope);
+ Actions.CodeCompleteObjCImplementationDecl(getCurScope());
ConsumeCodeCompletionToken();
}
@@ -1221,7 +1247,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtImplementationDeclaration(
IdentifierInfo *categoryId = 0;
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCImplementationCategory(CurScope, nameId, nameLoc);
+ Actions.CodeCompleteObjCImplementationCategory(getCurScope(), nameId, nameLoc);
ConsumeCodeCompletionToken();
}
@@ -1277,7 +1303,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtEndDeclaration(SourceRange atEnd) {
DeclPtrTy Result = ObjCImpDecl;
ConsumeToken(); // the "end" identifier
if (ObjCImpDecl) {
- Actions.ActOnAtEnd(CurScope, atEnd, ObjCImpDecl);
+ Actions.ActOnAtEnd(getCurScope(), atEnd, ObjCImpDecl);
ObjCImpDecl = DeclPtrTy();
PendingObjCImpDecl.pop_back();
}
@@ -1292,7 +1318,7 @@ Parser::DeclGroupPtrTy Parser::RetrievePendingObjCImpDecl() {
if (PendingObjCImpDecl.empty())
return Actions.ConvertDeclToDeclGroup(DeclPtrTy());
DeclPtrTy ImpDecl = PendingObjCImpDecl.pop_back_val();
- Actions.ActOnAtEnd(CurScope, SourceRange(), ImpDecl);
+ Actions.ActOnAtEnd(getCurScope(), SourceRange(), ImpDecl);
return Actions.ConvertDeclToDeclGroup(ImpDecl);
}
@@ -1341,7 +1367,7 @@ Parser::DeclPtrTy Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
while (true) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCPropertyDefinition(CurScope, ObjCImpDecl);
+ Actions.CodeCompleteObjCPropertyDefinition(getCurScope(), ObjCImpDecl);
ConsumeCodeCompletionToken();
}
@@ -1359,7 +1385,7 @@ Parser::DeclPtrTy Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
ConsumeToken(); // consume '='
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCPropertySynthesizeIvar(CurScope, propertyId,
+ Actions.CodeCompleteObjCPropertySynthesizeIvar(getCurScope(), propertyId,
ObjCImpDecl);
ConsumeCodeCompletionToken();
}
@@ -1371,7 +1397,7 @@ Parser::DeclPtrTy Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
propertyIvar = Tok.getIdentifierInfo();
ConsumeToken(); // consume ivar-name
}
- Actions.ActOnPropertyImplDecl(CurScope, atLoc, propertyLoc, true, ObjCImpDecl,
+ Actions.ActOnPropertyImplDecl(getCurScope(), atLoc, propertyLoc, true, ObjCImpDecl,
propertyId, propertyIvar);
if (Tok.isNot(tok::comma))
break;
@@ -1399,7 +1425,7 @@ Parser::DeclPtrTy Parser::ParseObjCPropertyDynamic(SourceLocation atLoc) {
SourceLocation loc = ConsumeToken(); // consume dynamic
while (true) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCPropertyDefinition(CurScope, ObjCImpDecl);
+ Actions.CodeCompleteObjCPropertyDefinition(getCurScope(), ObjCImpDecl);
ConsumeCodeCompletionToken();
}
@@ -1411,7 +1437,7 @@ Parser::DeclPtrTy Parser::ParseObjCPropertyDynamic(SourceLocation atLoc) {
IdentifierInfo *propertyId = Tok.getIdentifierInfo();
SourceLocation propertyLoc = ConsumeToken(); // consume property name
- Actions.ActOnPropertyImplDecl(CurScope, atLoc, propertyLoc, false, ObjCImpDecl,
+ Actions.ActOnPropertyImplDecl(getCurScope(), atLoc, propertyLoc, false, ObjCImpDecl,
propertyId, 0);
if (Tok.isNot(tok::comma))
@@ -1442,7 +1468,7 @@ Parser::OwningStmtResult Parser::ParseObjCThrowStmt(SourceLocation atLoc) {
}
// consume ';'
ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@throw");
- return Actions.ActOnObjCAtThrowStmt(atLoc, move(Res), CurScope);
+ return Actions.ActOnObjCAtThrowStmt(atLoc, move(Res), getCurScope());
}
/// objc-synchronized-statement:
@@ -1536,7 +1562,7 @@ Parser::OwningStmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
// Inform the actions module about the declarator, so it
// gets added to the current scope.
- FirstPart = Actions.ActOnObjCExceptionDecl(CurScope, ParmDecl);
+ FirstPart = Actions.ActOnObjCExceptionDecl(getCurScope(), ParmDecl);
} else
ConsumeToken(); // consume '...'
@@ -1633,7 +1659,7 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDefinition() {
// Tell the actions module that we have entered a method definition with the
// specified Declarator for the method.
- Actions.ActOnStartOfObjCMethodDef(CurScope, MDecl);
+ Actions.ActOnStartOfObjCMethodDef(getCurScope(), MDecl);
OwningStmtResult FnBody(ParseCompoundStatementBody());
@@ -1653,7 +1679,7 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDefinition() {
Parser::OwningStmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCAtStatement(CurScope);
+ Actions.CodeCompleteObjCAtStatement(getCurScope());
ConsumeCodeCompletionToken();
return StmtError();
}
@@ -1684,7 +1710,7 @@ Parser::OwningStmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
Parser::OwningExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
switch (Tok.getKind()) {
case tok::code_completion:
- Actions.CodeCompleteObjCAtExpression(CurScope);
+ Actions.CodeCompleteObjCAtExpression(getCurScope());
ConsumeCodeCompletionToken();
return ExprError();
@@ -1730,7 +1756,6 @@ Parser::OwningExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
/// expression
/// simple-type-specifier
/// typename-specifier
-
bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
if (Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope))
@@ -1785,7 +1810,7 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
// typename-specifier we parsed into a type and parse the
// remainder of the class message.
Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
- TypeResult Type = Actions.ActOnTypeName(CurScope, DeclaratorInfo);
+ TypeResult Type = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
if (Type.isInvalid())
return true;
@@ -1794,6 +1819,18 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
return false;
}
+/// \brief Determine whether the parser is currently referring to an
+/// Objective-C message send, using a simplified heuristic to avoid overhead.
+///
+/// This routine will only return true for a subset of valid message-send
+/// expressions.
+bool Parser::isSimpleObjCMessageExpression() {
+ assert(Tok.is(tok::l_square) && getLang().ObjC1 &&
+ "Incorrect start for isSimpleObjCMessageExpression");
+ return GetLookAheadToken(1).is(tok::identifier) &&
+ GetLookAheadToken(2).is(tok::identifier);
+}
+
/// objc-message-expr:
/// '[' objc-receiver objc-message-args ']'
///
@@ -1807,6 +1844,13 @@ Parser::OwningExprResult Parser::ParseObjCMessageExpression() {
assert(Tok.is(tok::l_square) && "'[' expected");
SourceLocation LBracLoc = ConsumeBracket(); // consume '['
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCMessageReceiver(getCurScope());
+ ConsumeCodeCompletionToken();
+ SkipUntil(tok::r_square);
+ return ExprError();
+ }
+
if (getLang().CPlusPlus) {
// We completely separate the C and C++ cases because C++ requires
// more complicated (read: slower) parsing.
@@ -1815,7 +1859,7 @@ Parser::OwningExprResult Parser::ParseObjCMessageExpression() {
// FIXME: This doesn't benefit from the same typo-correction we
// get in Objective-C.
if (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_super &&
- NextToken().isNot(tok::period) && CurScope->isInObjcMethodScope())
+ NextToken().isNot(tok::period) && getCurScope()->isInObjcMethodScope())
return ParseObjCMessageExpressionBody(LBracLoc, ConsumeToken(), 0,
ExprArg(Actions));
@@ -1833,11 +1877,13 @@ Parser::OwningExprResult Parser::ParseObjCMessageExpression() {
return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(),
TypeOrExpr, ExprArg(Actions));
- } else if (Tok.is(tok::identifier)) {
+ }
+
+ if (Tok.is(tok::identifier)) {
IdentifierInfo *Name = Tok.getIdentifierInfo();
SourceLocation NameLoc = Tok.getLocation();
TypeTy *ReceiverType;
- switch (Actions.getObjCMessageKind(CurScope, Name, NameLoc,
+ switch (Actions.getObjCMessageKind(getCurScope(), Name, NameLoc,
Name == Ident_super,
NextToken().is(tok::period),
ReceiverType)) {
@@ -1919,11 +1965,11 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
ExprArg ReceiverExpr) {
if (Tok.is(tok::code_completion)) {
if (SuperLoc.isValid())
- Actions.CodeCompleteObjCSuperMessage(CurScope, SuperLoc, 0, 0);
+ Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc, 0, 0);
else if (ReceiverType)
- Actions.CodeCompleteObjCClassMessage(CurScope, ReceiverType, 0, 0);
+ Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType, 0, 0);
else
- Actions.CodeCompleteObjCInstanceMessage(CurScope, ReceiverExpr.get(),
+ Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr.get(),
0, 0);
ConsumeCodeCompletionToken();
}
@@ -1968,15 +2014,15 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
// Code completion after each argument.
if (Tok.is(tok::code_completion)) {
if (SuperLoc.isValid())
- Actions.CodeCompleteObjCSuperMessage(CurScope, SuperLoc,
+ Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
KeyIdents.data(),
KeyIdents.size());
else if (ReceiverType)
- Actions.CodeCompleteObjCClassMessage(CurScope, ReceiverType,
+ Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
KeyIdents.data(),
KeyIdents.size());
else
- Actions.CodeCompleteObjCInstanceMessage(CurScope, ReceiverExpr.get(),
+ Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr.get(),
KeyIdents.data(),
KeyIdents.size());
ConsumeCodeCompletionToken();
@@ -2034,18 +2080,18 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
Selector Sel = PP.getSelectorTable().getSelector(nKeys, &KeyIdents[0]);
if (SuperLoc.isValid())
- return Actions.ActOnSuperMessage(CurScope, SuperLoc, Sel,
+ return Actions.ActOnSuperMessage(getCurScope(), SuperLoc, Sel,
LBracLoc, SelectorLoc, RBracLoc,
Action::MultiExprArg(Actions,
KeyExprs.take(),
KeyExprs.size()));
else if (ReceiverType)
- return Actions.ActOnClassMessage(CurScope, ReceiverType, Sel,
+ return Actions.ActOnClassMessage(getCurScope(), ReceiverType, Sel,
LBracLoc, SelectorLoc, RBracLoc,
Action::MultiExprArg(Actions,
KeyExprs.take(),
KeyExprs.size()));
- return Actions.ActOnInstanceMessage(CurScope, move(ReceiverExpr), Sel,
+ return Actions.ActOnInstanceMessage(getCurScope(), move(ReceiverExpr), Sel,
LBracLoc, SelectorLoc, RBracLoc,
Action::MultiExprArg(Actions,
KeyExprs.take(),
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
index c4e4a52..64a4c16 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
@@ -110,7 +110,7 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP, Token &PackTok) {
LParenLoc, RParenLoc);
}
-// #pragma 'options' 'align' '=' {'natural', 'mac68k', 'power', 'reset'}
+// #pragma 'options' 'align' '=' {'native','natural','packed','power','mac68k','reset'}
void PragmaOptionsHandler::HandlePragma(Preprocessor &PP, Token &OptionsTok) {
SourceLocation OptionsLoc = OptionsTok.getLocation();
@@ -120,7 +120,7 @@ void PragmaOptionsHandler::HandlePragma(Preprocessor &PP, Token &OptionsTok) {
PP.Diag(Tok.getLocation(), diag::warn_pragma_options_expected_align);
return;
}
-
+
PP.Lex(Tok);
if (Tok.isNot(tok::equal)) {
PP.Diag(Tok.getLocation(), diag::warn_pragma_options_expected_equal);
@@ -136,8 +136,12 @@ void PragmaOptionsHandler::HandlePragma(Preprocessor &PP, Token &OptionsTok) {
Action::PragmaOptionsAlignKind Kind = Action::POAK_Natural;
const IdentifierInfo *II = Tok.getIdentifierInfo();
- if (II->isStr("natural"))
+ if (II->isStr("native"))
+ Kind = Action::POAK_Native;
+ else if (II->isStr("natural"))
Kind = Action::POAK_Natural;
+ else if (II->isStr("packed"))
+ Kind = Action::POAK_Packed;
else if (II->isStr("power"))
Kind = Action::POAK_Power;
else if (II->isStr("mac68k"))
@@ -223,7 +227,7 @@ void PragmaUnusedHandler::HandlePragma(Preprocessor &PP, Token &UnusedTok) {
// Perform the action to handle the pragma.
Actions.ActOnPragmaUnused(Identifiers.data(), Identifiers.size(),
- parser.CurScope, UnusedLoc, LParenLoc, RParenLoc);
+ parser.getCurScope(), UnusedLoc, LParenLoc, RParenLoc);
}
// #pragma weak identifier
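
The align-kind hunk above extends an if/else chain of II->isStr() tests with 'native' and 'packed'. The same keyword dispatch can also be written as a small lookup table; the sketch below uses invented names (AlignKind, parseAlignKind) in place of the Action::POAK_* enumerators:

#include <cassert>
#include <cstring>

// Invented enum; the real code maps onto Action::POAK_* values.
enum AlignKind { Native, Natural, Packed, Power, Mac68k, Reset, Unknown };

static AlignKind parseAlignKind(const char *Word) {
  static const struct { const char *Name; AlignKind Kind; } Table[] = {
      {"native", Native}, {"natural", Natural}, {"packed", Packed},
      {"power", Power},   {"mac68k", Mac68k},   {"reset", Reset},
  };
  for (const auto &Entry : Table)              // linear scan; the list is tiny
    if (std::strcmp(Word, Entry.Name) == 0)
      return Entry.Kind;
  return Unknown;                              // caller diagnoses this case
}

int main() {
  assert(parseAlignKind("native") == Native);  // newly accepted keyword
  assert(parseAlignKind("packed") == Packed);  // newly accepted keyword
  assert(parseAlignKind("banana") == Unknown);
}
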
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
index d9d06a1..929ec46 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
@@ -23,8 +23,8 @@ namespace clang {
class PragmaOptionsHandler : public PragmaHandler {
Action &Actions;
public:
- PragmaOptionsHandler(const IdentifierInfo *N, Action &A) : PragmaHandler(N),
- Actions(A) {}
+ explicit PragmaOptionsHandler(Action &A) : PragmaHandler("options"),
+ Actions(A) {}
virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
};
@@ -32,8 +32,8 @@ public:
class PragmaPackHandler : public PragmaHandler {
Action &Actions;
public:
- PragmaPackHandler(const IdentifierInfo *N, Action &A) : PragmaHandler(N),
- Actions(A) {}
+ explicit PragmaPackHandler(Action &A) : PragmaHandler("pack"),
+ Actions(A) {}
virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
};
@@ -42,8 +42,8 @@ class PragmaUnusedHandler : public PragmaHandler {
Action &Actions;
Parser &parser;
public:
- PragmaUnusedHandler(const IdentifierInfo *N, Action &A, Parser& p)
- : PragmaHandler(N), Actions(A), parser(p) {}
+ PragmaUnusedHandler(Action &A, Parser& p)
+ : PragmaHandler("unused"), Actions(A), parser(p) {}
virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
};
@@ -51,8 +51,8 @@ public:
class PragmaWeakHandler : public PragmaHandler {
Action &Actions;
public:
- PragmaWeakHandler(const IdentifierInfo *N, Action &A)
- : PragmaHandler(N), Actions(A) {}
+ explicit PragmaWeakHandler(Action &A)
+ : PragmaHandler("weak"), Actions(A) {}
virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
};
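
The ParsePragma.h hunks move the pragma's name out of the registration site and into each handler's constructor, so the registrar no longer needs an IdentifierInfo (see the matching Parser.cpp hunks below). A simplified sketch of that self-naming-handler pattern, with invented stand-in types:

#include <iostream>
#include <map>
#include <memory>
#include <string>

class PragmaHandlerSketch {
  std::string Name;
public:
  explicit PragmaHandlerSketch(std::string N) : Name(std::move(N)) {}
  virtual ~PragmaHandlerSketch() {}
  const std::string &getName() const { return Name; }
  virtual void handle() = 0;
};

class PackHandlerSketch : public PragmaHandlerSketch {
public:
  PackHandlerSketch() : PragmaHandlerSketch("pack") {}  // handler names itself
  void handle() override { std::cout << "#pragma pack seen\n"; }
};

int main() {
  std::map<std::string, std::unique_ptr<PragmaHandlerSketch>> Handlers;
  std::unique_ptr<PragmaHandlerSketch> H(new PackHandlerSketch());
  const std::string Key = H->getName();  // registrar asks the handler its name
  Handlers[Key] = std::move(H);
  Handlers["pack"]->handle();            // dispatch by pragma keyword
}
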
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
index 98c0058..c908ed9 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
@@ -77,6 +77,8 @@ Parser::OwningStmtResult
Parser::ParseStatementOrDeclaration(bool OnlyStatement) {
const char *SemiError = 0;
OwningStmtResult Res(Actions);
+
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
CXX0XAttributeList Attr;
if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier())
@@ -96,8 +98,8 @@ Parser::ParseStatementOrDeclaration(bool OnlyStatement) {
}
case tok::code_completion:
- Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_Statement);
- ConsumeToken();
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_Statement);
+ ConsumeCodeCompletionToken();
return ParseStatementOrDeclaration(OnlyStatement);
case tok::identifier:
@@ -282,7 +284,7 @@ Parser::OwningStmtResult Parser::ParseCaseStatement(AttributeList *Attr) {
SourceLocation CaseLoc = ConsumeToken(); // eat the 'case'.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteCase(CurScope);
+ Actions.CodeCompleteCase(getCurScope());
ConsumeCodeCompletionToken();
}
@@ -402,7 +404,7 @@ Parser::OwningStmtResult Parser::ParseDefaultStatement(AttributeList *Attr) {
return StmtError();
return Actions.ActOnDefaultStmt(DefaultLoc, ColonLoc,
- move(SubStmt), CurScope);
+ move(SubStmt), getCurScope());
}
@@ -552,7 +554,7 @@ bool Parser::ParseParenExprOrCondition(OwningExprResult &ExprResult,
// If required, convert to a boolean value.
if (!ExprResult.isInvalid() && ConvertToBoolean)
ExprResult
- = Actions.ActOnBooleanCondition(CurScope, Loc, move(ExprResult));
+ = Actions.ActOnBooleanCondition(getCurScope(), Loc, move(ExprResult));
}
// If the parser was confused by the condition and we don't have a ')', try to
@@ -668,10 +670,10 @@ Parser::OwningStmtResult Parser::ParseIfStatement(AttributeList *Attr) {
// Regardless of whether or not InnerScope actually pushed a scope, set the
// ElseScope flag for the innermost scope so we can diagnose use of the if
// condition variable in C++.
- unsigned OldFlags = CurScope->getFlags();
- CurScope->setFlags(OldFlags | Scope::ElseScope);
+ unsigned OldFlags = getCurScope()->getFlags();
+ getCurScope()->setFlags(OldFlags | Scope::ElseScope);
ElseStmt = ParseStatement();
- CurScope->setFlags(OldFlags);
+ getCurScope()->setFlags(OldFlags);
// Pop the 'else' scope if needed.
InnerScope.Exit();
@@ -997,7 +999,7 @@ Parser::OwningStmtResult Parser::ParseForStatement(AttributeList *Attr) {
DeclPtrTy SecondVar;
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteOrdinaryName(CurScope,
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
C99orCXXorObjC? Action::CCC_ForInit
: Action::CCC_Expression);
ConsumeCodeCompletionToken();
@@ -1061,7 +1063,7 @@ Parser::OwningStmtResult Parser::ParseForStatement(AttributeList *Attr) {
else {
Second = ParseExpression();
if (!Second.isInvalid())
- Second = Actions.ActOnBooleanCondition(CurScope, ForLoc,
+ Second = Actions.ActOnBooleanCondition(getCurScope(), ForLoc,
move(Second));
}
SecondPartIsInvalid = Second.isInvalid();
@@ -1170,7 +1172,7 @@ Parser::OwningStmtResult Parser::ParseContinueStatement(AttributeList *Attr) {
delete Attr;
SourceLocation ContinueLoc = ConsumeToken(); // eat the 'continue'.
- return Actions.ActOnContinueStmt(ContinueLoc, CurScope);
+ return Actions.ActOnContinueStmt(ContinueLoc, getCurScope());
}
/// ParseBreakStatement
@@ -1184,7 +1186,7 @@ Parser::OwningStmtResult Parser::ParseBreakStatement(AttributeList *Attr) {
delete Attr;
SourceLocation BreakLoc = ConsumeToken(); // eat the 'break'.
- return Actions.ActOnBreakStmt(BreakLoc, CurScope);
+ return Actions.ActOnBreakStmt(BreakLoc, getCurScope());
}
/// ParseReturnStatement
@@ -1199,6 +1201,13 @@ Parser::OwningStmtResult Parser::ParseReturnStatement(AttributeList *Attr) {
OwningExprResult R(Actions);
if (Tok.isNot(tok::semi)) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteReturn(getCurScope());
+ ConsumeCodeCompletionToken();
+ SkipUntil(tok::semi, false, true);
+ return StmtError();
+ }
+
R = ParseExpression();
if (R.isInvalid()) { // Skip to the semicolon, but don't consume it.
SkipUntil(tok::semi, false, true);
@@ -1588,7 +1597,7 @@ Parser::OwningStmtResult Parser::ParseCXXCatchBlock() {
return StmtError();
Declarator ExDecl(DS, Declarator::CXXCatchContext);
ParseDeclarator(ExDecl);
- ExceptionDecl = Actions.ActOnExceptionDeclarator(CurScope, ExDecl);
+ ExceptionDecl = Actions.ActOnExceptionDeclarator(getCurScope(), ExDecl);
} else
ConsumeToken();
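
In the if/else hunk above, the parser saves the scope's flag word, ORs in Scope::ElseScope while the else branch is parsed, and restores the old flags afterwards. The same save/OR/restore idiom in miniature (ScopeSketch and the flag value are invented for illustration):

#include <cassert>

struct ScopeSketch {                       // invented stand-in for clang::Scope
  unsigned Flags = 0;
  unsigned getFlags() const { return Flags; }
  void setFlags(unsigned F) { Flags = F; }
};

enum : unsigned { ElseScopeFlag = 0x800 }; // illustrative bit, not clang's value

int main() {
  ScopeSketch S;
  S.setFlags(0x3);                         // pretend some flags are already set
  unsigned OldFlags = S.getFlags();
  S.setFlags(OldFlags | ElseScopeFlag);    // entering the else branch
  assert(S.getFlags() & ElseScopeFlag);    // visible while parsing the branch
  S.setFlags(OldFlags);                    // restored on the way out
  assert(!(S.getFlags() & ElseScopeFlag));
}
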
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
index c87ddad..e1aaf91 100644
--- a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
@@ -201,7 +201,7 @@ Parser::ParseSingleDeclarationAfterTemplate(
if (Tok.is(tok::semi)) {
DeclEnd = ConsumeToken();
- DeclPtrTy Decl = Actions.ParsedFreeStandingDeclSpec(CurScope, AS, DS);
+ DeclPtrTy Decl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS);
DS.complete(Decl);
return Decl;
}
@@ -238,7 +238,7 @@ Parser::ParseSingleDeclarationAfterTemplate(
}
if (DeclaratorInfo.isFunctionDeclarator() &&
- isStartOfFunctionDefinition()) {
+ isStartOfFunctionDefinition(DeclaratorInfo)) {
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
Diag(Tok, diag::err_function_declared_typedef);
@@ -341,8 +341,37 @@ Parser::ParseTemplateParameterList(unsigned Depth,
/// \brief Determine whether the parser is at the start of a template
/// type parameter.
bool Parser::isStartOfTemplateTypeParameter() {
- if (Tok.is(tok::kw_class))
- return true;
+ if (Tok.is(tok::kw_class)) {
+ // "class" may be the start of an elaborated-type-specifier or a
+ // type-parameter. Per C++ [temp.param]p3, we prefer the type-parameter.
+ switch (NextToken().getKind()) {
+ case tok::equal:
+ case tok::comma:
+ case tok::greater:
+ case tok::greatergreater:
+ case tok::ellipsis:
+ return true;
+
+ case tok::identifier:
+ // This may be either a type-parameter or an elaborated-type-specifier.
+ // We have to look further.
+ break;
+
+ default:
+ return false;
+ }
+
+ switch (GetLookAheadToken(2).getKind()) {
+ case tok::equal:
+ case tok::comma:
+ case tok::greater:
+ case tok::greatergreater:
+ return true;
+
+ default:
+ return false;
+ }
+ }
if (Tok.isNot(tok::kw_typename))
return false;
@@ -442,22 +471,19 @@ Parser::DeclPtrTy Parser::ParseTypeParameter(unsigned Depth, unsigned Position){
return DeclPtrTy();
}
- DeclPtrTy TypeParam = Actions.ActOnTypeParameter(CurScope, TypenameKeyword,
- Ellipsis, EllipsisLoc,
- KeyLoc, ParamName, NameLoc,
- Depth, Position);
-
- // Grab a default type id (if given).
+ // Grab a default argument (if available).
+ // Per C++0x [basic.scope.pdecl]p9, we parse the default argument before
+ // we introduce the type parameter into the local scope.
+ SourceLocation EqualLoc;
+ TypeTy *DefaultArg = 0;
if (Tok.is(tok::equal)) {
- SourceLocation EqualLoc = ConsumeToken();
- SourceLocation DefaultLoc = Tok.getLocation();
- TypeResult DefaultType = ParseTypeName();
- if (!DefaultType.isInvalid())
- Actions.ActOnTypeParameterDefault(TypeParam, EqualLoc, DefaultLoc,
- DefaultType.get());
+ EqualLoc = ConsumeToken();
+ DefaultArg = ParseTypeName().get();
}
-
- return TypeParam;
+
+ return Actions.ActOnTypeParameter(getCurScope(), TypenameKeyword, Ellipsis,
+ EllipsisLoc, KeyLoc, ParamName, NameLoc,
+ Depth, Position, EqualLoc, DefaultArg);
}
/// ParseTemplateTemplateParameter - Handle the parsing of template
@@ -512,28 +538,28 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
TemplateParams.size(),
RAngleLoc);
- Parser::DeclPtrTy Param
- = Actions.ActOnTemplateTemplateParameter(CurScope, TemplateLoc,
- ParamList, ParamName,
- NameLoc, Depth, Position);
-
-  // Get a default value, if given.
+ // Grab a default argument (if available).
+ // Per C++0x [basic.scope.pdecl]p9, we parse the default argument before
+ // we introduce the template parameter into the local scope.
+ SourceLocation EqualLoc;
+ ParsedTemplateArgument DefaultArg;
if (Tok.is(tok::equal)) {
- SourceLocation EqualLoc = ConsumeToken();
- ParsedTemplateArgument Default = ParseTemplateTemplateArgument();
- if (Default.isInvalid()) {
+ EqualLoc = ConsumeToken();
+ DefaultArg = ParseTemplateTemplateArgument();
+ if (DefaultArg.isInvalid()) {
Diag(Tok.getLocation(),
diag::err_default_template_template_parameter_not_template);
static const tok::TokenKind EndToks[] = {
tok::comma, tok::greater, tok::greatergreater
};
SkipUntil(EndToks, 3, true, true);
- return Param;
- } else if (Param)
- Actions.ActOnTemplateTemplateParameterDefault(Param, EqualLoc, Default);
+ }
}
-
- return Param;
+
+ return Actions.ActOnTemplateTemplateParameter(getCurScope(), TemplateLoc,
+ ParamList, ParamName,
+ NameLoc, Depth, Position,
+ EqualLoc, DefaultArg);
}
/// ParseNonTypeTemplateParameter - Handle the parsing of non-type
@@ -542,13 +568,6 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
/// template-parameter:
/// ...
/// parameter-declaration
-///
-/// NOTE: It would be ideal to simply call out to ParseParameterDeclaration(),
-/// but that didn't work out too well. Instead, this tries to recreate the basic
-/// parsing of parameter declarations, but tries to constrain it for template
-/// parameters.
-/// FIXME: We need to make a ParseParameterDeclaration that works for
-/// non-type template parameters and normal function parameters.
Parser::DeclPtrTy
Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
SourceLocation StartLoc = Tok.getLocation();
@@ -572,13 +591,13 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
return DeclPtrTy();
}
- // Create the parameter.
- DeclPtrTy Param = Actions.ActOnNonTypeTemplateParameter(CurScope, ParamDecl,
- Depth, Position);
-
// If there is a default value, parse it.
+ // Per C++0x [basic.scope.pdecl]p9, we parse the default argument before
+ // we introduce the template parameter into the local scope.
+ SourceLocation EqualLoc;
+ OwningExprResult DefaultArg(Actions);
if (Tok.is(tok::equal)) {
- SourceLocation EqualLoc = ConsumeToken();
+ EqualLoc = ConsumeToken();
// C++ [temp.param]p15:
// When parsing a default template-argument for a non-type
@@ -587,15 +606,15 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
// operator.
GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
- OwningExprResult DefaultArg = ParseAssignmentExpression();
+ DefaultArg = ParseAssignmentExpression();
if (DefaultArg.isInvalid())
SkipUntil(tok::comma, tok::greater, true, true);
- else if (Param)
- Actions.ActOnNonTypeTemplateParameterDefault(Param, EqualLoc,
- move(DefaultArg));
}
- return Param;
+ // Create the parameter.
+ return Actions.ActOnNonTypeTemplateParameter(getCurScope(), ParamDecl,
+ Depth, Position, EqualLoc,
+ move(DefaultArg));
}
/// \brief Parses a template-id that after the template name has
@@ -885,15 +904,14 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
// If the next token signals the end of a template argument,
// then we have a dependent template name that could be a template
// template argument.
- if (isEndOfTemplateArgument(Tok)) {
- TemplateTy Template
- = Actions.ActOnDependentTemplateName(TemplateLoc, SS, Name,
+ TemplateTy Template;
+ if (isEndOfTemplateArgument(Tok) &&
+ Actions.ActOnDependentTemplateName(getCurScope(), TemplateLoc, SS, Name,
/*ObjectType=*/0,
- /*EnteringContext=*/false);
- if (Template.get())
- return ParsedTemplateArgument(SS, Template, Name.StartLocation);
- }
- }
+ /*EnteringContext=*/false,
+ Template))
+ return ParsedTemplateArgument(SS, Template, Name.StartLocation);
+ }
} else if (Tok.is(tok::identifier)) {
// We may have a (non-dependent) template name.
TemplateTy Template;
@@ -903,7 +921,7 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
if (isEndOfTemplateArgument(Tok)) {
bool MemberOfUnknownSpecialization;
- TemplateNameKind TNK = Actions.isTemplateName(CurScope, SS, Name,
+ TemplateNameKind TNK = Actions.isTemplateName(getCurScope(), SS, Name,
/*ObjectType=*/0,
/*EnteringContext=*/false,
Template,
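
The three parameter-parsing hunks above all make the same change: the default argument is parsed before ActOnTypeParameter, ActOnTemplateTemplateParameter, or ActOnNonTypeTemplateParameter introduces the parameter into scope, because per C++ [basic.scope.pdecl] a template parameter is not visible inside its own default argument. The classic example from the standard shows why the ordering is observable:

typedef unsigned char T;

template <class T = T>  // lookup in the default argument finds the typedef:
struct S {              // the parameter is not in scope until after its own
  T Member;             // complete template-parameter; inside S, T is the
};                      // template parameter

int main() {
  S<> A;                // Member is unsigned char, via the typedef
  S<int> B;             // Member is int
  return sizeof(A.Member) == 1 && sizeof(B.Member) == sizeof(int) ? 0 : 1;
}
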
diff --git a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
index 2968970..ac78f11 100644
--- a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
@@ -26,30 +26,24 @@ Parser::Parser(Preprocessor &pp, Action &actions)
GreaterThanIsOperator(true), ColonIsSacred(false),
TemplateParameterDepth(0) {
Tok.setKind(tok::eof);
- CurScope = 0;
+ Actions.CurScope = 0;
NumCachedScopes = 0;
ParenCount = BracketCount = BraceCount = 0;
ObjCImpDecl = DeclPtrTy();
// Add #pragma handlers. These are removed and destroyed in the
// destructor.
- OptionsHandler.reset(new
- PragmaOptionsHandler(&PP.getIdentifierTable().get("options"),
- actions));
- PP.AddPragmaHandler(0, OptionsHandler.get());
-
- PackHandler.reset(new
- PragmaPackHandler(&PP.getIdentifierTable().get("pack"), actions));
- PP.AddPragmaHandler(0, PackHandler.get());
-
- UnusedHandler.reset(new
- PragmaUnusedHandler(&PP.getIdentifierTable().get("unused"), actions,
- *this));
- PP.AddPragmaHandler(0, UnusedHandler.get());
-
- WeakHandler.reset(new
- PragmaWeakHandler(&PP.getIdentifierTable().get("weak"), actions));
- PP.AddPragmaHandler(0, WeakHandler.get());
+ OptionsHandler.reset(new PragmaOptionsHandler(actions));
+ PP.AddPragmaHandler(OptionsHandler.get());
+
+ PackHandler.reset(new PragmaPackHandler(actions));
+ PP.AddPragmaHandler(PackHandler.get());
+
+ UnusedHandler.reset(new PragmaUnusedHandler(actions, *this));
+ PP.AddPragmaHandler(UnusedHandler.get());
+
+ WeakHandler.reset(new PragmaWeakHandler(actions));
+ PP.AddPragmaHandler(WeakHandler.get());
}
/// If a crash happens while the parser is active, print out a line indicating
@@ -261,25 +255,25 @@ bool Parser::SkipUntil(const tok::TokenKind *Toks, unsigned NumToks,
void Parser::EnterScope(unsigned ScopeFlags) {
if (NumCachedScopes) {
Scope *N = ScopeCache[--NumCachedScopes];
- N->Init(CurScope, ScopeFlags);
- CurScope = N;
+ N->Init(getCurScope(), ScopeFlags);
+ Actions.CurScope = N;
} else {
- CurScope = new Scope(CurScope, ScopeFlags);
+ Actions.CurScope = new Scope(getCurScope(), ScopeFlags);
}
- CurScope->setNumErrorsAtStart(Diags.getNumErrors());
+ getCurScope()->setNumErrorsAtStart(Diags.getNumErrors());
}
/// ExitScope - Pop a scope off the scope stack.
void Parser::ExitScope() {
- assert(CurScope && "Scope imbalance!");
+ assert(getCurScope() && "Scope imbalance!");
// Inform the actions module that this scope is going away if there are any
// decls in it.
- if (!CurScope->decl_empty())
- Actions.ActOnPopScope(Tok.getLocation(), CurScope);
+ if (!getCurScope()->decl_empty())
+ Actions.ActOnPopScope(Tok.getLocation(), getCurScope());
- Scope *OldScope = CurScope;
- CurScope = OldScope->getParent();
+ Scope *OldScope = getCurScope();
+ Actions.CurScope = OldScope->getParent();
if (NumCachedScopes == ScopeCacheSize)
delete OldScope;
@@ -296,20 +290,21 @@ void Parser::ExitScope() {
Parser::~Parser() {
// If we still have scopes active, delete the scope tree.
- delete CurScope;
-
+ delete getCurScope();
+ Actions.CurScope = 0;
+
// Free the scope cache.
for (unsigned i = 0, e = NumCachedScopes; i != e; ++i)
delete ScopeCache[i];
// Remove the pragma handlers we installed.
- PP.RemovePragmaHandler(0, OptionsHandler.get());
+ PP.RemovePragmaHandler(OptionsHandler.get());
OptionsHandler.reset();
- PP.RemovePragmaHandler(0, PackHandler.get());
+ PP.RemovePragmaHandler(PackHandler.get());
PackHandler.reset();
- PP.RemovePragmaHandler(0, UnusedHandler.get());
+ PP.RemovePragmaHandler(UnusedHandler.get());
UnusedHandler.reset();
- PP.RemovePragmaHandler(0, WeakHandler.get());
+ PP.RemovePragmaHandler(WeakHandler.get());
WeakHandler.reset();
}
@@ -320,9 +315,9 @@ void Parser::Initialize() {
ConsumeToken();
// Create the translation unit scope. Install it as the current scope.
- assert(CurScope == 0 && "A scope is already active?");
+ assert(getCurScope() == 0 && "A scope is already active?");
EnterScope(Scope::DeclScope);
- Actions.ActOnTranslationUnitScope(Tok.getLocation(), CurScope);
+ Actions.ActOnTranslationUnitScope(Tok.getLocation(), getCurScope());
if (Tok.is(tok::eof) &&
!getLang().CPlusPlus) // Empty source file is an extension in C
@@ -375,7 +370,7 @@ void Parser::ParseTranslationUnit() {
/*parse them all*/;
ExitScope();
- assert(CurScope == 0 && "Scope imbalance!");
+ assert(getCurScope() == 0 && "Scope imbalance!");
}
/// ParseExternalDeclaration:
@@ -401,6 +396,8 @@ void Parser::ParseTranslationUnit() {
///
/// [C++0x/GNU] 'extern' 'template' declaration
Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(CXX0XAttributeList Attr) {
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
DeclPtrTy SingleDecl;
switch (Tok.getKind()) {
case tok::semi:
@@ -455,7 +452,7 @@ Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(CXX0XAttributeList Attr)
SingleDecl = ParseObjCMethodDefinition();
break;
case tok::code_completion:
- Actions.CodeCompleteOrdinaryName(CurScope,
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
ObjCImpDecl? Action::CCC_ObjCImplementation
: Action::CCC_Namespace);
ConsumeCodeCompletionToken();
@@ -497,7 +494,7 @@ Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(CXX0XAttributeList Attr)
/// \brief Determine whether the current token, if it occurs after a
/// declarator, continues a declaration or declaration list.
-bool Parser::isDeclarationAfterDeclarator() {
+bool Parser::isDeclarationAfterDeclarator() const {
return Tok.is(tok::equal) || // int X()= -> not a function def
Tok.is(tok::comma) || // int X(), -> not a function def
Tok.is(tok::semi) || // int X(); -> not a function def
@@ -509,12 +506,17 @@ bool Parser::isDeclarationAfterDeclarator() {
/// \brief Determine whether the current token, if it occurs after a
/// declarator, indicates the start of a function definition.
-bool Parser::isStartOfFunctionDefinition() {
+bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
+ assert(Declarator.getTypeObject(0).Kind == DeclaratorChunk::Function &&
+ "Isn't a function declarator");
if (Tok.is(tok::l_brace)) // int X() {}
return true;
- if (!getLang().CPlusPlus)
- return isDeclarationSpecifier(); // int X(f) int f; {}
+ // Handle K&R C argument lists: int X(f) int f; {}
+ if (!getLang().CPlusPlus &&
+ Declarator.getTypeObject(0).Fun.isKNRPrototype())
+ return isDeclarationSpecifier();
+
return Tok.is(tok::colon) || // X() : Base() {} (used for ctors)
Tok.is(tok::kw_try); // X() try { ... }
}
@@ -549,7 +551,7 @@ Parser::ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
// declaration-specifiers init-declarator-list[opt] ';'
if (Tok.is(tok::semi)) {
ConsumeToken();
- DeclPtrTy TheDecl = Actions.ParsedFreeStandingDeclSpec(CurScope, AS, DS);
+ DeclPtrTy TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS);
DS.complete(TheDecl);
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
@@ -637,7 +639,7 @@ Parser::DeclPtrTy Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// If this declaration was formed with a K&R-style identifier list for the
// arguments, parse declarations for all of the args next.
// int foo(a,b) int a; float b; {}
- if (!FTI.hasPrototype && FTI.NumArgs != 0)
+ if (FTI.isKNRPrototype())
ParseKNRParamDeclarations(D);
// We should have either an opening brace or, in a C++ constructor,
@@ -660,12 +662,12 @@ Parser::DeclPtrTy Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// Tell the actions module that we have entered a function definition with the
// specified Declarator for the function.
DeclPtrTy Res = TemplateInfo.TemplateParams?
- Actions.ActOnStartOfFunctionTemplateDef(CurScope,
+ Actions.ActOnStartOfFunctionTemplateDef(getCurScope(),
Action::MultiTemplateParamsArg(Actions,
TemplateInfo.TemplateParams->data(),
TemplateInfo.TemplateParams->size()),
D)
- : Actions.ActOnStartOfFunctionDef(CurScope, D);
+ : Actions.ActOnStartOfFunctionDef(getCurScope(), D);
// Break out of the ParsingDeclarator context before we parse the body.
D.complete(Res);
@@ -751,7 +753,7 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
// Ask the actions module to compute the type for this declarator.
Action::DeclPtrTy Param =
- Actions.ActOnParamDeclarator(CurScope, ParmDeclarator);
+ Actions.ActOnParamDeclarator(getCurScope(), ParmDeclarator);
if (Param &&
// A missing identifier has already been diagnosed.
@@ -807,7 +809,7 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
}
// The actions module must verify that all arguments were declared.
- Actions.ActOnFinishKNRParamDeclarations(CurScope, D, Tok.getLocation());
+ Actions.ActOnFinishKNRParamDeclarations(getCurScope(), D, Tok.getLocation());
}
@@ -919,7 +921,8 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
TypeResult Ty;
if (Tok.is(tok::identifier)) {
// FIXME: check whether the next token is '<', first!
- Ty = Actions.ActOnTypenameType(TypenameLoc, SS, *Tok.getIdentifierInfo(),
+ Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS,
+ *Tok.getIdentifierInfo(),
Tok.getLocation());
} else if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId
@@ -934,7 +937,8 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
assert(Tok.is(tok::annot_typename) &&
"AnnotateTemplateIdTokenAsType isn't working properly");
if (Tok.getAnnotationValue())
- Ty = Actions.ActOnTypenameType(TypenameLoc, SS, SourceLocation(),
+ Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS,
+ SourceLocation(),
Tok.getAnnotationValue());
else
Ty = true;
@@ -964,7 +968,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
if (Tok.is(tok::identifier)) {
// Determine whether the identifier is a type name.
if (TypeTy *Ty = Actions.getTypeName(*Tok.getIdentifierInfo(),
- Tok.getLocation(), CurScope, &SS)) {
+ Tok.getLocation(), getCurScope(), &SS)) {
// This is a typename. Replace the current token in-place with an
// annotation type token.
Tok.setKind(tok::annot_typename);
@@ -993,7 +997,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
TemplateName.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
bool MemberOfUnknownSpecialization;
if (TemplateNameKind TNK
- = Actions.isTemplateName(CurScope, SS, TemplateName,
+ = Actions.isTemplateName(getCurScope(), SS, TemplateName,
/*ObjectType=*/0, EnteringContext,
Template, MemberOfUnknownSpecialization)) {
// Consume the identifier.
@@ -1084,19 +1088,19 @@ bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
}
void Parser::CodeCompletionRecovery() {
- for (Scope *S = CurScope; S; S = S->getParent()) {
+ for (Scope *S = getCurScope(); S; S = S->getParent()) {
if (S->getFlags() & Scope::FnScope) {
- Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_RecoveryInFunction);
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_RecoveryInFunction);
return;
}
if (S->getFlags() & Scope::ClassScope) {
- Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_Class);
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_Class);
return;
}
}
- Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_Namespace);
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_Namespace);
}
// Anchor the Parser::FieldCallback vtable to this translation unit.
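
The Parser.cpp hunks above move ownership of the current scope from Parser::CurScope into the Actions object, with getCurScope() as the accessor used throughout the earlier files. A stripped-down sketch of that ownership/accessor split (all type names here are invented):

struct ScopeSketch {
  ScopeSketch *Parent;
  explicit ScopeSketch(ScopeSketch *P) : Parent(P) {}
};

struct ActionsSketch {
  ScopeSketch *CurScope = nullptr;   // the semantic layer now owns the pointer
};

class ParserSketch {
  ActionsSketch &Actions;
public:
  explicit ParserSketch(ActionsSketch &A) : Actions(A) {}
  ScopeSketch *getCurScope() const { return Actions.CurScope; }
  void EnterScope() { Actions.CurScope = new ScopeSketch(getCurScope()); }
  void ExitScope() {                 // mirrors the cache-free path above
    ScopeSketch *Old = getCurScope();
    Actions.CurScope = Old->Parent;
    delete Old;
  }
};

int main() {
  ActionsSketch A;
  ParserSketch P(A);
  P.EnterScope();                    // translation-unit scope
  P.EnterScope();                    // a nested scope
  P.ExitScope();
  P.ExitScope();
  return A.CurScope != nullptr;      // 0: enter/exit calls balanced
}
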
diff --git a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
index 06bbbc2..addc795 100644
--- a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
+++ b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
@@ -80,6 +80,23 @@ namespace clang {
}
};
+ /// \brief RAII object that makes sure paren/bracket/brace count is correct
+ /// after declaration/statement parsing, even when there's a parsing error.
+ class ParenBraceBracketBalancer {
+ Parser &P;
+ unsigned short ParenCount, BracketCount, BraceCount;
+ public:
+ ParenBraceBracketBalancer(Parser &p)
+ : P(p), ParenCount(p.ParenCount), BracketCount(p.BracketCount),
+ BraceCount(p.BraceCount) { }
+
+ ~ParenBraceBracketBalancer() {
+ P.ParenCount = ParenCount;
+ P.BracketCount = BracketCount;
+ P.BraceCount = BraceCount;
+ }
+ };
+
} // end namespace clang
#endif
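
ParenBraceBracketBalancer is plain save-in-constructor, restore-in-destructor RAII: the delimiter counts are reset even when an error path unwinds out of a statement early. The same idea in a generic miniature (similar in spirit to, though not, LLVM's SaveAndRestore helper):

#include <cassert>

template <typename T> class SaveAndRestoreSketch {
  T &Ref;
  T Saved;
public:
  explicit SaveAndRestoreSketch(T &V) : Ref(V), Saved(V) {}
  ~SaveAndRestoreSketch() { Ref = Saved; }  // runs on every exit path
};

int main() {
  unsigned short ParenCount = 0;
  {
    SaveAndRestoreSketch<unsigned short> Balancer(ParenCount);
    ParenCount = 7;                         // simulate an unbalanced parse error
  }                                         // destructor restores the count here
  assert(ParenCount == 0);
}
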
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Rewrite/CMakeLists.txt
index ce9e1ed..ce728af 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/lib/Rewrite/CMakeLists.txt
@@ -2,8 +2,14 @@ set(LLVM_NO_RTTI 1)
add_clang_library(clangRewrite
DeltaTree.cpp
+ FixItRewriter.cpp
+ FrontendActions.cpp
+ HTMLPrint.cpp
HTMLRewrite.cpp
+ RewriteMacros.cpp
+ RewriteObjC.cpp
RewriteRope.cpp
+ RewriteTest.cpp
Rewriter.cpp
TokenRewriter.cpp
)
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FixItRewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp
index 7c9a566..29ac7e3 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/FixItRewriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp
@@ -13,7 +13,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/FixItRewriter.h"
+#include "clang/Rewrite/FixItRewriter.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
new file mode 100644
index 0000000..6da3b4b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
@@ -0,0 +1,106 @@
+//===--- FrontendActions.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/FrontendActions.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Rewrite/ASTConsumers.h"
+#include "clang/Rewrite/FixItRewriter.h"
+#include "clang/Rewrite/Rewriters.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Path.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// AST Consumer Actions
+//===----------------------------------------------------------------------===//
+
+ASTConsumer *HTMLPrintAction::CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ if (llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, InFile))
+ return CreateHTMLPrinter(OS, CI.getPreprocessor());
+ return 0;
+}
+
+FixItAction::FixItAction() {}
+FixItAction::~FixItAction() {}
+
+ASTConsumer *FixItAction::CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ return new ASTConsumer();
+}
+
+class FixItActionSuffixInserter : public FixItPathRewriter {
+ std::string NewSuffix;
+
+public:
+ explicit FixItActionSuffixInserter(std::string NewSuffix)
+ : NewSuffix(NewSuffix) {}
+
+ std::string RewriteFilename(const std::string &Filename) {
+ llvm::sys::Path Path(Filename);
+ std::string Suffix = Path.getSuffix();
+ Path.eraseSuffix();
+ Path.appendSuffix(NewSuffix + "." + Suffix);
+ return Path.c_str();
+ }
+};
+
+bool FixItAction::BeginSourceFileAction(CompilerInstance &CI,
+ llvm::StringRef Filename) {
+ const FrontendOptions &FEOpts = getCompilerInstance().getFrontendOpts();
+ if (!FEOpts.FixItSuffix.empty()) {
+ PathRewriter.reset(new FixItActionSuffixInserter(FEOpts.FixItSuffix));
+ } else {
+ PathRewriter.reset();
+ }
+ Rewriter.reset(new FixItRewriter(CI.getDiagnostics(), CI.getSourceManager(),
+ CI.getLangOpts(), PathRewriter.get()));
+ return true;
+}
+
+void FixItAction::EndSourceFileAction() {
+ // Otherwise rewrite all files.
+ Rewriter->WriteFixedFiles();
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Actions
+//===----------------------------------------------------------------------===//
+
+ASTConsumer *RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ if (llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp"))
+ return CreateObjCRewriter(InFile, OS,
+ CI.getDiagnostics(), CI.getLangOpts(),
+ CI.getDiagnosticOpts().NoRewriteMacros);
+ return 0;
+}
+
+void RewriteMacrosAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ llvm::raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile());
+ if (!OS) return;
+
+ RewriteMacrosInInput(CI.getPreprocessor(), OS);
+}
+
+void RewriteTestAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, getCurrentFile());
+ if (!OS) return;
+
+ DoRewriteTest(CI.getPreprocessor(), OS);
+}
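
FixItActionSuffixInserter above rewrites "dir/foo.c" to "dir/foo.<suffix>.c" using llvm::sys::Path. An llvm-free sketch of the same filename transformation using only std::string (insertSuffix is an invented helper; for brevity it ignores dots in directory names):

#include <cassert>
#include <string>

static std::string insertSuffix(const std::string &Filename,
                                const std::string &NewSuffix) {
  std::string::size_type Dot = Filename.rfind('.');
  if (Dot == std::string::npos)
    return Filename + "." + NewSuffix;       // no extension: just append
  return Filename.substr(0, Dot) + "." + NewSuffix + Filename.substr(Dot);
}

int main() {
  assert(insertSuffix("foo.c", "fixit") == "foo.fixit.c");
  assert(insertSuffix("README", "fixit") == "README.fixit");
}
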
diff --git a/contrib/llvm/tools/clang/lib/Frontend/HTMLPrint.cpp b/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp
index 9ea8cb3..f66bfcb 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/HTMLPrint.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Rewrite/ASTConsumers.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Makefile b/contrib/llvm/tools/clang/lib/Rewrite/Makefile
index 04c3530..1c5b8a8 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/Makefile
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Makefile
@@ -11,11 +11,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangRewrite
BUILD_ARCHIVE = 1
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/Frontend/RewriteMacros.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp
index 954e8e2..910fa6b 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/RewriteMacros.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp
@@ -12,7 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/Utils.h"
+#include "clang/Rewrite/Rewriters.h"
#include "clang/Rewrite/Rewriter.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/SourceManager.h"
diff --git a/contrib/llvm/tools/clang/lib/Frontend/RewriteObjC.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
index 5dd7bdf..489fec9 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/RewriteObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Rewrite/ASTConsumers.h"
#include "clang/Rewrite/Rewriter.h"
#include "clang/AST/AST.h"
#include "clang/AST/ASTConsumer.h"
@@ -268,6 +268,8 @@ namespace {
void RewriteMethodDeclaration(ObjCMethodDecl *Method);
void RewriteProperty(ObjCPropertyDecl *prop);
void RewriteFunctionDecl(FunctionDecl *FD);
+ void RewriteBlockPointerType(std::string& Str, QualType Type);
+ void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD);
void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD);
void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl);
void RewriteTypeOfDecl(VarDecl *VD);
@@ -835,11 +837,12 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
Getr += ")"; // close the precedence "scope" for "*".
// Now, emit the argument types (if any).
- if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) {
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){
Getr += "(";
for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
if (i) Getr += ", ";
- std::string ParamStr = FT->getArgType(i).getAsString();
+ std::string ParamStr = FT->getArgType(i).getAsString(
+ Context->PrintingPolicy);
Getr += ParamStr;
}
if (FT->isVariadic()) {
@@ -1047,11 +1050,12 @@ void RewriteObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr,
else if (const BlockPointerType *BPT = retType->getAs<BlockPointerType>())
PointeeTy = BPT->getPointeeType();
if ((FPRetType = PointeeTy->getAs<FunctionType>())) {
- ResultStr += FPRetType->getResultType().getAsString();
+ ResultStr += FPRetType->getResultType().getAsString(
+ Context->PrintingPolicy);
ResultStr += "(*";
}
} else
- ResultStr += T.getAsString();
+ ResultStr += T.getAsString(Context->PrintingPolicy);
}
void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD,
@@ -1107,10 +1111,11 @@ void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD,
ResultStr += " *";
}
else
- ResultStr += Context->getObjCClassType().getAsString();
+ ResultStr += Context->getObjCClassType().getAsString(
+ Context->PrintingPolicy);
ResultStr += " self, ";
- ResultStr += Context->getObjCSelType().getAsString();
+ ResultStr += Context->getObjCSelType().getAsString(Context->PrintingPolicy);
ResultStr += " _cmd";
// Method arguments.
@@ -1144,7 +1149,8 @@ void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD,
ResultStr += "(";
for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
if (i) ResultStr += ", ";
- std::string ParamStr = FT->getArgType(i).getAsString();
+ std::string ParamStr = FT->getArgType(i).getAsString(
+ Context->PrintingPolicy);
ResultStr += ParamStr;
}
if (FT->isVariadic()) {
@@ -1560,7 +1566,7 @@ Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
// Simply use 'id' for all qualified types.
elementTypeAsString = "id";
else
- elementTypeAsString = ElementType.getAsString();
+ elementTypeAsString = ElementType.getAsString(Context->PrintingPolicy);
buf += elementTypeAsString;
buf += " ";
elementName = D->getNameAsCString();
@@ -1576,7 +1582,7 @@ Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
// Simply use 'id' for all qualified types.
elementTypeAsString = "id";
else
- elementTypeAsString = VD->getType().getAsString();
+ elementTypeAsString = VD->getType().getAsString(Context->PrintingPolicy);
}
// struct __objcFastEnumerationState enumState = { 0 };
@@ -2107,8 +2113,8 @@ CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl(
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *Exp =
- new (Context) CallExpr(*Context, ICE, args, nargs, FT->getResultType(),
- EndLoc);
+ new (Context) CallExpr(*Context, ICE, args, nargs,
+ FT->getCallResultType(*Context), EndLoc);
return Exp;
}
@@ -2275,7 +2281,7 @@ void RewriteObjC::RewriteTypeOfDecl(VarDecl *ND) {
}
// FIXME. This will not work for multiple declarators; as in:
// __typeof__(a) b,c,d;
- std::string TypeAsString(QT.getAsString());
+ std::string TypeAsString(QT.getAsString(Context->PrintingPolicy));
SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
const char *startBuf = SM->getCharacterData(DeclLoc);
if (ND->getInit()) {
@@ -2326,8 +2332,8 @@ void RewriteObjC::RewriteFunctionDecl(FunctionDecl *FD) {
RewriteObjCQualifiedInterfaceTypes(FD);
}
-static void RewriteBlockPointerType(std::string& Str, QualType Type) {
- std::string TypeString(Type.getAsString());
+void RewriteObjC::RewriteBlockPointerType(std::string& Str, QualType Type) {
+ std::string TypeString(Type.getAsString(Context->PrintingPolicy));
const char *argPtr = TypeString.c_str();
if (!strchr(argPtr, '^')) {
Str += TypeString;
@@ -2340,9 +2346,10 @@ static void RewriteBlockPointerType(std::string& Str, QualType Type) {
}
// FIXME. Consolidate this routine with RewriteBlockPointerType.
-static void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD) {
+void RewriteObjC::RewriteBlockPointerTypeVariable(std::string& Str,
+ ValueDecl *VD) {
QualType Type = VD->getType();
- std::string TypeString(Type.getAsString());
+ std::string TypeString(Type.getAsString(Context->PrintingPolicy));
const char *argPtr = TypeString.c_str();
int paren = 0;
while (*argPtr) {
@@ -2376,7 +2383,7 @@ void RewriteObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) {
if (!proto)
return;
QualType Type = proto->getResultType();
- std::string FdStr = Type.getAsString();
+ std::string FdStr = Type.getAsString(Context->PrintingPolicy);
FdStr += " ";
FdStr += FD->getNameAsCString();
FdStr += "(";
@@ -4099,7 +4106,7 @@ std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
const FunctionType *AFT = CE->getFunctionType();
QualType RT = AFT->getResultType();
std::string StructRef = "struct " + Tag;
- std::string S = "static " + RT.getAsString() + " __" +
+ std::string S = "static " + RT.getAsString(Context->PrintingPolicy) + " __" +
funcName + "_" + "block_func_" + utostr(i);
BlockDecl *BD = CE->getBlockDecl();
@@ -5644,7 +5651,7 @@ void RewriteObjC::HandleDeclInMainFile(Decl *D) {
RewriteBlocksInFunctionProtoType(FD->getType(), FD);
// FIXME: If this should support Obj-C++, support CXXTryStmt
- if (CompoundStmt *Body = FD->getCompoundBody()) {
+ if (CompoundStmt *Body = dyn_cast_or_null<CompoundStmt>(FD->getBody())) {
CurFunctionDef = FD;
CurFunctionDeclToDeclareForBlock = FD;
CollectPropertySetters(Body);
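
The recurring change in this file threads the ASTContext's PrintingPolicy into QualType::getAsString so rewritten type names honor the active language options. A minimal sketch, assuming a valid ASTContext *Context (variable names are illustrative):

    QualType T = Context->BoolTy;
    std::string Plain = T.getAsString();                        // default policy
    std::string Tuned = T.getAsString(Context->PrintingPolicy); // e.g. "bool" in
                                                                // C++, "_Bool" in C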
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp
index fdb6fc3..e290921 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp
@@ -532,7 +532,7 @@ RopePieceBTreeInterior::HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS) {
(getNumChildren()-i-1)*sizeof(Children[0]));
Children[i+1] = RHS;
++NumChildren;
- return false;
+ return 0;
}
// Okay, this node is full. Split it in half, moving WidthFactor children to
diff --git a/contrib/llvm/tools/clang/lib/Frontend/RewriteTest.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp
index 0414678..3620700 100644
--- a/contrib/llvm/tools/clang/lib/Frontend/RewriteTest.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/Utils.h"
+#include "clang/Rewrite/Rewriters.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Rewrite/TokenRewriter.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
index 376678a..92e2b03 100644
--- a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
@@ -40,7 +40,7 @@ void RewriteBuffer::RemoveText(unsigned OrigOffset, unsigned Size) {
AddReplaceDelta(OrigOffset, -Size);
}
-void RewriteBuffer::InsertText(unsigned OrigOffset, const llvm::StringRef &Str,
+void RewriteBuffer::InsertText(unsigned OrigOffset, llvm::StringRef Str,
bool InsertAfter) {
// Nothing to insert, exit early.
@@ -57,7 +57,7 @@ void RewriteBuffer::InsertText(unsigned OrigOffset, const llvm::StringRef &Str,
/// buffer with a new string. This is effectively a combined "remove+insert"
/// operation.
void RewriteBuffer::ReplaceText(unsigned OrigOffset, unsigned OrigLength,
- const llvm::StringRef &NewStr) {
+ llvm::StringRef NewStr) {
unsigned RealOffset = getMappedOffset(OrigOffset, true);
Buffer.erase(RealOffset, OrigLength);
Buffer.insert(RealOffset, NewStr.begin(), NewStr.end());
@@ -72,7 +72,7 @@ void RewriteBuffer::ReplaceText(unsigned OrigOffset, unsigned OrigLength,
/// getRangeSize - Return the size in bytes of the specified range if its
/// endpoints are in the same file. If not, this returns -1.
-int Rewriter::getRangeSize(SourceRange Range) const {
+int Rewriter::getRangeSize(const CharSourceRange &Range) const {
if (!isRewritable(Range.getBegin()) ||
!isRewritable(Range.getEnd())) return -1;
@@ -97,12 +97,18 @@ int Rewriter::getRangeSize(SourceRange Range) const {
// Adjust the end offset to the end of the last token, instead of being the
- // start of the last token.
- EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
+ // start of the last token if this is a token range.
+ if (Range.isTokenRange())
+ EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
return EndOff-StartOff;
}
+int Rewriter::getRangeSize(SourceRange Range) const {
+ return getRangeSize(CharSourceRange::getTokenRange(Range));
+}
+
+
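The new SourceRange overload simply treats the range as a token range; only token ranges need the Lexer::MeasureTokenLength adjustment above. A hedged sketch, assuming a Rewriter Rewrite and SourceLocations B and E:

    int TokLen  = Rewrite.getRangeSize(CharSourceRange::getTokenRange(B, E));
    int CharLen = Rewrite.getRangeSize(CharSourceRange::getCharRange(B, E));
    // TokLen extends past E to the end of the token that starts there;
    // CharLen treats E itself as the (exclusive) end of the range.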
/// getRewrittenText - Return the rewritten form of the text in the specified
/// range. If the start or end of the range was unrewritable or if they are
/// in different buffers, this returns an empty string.
@@ -179,7 +185,7 @@ RewriteBuffer &Rewriter::getEditBuffer(FileID FID) {
/// InsertText - Insert the specified string at the specified location in the
/// original buffer.
-bool Rewriter::InsertText(SourceLocation Loc, const llvm::StringRef &Str,
+bool Rewriter::InsertText(SourceLocation Loc, llvm::StringRef Str,
bool InsertAfter) {
if (!isRewritable(Loc)) return true;
FileID FID;
@@ -201,7 +207,7 @@ bool Rewriter::RemoveText(SourceLocation Start, unsigned Length) {
/// buffer with a new string. This is effectively a combined "remove/insert"
/// operation.
bool Rewriter::ReplaceText(SourceLocation Start, unsigned OrigLength,
- const llvm::StringRef &NewStr) {
+ llvm::StringRef NewStr) {
if (!isRewritable(Start)) return true;
FileID StartFileID;
unsigned StartOffs = getLocationOffsetAndFileID(Start, StartFileID);
diff --git a/contrib/llvm/tools/clang/lib/Sema/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Sema/CMakeLists.txt
index b54e8eb..70b4792 100644
--- a/contrib/llvm/tools/clang/lib/Sema/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/lib/Sema/CMakeLists.txt
@@ -34,4 +34,5 @@ add_clang_library(clangSema
TargetAttributesSema.cpp
)
-add_dependencies(clangSema ClangDiagnosticSema ClangStmtNodes)
+add_dependencies(clangSema ClangARMNeon ClangAttrClasses ClangAttrList
+ ClangDiagnosticSema ClangDeclNodes ClangStmtNodes)
diff --git a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
index 543c1b6..3431ac6 100644
--- a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
@@ -65,6 +65,7 @@ class JumpScopeChecker {
public:
JumpScopeChecker(Stmt *Body, Sema &S);
private:
+ void BuildScopeInformation(Decl *D, unsigned &ParentScope);
void BuildScopeInformation(Stmt *S, unsigned ParentScope);
void VerifyJumps();
void VerifyIndirectJumps();
@@ -130,11 +131,13 @@ static std::pair<unsigned,unsigned>
InDiag = diag::note_protected_by_variable_init;
CanQualType T = VD->getType()->getCanonicalTypeUnqualified();
- while (CanQual<ArrayType> AT = T->getAs<ArrayType>())
- T = AT->getElementType();
- if (CanQual<RecordType> RT = T->getAs<RecordType>())
- if (!cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor())
- OutDiag = diag::note_exits_dtor;
+ if (!T->isDependentType()) {
+ while (CanQual<ArrayType> AT = T->getAs<ArrayType>())
+ T = AT->getElementType();
+ if (CanQual<RecordType> RT = T->getAs<RecordType>())
+ if (!cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor())
+ OutDiag = diag::note_exits_dtor;
+ }
}
return std::make_pair(InDiag, OutDiag);
@@ -148,13 +151,33 @@ static std::pair<unsigned,unsigned>
return std::make_pair(0U, 0U);
}
+/// \brief Build scope information for a declaration that is part of a DeclStmt.
+void JumpScopeChecker::BuildScopeInformation(Decl *D, unsigned &ParentScope) {
+ bool isCPlusPlus = this->S.getLangOptions().CPlusPlus;
+
+ // If this decl causes a new scope, push and switch to it.
+ std::pair<unsigned,unsigned> Diags
+ = GetDiagForGotoScopeDecl(D, isCPlusPlus);
+ if (Diags.first || Diags.second) {
+ Scopes.push_back(GotoScope(ParentScope, Diags.first, Diags.second,
+ D->getLocation()));
+ ParentScope = Scopes.size()-1;
+ }
+
+ // If the decl has an initializer, walk it with the potentially new
+ // scope we just installed.
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (Expr *Init = VD->getInit())
+ BuildScopeInformation(Init, ParentScope);
+}
/// BuildScopeInformation - The statements from CI to CE are known to form a
/// coherent VLA scope with a specified parent node. Walk through the
/// statements, adding any labels or gotos to LabelAndGotoScopes and recursively
/// walking the AST as needed.
void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
-
+ bool SkipFirstSubStmt = false;
+
// If we found a label, remember that it is in ParentScope scope.
switch (S->getStmtClass()) {
case Stmt::LabelStmtClass:
@@ -172,8 +195,16 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
IndirectJumps.push_back(cast<IndirectGotoStmt>(S));
break;
- case Stmt::GotoStmtClass:
case Stmt::SwitchStmtClass:
+ // Evaluate the condition variable before entering the scope of the switch
+ // statement.
+ if (VarDecl *Var = cast<SwitchStmt>(S)->getConditionVariable()) {
+ BuildScopeInformation(Var, ParentScope);
+ SkipFirstSubStmt = true;
+ }
+ // Fall through
+
+ case Stmt::GotoStmtClass:
// Remember both what scope a goto is in as well as the fact that we have
// it. This makes the second scan not have to walk the AST again.
LabelAndGotoScopes[S] = ParentScope;
@@ -186,33 +217,22 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end(); CI != E;
++CI) {
+ if (SkipFirstSubStmt) {
+ SkipFirstSubStmt = false;
+ continue;
+ }
+
Stmt *SubStmt = *CI;
if (SubStmt == 0) continue;
- bool isCPlusPlus = this->S.getLangOptions().CPlusPlus;
-
// If this is a declstmt with a VLA definition, it defines a scope from here
// to the end of the containing context.
if (DeclStmt *DS = dyn_cast<DeclStmt>(SubStmt)) {
// The decl statement creates a scope if any of the decls in it are VLAs
// or have the cleanup attribute.
for (DeclStmt::decl_iterator I = DS->decl_begin(), E = DS->decl_end();
- I != E; ++I) {
- // If this decl causes a new scope, push and switch to it.
- std::pair<unsigned,unsigned> Diags
- = GetDiagForGotoScopeDecl(*I, isCPlusPlus);
- if (Diags.first || Diags.second) {
- Scopes.push_back(GotoScope(ParentScope, Diags.first, Diags.second,
- (*I)->getLocation()));
- ParentScope = Scopes.size()-1;
- }
-
- // If the decl has an initializer, walk it with the potentially new
- // scope we just installed.
- if (VarDecl *VD = dyn_cast<VarDecl>(*I))
- if (Expr *Init = VD->getInit())
- BuildScopeInformation(Init, ParentScope);
- }
+ I != E; ++I)
+ BuildScopeInformation(*I, ParentScope);
continue;
}
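
The SwitchStmt case exists because a condition variable is declared outside the scope that the switch body introduces; its scope must be recorded before the body is walked, and the condition DeclStmt (the first child) is then skipped. A hypothetical C++ snippet the checker now handles:

    goto inside;                  // error: jump bypasses initialization of 'n'
    switch (int n = compute()) {  // 'n' is evaluated before the body's scope
    inside:
    default:
      break;
    }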
diff --git a/contrib/llvm/tools/clang/lib/Sema/Lookup.h b/contrib/llvm/tools/clang/lib/Sema/Lookup.h
index 0961299..271bb5b 100644
--- a/contrib/llvm/tools/clang/lib/Sema/Lookup.h
+++ b/contrib/llvm/tools/clang/lib/Sema/Lookup.h
@@ -424,6 +424,11 @@ public:
Diagnose = false;
}
+ /// Determines whether this lookup is suppressing diagnostics.
+ bool isSuppressingDiagnostics() const {
+ return !Diagnose;
+ }
+
/// Sets a 'context' source range.
void setContextRange(SourceRange SR) {
NameContextRange = SR;
diff --git a/contrib/llvm/tools/clang/lib/Sema/Makefile b/contrib/llvm/tools/clang/lib/Sema/Makefile
index 3a5a99a..90f2dff 100644
--- a/contrib/llvm/tools/clang/lib/Sema/Makefile
+++ b/contrib/llvm/tools/clang/lib/Sema/Makefile
@@ -12,11 +12,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangSema
BUILD_ARCHIVE = 1
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
index 523b196..cddc84e 100644
--- a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
@@ -17,6 +17,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/APFloat.h"
+#include "clang/Sema/ExternalSemaSource.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
@@ -43,7 +44,10 @@ void Sema::ActOnTranslationUnitScope(SourceLocation Loc, Scope *S) {
TUScope = S;
PushDeclContext(S, Context.getTranslationUnitDecl());
- if (PP.getTargetInfo().getPointerWidth(0) >= 64) {
+ VAListTagName = PP.getIdentifierInfo("__va_list_tag");
+
+ if (!Context.isInt128Installed() && // May be set by PCHReader.
+ PP.getTargetInfo().getPointerWidth(0) >= 64) {
TypeSourceInfo *TInfo;
// Install [u]int128_t for 64-bit targets.
@@ -58,6 +62,7 @@ void Sema::ActOnTranslationUnitScope(SourceLocation Loc, Scope *S) {
SourceLocation(),
&Context.Idents.get("__uint128_t"),
TInfo), TUScope);
+ Context.setInt128Installed();
}
@@ -122,8 +127,8 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
IdResolver(pp.getLangOptions()), StdNamespace(0), StdBadAlloc(0),
GlobalNewDeleteDeclared(false),
CompleteTranslationUnit(CompleteTranslationUnit),
- NumSFINAEErrors(0), NonInstantiationEntries(0),
- CurrentInstantiationScope(0), TyposCorrected(0),
+ NumSFINAEErrors(0), SuppressAccessChecking(false),
+ NonInstantiationEntries(0), CurrentInstantiationScope(0), TyposCorrected(0),
AnalysisWarnings(*this)
{
TUScope = 0;
@@ -223,7 +228,8 @@ void Sema::ActOnEndOfTranslationUnit() {
// Remove functions that turned out to be used.
UnusedStaticFuncs.erase(std::remove_if(UnusedStaticFuncs.begin(),
UnusedStaticFuncs.end(),
- std::mem_fun(&FunctionDecl::isUsed)),
+ std::bind2nd(std::mem_fun(&FunctionDecl::isUsed),
+ true)),
UnusedStaticFuncs.end());
// Check for #pragma weak identifiers that were never declared
@@ -381,6 +387,34 @@ Sema::Diag(SourceLocation Loc, const PartialDiagnostic& PD) {
return Builder;
}
+/// \brief Determines the active Scope associated with the given declaration
+/// context.
+///
+/// This routine maps a declaration context to the active Scope object that
+/// represents that declaration context in the parser. It is typically used
+/// from "scope-less" code (e.g., template instantiation, lazy creation of
+/// declarations) that injects a name for name-lookup purposes and, therefore,
+/// must update the Scope.
+///
+/// \returns The scope corresponding to the given declaration context, or NULL
+/// if no such scope is open.
+Scope *Sema::getScopeForContext(DeclContext *Ctx) {
+
+ if (!Ctx)
+ return 0;
+
+ Ctx = Ctx->getPrimaryContext();
+ for (Scope *S = getCurScope(); S; S = S->getParent()) {
+ // Ignore scopes that cannot have declarations. This is important for
+ // out-of-line definitions of static class members.
+ if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
+ if (DeclContext *Entity = static_cast<DeclContext *> (S->getEntity()))
+ if (Ctx == Entity->getPrimaryContext())
+ return S;
+ }
+
+ return 0;
+}
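
A hedged usage sketch (Ctx and NewD are hypothetical; PushOnScopeChains is Sema's existing helper for making a declaration visible to name lookup):

    if (Scope *S = getScopeForContext(Ctx))
      PushOnScopeChains(NewD, S);  // inject into the live parser scope
    else
      Ctx->addDecl(NewD);          // no open scope: attach to the context only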
/// \brief Enter a new function scope
void Sema::PushFunctionScope() {
@@ -425,3 +459,6 @@ BlockScopeInfo *Sema::getCurBlock() {
return dyn_cast<BlockScopeInfo>(FunctionScopes.back());
}
+
+// Pin this vtable to this file.
+ExternalSemaSource::~ExternalSemaSource() {}
diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.h b/contrib/llvm/tools/clang/lib/Sema/Sema.h
index dfc45ac..8336918 100644
--- a/contrib/llvm/tools/clang/lib/Sema/Sema.h
+++ b/contrib/llvm/tools/clang/lib/Sema/Sema.h
@@ -118,7 +118,8 @@ struct FunctionScopeInfo {
/// \brief Set true when a function, method contains a VLA or ObjC try block,
/// which introduce scopes that need to be checked for goto conditions. If a
- /// function does not contain this, then it need not have the jump checker run on it.
+ /// function does not contain this, then it need not have the jump checker run
+ /// on it.
bool NeedsScopeChecking;
/// \brief The number of errors that had occurred before starting this
@@ -155,24 +156,25 @@ struct FunctionScopeInfo {
/// \brief Retains information about a block that is currently being parsed.
struct BlockScopeInfo : FunctionScopeInfo {
- llvm::SmallVector<ParmVarDecl*, 8> Params;
- bool hasPrototype;
- bool isVariadic;
bool hasBlockDeclRefExprs;
BlockDecl *TheDecl;
-
+
/// TheScope - This is the scope for the block itself, which contains
/// arguments etc.
Scope *TheScope;
- /// ReturnType - This will get set to block result type, by looking at
- /// return types, if any, in the block body.
+ /// ReturnType - The return type of the block, or null if the block
+ /// signature didn't provide an explicit return type.
QualType ReturnType;
+ /// FunctionType - The function type of the block, if one was given.
+ /// Its return type may be BuiltinType::Dependent.
+ QualType FunctionType;
+
BlockScopeInfo(unsigned NumErrors, Scope *BlockScope, BlockDecl *Block)
- : FunctionScopeInfo(NumErrors), hasPrototype(false), isVariadic(false),
- hasBlockDeclRefExprs(false), TheDecl(Block), TheScope(BlockScope)
+ : FunctionScopeInfo(NumErrors), hasBlockDeclRefExprs(false),
+ TheDecl(Block), TheScope(BlockScope)
{
IsBlockInfo = true;
}
@@ -239,6 +241,10 @@ public:
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
+ /// VAListTagName - The declaration name corresponding to __va_list_tag.
+ /// This is used as part of a hack to omit that class from ADL results.
+ DeclarationName VAListTagName;
+
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
@@ -669,6 +675,8 @@ public:
virtual void ActOnEndOfTranslationUnit();
+ Scope *getScopeForContext(DeclContext *Ctx);
+
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
void PopFunctionOrBlockScope();
@@ -713,9 +721,13 @@ public:
//
QualType adjustParameterType(QualType T);
- QualType BuildPointerType(QualType T, unsigned Quals,
+ QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs);
+ QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVR) {
+ return BuildQualifiedType(T, Loc, Qualifiers::fromCVRMask(CVR));
+ }
+ QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
- QualType BuildReferenceType(QualType T, bool LValueRef, unsigned Quals,
+ QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
@@ -727,13 +739,12 @@ public:
bool Variadic, unsigned Quals,
SourceLocation Loc, DeclarationName Entity);
QualType BuildMemberPointerType(QualType T, QualType Class,
- unsigned Quals, SourceLocation Loc,
+ SourceLocation Loc,
DeclarationName Entity);
- QualType BuildBlockPointerType(QualType T, unsigned Quals,
+ QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
- QualType GetTypeForDeclarator(Declarator &D, Scope *S,
- TypeSourceInfo **TInfo = 0,
- TagDecl **OwnedDecl = 0);
+ TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S,
+ TagDecl **OwnedDecl = 0);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Create a LocInfoType to hold the given QualType and TypeSourceInfo.
@@ -761,8 +772,6 @@ public:
const FunctionProtoType *Target, SourceLocation TargetLoc,
const FunctionProtoType *Source, SourceLocation SourceLoc);
- bool UnwrapSimilarPointerTypes(QualType& T1, QualType& T2);
-
virtual TypeResult ActOnTypeName(Scope *S, Declarator &D);
bool RequireCompleteType(SourceLocation Loc, QualType T,
@@ -837,6 +846,9 @@ public:
bool &OverloadableAttrRequired);
void CheckMain(FunctionDecl *FD);
virtual DeclPtrTy ActOnParamDeclarator(Scope *S, Declarator &D);
+ ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
+ SourceLocation Loc,
+ QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC,
TypeSourceInfo *TSInfo, QualType T,
IdentifierInfo *Name,
@@ -1094,10 +1106,19 @@ public:
/// non-function.
Ovl_NonFunction
};
- OverloadKind CheckOverload(FunctionDecl *New,
+ OverloadKind CheckOverload(Scope *S,
+ FunctionDecl *New,
const LookupResult &OldDecls,
- NamedDecl *&OldDecl);
- bool IsOverload(FunctionDecl *New, FunctionDecl *Old);
+ NamedDecl *&OldDecl,
+ bool IsForUsingDecl);
+ bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
+
+ bool TryImplicitConversion(InitializationSequence &Sequence,
+ const InitializedEntity &Entity,
+ Expr *From,
+ bool SuppressUserConversions,
+ bool AllowExplicit,
+ bool InOverloadResolution);
ImplicitConversionSequence
TryImplicitConversion(Expr* From, QualType ToType,
@@ -1170,6 +1191,16 @@ public:
ImplicitConversionSequence TryContextuallyConvertToObjCId(Expr *From);
bool PerformContextuallyConvertToObjCId(Expr *&From);
+ OwningExprResult
+ ConvertToIntegralOrEnumerationType(SourceLocation Loc, ExprArg FromE,
+ const PartialDiagnostic &NotIntDiag,
+ const PartialDiagnostic &IncompleteDiag,
+ const PartialDiagnostic &ExplicitConvDiag,
+ const PartialDiagnostic &ExplicitConvNote,
+ const PartialDiagnostic &AmbigDiag,
+ const PartialDiagnostic &AmbigNote,
+ const PartialDiagnostic &ConvDiag);
+
bool PerformObjectMemberConversion(Expr *&From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
@@ -1448,6 +1479,8 @@ public:
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
+ DeclContext::lookup_result LookupConstructors(CXXRecordDecl *Class);
+ CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
void ArgumentDependentLookup(DeclarationName Name, bool Operator,
Expr **Args, unsigned NumArgs,
@@ -1457,7 +1490,7 @@ public:
VisibleDeclConsumer &Consumer);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer);
-
+
/// \brief The context in which typo-correction occurs.
///
/// The typo-correction context affects which keywords (if any) are
@@ -1508,7 +1541,7 @@ public:
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
- void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AttrList);
+ void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL);
void WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method,
bool &IncompleteImpl, unsigned DiagID);
@@ -1555,16 +1588,9 @@ public:
/// CollectImmediateProperties - This routine collects all properties in
/// the class and its conforming protocols, but not those in its super class.
void CollectImmediateProperties(ObjCContainerDecl *CDecl,
- llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap);
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap,
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap);
- /// ProtocolConformsToSuperClass - Returns true if class has a super class
- /// and it, or its nested super class conforms to the protocol.
- bool ProtocolConformsToSuperClass(const ObjCInterfaceDecl *IDecl,
- const ObjCProtocolDecl *PDecl);
- /// ProtocolConformsToProtocol - Returns true if 2nd Protocol (PDecl) is
- /// qualified by the 1st.
- bool ProtocolConformsToProtocol(const ObjCProtocolDecl *NestedProtocol,
- const ObjCProtocolDecl *PDecl);
/// LookupPropertyDecl - Looks up a property in the current class and all
/// its protocols.
@@ -1583,7 +1609,7 @@ public:
const bool isReadWrite,
const unsigned Attributes,
bool *isOverridingProperty,
- QualType T,
+ TypeSourceInfo *T,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
@@ -1596,7 +1622,8 @@ public:
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
- const unsigned Attributes, QualType T,
+ const unsigned Attributes,
+ TypeSourceInfo *T,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = 0);
@@ -1935,7 +1962,8 @@ public:
OwningExprResult LookupMemberExpr(LookupResult &R, Expr *&Base,
bool &IsArrow, SourceLocation OpLoc,
CXXScopeSpec &SS,
- DeclPtrTy ObjCImpDecl);
+ DeclPtrTy ObjCImpDecl,
+ bool HasTemplateArgs);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
@@ -2100,6 +2128,7 @@ public:
AttributeList *AttrList);
virtual void ActOnFinishNamespaceDef(DeclPtrTy Dcl, SourceLocation RBrace);
+ NamespaceDecl *getStdNamespace();
virtual DeclPtrTy ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
@@ -2196,26 +2225,69 @@ public:
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
+ /// \brief Declare the implicit default constructor for the given class.
+ ///
+ /// \param ClassDecl The class declaration into which the implicit
+ /// default constructor will be added.
+ ///
+ /// \returns The implicitly-declared default constructor.
+ CXXConstructorDecl *DeclareImplicitDefaultConstructor(
+ CXXRecordDecl *ClassDecl);
+
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
+ /// \brief Declare the implicit destructor for the given class.
+ ///
+ /// \param ClassDecl The class declaration into which the implicit
+ /// destructor will be added.
+ ///
+ /// \returns The implicitly-declared destructor.
+ CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
+
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
- CXXDestructorDecl *Destructor);
+ CXXDestructorDecl *Destructor);
+ /// \brief Declare the implicit copy constructor for the given class.
+ ///
+ /// \param ClassDecl The class declaration into which the implicit
+ /// copy constructor will be added.
+ ///
+ /// \returns The implicitly-declared copy constructor.
+ CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
+
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor,
unsigned TypeQuals);
- /// \brief Defined and implicitly-declared copy assignment operator.
+ /// \brief Declare the implicit copy assignment operator for the given class.
+ ///
+ /// \param ClassDecl The class declaration into which the implicit
+ /// copy-assignment operator will be added.
+ ///
+ /// \returns The implicitly-declared copy assignment operator.
+ CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
+
+ /// \brief Define an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
+ /// \brief Force the declaration of any implicitly-declared members of this
+ /// class.
+ void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
+
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
@@ -2295,7 +2367,7 @@ public:
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
- bool ParenTypeId, Declarator &D,
+ SourceRange TypeIdParens, Declarator &D,
SourceLocation ConstructorLParen,
MultiExprArg ConstructorArgs,
SourceLocation ConstructorRParen);
@@ -2303,7 +2375,7 @@ public:
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
- bool ParenTypeId,
+ SourceRange TypeIdParens,
QualType AllocType,
SourceLocation TypeLoc,
SourceRange TypeRange,
@@ -2529,6 +2601,10 @@ public:
virtual bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS);
+ virtual DeclPtrTy ActOnAccessSpecifier(AccessSpecifier Access,
+ SourceLocation ASLoc,
+ SourceLocation ColonLoc);
+
virtual DeclPtrTy ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
@@ -2605,14 +2681,14 @@ public:
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
- void AddImplicitlyDeclaredMembersToClass(Scope *S, CXXRecordDecl *ClassDecl);
+ void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
virtual void ActOnMemInitializers(DeclPtrTy ConstructorDecl,
SourceLocation ColonLoc,
MemInitTy **MemInits, unsigned NumMemInits,
bool AnyErrors);
- void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
+ void CheckCompletedCXXClass(CXXRecordDecl *Record);
virtual void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
DeclPtrTy TagDecl,
SourceLocation LBrac,
@@ -2644,7 +2720,7 @@ public:
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
FunctionDecl::StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
- QualType CheckDestructorDeclarator(Declarator &D,
+ QualType CheckDestructorDeclarator(Declarator &D, QualType R,
FunctionDecl::StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
@@ -2718,6 +2794,7 @@ public:
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
+
//===--------------------------------------------------------------------===//
// C++ Access Control
//
@@ -2744,7 +2821,8 @@ public:
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
- AccessSpecifier Access);
+ AccessSpecifier Access,
+ bool IsCopyBindingRefToTemp = false);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag);
@@ -2772,6 +2850,12 @@ public:
void HandleDelayedAccessCheck(DelayedDiagnostic &DD, Decl *Ctx);
+ /// A flag to suppress access checking.
+ bool SuppressAccessChecking;
+
+ void ActOnStartSuppressingAccessChecks();
+ void ActOnStopSuppressingAccessChecks();
+
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
@@ -2826,29 +2910,25 @@ public:
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
- unsigned Depth, unsigned Position);
- virtual void ActOnTypeParameterDefault(DeclPtrTy TypeParam,
- SourceLocation EqualLoc,
- SourceLocation DefaultLoc,
- TypeTy *Default);
+ unsigned Depth, unsigned Position,
+ SourceLocation EqualLoc,
+ TypeTy *DefaultArg);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
virtual DeclPtrTy ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
- unsigned Position);
- virtual void ActOnNonTypeTemplateParameterDefault(DeclPtrTy TemplateParam,
- SourceLocation EqualLoc,
- ExprArg Default);
+ unsigned Position,
+ SourceLocation EqualLoc,
+ ExprArg DefaultArg);
virtual DeclPtrTy ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParamsTy *Params,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
- unsigned Position);
- virtual void ActOnTemplateTemplateParameterDefault(DeclPtrTy TemplateParam,
- SourceLocation EqualLoc,
- const ParsedTemplateArgument &Default);
+ unsigned Position,
+ SourceLocation EqualLoc,
+ const ParsedTemplateArgument &DefaultArg);
virtual TemplateParamsTy *
ActOnTemplateParameterList(unsigned Depth,
@@ -2876,7 +2956,8 @@ public:
TemplateParameterList **ParamLists,
unsigned NumParamLists,
bool IsFriend,
- bool &IsExplicitSpecialization);
+ bool &IsExplicitSpecialization,
+ bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
@@ -2912,11 +2993,13 @@ public:
SourceLocation NameLoc,
const TemplateArgumentListInfo &TemplateArgs);
- virtual TemplateTy ActOnDependentTemplateName(SourceLocation TemplateKWLoc,
- CXXScopeSpec &SS,
- UnqualifiedId &Name,
- TypeTy *ObjectType,
- bool EnteringContext);
+ virtual TemplateNameKind ActOnDependentTemplateName(Scope *S,
+ SourceLocation TemplateKWLoc,
+ CXXScopeSpec &SS,
+ UnqualifiedId &Name,
+ TypeTy *ObjectType,
+ bool EnteringContext,
+ TemplateTy &Template);
bool CheckClassTemplatePartialSpecializationArgs(
TemplateParameterList *TemplateParams,
@@ -2940,7 +3023,7 @@ public:
Declarator &D);
virtual DeclPtrTy ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
- MultiTemplateParamsArg TemplateParameterLists,
+ MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
@@ -2948,7 +3031,7 @@ public:
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
- SourceLocation PrevPointOfInstantiation,
+ SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
@@ -3096,25 +3179,29 @@ public:
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
+ /// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
virtual TypeResult
- ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS,
- const IdentifierInfo &II, SourceLocation IdLoc);
+ ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, const IdentifierInfo &II,
+ SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
+ /// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param Ty the type that the typename specifier refers to.
virtual TypeResult
- ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS,
- SourceLocation TemplateLoc, TypeTy *Ty);
+ ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, SourceLocation TemplateLoc,
+ TypeTy *Ty);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
@@ -3478,6 +3565,12 @@ public:
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
+ /// \brief The stack of call expressions undergoing template instantiation.
+ ///
+ /// The top of this stack is used by a fixit instantiating unresolved
+ /// function calls to fix the AST to match the textual change it prints.
+ llvm::SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
+
/// \brief A stack object to be created when performing template
/// instantiation.
///
@@ -3878,7 +3971,7 @@ public:
SourceLocation *IdentLocs,
unsigned NumElts);
- virtual DeclPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
+ virtual DeclPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
const IdentifierLocPair *IdentList,
unsigned NumElts,
AttributeList *attrList);
@@ -4057,6 +4150,9 @@ public:
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
+ /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
+ void AddAlignedAttr(SourceLocation AttrLoc, Decl *D, Expr *E);
+
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
@@ -4232,7 +4328,7 @@ public:
bool IgnoreBaseAccess = false);
bool PerformImplicitConversion(Expr *&From, QualType ToType,
const StandardConversionSequence& SCS,
- AssignmentAction Action, bool IgnoreBaseAccess);
+ AssignmentAction Action,bool IgnoreBaseAccess);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
@@ -4253,11 +4349,12 @@ public:
QualType CheckShiftOperands( // C99 6.5.7
Expr *&lex, Expr *&rex, SourceLocation OpLoc, bool isCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
- Expr *&lex, Expr *&rex, SourceLocation OpLoc, unsigned Opc, bool isRelational);
+ Expr *&lex, Expr *&rex, SourceLocation OpLoc, unsigned Opc,
+ bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
Expr *&lex, Expr *&rex, SourceLocation OpLoc, bool isCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
- Expr *&lex, Expr *&rex, SourceLocation OpLoc);
+ Expr *&lex, Expr *&rex, SourceLocation OpLoc, unsigned Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
@@ -4413,6 +4510,7 @@ public:
//@{
virtual void CodeCompleteOrdinaryName(Scope *S,
CodeCompletionContext CompletionContext);
+ virtual void CodeCompleteExpression(Scope *S, QualType T);
virtual void CodeCompleteMemberReferenceExpr(Scope *S, ExprTy *Base,
SourceLocation OpLoc,
bool IsArrow);
@@ -4420,6 +4518,10 @@ public:
virtual void CodeCompleteCase(Scope *S);
virtual void CodeCompleteCall(Scope *S, ExprTy *Fn,
ExprTy **Args, unsigned NumArgs);
+ virtual void CodeCompleteInitializer(Scope *S, DeclPtrTy D);
+ virtual void CodeCompleteReturn(Scope *S);
+ virtual void CodeCompleteAssignmentRHS(Scope *S, ExprTy *LHS);
+
virtual void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
virtual void CodeCompleteUsing(Scope *S);
@@ -4440,7 +4542,7 @@ public:
virtual void CodeCompleteObjCPropertySetter(Scope *S, DeclPtrTy ClassDecl,
DeclPtrTy *Methods,
unsigned NumMethods);
-
+ virtual void CodeCompleteObjCMessageReceiver(Scope *S);
virtual void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
IdentifierInfo **SelIdents,
unsigned NumSelIdents);
@@ -4473,6 +4575,13 @@ public:
bool IsInstanceMethod,
TypeTy *ReturnType,
DeclPtrTy IDecl);
+ virtual void CodeCompleteObjCMethodDeclSelector(Scope *S,
+ bool IsInstanceMethod,
+ bool AtParameterName,
+ TypeTy *ReturnType,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents);
+
//@}
//===--------------------------------------------------------------------===//
@@ -4491,6 +4600,9 @@ private:
Action::OwningExprResult CheckBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall);
+ bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
@@ -4503,7 +4615,7 @@ private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinObjectSize(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
- bool SemaBuiltinAtomicOverloaded(CallExpr *TheCall);
+ OwningExprResult SemaBuiltinAtomicOverloaded(OwningExprResult TheCallResult);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
index 444ee79..e110e3d 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
@@ -870,6 +870,10 @@ static void DiagnoseAccessPath(Sema &S,
<< BS->getSourceRange()
<< (BaseAccess == AS_protected)
<< (BS->getAccessSpecifierAsWritten() == AS_none);
+
+ if (D)
+ S.Diag(D->getLocation(), diag::note_field_decl);
+
return;
}
}
@@ -1020,6 +1024,9 @@ static Sema::AccessResult CheckAccess(Sema &S, SourceLocation Loc,
if (Entity.getAccess() == AS_public)
return Sema::AR_accessible;
+ if (S.SuppressAccessChecking)
+ return Sema::AR_accessible;
+
// If we're currently parsing a top-level declaration, delay
// diagnostics. This is the only case where parsing a declaration
// can actually change our effective context for the purposes of
@@ -1153,9 +1160,10 @@ Sema::AccessResult Sema::CheckDestructorAccess(SourceLocation Loc,
/// Checks access to a constructor.
Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
- CXXConstructorDecl *Constructor,
- const InitializedEntity &Entity,
- AccessSpecifier Access) {
+ CXXConstructorDecl *Constructor,
+ const InitializedEntity &Entity,
+ AccessSpecifier Access,
+ bool IsCopyBindingRefToTemp) {
if (!getLangOptions().AccessControl ||
Access == AS_public)
return AR_accessible;
@@ -1166,7 +1174,9 @@ Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
QualType());
switch (Entity.getKind()) {
default:
- AccessEntity.setDiag(diag::err_access_ctor);
+ AccessEntity.setDiag(IsCopyBindingRefToTemp
+ ? diag::ext_rvalue_to_reference_access_ctor
+ : diag::err_access_ctor);
break;
case InitializedEntity::EK_Base:
@@ -1327,3 +1337,15 @@ void Sema::CheckLookupAccess(const LookupResult &R) {
}
}
}
+
+void Sema::ActOnStartSuppressingAccessChecks() {
+ assert(!SuppressAccessChecking &&
+ "Tried to start access check suppression when already started.");
+ SuppressAccessChecking = true;
+}
+
+void Sema::ActOnStopSuppressingAccessChecks() {
+ assert(SuppressAccessChecking &&
+ "Tried to stop access check suprression when already stopped.");
+ SuppressAccessChecking = false;
+}
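
The two entry points are meant to bracket a region, as the asserts enforce. A minimal sketch of the intended call pattern (the caller shown here is hypothetical):

    Actions.ActOnStartSuppressingAccessChecks();
    // ... perform lookups whose access must not be diagnosed ...
    Actions.ActOnStopSuppressingAccessChecks();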
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
index 82978c9..69f27b0 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
@@ -135,13 +135,24 @@ void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
return;
}
- // We don't support #pragma options align=power.
switch (Kind) {
+ // For all targets we support native and natural are the same.
+ //
+ // FIXME: This is not true on Darwin/PPC.
+ case POAK_Native:
+ case POAK_Power:
case POAK_Natural:
Context->push(0);
Context->setAlignment(0);
break;
+ // Note that '#pragma options align=packed' is not equivalent to attribute
+ // packed, it has a different precedence relative to attribute aligned.
+ case POAK_Packed:
+ Context->push(0);
+ Context->setAlignment(1);
+ break;
+
case POAK_Mac68k:
// Check if the target supports this.
if (!PP.getTargetInfo().hasAlignMac68kSupport()) {
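
A hedged illustration of the spellings this hunk accepts (native, power, and natural push the default alignment; packed pushes byte alignment):

    #pragma options align=packed
    struct P { char c; int i; };  // 'i' is not padded to a 4-byte boundary
    #pragma options align=reset   // assumed here to restore the prior setting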
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp
index 9b95552..b8e27e7 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp
@@ -153,7 +153,8 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
case tok::kw_const_cast:
if (!TypeDependent)
CheckConstCast(*this, Ex, DestType, OpRange, DestRange);
- return Owned(new (Context) CXXConstCastExpr(DestType.getNonReferenceType(),
+ return Owned(new (Context) CXXConstCastExpr(
+ DestType.getNonLValueExprType(Context),
Ex, DestTInfo, OpLoc));
case tok::kw_dynamic_cast: {
@@ -161,7 +162,8 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
CXXBaseSpecifierArray BasePath;
if (!TypeDependent)
CheckDynamicCast(*this, Ex, DestType, OpRange, DestRange, Kind, BasePath);
- return Owned(new (Context)CXXDynamicCastExpr(DestType.getNonReferenceType(),
+ return Owned(new (Context)CXXDynamicCastExpr(
+ DestType.getNonLValueExprType(Context),
Kind, Ex, BasePath, DestTInfo,
OpLoc));
}
@@ -170,7 +172,7 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
if (!TypeDependent)
CheckReinterpretCast(*this, Ex, DestType, OpRange, DestRange, Kind);
return Owned(new (Context) CXXReinterpretCastExpr(
- DestType.getNonReferenceType(),
+ DestType.getNonLValueExprType(Context),
Kind, Ex, CXXBaseSpecifierArray(),
DestTInfo, OpLoc));
}
@@ -180,7 +182,8 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
if (!TypeDependent)
CheckStaticCast(*this, Ex, DestType, OpRange, Kind, BasePath);
- return Owned(new (Context) CXXStaticCastExpr(DestType.getNonReferenceType(),
+ return Owned(new (Context) CXXStaticCastExpr(
+ DestType.getNonLValueExprType(Context),
Kind, Ex, BasePath,
DestTInfo, OpLoc));
}
@@ -233,6 +236,15 @@ bool UnwrapDissimilarPointerTypes(QualType& T1, QualType& T2) {
T2 = T2MPType->getPointeeType();
return true;
}
+
+ const BlockPointerType *T1BPType = T1->getAs<BlockPointerType>(),
+ *T2BPType = T2->getAs<BlockPointerType>();
+ if (T1BPType && T2BPType) {
+ T1 = T1BPType->getPointeeType();
+ T2 = T2BPType->getPointeeType();
+ return true;
+ }
+
return false;
}
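
With block pointers unwrapped as well, a block-pointer level now participates in the casts-away-constness walk. A hypothetical snippet (requires -fblocks):

    typedef void (^Blk)(void);
    const Blk *p = 0;
    Blk *q = reinterpret_cast<Blk *>(p);  // diagnosed: casts away constness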
@@ -246,9 +258,11 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType) {
// C++ 4.4. We piggyback on Sema::IsQualificationConversion for this, since
// the rules are non-trivial. So first we construct Tcv *...cv* as described
// in C++ 5.2.11p8.
- assert((SrcType->isAnyPointerType() || SrcType->isMemberPointerType()) &&
+ assert((SrcType->isAnyPointerType() || SrcType->isMemberPointerType() ||
+ SrcType->isBlockPointerType()) &&
"Source type is not pointer or pointer to member.");
- assert((DestType->isAnyPointerType() || DestType->isMemberPointerType()) &&
+ assert((DestType->isAnyPointerType() || DestType->isMemberPointerType() ||
+ DestType->isBlockPointerType()) &&
"Destination type is not pointer or pointer to member.");
QualType UnwrappedSrcType = Self.Context.getCanonicalType(SrcType),
@@ -257,10 +271,16 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType) {
// Find the qualifications.
while (UnwrapDissimilarPointerTypes(UnwrappedSrcType, UnwrappedDestType)) {
- cv1.push_back(UnwrappedSrcType.getQualifiers());
- cv2.push_back(UnwrappedDestType.getQualifiers());
+ Qualifiers SrcQuals;
+ Self.Context.getUnqualifiedArrayType(UnwrappedSrcType, SrcQuals);
+ cv1.push_back(SrcQuals);
+
+ Qualifiers DestQuals;
+ Self.Context.getUnqualifiedArrayType(UnwrappedDestType, DestQuals);
+ cv2.push_back(DestQuals);
}
- assert(cv1.size() > 0 && "Must have at least one pointer level.");
+ if (cv1.empty())
+ return false;
// Construct void pointers with those qualifiers (in reverse order of
// unwrapping, of course).
@@ -1014,7 +1034,7 @@ static TryCastResult TryConstCast(Sema &Self, Expr *SrcExpr, QualType DestType,
// in multi-level pointers may change, but the level count must be the same,
// as must be the final pointee type.
while (SrcType != DestType &&
- Self.UnwrapSimilarPointerTypes(SrcType, DestType)) {
+ Self.Context.UnwrapSimilarPointerTypes(SrcType, DestType)) {
Qualifiers Quals;
SrcType = Self.Context.getUnqualifiedArrayType(SrcType, Quals);
DestType = Self.Context.getUnqualifiedArrayType(DestType, Quals);
@@ -1032,6 +1052,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
const SourceRange &OpRange,
unsigned &msg,
CastExpr::CastKind &Kind) {
+ bool IsLValueCast = false;
+
DestType = Self.Context.getCanonicalType(DestType);
QualType SrcType = SrcExpr->getType();
if (const ReferenceType *DestTypeTmp = DestType->getAs<ReferenceType>()) {
@@ -1049,6 +1071,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
// This code does this transformation for the checked types.
DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
SrcType = Self.Context.getPointerType(SrcType);
+ IsLValueCast = true;
}
// Canonicalize source for comparison.
@@ -1075,13 +1098,12 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
}
// A valid member pointer cast.
- Kind = CastExpr::CK_BitCast;
+ Kind = IsLValueCast? CastExpr::CK_LValueBitCast : CastExpr::CK_BitCast;
return TC_Success;
}
// See below for the enumeral issue.
- if (SrcType->isNullPtrType() && DestType->isIntegralType() &&
- !DestType->isEnumeralType()) {
+ if (SrcType->isNullPtrType() && DestType->isIntegralType(Self.Context)) {
// C++0x 5.2.10p4: A pointer can be explicitly converted to any integral
// type large enough to hold it. A value of std::nullptr_t can be
// converted to an integral type; the conversion has the same meaning
@@ -1098,9 +1120,9 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
bool destIsVector = DestType->isVectorType();
bool srcIsVector = SrcType->isVectorType();
if (srcIsVector || destIsVector) {
- bool srcIsScalar = SrcType->isIntegralType() && !SrcType->isEnumeralType();
- bool destIsScalar =
- DestType->isIntegralType() && !DestType->isEnumeralType();
+ // FIXME: Should this also apply to floating point types?
+ bool srcIsScalar = SrcType->isIntegralType(Self.Context);
+ bool destIsScalar = DestType->isIntegralType(Self.Context);
// Check if this is a cast between a vector and something else.
if (!(srcIsScalar && destIsVector) && !(srcIsVector && destIsScalar) &&
@@ -1124,8 +1146,10 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
return TC_Failed;
}
- bool destIsPtr = DestType->isAnyPointerType();
- bool srcIsPtr = SrcType->isAnyPointerType();
+ bool destIsPtr = DestType->isAnyPointerType() ||
+ DestType->isBlockPointerType();
+ bool srcIsPtr = SrcType->isAnyPointerType() ||
+ SrcType->isBlockPointerType();
if (!destIsPtr && !srcIsPtr) {
// Except for std::nullptr_t->integer and lvalue->reference, which are
// handled above, at least one of the two arguments must be a pointer.
@@ -1143,9 +1167,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
return TC_Success;
}
- // Note: Clang treats enumeration types as integral types. If this is ever
- // changed for C++, the additional check here will be redundant.
- if (DestType->isIntegralType() && !DestType->isEnumeralType()) {
+ if (DestType->isIntegralType(Self.Context)) {
assert(srcIsPtr && "One type must be a pointer");
// C++ 5.2.10p4: A pointer can be explicitly converted to any integral
// type large enough to hold it.
@@ -1158,7 +1180,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
return TC_Success;
}
- if (SrcType->isIntegralType() || SrcType->isEnumeralType()) {
+ if (SrcType->isIntegralOrEnumerationType()) {
assert(destIsPtr && "One type must be a pointer");
// C++ 5.2.10p5: A value of integral or enumeration type can be explicitly
// converted to a pointer.
@@ -1178,14 +1200,22 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
msg = diag::err_bad_cxx_cast_const_away;
return TC_Failed;
}
+
+ // Cannot convert between block pointers and Objective-C object pointers.
+ if ((SrcType->isBlockPointerType() && DestType->isObjCObjectPointerType()) ||
+ (DestType->isBlockPointerType() && SrcType->isObjCObjectPointerType()))
+ return TC_NotApplicable;
+
+ // Any pointer can be cast to an Objective-C pointer type with a C-style
+ // cast.
if (CStyle && DestType->isObjCObjectPointerType()) {
Kind = CastExpr::CK_AnyPointerToObjCPointerCast;
return TC_Success;
}
-
+
// Not casting away constness, so the only remaining check is for compatible
// pointer categories.
- Kind = CastExpr::CK_BitCast;
+ Kind = IsLValueCast? CastExpr::CK_LValueBitCast : CastExpr::CK_BitCast;
if (SrcType->isFunctionPointerType()) {
if (DestType->isFunctionPointerType()) {
@@ -1211,7 +1241,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr,
Self.Diag(OpRange.getBegin(), diag::ext_cast_fn_obj) << OpRange;
return TC_Success;
}
-
+
// C++ 5.2.10p7: A pointer to an object can be explicitly converted to
// a pointer to an object of different type.
// Void pointers are not specified, but supported by every compiler out there.
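A minimal illustration of the new lvalue-cast path above (a sketch, assuming an LP64 target where long and double have the same size): the reference form of reinterpret_cast is validated as a cast between pointers to the pointee types and now carries CK_LValueBitCast instead of CK_BitCast.

    // Sketch: checked internally as long* -> double*, emitted as an lvalue
    // bit-cast rather than a copy through a temporary.
    double &asDouble(long &n) {
      return reinterpret_cast<double &>(n);
    }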
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
index c0ec9e9..f56573a 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -96,7 +96,7 @@ DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
// injected class name of the named class template, we're entering
// into that class template definition.
QualType Injected
- = ClassTemplate->getInjectedClassNameSpecialization(Context);
+ = ClassTemplate->getInjectedClassNameSpecialization();
if (Context.hasSameType(Injected, ContextType))
return ClassTemplate->getTemplatedDecl();
@@ -458,8 +458,10 @@ Sema::CXXScopeTy *Sema::BuildCXXNestedNameSpecifier(Scope *S,
if (NamedDecl *ND = Found.getAsSingle<NamedDecl>())
Diag(ND->getLocation(), diag::note_previous_decl)
<< ND->getDeclName();
- } else
+ } else {
Found.clear();
+ Found.setLookupName(&II);
+ }
}
NamedDecl *SD = Found.getAsSingle<NamedDecl>();
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
index 6fc36c2..7ccd0c5 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
@@ -26,7 +26,10 @@
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Basic/TargetInfo.h"
#include <limits>
using namespace clang;
@@ -199,21 +202,119 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Builtin::BI__sync_bool_compare_and_swap:
case Builtin::BI__sync_lock_test_and_set:
case Builtin::BI__sync_lock_release:
- if (SemaBuiltinAtomicOverloaded(TheCall))
- return ExprError();
- break;
-
- // Target specific builtins start here.
+ return SemaBuiltinAtomicOverloaded(move(TheCallResult));
+ }
+
+ // Since the target specific builtins for each arch overlap, only check those
+ // of the arch we are compiling for.
+ if (BuiltinID >= Builtin::FirstTSBuiltin) {
+ switch (Context.Target.getTriple().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
+ return ExprError();
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
+ return ExprError();
+ break;
+ default:
+ break;
+ }
+ }
+
+ return move(TheCallResult);
+}
+
+bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ switch (BuiltinID) {
case X86::BI__builtin_ia32_palignr128:
case X86::BI__builtin_ia32_palignr: {
llvm::APSInt Result;
if (SemaBuiltinConstantArg(TheCall, 2, Result))
- return ExprError();
+ return true;
break;
}
}
+ return false;
+}
- return move(TheCallResult);
+// Get the valid immediate range for the specified NEON type code.
+static unsigned RFT(unsigned t, bool shift = false) {
+ bool quad = t & 0x10;
+
+ switch (t & 0x7) {
+ case 0: // i8
+ return shift ? 7 : (8 << (int)quad) - 1;
+ case 1: // i16
+ return shift ? 15 : (4 << (int)quad) - 1;
+ case 2: // i32
+ return shift ? 31 : (2 << (int)quad) - 1;
+ case 3: // i64
+ return shift ? 63 : (1 << (int)quad) - 1;
+ case 4: // f32
+ assert(!shift && "cannot shift float types!");
+ return (2 << (int)quad) - 1;
+ case 5: // poly8
+ assert(!shift && "cannot shift polynomial types!");
+ return (8 << (int)quad) - 1;
+ case 6: // poly16
+ assert(!shift && "cannot shift polynomial types!");
+ return (4 << (int)quad) - 1;
+ case 7: // float16
+ assert(!shift && "cannot shift float types!");
+ return (4 << (int)quad) - 1;
+ }
+ return 0;
+}
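As a worked example of the table above, a standalone sketch (not Clang code) of how the integer cases decode, assuming bit 0x10 is the "quad" (128-bit) flag and the low three bits select the element type:

    #include <cassert>

    // Model of RFT() for the integer element types only.
    static unsigned maxLaneIndex(unsigned t) {
      bool quad = t & 0x10;                 // 128-bit vector doubles the lanes
      switch (t & 0x7) {
      case 0: return (8 << (int)quad) - 1;  // i8:  lanes 0..7 or 0..15
      case 1: return (4 << (int)quad) - 1;  // i16: lanes 0..3 or 0..7
      case 2: return (2 << (int)quad) - 1;  // i32: lanes 0..1 or 0..3
      case 3: return (1 << (int)quad) - 1;  // i64: lane 0     or 0..1
      }
      return 0;
    }

    int main() {
      assert(maxLaneIndex(0x01) == 3); // int16x4_t
      assert(maxLaneIndex(0x11) == 7); // int16x8_t
    }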
+
+bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ llvm::APSInt Result;
+
+ unsigned mask = 0;
+ unsigned TV = 0;
+ switch (BuiltinID) {
+#define GET_NEON_OVERLOAD_CHECK
+#include "clang/Basic/arm_neon.inc"
+#undef GET_NEON_OVERLOAD_CHECK
+ }
+
+ // For NEON intrinsics which are overloaded on vector element type, validate
+ // the immediate which specifies which variant to emit.
+ if (mask) {
+ unsigned ArgNo = TheCall->getNumArgs()-1;
+ if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
+ return true;
+
+ TV = Result.getLimitedValue(32);
+ if ((TV > 31) || (mask & (1 << TV)) == 0)
+ return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code)
+ << TheCall->getArg(ArgNo)->getSourceRange();
+ }
+
+ // For NEON intrinsics which take an immediate value as part of the
+ // instruction, range check them here.
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default: return false;
+#define GET_NEON_IMMEDIATE_CHECK
+#include "clang/Basic/arm_neon.inc"
+#undef GET_NEON_IMMEDIATE_CHECK
+ };
+
+ // Check that the immediate argument is actually a constant.
+ if (SemaBuiltinConstantArg(TheCall, i, Result))
+ return true;
+
+  // Range check against the upper/lower values for this instruction.
+ unsigned Val = Result.getZExtValue();
+ if (Val < l || Val > (u + l))
+ return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+ << llvm::utostr(l) << llvm::utostr(u+l)
+ << TheCall->getArg(i)->getSourceRange();
+
+ return false;
}
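The two checks compose: the trailing constant argument selects the overload variant (valid only if its bit is set in the tblgen'd mask), and the instruction immediate must lie in [l, l+u], which is what !(Val < l || Val > u + l) tests. A minimal model, with made-up bounds since the real l/u come from arm_neon.inc:

    #include <cassert>

    static bool immOK(unsigned Val, unsigned l, unsigned u) {
      return !(Val < l || Val > u + l);  // i.e. l <= Val <= l + u
    }

    int main() {
      assert(immOK(1, 1, 31));   // e.g. a shift amount allowed in 1..32
      assert(immOK(32, 1, 31));
      assert(!immOK(0, 1, 31));
      assert(!immOK(33, 1, 31));
    }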
/// CheckFunctionCall - Check a direct function call for various correctness
@@ -279,32 +380,40 @@ bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) {
///
/// This function goes through and does final semantic checking for these
/// builtins,
-bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) {
+Sema::OwningExprResult
+Sema::SemaBuiltinAtomicOverloaded(OwningExprResult TheCallResult) {
+ CallExpr *TheCall = (CallExpr *)TheCallResult.get();
DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
// Ensure that we have at least one argument to do type inference from.
- if (TheCall->getNumArgs() < 1)
- return Diag(TheCall->getLocEnd(),
- diag::err_typecheck_call_too_few_args_at_least)
- << 0 << 1 << TheCall->getNumArgs()
- << TheCall->getCallee()->getSourceRange();
+ if (TheCall->getNumArgs() < 1) {
+ Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
+ << 0 << 1 << TheCall->getNumArgs()
+ << TheCall->getCallee()->getSourceRange();
+ return ExprError();
+ }
// Inspect the first argument of the atomic builtin. This should always be
// a pointer type, whose element is an integral scalar or pointer type.
// Because it is a pointer type, we don't have to worry about any implicit
// casts here.
+ // FIXME: We don't allow floating point scalars as input.
Expr *FirstArg = TheCall->getArg(0);
- if (!FirstArg->getType()->isPointerType())
- return Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
- << FirstArg->getType() << FirstArg->getSourceRange();
+ if (!FirstArg->getType()->isPointerType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+ return ExprError();
+ }
- QualType ValType = FirstArg->getType()->getAs<PointerType>()->getPointeeType();
+ QualType ValType =
+ FirstArg->getType()->getAs<PointerType>()->getPointeeType();
if (!ValType->isIntegerType() && !ValType->isPointerType() &&
- !ValType->isBlockPointerType())
- return Diag(DRE->getLocStart(),
- diag::err_atomic_builtin_must_be_pointer_intptr)
- << FirstArg->getType() << FirstArg->getSourceRange();
+ !ValType->isBlockPointerType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intptr)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+ return ExprError();
+ }
// We need to figure out which concrete builtin this maps onto. For example,
// __sync_fetch_and_add with a 2 byte object turns into
@@ -342,8 +451,9 @@ bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) {
case 8: SizeIndex = 3; break;
case 16: SizeIndex = 4; break;
default:
- return Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size)
- << FirstArg->getType() << FirstArg->getSourceRange();
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+ return ExprError();
}
// Each of these builtins has one pointer argument, followed by some number of
@@ -383,12 +493,12 @@ bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) {
// Now that we know how many fixed arguments we expect, first check that we
// have at least that many.
- if (TheCall->getNumArgs() < 1+NumFixed)
- return Diag(TheCall->getLocEnd(),
- diag::err_typecheck_call_too_few_args_at_least)
- << 0 << 1+NumFixed << TheCall->getNumArgs()
- << TheCall->getCallee()->getSourceRange();
-
+ if (TheCall->getNumArgs() < 1+NumFixed) {
+ Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
+ << 0 << 1+NumFixed << TheCall->getNumArgs()
+ << TheCall->getCallee()->getSourceRange();
+ return ExprError();
+ }
// Get the decl for the concrete builtin from this, we can tell what the
// concrete integer type we should convert to is.
@@ -400,6 +510,8 @@ bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) {
TUScope, false, DRE->getLocStart()));
const FunctionProtoType *BuiltinFT =
NewBuiltinDecl->getType()->getAs<FunctionProtoType>();
+
+ QualType OrigValType = ValType;
ValType = BuiltinFT->getArgType(0)->getAs<PointerType>()->getPointeeType();
// If the first type needs to be converted (e.g. void** -> int*), do it now.
@@ -426,7 +538,7 @@ bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) {
CastExpr::CastKind Kind = CastExpr::CK_Unknown;
CXXBaseSpecifierArray BasePath;
if (CheckCastTypes(Arg->getSourceRange(), ValType, Arg, Kind, BasePath))
- return true;
+ return ExprError();
// Okay, we have something that *can* be converted to the right type. Check
// to see if there is a potentially weird extension going on here. This can
@@ -448,10 +560,30 @@ bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) {
UsualUnaryConversions(PromotedCall);
TheCall->setCallee(PromotedCall);
-
// Change the result type of the call to match the result type of the decl.
- TheCall->setType(NewBuiltinDecl->getResultType());
- return false;
+ TheCall->setType(NewBuiltinDecl->getCallResultType());
+
+ // If the value type was converted to an integer when processing the
+ // arguments (e.g. void* -> int), we need to convert the result back.
+ if (!Context.hasSameUnqualifiedType(ValType, OrigValType)) {
+ Expr *E = TheCallResult.takeAs<Expr>();
+
+ assert(ValType->isIntegerType() &&
+ "We always convert atomic operation values to integers.");
+ // FIXME: Handle floating point value type here too.
+ CastExpr::CastKind Kind;
+ if (OrigValType->isIntegerType())
+ Kind = CastExpr::CK_IntegralCast;
+ else if (OrigValType->hasPointerRepresentation())
+ Kind = CastExpr::CK_IntegralToPointer;
+ else
+ llvm_unreachable("Unhandled original value type!");
+
+ ImpCastExprToType(E, OrigValType, Kind);
+ return Owned(E);
+ }
+
+ return move(TheCallResult);
}
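Seen from user code, the OrigValType round trip means a pointer element type needs no casts: the call is checked against the pointer-sized integer variant, and the integer result is cast back with CK_IntegralToPointer. A sketch using the GCC-compatible builtin:

    // Sema rewrites the callee to the concrete size-suffixed builtin and then
    // restores the original pointer result type.
    void *cas(void **slot, void *expected, void *desired) {
      return __sync_val_compare_and_swap(slot, expected, desired);
    }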
@@ -511,7 +643,7 @@ bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) {
BlockScopeInfo *CurBlock = getCurBlock();
bool isVariadic;
if (CurBlock)
- isVariadic = CurBlock->isVariadic;
+ isVariadic = CurBlock->TheDecl->isVariadic();
else if (FunctionDecl *FD = getCurFunctionDecl())
isVariadic = FD->isVariadic();
else
@@ -633,45 +765,54 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
// This is declared to take (...), so we have to check everything.
Action::OwningExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
- if (TheCall->getNumArgs() < 3)
+ if (TheCall->getNumArgs() < 2)
return ExprError(Diag(TheCall->getLocEnd(),
diag::err_typecheck_call_too_few_args_at_least)
- << 0 /*function call*/ << 3 << TheCall->getNumArgs()
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs()
<< TheCall->getSourceRange());
- unsigned numElements = std::numeric_limits<unsigned>::max();
+ // Determine which of the following types of shufflevector we're checking:
+ // 1) unary, vector mask: (lhs, mask)
+ // 2) binary, vector mask: (lhs, rhs, mask)
+ // 3) binary, scalar mask: (lhs, rhs, index, ..., index)
+ QualType resType = TheCall->getArg(0)->getType();
+ unsigned numElements = 0;
+
if (!TheCall->getArg(0)->isTypeDependent() &&
!TheCall->getArg(1)->isTypeDependent()) {
- QualType FAType = TheCall->getArg(0)->getType();
- QualType SAType = TheCall->getArg(1)->getType();
-
- if (!FAType->isVectorType() || !SAType->isVectorType()) {
+ QualType LHSType = TheCall->getArg(0)->getType();
+ QualType RHSType = TheCall->getArg(1)->getType();
+
+ if (!LHSType->isVectorType() || !RHSType->isVectorType()) {
Diag(TheCall->getLocStart(), diag::err_shufflevector_non_vector)
<< SourceRange(TheCall->getArg(0)->getLocStart(),
TheCall->getArg(1)->getLocEnd());
return ExprError();
}
-
- if (!Context.hasSameUnqualifiedType(FAType, SAType)) {
+
+ numElements = LHSType->getAs<VectorType>()->getNumElements();
+ unsigned numResElements = TheCall->getNumArgs() - 2;
+
+ // Check to see if we have a call with 2 vector arguments, the unary shuffle
+ // with mask. If so, verify that RHS is an integer vector type with the
+ // same number of elts as lhs.
+ if (TheCall->getNumArgs() == 2) {
+ if (!RHSType->isIntegerType() ||
+ RHSType->getAs<VectorType>()->getNumElements() != numElements)
+ Diag(TheCall->getLocStart(), diag::err_shufflevector_incompatible_vector)
+ << SourceRange(TheCall->getArg(1)->getLocStart(),
+ TheCall->getArg(1)->getLocEnd());
+ numResElements = numElements;
+ }
+ else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
Diag(TheCall->getLocStart(), diag::err_shufflevector_incompatible_vector)
<< SourceRange(TheCall->getArg(0)->getLocStart(),
TheCall->getArg(1)->getLocEnd());
return ExprError();
- }
-
- numElements = FAType->getAs<VectorType>()->getNumElements();
- if (TheCall->getNumArgs() != numElements+2) {
- if (TheCall->getNumArgs() < numElements+2)
- return ExprError(Diag(TheCall->getLocEnd(),
- diag::err_typecheck_call_too_few_args)
- << 0 /*function call*/
- << numElements+2 << TheCall->getNumArgs()
- << TheCall->getSourceRange());
- return ExprError(Diag(TheCall->getLocEnd(),
- diag::err_typecheck_call_too_many_args)
- << 0 /*function call*/
- << numElements+2 << TheCall->getNumArgs()
- << TheCall->getSourceRange());
+ } else if (numElements != numResElements) {
+ QualType eltType = LHSType->getAs<VectorType>()->getElementType();
+ resType = Context.getVectorType(eltType, numResElements,
+ VectorType::NotAltiVec);
}
}
@@ -680,9 +821,11 @@ Action::OwningExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
TheCall->getArg(i)->isValueDependent())
continue;
- llvm::APSInt Result;
- if (SemaBuiltinConstantArg(TheCall, i, Result))
- return ExprError();
+ llvm::APSInt Result(32);
+ if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context))
+ return ExprError(Diag(TheCall->getLocStart(),
+ diag::err_shufflevector_nonconstant_argument)
+ << TheCall->getArg(i)->getSourceRange());
if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2)
return ExprError(Diag(TheCall->getLocStart(),
@@ -698,7 +841,7 @@ Action::OwningExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
}
return Owned(new (Context) ShuffleVectorExpr(Context, exprs.begin(),
- exprs.size(), exprs[0]->getType(),
+ exprs.size(), resType,
TheCall->getCallee()->getLocStart(),
TheCall->getRParenLoc()));
}
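With the result type now derived from the number of indices (form 3 in the comment above), a shuffle can produce a narrower or wider vector than its inputs. A small example of the scalar-mask form, using Clang's vector extension:

    typedef int v4si __attribute__((vector_size(16)));
    typedef int v2si __attribute__((vector_size(8)));

    // Two indices yield a two-element result from four-element inputs.
    v2si lowHalf(v4si a) {
      return __builtin_shufflevector(a, a, 0, 1);
    }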
@@ -1081,15 +1224,23 @@ public:
unsigned specifierLen);
private:
SourceRange getFormatStringRange();
- SourceRange getFormatSpecifierRange(const char *startSpecifier,
- unsigned specifierLen);
+ CharSourceRange getFormatSpecifierRange(const char *startSpecifier,
+ unsigned specifierLen);
SourceLocation getLocationOfByte(const char *x);
bool HandleAmount(const analyze_printf::OptionalAmount &Amt, unsigned k,
const char *startSpecifier, unsigned specifierLen);
- void HandleFlags(const analyze_printf::FormatSpecifier &FS,
- llvm::StringRef flag, llvm::StringRef cspec,
- const char *startSpecifier, unsigned specifierLen);
+ void HandleInvalidAmount(const analyze_printf::FormatSpecifier &FS,
+ const analyze_printf::OptionalAmount &Amt,
+ unsigned type,
+ const char *startSpecifier, unsigned specifierLen);
+ void HandleFlag(const analyze_printf::FormatSpecifier &FS,
+ const analyze_printf::OptionalFlag &flag,
+ const char *startSpecifier, unsigned specifierLen);
+ void HandleIgnoredFlag(const analyze_printf::FormatSpecifier &FS,
+ const analyze_printf::OptionalFlag &ignoredFlag,
+ const analyze_printf::OptionalFlag &flag,
+ const char *startSpecifier, unsigned specifierLen);
const Expr *getDataArg(unsigned i) const;
};
@@ -1099,10 +1250,15 @@ SourceRange CheckPrintfHandler::getFormatStringRange() {
return OrigFormatExpr->getSourceRange();
}
-SourceRange CheckPrintfHandler::
+CharSourceRange CheckPrintfHandler::
getFormatSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
- return SourceRange(getLocationOfByte(startSpecifier),
- getLocationOfByte(startSpecifier+specifierLen-1));
+ SourceLocation Start = getLocationOfByte(startSpecifier);
+ SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1);
+
+ // Advance the end SourceLocation by one due to half-open ranges.
+ End = End.getFileLocWithOffset(1);
+
+ return CharSourceRange::getCharRange(Start, End);
}
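The off-by-one deserves a worked example: getLocationOfByte points at the last byte of the specifier, but CharSourceRange::getCharRange is half-open, so End must be advanced one byte for the range to cover the whole specifier:

    #include <cassert>

    int main() {
      unsigned B = 10, n = 2;          // e.g. "%d" occupying bytes 10..11
      unsigned End = (B + n - 1) + 1;  // last byte, advanced past itself
      assert(End - B == n);            // [B, End) now spans the specifier
    }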
SourceLocation CheckPrintfHandler::getLocationOfByte(const char *x) {
@@ -1174,16 +1330,6 @@ const Expr *CheckPrintfHandler::getDataArg(unsigned i) const {
return TheCall->getArg(FirstDataArg + i);
}
-void CheckPrintfHandler::HandleFlags(const analyze_printf::FormatSpecifier &FS,
- llvm::StringRef flag,
- llvm::StringRef cspec,
- const char *startSpecifier,
- unsigned specifierLen) {
- const analyze_printf::ConversionSpecifier &CS = FS.getConversionSpecifier();
- S.Diag(getLocationOfByte(CS.getStart()), diag::warn_printf_nonsensical_flag)
- << flag << cspec << getFormatSpecifierRange(startSpecifier, specifierLen);
-}
-
bool
CheckPrintfHandler::HandleAmount(const analyze_printf::OptionalAmount &Amt,
unsigned k, const char *startSpecifier,
@@ -1228,6 +1374,62 @@ CheckPrintfHandler::HandleAmount(const analyze_printf::OptionalAmount &Amt,
return true;
}
+void CheckPrintfHandler::HandleInvalidAmount(
+ const analyze_printf::FormatSpecifier &FS,
+ const analyze_printf::OptionalAmount &Amt,
+ unsigned type,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ const analyze_printf::ConversionSpecifier &CS = FS.getConversionSpecifier();
+ switch (Amt.getHowSpecified()) {
+ case analyze_printf::OptionalAmount::Constant:
+ S.Diag(getLocationOfByte(Amt.getStart()),
+ diag::warn_printf_nonsensical_optional_amount)
+ << type
+ << CS.toString()
+ << getFormatSpecifierRange(startSpecifier, specifierLen)
+ << FixItHint::CreateRemoval(getFormatSpecifierRange(Amt.getStart(),
+ Amt.getConstantLength()));
+ break;
+
+ default:
+ S.Diag(getLocationOfByte(Amt.getStart()),
+ diag::warn_printf_nonsensical_optional_amount)
+ << type
+ << CS.toString()
+ << getFormatSpecifierRange(startSpecifier, specifierLen);
+ break;
+ }
+}
+
+void CheckPrintfHandler::HandleFlag(const analyze_printf::FormatSpecifier &FS,
+ const analyze_printf::OptionalFlag &flag,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ // Warn about pointless flag with a fixit removal.
+ const analyze_printf::ConversionSpecifier &CS = FS.getConversionSpecifier();
+ S.Diag(getLocationOfByte(flag.getPosition()),
+ diag::warn_printf_nonsensical_flag)
+ << flag.toString() << CS.toString()
+ << getFormatSpecifierRange(startSpecifier, specifierLen)
+ << FixItHint::CreateRemoval(getFormatSpecifierRange(flag.getPosition(), 1));
+}
+
+void CheckPrintfHandler::HandleIgnoredFlag(
+ const analyze_printf::FormatSpecifier &FS,
+ const analyze_printf::OptionalFlag &ignoredFlag,
+ const analyze_printf::OptionalFlag &flag,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ // Warn about ignored flag with a fixit removal.
+ S.Diag(getLocationOfByte(ignoredFlag.getPosition()),
+ diag::warn_printf_ignored_flag)
+ << ignoredFlag.toString() << flag.toString()
+ << getFormatSpecifierRange(startSpecifier, specifierLen)
+ << FixItHint::CreateRemoval(getFormatSpecifierRange(
+ ignoredFlag.getPosition(), 1));
+}
+
bool
CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier
&FS,
@@ -1315,34 +1517,57 @@ CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier
return HandleInvalidConversionSpecifier(FS, startSpecifier, specifierLen);
}
- // Are we using '%n'? Issue a warning about this being
- // a possible security issue.
+ // Check for invalid use of field width
+ if (!FS.hasValidFieldWidth()) {
+ HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
+ startSpecifier, specifierLen);
+ }
+
+ // Check for invalid use of precision
+ if (!FS.hasValidPrecision()) {
+ HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
+ startSpecifier, specifierLen);
+ }
+
+ // Check each flag does not conflict with any other component.
+ if (!FS.hasValidLeadingZeros())
+ HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
+ if (!FS.hasValidPlusPrefix())
+ HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
+ if (!FS.hasValidSpacePrefix())
+ HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
+ if (!FS.hasValidAlternativeForm())
+ HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
+ if (!FS.hasValidLeftJustified())
+ HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);
+
+ // Check that flags are not ignored by another flag
+ if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
+ HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
+ startSpecifier, specifierLen);
+ if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
+ HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
+ startSpecifier, specifierLen);
+
+ // Check the length modifier is valid with the given conversion specifier.
+ const LengthModifier &LM = FS.getLengthModifier();
+ if (!FS.hasValidLengthModifier())
+ S.Diag(getLocationOfByte(LM.getStart()),
+ diag::warn_printf_nonsensical_length)
+ << LM.toString() << CS.toString()
+ << getFormatSpecifierRange(startSpecifier, specifierLen)
+ << FixItHint::CreateRemoval(getFormatSpecifierRange(LM.getStart(),
+ LM.getLength()));
+
+ // Are we using '%n'?
if (CS.getKind() == ConversionSpecifier::OutIntPtrArg) {
+ // Issue a warning about this being a possible security issue.
S.Diag(getLocationOfByte(CS.getStart()), diag::warn_printf_write_back)
<< getFormatSpecifierRange(startSpecifier, specifierLen);
// Continue checking the other format specifiers.
return true;
}
- if (CS.getKind() == ConversionSpecifier::VoidPtrArg) {
- if (FS.getPrecision().getHowSpecified() != OptionalAmount::NotSpecified)
- S.Diag(getLocationOfByte(CS.getStart()),
- diag::warn_printf_nonsensical_precision)
- << CS.getCharacters()
- << getFormatSpecifierRange(startSpecifier, specifierLen);
- }
- if (CS.getKind() == ConversionSpecifier::VoidPtrArg ||
- CS.getKind() == ConversionSpecifier::CStrArg) {
- // FIXME: Instead of using "0", "+", etc., eventually get them from
- // the FormatSpecifier.
- if (FS.hasLeadingZeros())
- HandleFlags(FS, "0", CS.getCharacters(), startSpecifier, specifierLen);
- if (FS.hasPlusPrefix())
- HandleFlags(FS, "+", CS.getCharacters(), startSpecifier, specifierLen);
- if (FS.hasSpacePrefix())
- HandleFlags(FS, " ", CS.getCharacters(), startSpecifier, specifierLen);
- }
-
// The remaining checks depend on the data arguments.
if (HasVAListArg)
return true;
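The user-visible effect of the flag validation above, with approximate diagnostic wording (each warning's fixit removes the offending flag character):

    #include <cstdio>

    void flags(int x) {
      printf("%+ d\n", x);   // ' ' is ignored when '+' is present
      printf("%0-4d\n", x);  // '0' is ignored when '-' is present
      printf("%-8s\n", "a"); // '-' is valid with 's': no warning
    }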
@@ -1377,11 +1602,32 @@ CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier
if (ATR.matchesType(S.Context, ICE->getSubExpr()->getType()))
return true;
- S.Diag(getLocationOfByte(CS.getStart()),
- diag::warn_printf_conversion_argument_type_mismatch)
- << ATR.getRepresentativeType(S.Context) << Ex->getType()
- << getFormatSpecifierRange(startSpecifier, specifierLen)
- << Ex->getSourceRange();
+ // We may be able to offer a FixItHint if it is a supported type.
+ FormatSpecifier fixedFS = FS;
+ bool success = fixedFS.fixType(Ex->getType());
+
+ if (success) {
+ // Get the fix string from the fixed format specifier
+ llvm::SmallString<128> buf;
+ llvm::raw_svector_ostream os(buf);
+ fixedFS.toString(os);
+
+ S.Diag(getLocationOfByte(CS.getStart()),
+ diag::warn_printf_conversion_argument_type_mismatch)
+ << ATR.getRepresentativeType(S.Context) << Ex->getType()
+ << getFormatSpecifierRange(startSpecifier, specifierLen)
+ << Ex->getSourceRange()
+ << FixItHint::CreateReplacement(
+ getFormatSpecifierRange(startSpecifier, specifierLen),
+ os.str());
+ }
+ else {
+ S.Diag(getLocationOfByte(CS.getStart()),
+ diag::warn_printf_conversion_argument_type_mismatch)
+ << ATR.getRepresentativeType(S.Context) << Ex->getType()
+ << getFormatSpecifierRange(startSpecifier, specifierLen)
+ << Ex->getSourceRange();
+ }
}
return true;
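For example, when fixType succeeds the mismatch warning now carries a whole-specifier replacement (the exact suggestion shown here is an assumption):

    #include <cstdio>

    void mismatch(long n) {
      printf("%d\n", n); // warning: 'int' specifier, 'long' argument;
                         // fixit replaces "%d" with "%ld"
    }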
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
index d8c1a5c..5528875 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
@@ -119,10 +119,19 @@ namespace {
/// nested-name-specifiers that would otherwise be filtered out.
bool AllowNestedNameSpecifiers;
+ /// \brief If set, the type that we would prefer our resulting value
+ /// declarations to have.
+ ///
+ /// Closely matching the preferred type gives a boost to a result's
+ /// priority.
+ CanQualType PreferredType;
+
/// \brief A list of shadow maps, which is used to model name hiding at
/// different levels of, e.g., the inheritance hierarchy.
std::list<ShadowMap> ShadowMaps;
+ void AdjustResultPriorityForPreferredType(Result &R);
+
public:
explicit ResultBuilder(Sema &SemaRef, LookupFilter Filter = 0)
: SemaRef(SemaRef), Filter(Filter), AllowNestedNameSpecifiers(false) { }
@@ -147,6 +156,11 @@ namespace {
unsigned size() const { return Results.size(); }
bool empty() const { return Results.empty(); }
+ /// \brief Specify the preferred type.
+ void setPreferredType(QualType T) {
+ PreferredType = SemaRef.Context.getCanonicalType(T);
+ }
+
/// \brief Specify whether nested-name-specifiers are allowed.
void allowNestedNameSpecifiers(bool Allow = true) {
AllowNestedNameSpecifiers = Allow;
@@ -212,6 +226,7 @@ namespace {
///
//@{
bool IsOrdinaryName(NamedDecl *ND) const;
+ bool IsOrdinaryNonTypeName(NamedDecl *ND) const;
bool IsOrdinaryNonValueName(NamedDecl *ND) const;
bool IsNestedNameSpecifier(NamedDecl *ND) const;
bool IsEnum(NamedDecl *ND) const;
@@ -222,6 +237,7 @@ namespace {
bool IsType(NamedDecl *ND) const;
bool IsMember(NamedDecl *ND) const;
bool IsObjCIvar(NamedDecl *ND) const;
+ bool IsObjCMessageReceiver(NamedDecl *ND) const;
//@}
};
}
@@ -355,8 +371,6 @@ getRequiredQualification(ASTContext &Context,
Result = NestedNameSpecifier::Create(Context, Result,
false,
Context.getTypeDeclType(TD).getTypePtr());
- else
- assert(Parent->isTranslationUnit());
}
return Result;
}
@@ -393,13 +407,16 @@ bool ResultBuilder::isInterestingDecl(NamedDecl *ND,
return false;
// Filter out names reserved for the implementation (C99 7.1.3,
- // C++ [lib.global.names]). Users don't need to see those.
+ // C++ [lib.global.names]) if they come from a system header.
//
// FIXME: Add predicate for this.
if (Id->getLength() >= 2) {
const char *Name = Id->getNameStart();
if (Name[0] == '_' &&
- (Name[1] == '_' || (Name[1] >= 'A' && Name[1] <= 'Z')))
+ (Name[1] == '_' || (Name[1] >= 'A' && Name[1] <= 'Z')) &&
+ (ND->getLocation().isInvalid() ||
+ SemaRef.SourceMgr.isInSystemHeader(
+ SemaRef.SourceMgr.getSpellingLoc(ND->getLocation()))))
return false;
}
}
@@ -458,6 +475,134 @@ bool ResultBuilder::CheckHiddenResult(Result &R, DeclContext *CurContext,
return false;
}
+enum SimplifiedTypeClass {
+ STC_Arithmetic,
+ STC_Array,
+ STC_Block,
+ STC_Function,
+ STC_ObjectiveC,
+ STC_Other,
+ STC_Pointer,
+ STC_Record,
+ STC_Void
+};
+
+/// \brief A simplified classification of types used to determine whether two
+/// types are "similar enough" when adjusting priorities.
+static SimplifiedTypeClass getSimplifiedTypeClass(CanQualType T) {
+ switch (T->getTypeClass()) {
+ case Type::Builtin:
+ switch (cast<BuiltinType>(T)->getKind()) {
+ case BuiltinType::Void:
+ return STC_Void;
+
+ case BuiltinType::NullPtr:
+ return STC_Pointer;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ case BuiltinType::UndeducedAuto:
+ return STC_Other;
+
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ return STC_ObjectiveC;
+
+ default:
+ return STC_Arithmetic;
+ }
+ return STC_Other;
+
+ case Type::Complex:
+ return STC_Arithmetic;
+
+ case Type::Pointer:
+ return STC_Pointer;
+
+ case Type::BlockPointer:
+ return STC_Block;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ return getSimplifiedTypeClass(T->getAs<ReferenceType>()->getPointeeType());
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ return STC_Array;
+
+ case Type::DependentSizedExtVector:
+ case Type::Vector:
+ case Type::ExtVector:
+ return STC_Arithmetic;
+
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return STC_Function;
+
+ case Type::Record:
+ return STC_Record;
+
+ case Type::Enum:
+ return STC_Arithmetic;
+
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ return STC_ObjectiveC;
+
+ default:
+ return STC_Other;
+ }
+}
+
+/// \brief Get the type that a given expression will have if this declaration
+/// is used as an expression in its "typical" code-completion form.
+static QualType getDeclUsageType(ASTContext &C, NamedDecl *ND) {
+ ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+
+ if (TypeDecl *Type = dyn_cast<TypeDecl>(ND))
+ return C.getTypeDeclType(Type);
+ if (ObjCInterfaceDecl *Iface = dyn_cast<ObjCInterfaceDecl>(ND))
+ return C.getObjCInterfaceType(Iface);
+
+ QualType T;
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(ND))
+ T = Function->getCallResultType();
+ else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
+ T = Method->getSendResultType();
+ else if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ND))
+ T = FunTmpl->getTemplatedDecl()->getCallResultType();
+ else if (EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND))
+ T = C.getTypeDeclType(cast<EnumDecl>(Enumerator->getDeclContext()));
+ else if (ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND))
+ T = Property->getType();
+ else if (ValueDecl *Value = dyn_cast<ValueDecl>(ND))
+ T = Value->getType();
+ else
+ return QualType();
+
+ return T.getNonReferenceType();
+}
+
+void ResultBuilder::AdjustResultPriorityForPreferredType(Result &R) {
+ QualType T = getDeclUsageType(SemaRef.Context, R.Declaration);
+ if (T.isNull())
+ return;
+
+ CanQualType TC = SemaRef.Context.getCanonicalType(T);
+ // Check for exactly-matching types (modulo qualifiers).
+ if (SemaRef.Context.hasSameUnqualifiedType(PreferredType, TC))
+ R.Priority /= CCF_ExactTypeMatch;
+ // Check for nearly-matching types, based on classification of each.
+ else if ((getSimplifiedTypeClass(PreferredType)
+ == getSimplifiedTypeClass(TC)) &&
+ !(PreferredType->isEnumeralType() && TC->isEnumeralType()))
+ R.Priority /= CCF_SimilarTypeMatch;
+}
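Lower priority values sort first, so dividing is a boost. A sketch of the arithmetic, assuming CCF_ExactTypeMatch = 4 and CCF_SimilarTypeMatch = 2 (illustrative values; the real constants live with the code-completion consumer):

    #include <cassert>

    int main() {
      unsigned Priority = 50;           // some result's base priority
      unsigned exact   = Priority / 4;  // exact type match      -> 12
      unsigned similar = Priority / 2;  // same simplified class -> 25
      assert(exact < similar && similar < Priority); // best ranks first
    }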
+
void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
assert(!ShadowMaps.empty() && "Must enter into a results scope");
@@ -542,8 +687,9 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
if (AsNestedNameSpecifier) {
R.StartsNestedNameSpecifier = true;
R.Priority = CCP_NestedNameSpecifier;
- }
-
+ } else if (!PreferredType.isNull())
+ AdjustResultPriorityForPreferredType(R);
+
// If this result is supposed to have an informative qualifier, add one.
if (R.QualifierIsInformative && !R.Qualifier &&
!R.StartsNestedNameSpecifier) {
@@ -616,6 +762,9 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
if (InBaseClass)
R.Priority += CCD_InBaseClass;
+ if (!PreferredType.isNull())
+ AdjustResultPriorityForPreferredType(R);
+
// Insert this result into the set of results.
Results.push_back(R);
}
@@ -645,9 +794,11 @@ void ResultBuilder::ExitScope() {
/// \brief Determines whether this given declaration will be found by
/// ordinary name lookup.
bool ResultBuilder::IsOrdinaryName(NamedDecl *ND) const {
+ ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+
unsigned IDNS = Decl::IDNS_Ordinary;
if (SemaRef.getLangOptions().CPlusPlus)
- IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace;
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
else if (SemaRef.getLangOptions().ObjC1 && isa<ObjCIvarDecl>(ND))
return true;
@@ -655,14 +806,33 @@ bool ResultBuilder::IsOrdinaryName(NamedDecl *ND) const {
}
/// \brief Determines whether this given declaration will be found by
+/// ordinary name lookup but is not a type name.
+bool ResultBuilder::IsOrdinaryNonTypeName(NamedDecl *ND) const {
+ ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+ if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND))
+ return false;
+
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ if (SemaRef.getLangOptions().CPlusPlus)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
+ else if (SemaRef.getLangOptions().ObjC1 && isa<ObjCIvarDecl>(ND))
+ return true;
+
+ return ND->getIdentifierNamespace() & IDNS;
+}
+
+/// \brief Determines whether this given declaration will be found by
/// ordinary name lookup.
bool ResultBuilder::IsOrdinaryNonValueName(NamedDecl *ND) const {
+ ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+
unsigned IDNS = Decl::IDNS_Ordinary;
if (SemaRef.getLangOptions().CPlusPlus)
IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace;
return (ND->getIdentifierNamespace() & IDNS) &&
- !isa<ValueDecl>(ND) && !isa<FunctionTemplateDecl>(ND);
+ !isa<ValueDecl>(ND) && !isa<FunctionTemplateDecl>(ND) &&
+ !isa<ObjCPropertyDecl>(ND);
}
/// \brief Determines whether the given declaration is suitable as the
@@ -732,6 +902,49 @@ bool ResultBuilder::IsMember(NamedDecl *ND) const {
isa<ObjCPropertyDecl>(ND);
}
+static bool isObjCReceiverType(ASTContext &C, QualType T) {
+ T = C.getCanonicalType(T);
+ switch (T->getTypeClass()) {
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ return true;
+
+ case Type::Builtin:
+ switch (cast<BuiltinType>(T)->getKind()) {
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ return true;
+
+ default:
+ break;
+ }
+ return false;
+
+ default:
+ break;
+ }
+
+ if (!C.getLangOptions().CPlusPlus)
+ return false;
+
+ // FIXME: We could perform more analysis here to determine whether a
+ // particular class type has any conversions to Objective-C types. For now,
+ // just accept all class types.
+ return T->isDependentType() || T->isRecordType();
+}
+
+bool ResultBuilder::IsObjCMessageReceiver(NamedDecl *ND) const {
+ QualType T = getDeclUsageType(SemaRef.Context, ND);
+ if (T.isNull())
+ return false;
+
+ T = SemaRef.Context.getBaseElementType(T);
+ return isObjCReceiverType(SemaRef.Context, T);
+}
+
+
/// \brief Determines whether the given declaration is an Objective-C
/// instance variable.
bool ResultBuilder::IsObjCIvar(NamedDecl *ND) const {
@@ -788,27 +1001,26 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
Results.AddResult(Result("class", CCP_Type));
Results.AddResult(Result("wchar_t", CCP_Type));
- if (Results.includeCodePatterns()) {
- // typename qualified-id
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("typename");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("qualified-id");
- Results.AddResult(Result(Pattern));
- }
+ // typename qualified-id
+ CodeCompletionString *Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("typename");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("qualifier");
+ Pattern->AddTextChunk("::");
+ Pattern->AddPlaceholderChunk("name");
+ Results.AddResult(Result(Pattern));
if (LangOpts.CPlusPlus0x) {
Results.AddResult(Result("auto", CCP_Type));
Results.AddResult(Result("char16_t", CCP_Type));
Results.AddResult(Result("char32_t", CCP_Type));
- if (Results.includeCodePatterns()) {
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("decltype");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression-or-type");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
- }
+
+ CodeCompletionString *Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("decltype");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Pattern->AddPlaceholderChunk("expression");
+ Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Pattern));
}
}
@@ -819,14 +1031,18 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts,
// Results.AddResult(Result("_Decimal64"));
// Results.AddResult(Result("_Decimal128"));
- if (Results.includeCodePatterns()) {
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("typeof");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression-or-type");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
- }
+ CodeCompletionString *Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("typeof");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Pattern));
+
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("typeof");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Pattern->AddPlaceholderChunk("type");
+ Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Pattern));
}
}
@@ -887,6 +1103,44 @@ static void AddObjCInterfaceResults(const LangOptions &LangOpts,
bool NeedAt);
static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt);
+static void AddTypedefResult(ResultBuilder &Results) {
+ CodeCompletionString *Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("typedef");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("type");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("name");
+ Results.AddResult(CodeCompleteConsumer::Result(Pattern));
+}
+
+static bool WantTypesInContext(Action::CodeCompletionContext CCC,
+ const LangOptions &LangOpts) {
+ if (LangOpts.CPlusPlus)
+ return true;
+
+ switch (CCC) {
+ case Action::CCC_Namespace:
+ case Action::CCC_Class:
+ case Action::CCC_ObjCInstanceVariableList:
+ case Action::CCC_Template:
+ case Action::CCC_MemberTemplate:
+ case Action::CCC_Statement:
+ case Action::CCC_RecoveryInFunction:
+ return true;
+
+ case Action::CCC_ObjCInterface:
+ case Action::CCC_ObjCImplementation:
+ case Action::CCC_Expression:
+ case Action::CCC_Condition:
+ return false;
+
+ case Action::CCC_ForInit:
+ return LangOpts.ObjC1 || LangOpts.C99;
+ }
+
+ return false;
+}
+
/// \brief Add language constructs that show up for "ordinary" names.
static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC,
Scope *S,
@@ -895,25 +1149,29 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC,
typedef CodeCompleteConsumer::Result Result;
switch (CCC) {
case Action::CCC_Namespace:
- if (SemaRef.getLangOptions().CPlusPlus && Results.includeCodePatterns()) {
- // namespace <identifier> { }
- CodeCompletionString *Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("namespace");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("identifier");
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("declarations");
- Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Results.AddResult(Result(Pattern));
-
+ if (SemaRef.getLangOptions().CPlusPlus) {
+ CodeCompletionString *Pattern = 0;
+
+ if (Results.includeCodePatterns()) {
+ // namespace <identifier> { declarations }
+ CodeCompletionString *Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("namespace");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("identifier");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
+ Pattern->AddPlaceholderChunk("declarations");
+ Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Pattern));
+ }
+
// namespace identifier = identifier ;
Pattern = new CodeCompletionString;
Pattern->AddTypedTextChunk("namespace");
Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("identifier");
+ Pattern->AddPlaceholderChunk("name");
Pattern->AddChunk(CodeCompletionString::CK_Equal);
- Pattern->AddPlaceholderChunk("identifier");
+ Pattern->AddPlaceholderChunk("namespace");
Results.AddResult(Result(Pattern));
// Using directives
@@ -933,43 +1191,49 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC,
Pattern->AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Pattern));
- // Explicit template instantiation
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("template");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("declaration");
- Results.AddResult(Result(Pattern));
+ if (Results.includeCodePatterns()) {
+ // Explicit template instantiation
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("template");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("declaration");
+ Results.AddResult(Result(Pattern));
+ }
}
if (SemaRef.getLangOptions().ObjC1)
AddObjCTopLevelResults(Results, true);
+ AddTypedefResult(Results);
// Fall through
case Action::CCC_Class:
- if (Results.includeCodePatterns())
- Results.AddResult(Result("typedef"));
-
- if (SemaRef.getLangOptions().CPlusPlus && Results.includeCodePatterns()) {
+ if (SemaRef.getLangOptions().CPlusPlus) {
// Using declaration
CodeCompletionString *Pattern = new CodeCompletionString;
Pattern->AddTypedTextChunk("using");
Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("qualified-id");
+ Pattern->AddPlaceholderChunk("qualifier");
+ Pattern->AddTextChunk("::");
+ Pattern->AddPlaceholderChunk("name");
Results.AddResult(Result(Pattern));
- // using typename qualified-id; (only in a dependent context)
+ // using typename qualifier::name (only in a dependent context)
if (SemaRef.CurContext->isDependentContext()) {
Pattern = new CodeCompletionString;
Pattern->AddTypedTextChunk("using");
Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
Pattern->AddTextChunk("typename");
Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("qualified-id");
+ Pattern->AddPlaceholderChunk("qualifier");
+ Pattern->AddTextChunk("::");
+ Pattern->AddPlaceholderChunk("name");
Results.AddResult(Result(Pattern));
}
if (CCC == Action::CCC_Class) {
+ AddTypedefResult(Results);
+
// public:
Pattern = new CodeCompletionString;
Pattern->AddTypedTextChunk("public");
@@ -1025,8 +1289,7 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC,
case Action::CCC_RecoveryInFunction:
case Action::CCC_Statement: {
- if (Results.includeCodePatterns())
- Results.AddResult(Result("typedef"));
+ AddTypedefResult(Results);
CodeCompletionString *Pattern = 0;
if (SemaRef.getLangOptions().CPlusPlus && Results.includeCodePatterns()) {
@@ -1081,10 +1344,11 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC,
}
// Switch-specific statements.
- if (!SemaRef.getSwitchStack().empty() && Results.includeCodePatterns()) {
+ if (!SemaRef.getSwitchStack().empty()) {
// case expression:
Pattern = new CodeCompletionString;
Pattern->AddTypedTextChunk("case");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
Pattern->AddPlaceholderChunk("expression");
Pattern->AddChunk(CodeCompletionString::CK_Colon);
Results.AddResult(Result(Pattern));
@@ -1178,23 +1442,21 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC,
}
Results.AddResult(Result(Pattern));
- if (Results.includeCodePatterns()) {
- // goto identifier ;
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("goto");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("identifier");
- Results.AddResult(Result(Pattern));
+ // goto identifier ;
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("goto");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("label");
+ Results.AddResult(Result(Pattern));
- // Using directives
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("using");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddTextChunk("namespace");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("identifier");
- Results.AddResult(Result(Pattern));
- }
+ // Using directives
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("using");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddTextChunk("namespace");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("identifier");
+ Results.AddResult(Result(Pattern));
}
// Fall through (for statement expressions).
@@ -1215,132 +1477,133 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC,
Results.AddResult(Result("true"));
Results.AddResult(Result("false"));
- if (Results.includeCodePatterns()) {
- // dynamic_cast < type-id > ( expression )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("dynamic_cast");
- Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
- Pattern->AddPlaceholderChunk("type-id");
- Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
-
- // static_cast < type-id > ( expression )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("static_cast");
- Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
- Pattern->AddPlaceholderChunk("type-id");
- Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ // dynamic_cast < type-id > ( expression )
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("dynamic_cast");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
+ Pattern->AddPlaceholderChunk("type");
+ Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
+ Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Pattern->AddPlaceholderChunk("expression");
+ Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Pattern));
+
+ // static_cast < type-id > ( expression )
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("static_cast");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
+ Pattern->AddPlaceholderChunk("type");
+ Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
+ Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Pattern->AddPlaceholderChunk("expression");
+ Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Pattern));
- // reinterpret_cast < type-id > ( expression )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("reinterpret_cast");
- Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
- Pattern->AddPlaceholderChunk("type-id");
- Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ // reinterpret_cast < type-id > ( expression )
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("reinterpret_cast");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
+ Pattern->AddPlaceholderChunk("type");
+ Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
+ Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Pattern->AddPlaceholderChunk("expression");
+ Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Pattern));
- // const_cast < type-id > ( expression )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("const_cast");
- Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
- Pattern->AddPlaceholderChunk("type-id");
- Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ // const_cast < type-id > ( expression )
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("const_cast");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftAngle);
+ Pattern->AddPlaceholderChunk("type");
+ Pattern->AddChunk(CodeCompletionString::CK_RightAngle);
+ Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Pattern->AddPlaceholderChunk("expression");
+ Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Pattern));
- // typeid ( expression-or-type )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("typeid");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression-or-type");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ // typeid ( expression-or-type )
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("typeid");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Pattern->AddPlaceholderChunk("expression-or-type");
+ Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Pattern));
- // new T ( ... )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("new");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("type-id");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expressions");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
+ // new T ( ... )
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("new");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("type");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Pattern->AddPlaceholderChunk("expressions");
+ Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Pattern));
- // new T [ ] ( ... )
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("new");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("type-id");
- Pattern->AddChunk(CodeCompletionString::CK_LeftBracket);
- Pattern->AddPlaceholderChunk("size");
- Pattern->AddChunk(CodeCompletionString::CK_RightBracket);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expressions");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
-
- // delete expression
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("delete");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("expression");
- Results.AddResult(Result(Pattern));
+ // new T [ ] ( ... )
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("new");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("type");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftBracket);
+ Pattern->AddPlaceholderChunk("size");
+ Pattern->AddChunk(CodeCompletionString::CK_RightBracket);
+ Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Pattern->AddPlaceholderChunk("expressions");
+ Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Pattern));
- // delete [] expression
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("delete");
- Pattern->AddChunk(CodeCompletionString::CK_LeftBracket);
- Pattern->AddChunk(CodeCompletionString::CK_RightBracket);
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("expression");
- Results.AddResult(Result(Pattern));
+ // delete expression
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("delete");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Pattern));
- // throw expression
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("throw");
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("expression");
- Results.AddResult(Result(Pattern));
- }
+ // delete [] expression
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("delete");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddChunk(CodeCompletionString::CK_LeftBracket);
+ Pattern->AddChunk(CodeCompletionString::CK_RightBracket);
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Pattern));
+
+ // throw expression
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("throw");
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Pattern));
// FIXME: Rethrow?
}
if (SemaRef.getLangOptions().ObjC1) {
// Add "super", if we're in an Objective-C class with a superclass.
- if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
- if (Method->getClassInterface()->getSuperClass())
- Results.AddResult(Result("super"));
-
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) {
+ // The interface can be NULL.
+ if (ObjCInterfaceDecl *ID = Method->getClassInterface())
+ if (ID->getSuperClass())
+ Results.AddResult(Result("super"));
+ }
+
AddObjCExpressionResults(Results, true);
}
- if (Results.includeCodePatterns()) {
- // sizeof expression
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk("sizeof");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression-or-type");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Results.AddResult(Result(Pattern));
- }
+ // sizeof expression
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk("sizeof");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Pattern->AddPlaceholderChunk("expression-or-type");
+ Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Pattern));
break;
}
}
- AddTypeSpecifierResults(SemaRef.getLangOptions(), Results);
+ if (WantTypesInContext(CCC, SemaRef.getLangOptions()))
+ AddTypeSpecifierResults(SemaRef.getLangOptions(), Results);
if (SemaRef.getLangOptions().CPlusPlus)
Results.AddResult(Result("operator"));
@@ -1702,9 +1965,9 @@ CodeCompleteConsumer::Result::CreateCodeCompletionString(Sema &S) {
if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Idx))
Keyword += II->getName().str();
Keyword += ":";
- if (Idx < StartParameter || AllParametersAreInformative) {
+ if (Idx < StartParameter || AllParametersAreInformative)
Result->AddInformativeChunk(Keyword);
- } else if (Idx == StartParameter)
+ else if (Idx == StartParameter)
Result->AddTypedTextChunk(Keyword);
else
Result->AddTextChunk(Keyword);
@@ -1719,14 +1982,18 @@ CodeCompleteConsumer::Result::CreateCodeCompletionString(Sema &S) {
Arg = "(" + Arg + ")";
if (IdentifierInfo *II = (*P)->getIdentifier())
Arg += II->getName().str();
- if (AllParametersAreInformative)
+ if (DeclaringEntity)
+ Result->AddTextChunk(Arg);
+ else if (AllParametersAreInformative)
Result->AddInformativeChunk(Arg);
else
Result->AddPlaceholderChunk(Arg);
}
if (Method->isVariadic()) {
- if (AllParametersAreInformative)
+ if (DeclaringEntity)
+ Result->AddTextChunk(", ...");
+ else if (AllParametersAreInformative)
Result->AddInformativeChunk(", ...");
else
Result->AddPlaceholderChunk(", ...");
@@ -1921,12 +2188,25 @@ namespace {
};
}
-static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results) {
+static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results,
+ bool TargetTypeIsPointer = false) {
+ typedef CodeCompleteConsumer::Result Result;
+
Results.EnterNewScope();
for (Preprocessor::macro_iterator M = PP.macro_begin(),
MEnd = PP.macro_end();
- M != MEnd; ++M)
- Results.AddResult(M->first);
+ M != MEnd; ++M) {
+ unsigned Priority = CCP_Macro;
+
+ // Treat the "nil" and "NULL" macros as null pointer constants.
+ if (M->first->isStr("nil") || M->first->isStr("NULL")) {
+ Priority = CCP_Constant;
+ if (TargetTypeIsPointer)
+ Priority = Priority / CCF_SimilarTypeMatch;
+ }
+
+ Results.AddResult(Result(M->first, Priority));
+ }
Results.ExitScope();
}
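
The ranking rule above can be sketched in isolation; the CCP_/CCF_ names mirror clang's priority enumerators, but the numeric values below are illustrative placeholders, not the ones clang defines:

    // Minimal sketch of AddMacroResults' ranking rule; smaller priority
    // values rank higher. Constant values here are hypothetical.
    unsigned macroPriority(bool isNullMacro, bool targetTypeIsPointer) {
      const unsigned CCP_Macro = 70, CCP_Constant = 65;  // placeholder values
      const unsigned CCF_SimilarTypeMatch = 2;           // placeholder value
      unsigned Priority = CCP_Macro;
      if (isNullMacro) {              // "nil" or "NULL"
        Priority = CCP_Constant;
        if (targetTypeIsPointer)      // boost when a pointer is expected
          Priority /= CCF_SimilarTypeMatch;
      }
      return Priority;
    }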
@@ -1966,7 +2246,10 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
case CCC_Statement:
case CCC_ForInit:
case CCC_Condition:
- Results.setFilter(&ResultBuilder::IsOrdinaryName);
+ if (WantTypesInContext(CompletionContext, getLangOptions()))
+ Results.setFilter(&ResultBuilder::IsOrdinaryName);
+ else
+ Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);
break;
case CCC_RecoveryInFunction:
@@ -1986,6 +2269,36 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
HandleCodeCompleteResults(this, CodeCompleter, Results.data(),Results.size());
}
+/// \brief Perform code-completion in an expression context when we know what
+/// type we're looking for.
+void Sema::CodeCompleteExpression(Scope *S, QualType T) {
+ typedef CodeCompleteConsumer::Result Result;
+ ResultBuilder Results(*this);
+
+ if (WantTypesInContext(CCC_Expression, getLangOptions()))
+ Results.setFilter(&ResultBuilder::IsOrdinaryName);
+ else
+ Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);
+ Results.setPreferredType(T.getNonReferenceType());
+
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer);
+
+ Results.EnterNewScope();
+ AddOrdinaryNameResults(CCC_Expression, S, *this, Results);
+ Results.ExitScope();
+
+ bool PreferredTypeIsPointer = false;
+ if (!T.isNull())
+ PreferredTypeIsPointer = T->isAnyPointerType() ||
+ T->isMemberPointerType() || T->isBlockPointerType();
+
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results, PreferredTypeIsPointer);
+ HandleCodeCompleteResults(this, CodeCompleter, Results.data(),Results.size());
+}
+
+
static void AddObjCProperties(ObjCContainerDecl *Container,
bool AllowCategories,
DeclContext *CurContext,
@@ -2254,6 +2567,17 @@ namespace {
};
}
+static bool anyNullArguments(Expr **Args, unsigned NumArgs) {
+ if (NumArgs && !Args)
+ return true;
+
+ for (unsigned I = 0; I != NumArgs; ++I)
+ if (!Args[I])
+ return true;
+
+ return false;
+}
+
void Sema::CodeCompleteCall(Scope *S, ExprTy *FnIn,
ExprTy **ArgsIn, unsigned NumArgs) {
if (!CodeCompleter)
@@ -2268,7 +2592,7 @@ void Sema::CodeCompleteCall(Scope *S, ExprTy *FnIn,
Expr **Args = (Expr **)ArgsIn;
// Ignore type-dependent call expressions entirely.
- if (Fn->isTypeDependent() ||
+ if (!Fn || Fn->isTypeDependent() || anyNullArguments(Args, NumArgs) ||
Expr::hasAnyTypeDependentArguments(Args, NumArgs)) {
CodeCompleteOrdinaryName(S, CCC_Expression);
return;
@@ -2292,7 +2616,8 @@ void Sema::CodeCompleteCall(Scope *S, ExprTy *FnIn,
else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(NakedFn)) {
FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl());
if (FDecl) {
- if (!FDecl->getType()->getAs<FunctionProtoType>())
+ if (!getLangOptions().CPlusPlus ||
+ !FDecl->getType()->getAs<FunctionProtoType>())
Results.push_back(ResultCandidate(FDecl));
else
// FIXME: access?
@@ -2302,6 +2627,8 @@ void Sema::CodeCompleteCall(Scope *S, ExprTy *FnIn,
}
}
+ QualType ParamType;
+
if (!CandidateSet.empty()) {
// Sort the overload candidate set by placing the best overloads first.
std::stable_sort(CandidateSet.begin(), CandidateSet.end(),
@@ -2314,14 +2641,85 @@ void Sema::CodeCompleteCall(Scope *S, ExprTy *FnIn,
if (Cand->Viable)
Results.push_back(ResultCandidate(Cand->Function));
}
+
+ // From the viable candidates, try to determine the type of this parameter.
+ for (unsigned I = 0, N = Results.size(); I != N; ++I) {
+ if (const FunctionType *FType = Results[I].getFunctionType())
+ if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FType))
+ if (NumArgs < Proto->getNumArgs()) {
+ if (ParamType.isNull())
+ ParamType = Proto->getArgType(NumArgs);
+ else if (!Context.hasSameUnqualifiedType(
+ ParamType.getNonReferenceType(),
+ Proto->getArgType(NumArgs).getNonReferenceType())) {
+ ParamType = QualType();
+ break;
+ }
+ }
+ }
+ } else {
+ // Try to determine the parameter type from the type of the expression
+ // being called.
+ QualType FunctionType = Fn->getType();
+ if (const PointerType *Ptr = FunctionType->getAs<PointerType>())
+ FunctionType = Ptr->getPointeeType();
+ else if (const BlockPointerType *BlockPtr
+ = FunctionType->getAs<BlockPointerType>())
+ FunctionType = BlockPtr->getPointeeType();
+ else if (const MemberPointerType *MemPtr
+ = FunctionType->getAs<MemberPointerType>())
+ FunctionType = MemPtr->getPointeeType();
+
+ if (const FunctionProtoType *Proto
+ = FunctionType->getAs<FunctionProtoType>()) {
+ if (NumArgs < Proto->getNumArgs())
+ ParamType = Proto->getArgType(NumArgs);
+ }
}
- CodeCompleteOrdinaryName(S, CCC_Expression);
+ if (ParamType.isNull())
+ CodeCompleteOrdinaryName(S, CCC_Expression);
+ else
+ CodeCompleteExpression(S, ParamType);
+
if (!Results.empty())
CodeCompleter->ProcessOverloadCandidates(*this, NumArgs, Results.data(),
Results.size());
}
+void Sema::CodeCompleteInitializer(Scope *S, DeclPtrTy D) {
+ ValueDecl *VD = dyn_cast_or_null<ValueDecl>(D.getAs<Decl>());
+ if (!VD) {
+ CodeCompleteOrdinaryName(S, CCC_Expression);
+ return;
+ }
+
+ CodeCompleteExpression(S, VD->getType());
+}
+
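A hypothetical scenario for the initializer path: the declared type is handed to CodeCompleteExpression, so completing after the '=' below prefers pointer-typed candidates and, via AddMacroResults, boosts the NULL macro.

    #include <cstddef>
    int value = 42;
    // Completion here runs CodeCompleteExpression with T = 'int *':
    int *ptr = /* complete at this point */ &value;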
+void Sema::CodeCompleteReturn(Scope *S) {
+ QualType ResultType;
+ if (isa<BlockDecl>(CurContext)) {
+ if (BlockScopeInfo *BSI = getCurBlock())
+ ResultType = BSI->ReturnType;
+ } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(CurContext))
+ ResultType = Function->getResultType();
+ else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(CurContext))
+ ResultType = Method->getResultType();
+
+ if (ResultType.isNull())
+ CodeCompleteOrdinaryName(S, CCC_Expression);
+ else
+ CodeCompleteExpression(S, ResultType);
+}
+
+void Sema::CodeCompleteAssignmentRHS(Scope *S, ExprTy *LHS) {
+ if (LHS)
+ CodeCompleteExpression(S, static_cast<Expr *>(LHS)->getType());
+ else
+ CodeCompleteOrdinaryName(S, CCC_Expression);
+}
+
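The return and assignment entry points behave the same way; in this illustrative snippet the preferred type comes from the enclosing function's result type or from the left-hand side, falling back to ordinary-name completion only when neither is known.

    double scale = 1.0;
    double next(double d) {
      scale = d;     // CodeCompleteAssignmentRHS: prefers double
      return scale;  // CodeCompleteReturn: prefers double
    }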
void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext) {
if (!SS.getScopeRep() || !CodeCompleter)
@@ -2460,9 +2858,6 @@ void Sema::CodeCompleteOperatorName(Scope *S) {
static void AddObjCImplementationResults(const LangOptions &LangOpts,
ResultBuilder &Results,
bool NeedAt) {
- if (!Results.includeCodePatterns())
- return;
-
typedef CodeCompleteConsumer::Result Result;
// Since we have an implementation, we can end it.
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,end)));
@@ -2488,9 +2883,6 @@ static void AddObjCImplementationResults(const LangOptions &LangOpts,
static void AddObjCInterfaceResults(const LangOptions &LangOpts,
ResultBuilder &Results,
bool NeedAt) {
- if (!Results.includeCodePatterns())
- return;
-
typedef CodeCompleteConsumer::Result Result;
// Since we have an interface or protocol, we can end it.
@@ -2509,9 +2901,6 @@ static void AddObjCInterfaceResults(const LangOptions &LangOpts,
}
static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
- if (!Results.includeCodePatterns())
- return;
-
typedef CodeCompleteConsumer::Result Result;
CodeCompletionString *Pattern = 0;
@@ -2519,31 +2908,33 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
Pattern = new CodeCompletionString;
Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,class));
Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("identifier");
- Results.AddResult(Result(Pattern));
-
- // @interface name
- // FIXME: Could introduce the whole pattern, including superclasses and
- // such.
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,interface));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("class");
+ Pattern->AddPlaceholderChunk("name");
Results.AddResult(Result(Pattern));
- // @protocol name
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("protocol");
- Results.AddResult(Result(Pattern));
+ if (Results.includeCodePatterns()) {
+ // @interface name
+ // FIXME: Could introduce the whole pattern, including superclasses and
+ // such.
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,interface));
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("class");
+ Results.AddResult(Result(Pattern));
- // @implementation name
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,implementation));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddPlaceholderChunk("class");
- Results.AddResult(Result(Pattern));
+ // @protocol name
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol));
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("protocol");
+ Results.AddResult(Result(Pattern));
+
+ // @implementation name
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,implementation));
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddPlaceholderChunk("class");
+ Results.AddResult(Result(Pattern));
+ }
// @compatibility_alias name
Pattern = new CodeCompletionString;
@@ -2571,9 +2962,6 @@ void Sema::CodeCompleteObjCAtDirective(Scope *S, DeclPtrTy ObjCImpDecl,
}
static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
- if (!Results.includeCodePatterns())
- return;
-
typedef CodeCompleteConsumer::Result Result;
CodeCompletionString *Pattern = 0;
@@ -2603,31 +2991,30 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
}
static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
- if (!Results.includeCodePatterns())
- return;
-
typedef CodeCompleteConsumer::Result Result;
CodeCompletionString *Pattern = 0;
- // @try { statements } @catch ( declaration ) { statements } @finally
- // { statements }
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,try));
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Pattern->AddTextChunk("@catch");
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("parameter");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Pattern->AddTextChunk("@finally");
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Results.AddResult(Result(Pattern));
+ if (Results.includeCodePatterns()) {
+ // @try { statements } @catch ( declaration ) { statements } @finally
+ // { statements }
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,try));
+ Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
+ Pattern->AddPlaceholderChunk("statements");
+ Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
+ Pattern->AddTextChunk("@catch");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Pattern->AddPlaceholderChunk("parameter");
+ Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
+ Pattern->AddPlaceholderChunk("statements");
+ Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
+ Pattern->AddTextChunk("@finally");
+ Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
+ Pattern->AddPlaceholderChunk("statements");
+ Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Pattern));
+ }
// @throw
Pattern = new CodeCompletionString;
@@ -2636,25 +3023,24 @@ static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
Pattern->AddPlaceholderChunk("expression");
Results.AddResult(Result(Pattern));
- // @synchronized ( expression ) { statements }
- Pattern = new CodeCompletionString;
- Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,synchronized));
- Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
- Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
- Pattern->AddPlaceholderChunk("expression");
- Pattern->AddChunk(CodeCompletionString::CK_RightParen);
- Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
- Pattern->AddPlaceholderChunk("statements");
- Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
- Results.AddResult(Result(Pattern));
+ if (Results.includeCodePatterns()) {
+ // @synchronized ( expression ) { statements }
+ Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,synchronized));
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Pattern->AddChunk(CodeCompletionString::CK_LeftParen);
+ Pattern->AddPlaceholderChunk("expression");
+ Pattern->AddChunk(CodeCompletionString::CK_RightParen);
+ Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
+ Pattern->AddPlaceholderChunk("statements");
+ Pattern->AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Pattern));
+ }
}
static void AddObjCVisibilityResults(const LangOptions &LangOpts,
ResultBuilder &Results,
bool NeedAt) {
- if (!Results.includeCodePatterns())
- return;
-
typedef CodeCompleteConsumer::Result Result;
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,private)));
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,protected)));
@@ -3021,6 +3407,31 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
.Default(0);
}
+void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
+ typedef CodeCompleteConsumer::Result Result;
+ ResultBuilder Results(*this);
+
+ // Find anything that looks like it could be a message receiver.
+ Results.setFilter(&ResultBuilder::IsObjCMessageReceiver);
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ Results.EnterNewScope();
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer);
+
+ // If we are in an Objective-C method inside a class that has a superclass,
+ // add "super" as an option.
+ if (ObjCMethodDecl *Method = getCurMethodDecl())
+ if (ObjCInterfaceDecl *Iface = Method->getClassInterface())
+ if (Iface->getSuperClass())
+ Results.AddResult(Result("super"));
+
+ Results.ExitScope();
+
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results);
+ HandleCodeCompleteResults(this, CodeCompleter, Results.data(),Results.size());
+
+}
+
void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
IdentifierInfo **SelIdents,
unsigned NumSelIdents) {
@@ -3113,9 +3524,9 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, TypeTy *Receiver,
// If we have an external source, load the entire class method
// pool from the PCH file.
if (ExternalSource) {
- for (uint32_t I = 0, N = ExternalSource->GetNumKnownSelectors(); I != N;
- ++I) {
- Selector Sel = ExternalSource->GetSelector(I);
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = ExternalSource->GetExternalSelector(I);
if (Sel.isNull() || FactoryMethodPool.count(Sel) ||
InstanceMethodPool.count(Sel))
continue;
@@ -3214,9 +3625,9 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver,
// If we have an external source, load the entire class method
// pool from the PCH file.
if (ExternalSource) {
- for (uint32_t I = 0, N = ExternalSource->GetNumKnownSelectors(); I != N;
- ++I) {
- Selector Sel = ExternalSource->GetSelector(I);
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = ExternalSource->GetExternalSelector(I);
if (Sel.isNull() || InstanceMethodPool.count(Sel) ||
FactoryMethodPool.count(Sel))
continue;
@@ -3550,14 +3961,11 @@ static void FindImplementableMethods(ASTContext &Context,
// Add methods from any class extensions (but not from categories;
// those should go into category implementations).
- for (ObjCCategoryDecl *Cat = IFace->getCategoryList(); Cat;
- Cat = Cat->getNextClassCategory()) {
- if (!Cat->IsClassExtension())
- continue;
-
- FindImplementableMethods(Context, Cat, WantInstanceMethods, ReturnType,
+ for (const ObjCCategoryDecl *Cat = IFace->getFirstClassExtension(); Cat;
+ Cat = Cat->getNextClassExtension())
+ FindImplementableMethods(Context, const_cast<ObjCCategoryDecl*>(Cat),
+ WantInstanceMethods, ReturnType,
IsInImplementation, KnownMethods);
- }
}
if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Container)) {
@@ -3714,7 +4122,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
Pattern->AddTextChunk("...");
}
- if (IsInImplementation) {
+ if (IsInImplementation && Results.includeCodePatterns()) {
// We will be defining the method here, so add a compound statement.
Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
Pattern->AddChunk(CodeCompletionString::CK_LeftBrace);
@@ -3739,3 +4147,70 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S,
HandleCodeCompleteResults(this, CodeCompleter, Results.data(),Results.size());
}
+
+void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
+ bool IsInstanceMethod,
+ bool AtParameterName,
+ TypeTy *ReturnTy,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents) {
+ llvm::DenseMap<Selector, ObjCMethodList> &Pool
+ = IsInstanceMethod? InstanceMethodPool : FactoryMethodPool;
+
+ // If we have an external source, load the entire class method
+ // pool from the PCH file.
+ if (ExternalSource) {
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = ExternalSource->GetExternalSelector(I);
+ if (Sel.isNull() || InstanceMethodPool.count(Sel) ||
+ FactoryMethodPool.count(Sel))
+ continue;
+
+ ReadMethodPool(Sel, IsInstanceMethod);
+ }
+ }
+
+ // Build the set of methods we can see.
+ typedef CodeCompleteConsumer::Result Result;
+ ResultBuilder Results(*this);
+
+ if (ReturnTy)
+ Results.setPreferredType(GetTypeFromParser(ReturnTy).getNonReferenceType());
+
+ Results.EnterNewScope();
+ for (llvm::DenseMap<Selector, ObjCMethodList>::iterator M = Pool.begin(),
+ MEnd = Pool.end();
+ M != MEnd;
+ ++M) {
+ for (ObjCMethodList *MethList = &M->second; MethList && MethList->Method;
+ MethList = MethList->Next) {
+ if (!isAcceptableObjCMethod(MethList->Method, MK_Any, SelIdents,
+ NumSelIdents))
+ continue;
+
+ if (AtParameterName) {
+ // Suggest parameter names we've seen before.
+ if (NumSelIdents && NumSelIdents <= MethList->Method->param_size()) {
+ ParmVarDecl *Param = MethList->Method->param_begin()[NumSelIdents-1];
+ if (Param->getIdentifier()) {
+ CodeCompletionString *Pattern = new CodeCompletionString;
+ Pattern->AddTypedTextChunk(Param->getIdentifier()->getName());
+ Results.AddResult(Pattern);
+ }
+ }
+
+ continue;
+ }
+
+ Result R(MethList->Method, 0);
+ R.StartParameter = NumSelIdents;
+ R.AllParametersAreInformative = false;
+ R.DeclaringEntity = true;
+ Results.MaybeAddResult(R, CurContext);
+ }
+ }
+
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter, Results.data(),Results.size());
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
index af02099..c1c898f 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
@@ -87,8 +87,8 @@ Sema::TypeTy *Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
if (!isClassName)
return 0;
- // We know from the grammar that this name refers to a type, so build a
- // DependentNameType node to describe the type.
+ // We know from the grammar that this name refers to a type,
+ // so build a dependent node to describe the type.
return CheckTypenameType(ETK_None,
(NestedNameSpecifier *)SS->getScopeRep(), II,
SourceLocation(), SS->getRange(), NameLoc
@@ -196,12 +196,6 @@ Sema::TypeTy *Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
} else if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) {
T = Context.getObjCInterfaceType(IDecl);
- } else if (UnresolvedUsingTypenameDecl *UUDecl =
- dyn_cast<UnresolvedUsingTypenameDecl>(IIDecl)) {
- // FIXME: preserve source structure information.
- T = Context.getDependentNameType(ETK_None,
- UUDecl->getTargetNestedNameSpecifier(),
- &II);
} else {
// If it's not plausibly a type, suppress diagnostics.
Result.suppressDiagnostics();
@@ -313,7 +307,7 @@ bool Sema::DiagnoseUnknownTypeName(const IdentifierInfo &II,
<< (NestedNameSpecifier *)SS->getScopeRep() << II.getName()
<< SourceRange(SS->getRange().getBegin(), IILoc)
<< FixItHint::CreateInsertion(SS->getRange().getBegin(), "typename ");
- SuggestedType = ActOnTypenameType(SourceLocation(), *SS, II, IILoc).get();
+ SuggestedType = ActOnTypenameType(S, SourceLocation(), *SS, II, IILoc).get();
} else {
assert(SS && SS->isInvalid() &&
"Invalid scope specifier has already been diagnosed");
@@ -959,7 +953,7 @@ Sema::CXXSpecialMember Sema::getSpecialMember(const CXXMethodDecl *MD) {
return Sema::CXXCopyAssignment;
}
-/// canREdefineFunction - checks if a function can be redefined. Currently,
+/// canRedefineFunction - checks if a function can be redefined. Currently,
/// only extern inline functions can be redefined, and even then only in
/// GNU89 mode.
static bool canRedefineFunction(const FunctionDecl *FD,
@@ -1063,13 +1057,27 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
}
// FIXME: diagnose the other way around?
- if (OldType->getNoReturnAttr() &&
- !NewType->getNoReturnAttr()) {
+ if (OldType->getNoReturnAttr() && !NewType->getNoReturnAttr()) {
NewQType = Context.getNoReturnType(NewQType);
New->setType(NewQType);
assert(NewQType.isCanonical());
}
+ // Merge regparm attribute.
+ if (OldType->getRegParmType() != NewType->getRegParmType()) {
+ if (NewType->getRegParmType()) {
+ Diag(New->getLocation(), diag::err_regparm_mismatch)
+ << NewType->getRegParmType()
+ << OldType->getRegParmType();
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ return true;
+ }
+
+ NewQType = Context.getRegParmType(NewQType, OldType->getRegParmType());
+ New->setType(NewQType);
+ assert(NewQType.isCanonical());
+ }
+
if (getLangOptions().CPlusPlus) {
// (C++98 13.1p2):
// Certain function declarations cannot be overloaded:
@@ -1446,6 +1454,17 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
New->setInvalidDecl();
return;
}
+  // C99 6.2.2p4:
+ // For an identifier declared with the storage-class specifier extern in a
+ // scope in which a prior declaration of that identifier is visible, if
+ // the prior declaration specifies internal or external linkage, the linkage
+ // of the identifier at the later declaration is the same as the linkage
+ // specified at the prior declaration.
+  // FIXME: Revisit this code.
+ if (New->hasExternalStorage() &&
+ Old->getLinkage() == InternalLinkage &&
+ New->getDeclContext() == Old->getDeclContext())
+ New->setStorageClass(Old->getStorageClass());
// Keep a chain of previous declarations.
New->setPreviousDeclaration(Old);
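
The C99 6.2.2p4 rule being implemented, in a small example; with this change the inner extern declaration keeps the prior declaration's internal linkage rather than acquiring external linkage:

    static int counter;      /* internal linkage */
    void bump(void) {
      extern int counter;    /* C99 6.2.2p4: inherits the internal */
      ++counter;             /* linkage of the visible prior decl  */
    }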
@@ -1520,6 +1539,14 @@ Sema::DeclPtrTy Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
return DeclPtrTy::make(Tag);
}
+ if (getLangOptions().CPlusPlus &&
+ DS.getStorageClassSpec() != DeclSpec::SCS_typedef)
+ if (EnumDecl *Enum = dyn_cast_or_null<EnumDecl>(Tag))
+ if (Enum->enumerator_begin() == Enum->enumerator_end() &&
+ !Enum->getIdentifier() && !Enum->isInvalidDecl())
+ Diag(Enum->getLocation(), diag::ext_no_declarators)
+ << DS.getSourceRange();
+
if (!DS.isMissingDeclaratorOk() &&
DS.getTypeSpecType() != DeclSpec::TST_error) {
// Warn about typedefs of enums without names, since this is an
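
A sketch of what the new C++ diagnostic distinguishes:

    enum Color { Red, Green };  // fine: declares a type and enumerators
    enum { Anonymous };         // fine: declares an enumerator
    enum { };                   // empty and unnamed: declares nothing,
                                // so ext_no_declarators fires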
@@ -1770,6 +1797,8 @@ Sema::DeclPtrTy Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
<< (int)Record->isUnion();
Invalid = true;
}
+ } else if (isa<AccessSpecDecl>(*Mem)) {
+ // Any access specifier is fine.
} else {
// We have something that isn't a non-static data
// member. Complain about it.
@@ -1795,8 +1824,7 @@ Sema::DeclPtrTy Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
// Mock up a declarator.
Declarator Dc(DS, Declarator::TypeNameContext);
- TypeSourceInfo *TInfo = 0;
- GetTypeForDeclarator(Dc, S, &TInfo);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct/union");
// Create a declaration for this anonymous struct/union.
@@ -2091,8 +2119,8 @@ Sema::HandleDeclarator(Scope *S, Declarator &D,
NamedDecl *New;
- TypeSourceInfo *TInfo = 0;
- QualType R = GetTypeForDeclarator(D, S, &TInfo);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType R = TInfo->getType();
LookupResult Previous(*this, Name, D.getIdentifierLoc(), LookupOrdinaryName,
ForRedeclaration);
@@ -2342,6 +2370,12 @@ Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
if (D.getDeclSpec().isThreadSpecified())
Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_invalid_thread);
+ if (D.getName().Kind != UnqualifiedId::IK_Identifier) {
+ Diag(D.getName().StartLocation, diag::err_typedef_not_identifier)
+ << D.getName().getSourceRange();
+ return 0;
+ }
+
TypedefDecl *NewTD = ParseTypedefDecl(S, D, R, TInfo);
if (!NewTD) return 0;
@@ -2537,6 +2571,8 @@ Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
// Match up the template parameter lists with the scope specifier, then
// determine whether we have a template or a template specialization.
bool isExplicitSpecialization = false;
+ unsigned NumMatchedTemplateParamLists = TemplateParamLists.size();
+ bool Invalid = false;
if (TemplateParameterList *TemplateParams
= MatchTemplateParametersToScopeSpecifier(
D.getDeclSpec().getSourceRange().getBegin(),
@@ -2544,7 +2580,11 @@ Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
(TemplateParameterList**)TemplateParamLists.get(),
TemplateParamLists.size(),
/*never a friend*/ false,
- isExplicitSpecialization)) {
+ isExplicitSpecialization,
+ Invalid)) {
+    // All but one of the template parameter lists have been matched.
+ --NumMatchedTemplateParamLists;
+
if (TemplateParams->size() > 0) {
// There is no such thing as a variable template.
Diag(D.getIdentifierLoc(), diag::err_template_variable)
@@ -2568,11 +2608,17 @@ Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
VarDecl *NewVD = VarDecl::Create(Context, DC, D.getIdentifierLoc(),
II, R, TInfo, SC, SCAsWritten);
- if (D.isInvalidType())
+ if (D.isInvalidType() || Invalid)
NewVD->setInvalidDecl();
SetNestedNameSpecifier(NewVD, D);
+ if (NumMatchedTemplateParamLists > 0) {
+ NewVD->setTemplateParameterListsInfo(Context,
+ NumMatchedTemplateParamLists,
+ (TemplateParameterList**)TemplateParamLists.release());
+ }
+
if (D.getDeclSpec().isThreadSpecified()) {
if (NewVD->hasLocalStorage())
Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_thread_non_global);
@@ -2831,6 +2877,23 @@ void Sema::CheckVariableDeclaration(VarDecl *NewVD,
return NewVD->setInvalidDecl();
}
+ // Function pointers and references cannot have qualified function type, only
+ // function pointer-to-members can do that.
+ QualType Pointee;
+ unsigned PtrOrRef = 0;
+ if (const PointerType *Ptr = T->getAs<PointerType>())
+ Pointee = Ptr->getPointeeType();
+ else if (const ReferenceType *Ref = T->getAs<ReferenceType>()) {
+ Pointee = Ref->getPointeeType();
+ PtrOrRef = 1;
+ }
+ if (!Pointee.isNull() && Pointee->isFunctionProtoType() &&
+ Pointee->getAs<FunctionProtoType>()->getTypeQuals() != 0) {
+ Diag(NewVD->getLocation(), diag::err_invalid_qualified_function_pointer)
+ << PtrOrRef;
+ return NewVD->setInvalidDecl();
+ }
+
if (!Previous.empty()) {
Redeclaration = true;
MergeVarDecl(NewVD, Previous);
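
A sketch of the declarations the new check rejects, and the member-pointer case that remains valid:

    typedef void Fq() const;        // cv-qualified function type
    // Fq *p;                       // rejected: pointer (PtrOrRef = 0)
    // Fq &r = ...;                 // rejected: reference (PtrOrRef = 1)
    struct S { void f() const; };
    void (S::*pm)() const = &S::f;  // OK: pointers to members may have
                                    // qualified function type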
@@ -2858,7 +2921,7 @@ static bool FindOverriddenMethod(const CXXBaseSpecifier *Specifier,
// FIXME: Do we care about other names here too?
if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
- // We really want to find the base class constructor here.
+ // We really want to find the base class destructor here.
QualType T = Data->S->Context.getTypeDeclType(BaseRecord);
CanQualType CT = Data->S->Context.getCanonicalType(T);
@@ -2868,8 +2931,9 @@ static bool FindOverriddenMethod(const CXXBaseSpecifier *Specifier,
for (Path.Decls = BaseRecord->lookup(Name);
Path.Decls.first != Path.Decls.second;
++Path.Decls.first) {
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(*Path.Decls.first)) {
- if (MD->isVirtual() && !Data->S->IsOverload(Data->Method, MD))
+ NamedDecl *D = *Path.Decls.first;
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (MD->isVirtual() && !Data->S->IsOverload(Data->Method, MD, false))
return true;
}
}
@@ -2992,7 +3056,7 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
} else if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
// This is a C++ destructor declaration.
if (DC->isRecord()) {
- R = CheckDestructorDeclarator(D, SC);
+ R = CheckDestructorDeclarator(D, R, SC);
NewFD = CXXDestructorDecl::Create(Context,
cast<CXXRecordDecl>(DC),
@@ -3093,6 +3157,8 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
FunctionTemplateDecl *FunctionTemplate = 0;
bool isExplicitSpecialization = false;
bool isFunctionTemplateSpecialization = false;
+ unsigned NumMatchedTemplateParamLists = TemplateParamLists.size();
+ bool Invalid = false;
if (TemplateParameterList *TemplateParams
= MatchTemplateParametersToScopeSpecifier(
D.getDeclSpec().getSourceRange().getBegin(),
@@ -3100,7 +3166,11 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
(TemplateParameterList**)TemplateParamLists.get(),
TemplateParamLists.size(),
isFriend,
- isExplicitSpecialization)) {
+ isExplicitSpecialization,
+ Invalid)) {
+    // All but one of the template parameter lists have been matched.
+ --NumMatchedTemplateParamLists;
+
if (TemplateParams->size() > 0) {
// This is a function template
@@ -3140,9 +3210,18 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
<< FixItHint::CreateInsertion(InsertLoc, "<>");
}
}
+ }
- // FIXME: Free this memory properly.
- TemplateParamLists.release();
+ if (NumMatchedTemplateParamLists > 0) {
+ NewFD->setTemplateParameterListsInfo(Context,
+ NumMatchedTemplateParamLists,
+ (TemplateParameterList**)TemplateParamLists.release());
+ }
+
+ if (Invalid) {
+ NewFD->setInvalidDecl();
+ if (FunctionTemplate)
+ FunctionTemplate->setInvalidDecl();
}
// C++ [dcl.fct.spec]p5:
@@ -3272,14 +3351,8 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
// Synthesize a parameter for each argument type.
for (FunctionProtoType::arg_type_iterator AI = FT->arg_type_begin(),
AE = FT->arg_type_end(); AI != AE; ++AI) {
- ParmVarDecl *Param = ParmVarDecl::Create(Context, NewFD,
- D.getIdentifierLoc(), 0,
- *AI,
- Context.getTrivialTypeSourceInfo(*AI,
- D.getIdentifierLoc()),
- VarDecl::None,
- VarDecl::None, 0);
- Param->setImplicit();
+ ParmVarDecl *Param =
+ BuildParmVarDeclForTypedef(NewFD, D.getIdentifierLoc(), *AI);
Params.push_back(Param);
}
} else {
@@ -3456,7 +3529,7 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
if (Redeclaration && Previous.isSingleResult()) {
const FunctionDecl *Def;
FunctionDecl *PrevFD = dyn_cast<FunctionDecl>(Previous.getFoundDecl());
- if (PrevFD && PrevFD->getBody(Def) && D.hasAttributes()) {
+ if (PrevFD && PrevFD->hasBody(Def) && D.hasAttributes()) {
Diag(NewFD->getLocation(), diag::warn_attribute_precede_definition);
Diag(Def->getLocation(), diag::note_previous_definition);
}
@@ -3582,13 +3655,10 @@ void Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
}
}
- switch (CheckOverload(NewFD, Previous, OldDecl)) {
+ switch (CheckOverload(S, NewFD, Previous, OldDecl,
+ /*NewIsUsingDecl*/ false)) {
case Ovl_Match:
Redeclaration = true;
- if (isa<UsingShadowDecl>(OldDecl) && CurContext->isRecord()) {
- HideUsingShadowDecl(S, cast<UsingShadowDecl>(OldDecl));
- Redeclaration = false;
- }
break;
case Ovl_NonFunction:
@@ -3647,7 +3717,7 @@ void Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
CXXRecordDecl *Record = Destructor->getParent();
QualType ClassType = Context.getTypeDeclType(Record);
- // FIXME: Shouldn't we be able to perform thisc heck even when the class
+ // FIXME: Shouldn't we be able to perform this check even when the class
// type is dependent? Both gcc and edg can handle that.
if (!ClassType->isDependentType()) {
DeclarationName Name
@@ -3943,7 +4013,7 @@ void Sema::AddInitializerToDecl(DeclPtrTy dcl, ExprArg init, bool DirectInit) {
QualType T = VDecl->getType();
if (!T->isDependentType() &&
(!Context.getCanonicalType(T).isConstQualified() ||
- !T->isIntegralType())) {
+ !T->isIntegralOrEnumerationType())) {
Diag(VDecl->getLocation(), diag::err_member_initialization)
<< VDecl->getDeclName() << Init->getSourceRange();
VDecl->setInvalidDecl();
@@ -3954,7 +4024,7 @@ void Sema::AddInitializerToDecl(DeclPtrTy dcl, ExprArg init, bool DirectInit) {
// can specify a constant-initializer which shall be an
// integral constant expression (5.19).
if (!Init->isTypeDependent() &&
- !Init->getType()->isIntegralType()) {
+ !Init->getType()->isIntegralOrEnumerationType()) {
      // We have a non-dependent type that is not an integral or
      // enumeration type.
Diag(Init->getSourceRange().getBegin(),
diag::err_in_class_initializer_non_integral_type)
@@ -4264,9 +4334,9 @@ Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
if (getLangOptions().CPlusPlus)
CheckExtraCXXDefaultArguments(D);
- TypeSourceInfo *TInfo = 0;
TagDecl *OwnedDecl = 0;
- QualType parmDeclType = GetTypeForDeclarator(D, S, &TInfo, &OwnedDecl);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedDecl);
+ QualType parmDeclType = TInfo->getType();
if (getLangOptions().CPlusPlus && OwnedDecl && OwnedDecl->isDefinition()) {
// C++ [dcl.fct]p6:
@@ -4331,6 +4401,18 @@ Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
return DeclPtrTy::make(New);
}
+/// \brief Synthesizes a variable for a parameter arising from a
+/// typedef.
+ParmVarDecl *Sema::BuildParmVarDeclForTypedef(DeclContext *DC,
+ SourceLocation Loc,
+ QualType T) {
+ ParmVarDecl *Param = ParmVarDecl::Create(Context, DC, Loc, 0,
+ T, Context.getTrivialTypeSourceInfo(T, Loc),
+ VarDecl::None, VarDecl::None, 0);
+ Param->setImplicit();
+ return Param;
+}
+
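The case this helper serves: a function declared through a function typedef carries no parameter declarators, so Sema synthesizes one implicit, unnamed parameter per prototype argument. Illustrative example:

    typedef int Handler(int, int);
    Handler process;      // declares int process(int, int); the two
                          // parameters are synthesized from the typedef
    int process(int a, int b) { return a + b; }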
ParmVarDecl *Sema::CheckParameter(DeclContext *DC,
TypeSourceInfo *TSInfo, QualType T,
IdentifierInfo *Name,
@@ -4489,7 +4571,7 @@ Sema::DeclPtrTy Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, DeclPtrTy D) {
// But don't complain if we're in GNU89 mode and the previous definition
// was an extern inline function.
const FunctionDecl *Definition;
- if (FD->getBody(Definition) &&
+ if (FD->hasBody(Definition) &&
!canRedefineFunction(Definition, getLangOptions())) {
Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();
Diag(Definition->getLocation(), diag::note_previous_definition);
@@ -4964,16 +5046,25 @@ Sema::DeclPtrTy Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// FIXME: Check explicit specializations more carefully.
bool isExplicitSpecialization = false;
+ unsigned NumMatchedTemplateParamLists = TemplateParameterLists.size();
+ bool Invalid = false;
if (TUK != TUK_Reference) {
if (TemplateParameterList *TemplateParams
= MatchTemplateParametersToScopeSpecifier(KWLoc, SS,
(TemplateParameterList**)TemplateParameterLists.get(),
TemplateParameterLists.size(),
TUK == TUK_Friend,
- isExplicitSpecialization)) {
+ isExplicitSpecialization,
+ Invalid)) {
+    // All but one of the template parameter lists have been matched.
+ --NumMatchedTemplateParamLists;
+
if (TemplateParams->size() > 0) {
// This is a declaration or definition of a class template (which may
// be a member of another template).
+ if (Invalid)
+ return DeclPtrTy();
+
OwnedDecl = false;
DeclResult Result = CheckClassTemplate(S, TagSpec, TUK, KWLoc,
SS, Name, NameLoc, Attr,
@@ -4988,14 +5079,11 @@ Sema::DeclPtrTy Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
isExplicitSpecialization = true;
}
}
-
- TemplateParameterLists.release();
}
DeclContext *SearchDC = CurContext;
DeclContext *DC = CurContext;
bool isStdBadAlloc = false;
- bool Invalid = false;
RedeclarationKind Redecl = ForRedeclaration;
if (TUK == TUK_Friend || TUK == TUK_Reference)
@@ -5231,7 +5319,8 @@ Sema::DeclPtrTy Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// for the consumer of this Decl to know it doesn't own it.
// For our current ASTs this shouldn't be a problem, but will
// need to be changed with DeclGroups.
- if (TUK == TUK_Reference || TUK == TUK_Friend)
+ if ((TUK == TUK_Reference && !PrevTagDecl->getFriendObjectKind()) ||
+ TUK == TUK_Friend)
return DeclPtrTy::make(PrevTagDecl);
// Diagnose attempts to redefine a tag.
@@ -5364,10 +5453,17 @@ CreateNewDecl:
New = EnumDecl::Create(Context, SearchDC, Loc, Name, KWLoc,
cast_or_null<EnumDecl>(PrevDecl));
// If this is an undefined enum, warn.
- if (TUK != TUK_Definition && !Invalid) {
- unsigned DK = getLangOptions().CPlusPlus? diag::err_forward_ref_enum
- : diag::ext_forward_ref_enum;
- Diag(Loc, DK);
+ if (TUK != TUK_Definition && !Invalid) {
+ TagDecl *Def;
+ if (PrevDecl && (Def = cast<EnumDecl>(PrevDecl)->getDefinition())) {
+ Diag(Loc, diag::ext_forward_ref_enum_def)
+ << New;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ } else {
+ Diag(Loc,
+ getLangOptions().CPlusPlus? diag::err_forward_ref_enum
+ : diag::ext_forward_ref_enum);
+ }
}
} else {
// struct/union/class
@@ -5392,6 +5488,11 @@ CreateNewDecl:
NestedNameSpecifier *NNS
= static_cast<NestedNameSpecifier*>(SS.getScopeRep());
New->setQualifierInfo(NNS, SS.getRange());
+ if (NumMatchedTemplateParamLists > 0) {
+ New->setTemplateParameterListsInfo(Context,
+ NumMatchedTemplateParamLists,
+ (TemplateParameterList**) TemplateParameterLists.release());
+ }
}
else
Invalid = true;
@@ -5547,7 +5648,7 @@ bool Sema::VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
// C99 6.7.2.1p4 - verify the field type.
// C++ 9.6p3: A bit-field shall have integral or enumeration type.
- if (!FieldTy->isDependentType() && !FieldTy->isIntegralType()) {
+ if (!FieldTy->isDependentType() && !FieldTy->isIntegralOrEnumerationType()) {
// Handle incomplete types with specific error.
if (RequireCompleteType(FieldLoc, FieldTy, diag::err_field_incomplete))
return true;
@@ -5629,8 +5730,8 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
SourceLocation Loc = DeclStart;
if (II) Loc = D.getIdentifierLoc();
- TypeSourceInfo *TInfo = 0;
- QualType T = GetTypeForDeclarator(D, S, &TInfo);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
if (getLangOptions().CPlusPlus)
CheckExtraCXXDefaultArguments(D);
@@ -5740,6 +5841,24 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
ZeroWidth = false;
}
+ // Check that 'mutable' is consistent with the type of the declaration.
+ if (!InvalidDecl && Mutable) {
+ unsigned DiagID = 0;
+ if (T->isReferenceType())
+ DiagID = diag::err_mutable_reference;
+ else if (T.isConstQualified())
+ DiagID = diag::err_mutable_const;
+
+ if (DiagID) {
+ SourceLocation ErrLoc = Loc;
+ if (D && D->getDeclSpec().getStorageClassSpecLoc().isValid())
+ ErrLoc = D->getDeclSpec().getStorageClassSpecLoc();
+ Diag(ErrLoc, DiagID);
+ Mutable = false;
+ InvalidDecl = true;
+ }
+ }
+
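The member declarations the relocated 'mutable' check now diagnoses at field-creation time:

    struct S {
      mutable int cache;       // OK
      // mutable const int c;  // err_mutable_const
      // mutable int &ref;     // err_mutable_reference
    };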
FieldDecl *NewFD = FieldDecl::Create(Context, Record, Loc, II, T, TInfo,
BitWidth, Mutable);
if (InvalidDecl)
@@ -5761,41 +5880,42 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
if (const RecordType *RT = EltTy->getAs<RecordType>()) {
CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl());
-
- if (!RDecl->hasTrivialConstructor())
- CXXRecord->setHasTrivialConstructor(false);
- if (!RDecl->hasTrivialCopyConstructor())
- CXXRecord->setHasTrivialCopyConstructor(false);
- if (!RDecl->hasTrivialCopyAssignment())
- CXXRecord->setHasTrivialCopyAssignment(false);
- if (!RDecl->hasTrivialDestructor())
- CXXRecord->setHasTrivialDestructor(false);
-
- // C++ 9.5p1: An object of a class with a non-trivial
- // constructor, a non-trivial copy constructor, a non-trivial
- // destructor, or a non-trivial copy assignment operator
- // cannot be a member of a union, nor can an array of such
- // objects.
- // TODO: C++0x alters this restriction significantly.
- if (Record->isUnion()) {
- // We check for copy constructors before constructors
- // because otherwise we'll never get complaints about
- // copy constructors.
-
- CXXSpecialMember member = CXXInvalid;
+ if (RDecl->getDefinition()) {
+ if (!RDecl->hasTrivialConstructor())
+ CXXRecord->setHasTrivialConstructor(false);
if (!RDecl->hasTrivialCopyConstructor())
- member = CXXCopyConstructor;
- else if (!RDecl->hasTrivialConstructor())
- member = CXXConstructor;
- else if (!RDecl->hasTrivialCopyAssignment())
- member = CXXCopyAssignment;
- else if (!RDecl->hasTrivialDestructor())
- member = CXXDestructor;
-
- if (member != CXXInvalid) {
- Diag(Loc, diag::err_illegal_union_member) << Name << member;
- DiagnoseNontrivial(RT, member);
- NewFD->setInvalidDecl();
+ CXXRecord->setHasTrivialCopyConstructor(false);
+ if (!RDecl->hasTrivialCopyAssignment())
+ CXXRecord->setHasTrivialCopyAssignment(false);
+ if (!RDecl->hasTrivialDestructor())
+ CXXRecord->setHasTrivialDestructor(false);
+
+ // C++ 9.5p1: An object of a class with a non-trivial
+ // constructor, a non-trivial copy constructor, a non-trivial
+ // destructor, or a non-trivial copy assignment operator
+ // cannot be a member of a union, nor can an array of such
+ // objects.
+ // TODO: C++0x alters this restriction significantly.
+ if (Record->isUnion()) {
+ // We check for copy constructors before constructors
+ // because otherwise we'll never get complaints about
+ // copy constructors.
+
+ CXXSpecialMember member = CXXInvalid;
+ if (!RDecl->hasTrivialCopyConstructor())
+ member = CXXCopyConstructor;
+ else if (!RDecl->hasTrivialConstructor())
+ member = CXXConstructor;
+ else if (!RDecl->hasTrivialCopyAssignment())
+ member = CXXCopyAssignment;
+ else if (!RDecl->hasTrivialDestructor())
+ member = CXXDestructor;
+
+ if (member != CXXInvalid) {
+ Diag(Loc, diag::err_illegal_union_member) << Name << member;
+ DiagnoseNontrivial(RT, member);
+ NewFD->setInvalidDecl();
+ }
}
}
}
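
The C++98 9.5p1 restriction enforced above, now applied only once the member's class has a visible definition:

    struct NonTrivial { NonTrivial(const NonTrivial &); };
    union U {
      int i;             // OK
      // NonTrivial n;   // err_illegal_union_member: non-trivial
      //                 // copy constructor
    };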
@@ -5842,7 +5962,7 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
typedef CXXRecordDecl::ctor_iterator ctor_iter;
for (ctor_iter ci = RD->ctor_begin(), ce = RD->ctor_end(); ci != ce;++ci){
const FunctionDecl *body = 0;
- ci->getBody(body);
+ ci->hasBody(body);
if (!body || !cast<CXXConstructorDecl>(body)->isImplicitlyDefined()) {
SourceLocation CtorLoc = ci->getLocation();
Diag(CtorLoc, diag::note_nontrivial_user_defined) << QT << member;
@@ -5876,7 +5996,7 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
case CXXDestructor:
if (RD->hasUserDeclaredDestructor()) {
- SourceLocation DtorLoc = RD->getDestructor(Context)->getLocation();
+ SourceLocation DtorLoc = LookupDestructor(RD)->getLocation();
Diag(DtorLoc, diag::note_nontrivial_user_defined) << QT << member;
return;
}
@@ -5985,8 +6105,8 @@ Sema::DeclPtrTy Sema::ActOnIvar(Scope *S,
// FIXME: Unnamed fields can be handled in various different ways, for
// example, unnamed unions inject all members into the struct namespace!
- TypeSourceInfo *TInfo = 0;
- QualType T = GetTypeForDeclarator(D, S, &TInfo);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
if (BitWidth) {
// 6.7.2.1p3, 6.7.2.1p4
@@ -6188,6 +6308,12 @@ void Sema::ActOnFields(Scope* S,
(FD->getType()->isObjCObjectPointerType() ||
FD->getType().isObjCGCStrong()))
Record->setHasObjectMember(true);
+ else if (Context.getAsArrayType(FD->getType())) {
+ QualType BaseType = Context.getBaseElementType(FD->getType());
+ if (Record && BaseType->isRecordType() &&
+ BaseType->getAs<RecordType>()->getDecl()->hasObjectMember())
+ Record->setHasObjectMember(true);
+ }
// Keep track of the number of named members.
if (FD->getIdentifier())
++NumNamedMembers;
@@ -6241,7 +6367,7 @@ void Sema::ActOnFields(Scope* S,
static bool isRepresentableIntegerValue(ASTContext &Context,
llvm::APSInt &Value,
QualType T) {
- assert(T->isIntegralType() && "Integral type required!");
+ assert(T->isIntegralType(Context) && "Integral type required!");
unsigned BitWidth = Context.getIntWidth(T);
if (Value.isUnsigned() || Value.isNonNegative())
@@ -6255,7 +6381,7 @@ static bool isRepresentableIntegerValue(ASTContext &Context,
static QualType getNextLargerIntegralType(ASTContext &Context, QualType T) {
// FIXME: Int128/UInt128 support, which also needs to be introduced into
// enum checking below.
- assert(T->isIntegralType() && "Integral type required!");
+ assert(T->isIntegralType(Context) && "Integral type required!");
const unsigned NumTypes = 4;
QualType SignedIntegralTypes[NumTypes] = {
Context.ShortTy, Context.IntTy, Context.LongTy, Context.LongLongTy
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
index c6dcc3b..3b82f58 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
@@ -913,7 +913,7 @@ static void HandleWeakImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
isDef = (!VD->hasExternalStorage() || VD->getInit());
} else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- isDef = FD->getBody();
+ isDef = FD->hasBody();
} else if (isa<ObjCPropertyDecl>(D) || isa<ObjCMethodDecl>(D)) {
// We ignore weak import on properties and methods
return;
@@ -1180,6 +1180,54 @@ static FormatAttrKind getFormatAttrKind(llvm::StringRef Format) {
return InvalidFormat;
}
+/// Handle __attribute__((init_priority(priority))) attributes based on
+/// http://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html
+static void HandleInitPriorityAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+ if (!S.getLangOptions().CPlusPlus) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ return;
+ }
+
+ if (!isa<VarDecl>(d) || S.getCurFunctionOrMethodDecl()) {
+ S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr);
+ Attr.setInvalid();
+ return;
+ }
+ QualType T = dyn_cast<VarDecl>(d)->getType();
+ if (S.Context.getAsArrayType(T))
+ T = S.Context.getBaseElementType(T);
+ if (!T->getAs<RecordType>()) {
+ S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr);
+ Attr.setInvalid();
+ return;
+ }
+
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ Attr.setInvalid();
+ return;
+ }
+ Expr *priorityExpr = static_cast<Expr *>(Attr.getArg(0));
+
+ llvm::APSInt priority(32);
+ if (priorityExpr->isTypeDependent() || priorityExpr->isValueDependent() ||
+ !priorityExpr->isIntegerConstantExpr(priority, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "init_priority" << priorityExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ unsigned prioritynum = priority.getZExtValue();
+ if (prioritynum < 101 || prioritynum > 65535) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_outof_range)
+ << priorityExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ d->addAttr(::new (S.Context) InitPriorityAttr(prioritynum));
+}
+
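Usage the new handler accepts and rejects, following the GCC semantics it cites:

    struct Logger { Logger() {} };
    Logger early __attribute__((init_priority(200)));  // OK: 101..65535
    // Logger bad __attribute__((init_priority(50)));  // rejected: out of range
    // int n __attribute__((init_priority(200)));      // rejected: not an
    //                                                 // object of class type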
/// Handle __attribute__((format(type,idx,firstarg))) attributes based on
/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
@@ -1362,9 +1410,10 @@ static void HandleTransparentUnionAttr(Decl *d, const AttributeList &Attr,
FieldDecl *FirstField = *Field;
QualType FirstType = FirstField->getType();
- if (FirstType->isFloatingType() || FirstType->isVectorType()) {
+ if (FirstType->hasFloatingRepresentation() || FirstType->isVectorType()) {
S.Diag(FirstField->getLocation(),
- diag::warn_transparent_union_attribute_floating);
+ diag::warn_transparent_union_attribute_floating)
+ << FirstType->isVectorType() << FirstType;
return;
}
@@ -1410,7 +1459,7 @@ static void HandleAnnotateAttr(Decl *d, const AttributeList &Attr, Sema &S) {
d->addAttr(::new (S.Context) AnnotateAttr(S.Context, SE->getString()));
}
-static void HandleAlignedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void HandleAlignedAttr(Decl *D, const AttributeList &Attr, Sema &S) {
// check the attribute arguments.
if (Attr.getNumArgs() > 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
@@ -1421,30 +1470,36 @@ static void HandleAlignedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// than GNU's, and should error out when it is used to specify a
// weaker alignment, rather than being silently ignored.
- unsigned Align = 0;
if (Attr.getNumArgs() == 0) {
// FIXME: This should be the target specific maximum alignment.
// (For now we just use 128 bits which is the maximum on X86).
- Align = 128;
- d->addAttr(::new (S.Context) AlignedAttr(Align));
+ D->addAttr(::new (S.Context) AlignedAttr(128));
+ return;
+ }
+
+ S.AddAlignedAttr(Attr.getLoc(), D, static_cast<Expr *>(Attr.getArg(0)));
+}
+
+void Sema::AddAlignedAttr(SourceLocation AttrLoc, Decl *D, Expr *E) {
+ if (E->isTypeDependent() || E->isValueDependent()) {
+ // Save dependent expressions in the AST to be instantiated.
+ D->addAttr(::new (Context) AlignedAttr(E));
return;
}
- Expr *alignmentExpr = static_cast<Expr *>(Attr.getArg(0));
llvm::APSInt Alignment(32);
- if (alignmentExpr->isTypeDependent() || alignmentExpr->isValueDependent() ||
- !alignmentExpr->isIntegerConstantExpr(Alignment, S.Context)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
- << "aligned" << alignmentExpr->getSourceRange();
+ if (!E->isIntegerConstantExpr(Alignment, Context)) {
+ Diag(AttrLoc, diag::err_attribute_argument_not_int)
+ << "aligned" << E->getSourceRange();
return;
}
if (!llvm::isPowerOf2_64(Alignment.getZExtValue())) {
- S.Diag(Attr.getLoc(), diag::err_attribute_aligned_not_power_of_two)
- << alignmentExpr->getSourceRange();
+ Diag(AttrLoc, diag::err_attribute_aligned_not_power_of_two)
+ << E->getSourceRange();
return;
}
- d->addAttr(::new (S.Context) AlignedAttr(Alignment.getZExtValue() * 8));
+ D->addAttr(::new (Context) AlignedAttr(Alignment.getZExtValue() * 8));
}
/// HandleModeAttr - This attribute modifies the width of a decl with primitive
@@ -1525,7 +1580,7 @@ static void HandleModeAttr(Decl *D, const AttributeList &Attr, Sema &S) {
if (!OldTy->getAs<BuiltinType>() && !OldTy->isComplexType())
S.Diag(Attr.getLoc(), diag::err_mode_not_primitive);
else if (IntegerMode) {
- if (!OldTy->isIntegralType())
+ if (!OldTy->isIntegralOrEnumerationType())
S.Diag(Attr.getLoc(), diag::err_mode_wrong_type);
} else if (ComplexMode) {
if (!OldTy->isComplexType())
@@ -1650,6 +1705,23 @@ static void HandleNoInlineAttr(Decl *d, const AttributeList &Attr, Sema &S) {
d->addAttr(::new (S.Context) NoInlineAttr());
}
+static void HandleNoInstrumentFunctionAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) NoInstrumentFunctionAttr());
+}
+
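With the attribute now materialized in the AST rather than dropped, codegen for -pg / -finstrument-functions can exclude marked functions; a typical use:

    __attribute__((no_instrument_function))
    void hot_path(void) {
      /* no mcount()/instrumentation calls emitted here */
    }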
static void HandleGNUInlineAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// check the attribute arguments.
if (Attr.getNumArgs() != 0) {
@@ -1951,6 +2023,9 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
case AttributeList::AT_reqd_wg_size:
HandleReqdWorkGroupSize(D, Attr, S); break;
+ case AttributeList::AT_init_priority:
+ HandleInitPriorityAttr(D, Attr, S); break;
+
case AttributeList::AT_packed: HandlePackedAttr (D, Attr, S); break;
case AttributeList::AT_section: HandleSectionAttr (D, Attr, S); break;
case AttributeList::AT_unavailable: HandleUnavailableAttr (D, Attr, S); break;
@@ -1979,9 +2054,11 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
case AttributeList::AT_noinline: HandleNoInlineAttr (D, Attr, S); break;
case AttributeList::AT_regparm: HandleRegparmAttr (D, Attr, S); break;
case AttributeList::IgnoredAttribute:
- case AttributeList::AT_no_instrument_function: // Interacts with -pg.
// Just ignore
break;
+ case AttributeList::AT_no_instrument_function: // Interacts with -pg.
+ HandleNoInstrumentFunctionAttr(D, Attr, S);
+ break;
case AttributeList::AT_stdcall:
case AttributeList::AT_cdecl:
case AttributeList::AT_fastcall:
@@ -1992,7 +2069,8 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
// Ask target about the attribute.
const TargetAttributesSema &TargetAttrs = S.getTargetAttributesSema();
if (!TargetAttrs.ProcessDeclAttribute(scope, D, Attr, S))
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ S.Diag(Attr.getLoc(), diag::warn_unknown_attribute_ignored)
+ << Attr.getName();
break;
}
}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
index 148d146..bd97df2 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
@@ -871,6 +871,17 @@ std::string Sema::getAmbiguousPathsDisplayString(CXXBasePaths &Paths) {
// C++ class member Handling
//===----------------------------------------------------------------------===//
+/// ActOnAccessSpecifier - Parsed an access specifier followed by a colon.
+Sema::DeclPtrTy
+Sema::ActOnAccessSpecifier(AccessSpecifier Access,
+ SourceLocation ASLoc, SourceLocation ColonLoc) {
+ assert(Access != AS_none && "Invalid kind for syntactic access specifier!");
+ AccessSpecDecl* ASDecl = AccessSpecDecl::Create(Context, Access, CurContext,
+ ASLoc, ColonLoc);
+ CurContext->addHiddenDecl(ASDecl);
+ return DeclPtrTy::make(ASDecl);
+}
+
/// ActOnCXXMemberDeclarator - This is invoked when a C++ class member
/// declarator is parsed. 'AS' is the access specifier, 'BW' specifies the
/// bitfield width if there is one and 'InitExpr' specifies the initializer if
@@ -886,10 +897,18 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
Expr *Init = static_cast<Expr*>(InitExpr);
SourceLocation Loc = D.getIdentifierLoc();
- bool isFunc = D.isFunctionDeclarator();
-
+ assert(isa<CXXRecordDecl>(CurContext));
assert(!DS.isFriendSpecified());
+ bool isFunc = false;
+ if (D.isFunctionDeclarator())
+ isFunc = true;
+ else if (D.getNumTypeObjects() == 0 &&
+ D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_typename) {
+ QualType TDType = GetTypeFromParser(DS.getTypeRep());
+ isFunc = TDType->isFunctionType();
+ }
+
// C++ 9.2p6: A member shall not be declared to have automatic storage
// duration (auto, register) or with the extern storage-class-specifier.
// C++ 7.1.1p8: The mutable specifier can be applied only to names of class
@@ -911,22 +930,6 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
// FIXME: It would be nicer if the keyword was ignored only for this
// declarator. Otherwise we could get follow-up errors.
D.getMutableDeclSpec().ClearStorageClassSpecs();
- } else {
- QualType T = GetTypeForDeclarator(D, S);
- diag::kind err = static_cast<diag::kind>(0);
- if (T->isReferenceType())
- err = diag::err_mutable_reference;
- else if (T.isConstQualified())
- err = diag::err_mutable_const;
- if (err != 0) {
- if (DS.getStorageClassSpecLoc().isValid())
- Diag(DS.getStorageClassSpecLoc(), err);
- else
- Diag(DS.getThreadSpecLoc(), err);
- // FIXME: It would be nicer if the keyword was ignored only for this
- // declarator. Otherwise we could get follow-up errors.
- D.getMutableDeclSpec().ClearStorageClassSpecs();
- }
}
break;
default:
@@ -938,18 +941,6 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
D.getMutableDeclSpec().ClearStorageClassSpecs();
}
- if (!isFunc &&
- D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_typename &&
- D.getNumTypeObjects() == 0) {
- // Check also for this case:
- //
- // typedef int f();
- // f a;
- //
- QualType TDType = GetTypeFromParser(DS.getTypeRep());
- isFunc = TDType->isFunctionType();
- }
-
bool isInstField = ((DS.getStorageClassSpec() == DeclSpec::SCS_unspecified ||
DS.getStorageClassSpec() == DeclSpec::SCS_mutable) &&
!isFunc);
@@ -1148,6 +1139,7 @@ Sema::ActOnMemInitializer(DeclPtrTy ConstructorD,
return true;
R.clear();
+ R.setLookupName(MemberOrBase);
}
}
@@ -1226,18 +1218,25 @@ Sema::ActOnMemInitializer(DeclPtrTy ConstructorD,
/// containing the field that is being initialized. Returns true if an
/// uninitialized field was used, and updates the SourceLocation parameter;
/// returns false otherwise.
-static bool InitExprContainsUninitializedFields(const Stmt* S,
- const FieldDecl* LhsField,
- SourceLocation* L) {
- const MemberExpr* ME = dyn_cast<MemberExpr>(S);
- if (ME) {
- const NamedDecl* RhsField = ME->getMemberDecl();
+static bool InitExprContainsUninitializedFields(const Stmt *S,
+ const FieldDecl *LhsField,
+ SourceLocation *L) {
+ if (isa<CallExpr>(S)) {
+ // Do not descend into function calls or constructors, as the use
+ // of an uninitialized field may be valid. One would have to inspect
+ // the contents of the function/ctor to determine if it is safe or not.
+ // i.e. Pass-by-value is never safe, but pass-by-reference and pointers
+ // may be safe, depending on what the function/ctor does.
+ return false;
+ }
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) {
+ const NamedDecl *RhsField = ME->getMemberDecl();
if (RhsField == LhsField) {
// Initializing a field with itself. Throw a warning.
// But wait; there are exceptions!
// Exception #1: The field may not belong to this record.
// e.g. Foo(const Foo& rhs) : A(rhs.A) {}
- const Expr* base = ME->getBase();
+ const Expr *base = ME->getBase();
if (base != NULL && !isa<CXXThisExpr>(base->IgnoreParenCasts())) {
// Even though the field matches, it does not belong to this record.
return false;
@@ -1248,21 +1247,16 @@ static bool InitExprContainsUninitializedFields(const Stmt* S,
return true;
}
}
- bool found = false;
- for (Stmt::const_child_iterator it = S->child_begin();
- it != S->child_end() && found == false;
- ++it) {
- if (isa<CallExpr>(S)) {
- // Do not descend into function calls or constructors, as the use
- // of an uninitialized field may be valid. One would have to inspect
- // the contents of the function/ctor to determine if it is safe or not.
- // i.e. Pass-by-value is never safe, but pass-by-reference and pointers
- // may be safe, depending on what the function/ctor does.
+ for (Stmt::const_child_iterator it = S->child_begin(), e = S->child_end();
+ it != e; ++it) {
+ if (!*it) {
+ // An expression such as 'member(arg ?: "")' may trigger this.
continue;
}
- found = InitExprContainsUninitializedFields(*it, LhsField, L);
+ if (InitExprContainsUninitializedFields(*it, LhsField, L))
+ return true;
}
- return found;
+ return false;
}
Sema::MemInitResult
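A sketch of what the restructured walk above flags and what it deliberately
skips (names are illustrative):

    struct S {
      int a, b;
      S() : a(a) { }             // diagnosed: 'a' is used uninitialized here
      S(int) : a(takeRef(b)) { } // not diagnosed: CallExpr subtrees are skipped
      static int takeRef(int &v);
    };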
@@ -1375,8 +1369,48 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
for (unsigned i = 0; i < NumArgs; i++)
HasDependentArg |= Args[i]->isTypeDependent();
- SourceLocation BaseLoc = BaseTInfo->getTypeLoc().getLocalSourceRange().getBegin();
- if (BaseType->isDependentType() || HasDependentArg) {
+ SourceLocation BaseLoc
+ = BaseTInfo->getTypeLoc().getLocalSourceRange().getBegin();
+
+ if (!BaseType->isDependentType() && !BaseType->isRecordType())
+ return Diag(BaseLoc, diag::err_base_init_does_not_name_class)
+ << BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange();
+
+ // C++ [class.base.init]p2:
+ // [...] Unless the mem-initializer-id names a nonstatic data
+ // member of the constructor’s class or a direct or virtual base
+ // of that class, the mem-initializer is ill-formed. A
+ // mem-initializer-list can initialize a base class using any
+ // name that denotes that base class type.
+ bool Dependent = BaseType->isDependentType() || HasDependentArg;
+
+ // Check for direct and virtual base classes.
+ const CXXBaseSpecifier *DirectBaseSpec = 0;
+ const CXXBaseSpecifier *VirtualBaseSpec = 0;
+ if (!Dependent) {
+ FindBaseInitializer(*this, ClassDecl, BaseType, DirectBaseSpec,
+ VirtualBaseSpec);
+
+ // C++ [base.class.init]p2:
+ // Unless the mem-initializer-id names a nonstatic data member of the
+ // constructor's class or a direct or virtual base of that class, the
+ // mem-initializer is ill-formed.
+ if (!DirectBaseSpec && !VirtualBaseSpec) {
+ // If the class has any dependent bases, then it's possible that
+ // one of those types will resolve to the same type as
+ // BaseType. Therefore, just treat this as a dependent base
+ // class initialization. FIXME: Should we try to check the
+ // initialization anyway? It seems odd.
+ if (ClassDecl->hasAnyDependentBases())
+ Dependent = true;
+ else
+ return Diag(BaseLoc, diag::err_not_direct_base_or_virtual)
+ << BaseType << Context.getTypeDeclType(ClassDecl)
+ << BaseTInfo->getTypeLoc().getLocalSourceRange();
+ }
+ }
+
+ if (Dependent) {
// Can't check initialization for a base of dependent type or when
// any of the arguments are type-dependent expressions.
OwningExprResult BaseInit
@@ -1396,23 +1430,6 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
BaseInit.takeAs<Expr>(),
RParenLoc);
}
-
- if (!BaseType->isRecordType())
- return Diag(BaseLoc, diag::err_base_init_does_not_name_class)
- << BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange();
-
- // C++ [class.base.init]p2:
- // [...] Unless the mem-initializer-id names a nonstatic data
- // member of the constructor’s class or a direct or virtual base
- // of that class, the mem-initializer is ill-formed. A
- // mem-initializer-list can initialize a base class using any
- // name that denotes that base class type.
-
- // Check for direct and virtual base classes.
- const CXXBaseSpecifier *DirectBaseSpec = 0;
- const CXXBaseSpecifier *VirtualBaseSpec = 0;
- FindBaseInitializer(*this, ClassDecl, BaseType, DirectBaseSpec,
- VirtualBaseSpec);
// C++ [base.class.init]p2:
// If a mem-initializer-id is ambiguous because it designates both
@@ -1421,14 +1438,6 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
if (DirectBaseSpec && VirtualBaseSpec)
return Diag(BaseLoc, diag::err_base_init_direct_and_virtual)
<< BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange();
- // C++ [base.class.init]p2:
- // Unless the mem-initializer-id names a nonstatic data membeer of the
- // constructor's class ot a direst or virtual base of that class, the
- // mem-initializer is ill-formed.
- if (!DirectBaseSpec && !VirtualBaseSpec)
- return Diag(BaseLoc, diag::err_not_direct_base_or_virtual)
- << BaseType << Context.getTypeDeclType(ClassDecl)
- << BaseTInfo->getTypeLoc().getLocalSourceRange();
CXXBaseSpecifier *BaseSpec
= const_cast<CXXBaseSpecifier *>(DirectBaseSpec);
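With the checks reordered, a mem-initializer that names neither a base nor a
member is rejected up front; for example:

    struct A { };
    struct B : virtual A { };
    struct C : B {
      C() : A(), B() { }   // OK: B is a direct base, A is a virtual base
    };
    struct D {
      D() : A() { }        // err_not_direct_base_or_virtual
    };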
@@ -1571,8 +1580,9 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
if (Field->isInvalidDecl())
return true;
+ SourceLocation Loc = Constructor->getLocation();
+
if (ImplicitInitKind == IIK_Copy) {
- SourceLocation Loc = Constructor->getLocation();
ParmVarDecl *Param = Constructor->getParamDecl(0);
QualType ParamType = Param->getType().getNonReferenceType();
@@ -1680,7 +1690,7 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
if (FieldBaseElementType->isRecordType()) {
InitializedEntity InitEntity = InitializedEntity::InitializeMember(Field);
InitializationKind InitKind =
- InitializationKind::CreateDefault(Constructor->getLocation());
+ InitializationKind::CreateDefault(Loc);
InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, 0, 0);
Sema::OwningExprResult MemberInit =
@@ -1692,10 +1702,9 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
CXXMemberInit =
new (SemaRef.Context) CXXBaseOrMemberInitializer(SemaRef.Context,
- Field, SourceLocation(),
- SourceLocation(),
+ Field, Loc, Loc,
MemberInit.takeAs<Expr>(),
- SourceLocation());
+ Loc);
return false;
}
@@ -1744,38 +1753,67 @@ struct BaseAndFieldInfo {
};
}
+static void RecordFieldInitializer(BaseAndFieldInfo &Info,
+ FieldDecl *Top, FieldDecl *Field,
+ CXXBaseOrMemberInitializer *Init) {
+ // If the member doesn't need to be initialized, Init will still be null.
+ if (!Init)
+ return;
+
+ Info.AllToInit.push_back(Init);
+ if (Field != Top) {
+ Init->setMember(Top);
+ Init->setAnonUnionMember(Field);
+ }
+}
+
static bool CollectFieldInitializer(BaseAndFieldInfo &Info,
FieldDecl *Top, FieldDecl *Field) {
- // Overwhelmingly common case: we have a direct initializer for this field.
+ // Overwhelmingly common case: we have a direct initializer for this field.
if (CXXBaseOrMemberInitializer *Init = Info.AllBaseFields.lookup(Field)) {
- Info.AllToInit.push_back(Init);
-
- if (Field != Top) {
- Init->setMember(Top);
- Init->setAnonUnionMember(Field);
- }
+ RecordFieldInitializer(Info, Top, Field, Init);
return false;
}
if (Info.IIK == IIK_Default && Field->isAnonymousStructOrUnion()) {
const RecordType *FieldClassType = Field->getType()->getAs<RecordType>();
assert(FieldClassType && "anonymous struct/union without record type");
-
- // Walk through the members, tying in any initializers for fields
- // we find. The earlier semantic checks should prevent redundant
- // initialization of union members, given the requirement that
- // union members never have non-trivial default constructors.
-
- // TODO: in C++0x, it might be legal to have union members with
- // non-trivial default constructors in unions. Revise this
- // implementation then with the appropriate semantics.
CXXRecordDecl *FieldClassDecl
= cast<CXXRecordDecl>(FieldClassType->getDecl());
- for (RecordDecl::field_iterator FA = FieldClassDecl->field_begin(),
- EA = FieldClassDecl->field_end(); FA != EA; FA++)
- if (CollectFieldInitializer(Info, Top, *FA))
- return true;
+
+    // Even though union members never have non-trivial default constructors
+    // in C++03, we still build member initializers for aggregate record types
+    // that can be union members, and C++0x allows non-trivial default
+    // constructors for union members, so we ensure that only one member is
+    // initialized for these.
+ if (FieldClassDecl->isUnion()) {
+ // First check for an explicit initializer for one field.
+ for (RecordDecl::field_iterator FA = FieldClassDecl->field_begin(),
+ EA = FieldClassDecl->field_end(); FA != EA; FA++) {
+ if (CXXBaseOrMemberInitializer *Init = Info.AllBaseFields.lookup(*FA)) {
+ RecordFieldInitializer(Info, Top, *FA, Init);
+
+ // Once we've initialized a field of an anonymous union, the union
+ // field in the class is also initialized, so exit immediately.
+ return false;
+ }
+ }
+
+      // Fall through and construct a default initializer for the union as a
+      // whole, which can call its default constructor if such a thing exists
+      // (C++0x perhaps). FIXME: It's not clear that this is the correct
+      // behavior going forward; once C++0x finalizes the semantics of
+      // anonymous unions, we should revisit this.
+ } else {
+ // For structs, we simply descend through to initialize all members where
+ // necessary.
+ for (RecordDecl::field_iterator FA = FieldClassDecl->field_begin(),
+ EA = FieldClassDecl->field_end(); FA != EA; FA++) {
+ if (CollectFieldInitializer(Info, Top, *FA))
+ return true;
+ }
+ }
}
// Don't try to build an implicit initializer if there were semantic
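Under the new union handling, at most one field of an anonymous union member
gets an initializer; a sketch:

    struct S {
      union {
        int i;
        float f;
      };
      S() : i(0) { }   // initializing 'i' initializes the union; 'f' is left alone
    };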
@@ -1787,15 +1825,8 @@ static bool CollectFieldInitializer(BaseAndFieldInfo &Info,
CXXBaseOrMemberInitializer *Init = 0;
if (BuildImplicitMemberInitializer(Info.S, Info.Ctor, Info.IIK, Field, Init))
return true;
-
- // If the member doesn't need to be initialized, Init will still be null.
- if (!Init) return false;
- Info.AllToInit.push_back(Init);
- if (Top != Field) {
- Init->setMember(Top);
- Init->setAnonUnionMember(Field);
- }
+ RecordFieldInitializer(Info, Top, Field, Init);
return false;
}
@@ -2199,7 +2230,7 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
if (FieldClassDecl->hasTrivialDestructor())
continue;
- CXXDestructorDecl *Dtor = FieldClassDecl->getDestructor(Context);
+ CXXDestructorDecl *Dtor = LookupDestructor(FieldClassDecl);
CheckDestructorAccess(Field->getLocation(), Dtor,
PDiag(diag::err_access_dtor_field)
<< Field->getDeclName()
@@ -2225,7 +2256,7 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
if (BaseClassDecl->hasTrivialDestructor())
continue;
- CXXDestructorDecl *Dtor = BaseClassDecl->getDestructor(Context);
+ CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl);
// FIXME: caret should be on the start of the class name
CheckDestructorAccess(Base->getSourceRange().getBegin(), Dtor,
@@ -2252,7 +2283,7 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
if (BaseClassDecl->hasTrivialDestructor())
continue;
- CXXDestructorDecl *Dtor = BaseClassDecl->getDestructor(Context);
+ CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl);
CheckDestructorAccess(ClassDecl->getLocation(), Dtor,
PDiag(diag::err_access_dtor_vbase)
<< VBase->getType());
@@ -2326,6 +2357,10 @@ bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
CXXFinalOverriderMap FinalOverriders;
RD->getFinalOverriders(FinalOverriders);
+ // Keep a set of seen pure methods so we won't diagnose the same method
+ // more than once.
+ llvm::SmallPtrSet<const CXXMethodDecl *, 8> SeenPureMethods;
+
for (CXXFinalOverriderMap::iterator M = FinalOverriders.begin(),
MEnd = FinalOverriders.end();
M != MEnd;
@@ -2345,6 +2380,9 @@ bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
if (!SO->second.front().Method->isPure())
continue;
+ if (!SeenPureMethods.insert(SO->second.front().Method))
+ continue;
+
Diag(SO->second.front().Method->getLocation(),
diag::note_pure_virtual_function)
<< SO->second.front().Method->getDeclName();
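The SeenPureMethods set matters when several subobject paths reach the same
pure function, e.g. in a non-virtual diamond (a sketch):

    struct Abstract { virtual void f() = 0; };
    struct L : Abstract { };
    struct R : Abstract { };
    struct D : L, R { };   // two Abstract subobjects, both leave f() pure
    D d;                   // error: D is abstract; the note on f() appears once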
@@ -2422,12 +2460,12 @@ namespace {
/// \brief Perform semantic checks on a class definition that has just been
/// completed, introducing implicitly-declared members, checking for
/// abstract types, etc.
-void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
+void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
if (!Record || Record->isInvalidDecl())
return;
if (!Record->isDependentType())
- AddImplicitlyDeclaredMembersToClass(S, Record);
+ AddImplicitlyDeclaredMembersToClass(Record);
if (Record->isInvalidDecl())
return;
@@ -2546,268 +2584,101 @@ void Sema::ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
(DeclPtrTy*)FieldCollector->getCurFields(),
FieldCollector->getCurNumFields(), LBrac, RBrac, AttrList);
- CheckCompletedCXXClass(S,
- dyn_cast_or_null<CXXRecordDecl>(TagDecl.getAs<Decl>()));
+ CheckCompletedCXXClass(
+ dyn_cast_or_null<CXXRecordDecl>(TagDecl.getAs<Decl>()));
+}
+
+namespace {
+ /// \brief Helper class that collects exception specifications for
+ /// implicitly-declared special member functions.
+ class ImplicitExceptionSpecification {
+ ASTContext &Context;
+ bool AllowsAllExceptions;
+ llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
+ llvm::SmallVector<QualType, 4> Exceptions;
+
+ public:
+ explicit ImplicitExceptionSpecification(ASTContext &Context)
+ : Context(Context), AllowsAllExceptions(false) { }
+
+ /// \brief Whether the special member function should have any
+ /// exception specification at all.
+ bool hasExceptionSpecification() const {
+ return !AllowsAllExceptions;
+ }
+
+ /// \brief Whether the special member function should have a
+ /// throw(...) exception specification (a Microsoft extension).
+ bool hasAnyExceptionSpecification() const {
+ return false;
+ }
+
+ /// \brief The number of exceptions in the exception specification.
+ unsigned size() const { return Exceptions.size(); }
+
+ /// \brief The set of exceptions in the exception specification.
+ const QualType *data() const { return Exceptions.data(); }
+
+    /// \brief Note that the implicitly-declared special member function
+    /// calls the given method, folding that method's exception specification
+    /// into the one being computed.
+ void CalledDecl(CXXMethodDecl *Method) {
+ // If we already know that we allow all exceptions, do nothing.
+ if (AllowsAllExceptions || !Method)
+ return;
+
+ const FunctionProtoType *Proto
+ = Method->getType()->getAs<FunctionProtoType>();
+
+ // If this function can throw any exceptions, make a note of that.
+ if (!Proto->hasExceptionSpec() || Proto->hasAnyExceptionSpec()) {
+ AllowsAllExceptions = true;
+ ExceptionsSeen.clear();
+ Exceptions.clear();
+ return;
+ }
+
+ // Record the exceptions in this function's exception specification.
+ for (FunctionProtoType::exception_iterator E = Proto->exception_begin(),
+ EEnd = Proto->exception_end();
+ E != EEnd; ++E)
+ if (ExceptionsSeen.insert(Context.getCanonicalType(*E)))
+ Exceptions.push_back(*E);
+ }
+ };
}
+
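Per C++ [except.spec]p14, the collected specification is the union of the
specifications of everything the implicit member calls; roughly:

    struct A { A() throw(int); };
    struct B { B() throw(float); };
    struct C : A {
      B b;
      // The implicitly declared default constructor behaves as if written:
      //   C() throw(int, float);
      // If any called function allows all exceptions, no spec is emitted.
    };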
/// AddImplicitlyDeclaredMembersToClass - Adds any implicitly-declared
/// special functions, such as the default constructor, copy
/// constructor, or destructor, to the given C++ class (C++
/// [special]p1). This routine can only be executed just before the
/// definition of the class is complete.
-///
-/// The scope, if provided, is the class scope.
-void Sema::AddImplicitlyDeclaredMembersToClass(Scope *S,
- CXXRecordDecl *ClassDecl) {
- CanQualType ClassType
- = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
-
- // FIXME: Implicit declarations have exception specifications, which are
- // the union of the specifications of the implicitly called functions.
-
- if (!ClassDecl->hasUserDeclaredConstructor()) {
- // C++ [class.ctor]p5:
- // A default constructor for a class X is a constructor of class X
- // that can be called without an argument. If there is no
- // user-declared constructor for class X, a default constructor is
- // implicitly declared. An implicitly-declared default constructor
- // is an inline public member of its class.
- DeclarationName Name
- = Context.DeclarationNames.getCXXConstructorName(ClassType);
- CXXConstructorDecl *DefaultCon =
- CXXConstructorDecl::Create(Context, ClassDecl,
- ClassDecl->getLocation(), Name,
- Context.getFunctionType(Context.VoidTy,
- 0, 0, false, 0,
- /*FIXME*/false, false,
- 0, 0,
- FunctionType::ExtInfo()),
- /*TInfo=*/0,
- /*isExplicit=*/false,
- /*isInline=*/true,
- /*isImplicitlyDeclared=*/true);
- DefaultCon->setAccess(AS_public);
- DefaultCon->setImplicit();
- DefaultCon->setTrivial(ClassDecl->hasTrivialConstructor());
- if (S)
- PushOnScopeChains(DefaultCon, S, true);
- else
- ClassDecl->addDecl(DefaultCon);
- }
+void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
+ if (!ClassDecl->hasUserDeclaredConstructor())
+ ++ASTContext::NumImplicitDefaultConstructors;
- if (!ClassDecl->hasUserDeclaredCopyConstructor()) {
- // C++ [class.copy]p4:
- // If the class definition does not explicitly declare a copy
- // constructor, one is declared implicitly.
-
- // C++ [class.copy]p5:
- // The implicitly-declared copy constructor for a class X will
- // have the form
- //
- // X::X(const X&)
- //
- // if
- bool HasConstCopyConstructor = true;
-
- // -- each direct or virtual base class B of X has a copy
- // constructor whose first parameter is of type const B& or
- // const volatile B&, and
- for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin();
- HasConstCopyConstructor && Base != ClassDecl->bases_end(); ++Base) {
- const CXXRecordDecl *BaseClassDecl
- = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
- HasConstCopyConstructor
- = BaseClassDecl->hasConstCopyConstructor(Context);
- }
-
- // -- for all the nonstatic data members of X that are of a
- // class type M (or array thereof), each such class type
- // has a copy constructor whose first parameter is of type
- // const M& or const volatile M&.
- for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin();
- HasConstCopyConstructor && Field != ClassDecl->field_end();
- ++Field) {
- QualType FieldType = (*Field)->getType();
- if (const ArrayType *Array = Context.getAsArrayType(FieldType))
- FieldType = Array->getElementType();
- if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
- const CXXRecordDecl *FieldClassDecl
- = cast<CXXRecordDecl>(FieldClassType->getDecl());
- HasConstCopyConstructor
- = FieldClassDecl->hasConstCopyConstructor(Context);
- }
- }
-
- // Otherwise, the implicitly declared copy constructor will have
- // the form
- //
- // X::X(X&)
- QualType ArgType = ClassType;
- if (HasConstCopyConstructor)
- ArgType = ArgType.withConst();
- ArgType = Context.getLValueReferenceType(ArgType);
-
- // An implicitly-declared copy constructor is an inline public
- // member of its class.
- DeclarationName Name
- = Context.DeclarationNames.getCXXConstructorName(ClassType);
- CXXConstructorDecl *CopyConstructor
- = CXXConstructorDecl::Create(Context, ClassDecl,
- ClassDecl->getLocation(), Name,
- Context.getFunctionType(Context.VoidTy,
- &ArgType, 1,
- false, 0,
- /*FIXME: hasExceptionSpec*/false,
- false, 0, 0,
- FunctionType::ExtInfo()),
- /*TInfo=*/0,
- /*isExplicit=*/false,
- /*isInline=*/true,
- /*isImplicitlyDeclared=*/true);
- CopyConstructor->setAccess(AS_public);
- CopyConstructor->setImplicit();
- CopyConstructor->setTrivial(ClassDecl->hasTrivialCopyConstructor());
-
- // Add the parameter to the constructor.
- ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyConstructor,
- ClassDecl->getLocation(),
- /*IdentifierInfo=*/0,
- ArgType, /*TInfo=*/0,
- VarDecl::None,
- VarDecl::None, 0);
- CopyConstructor->setParams(&FromParam, 1);
- if (S)
- PushOnScopeChains(CopyConstructor, S, true);
- else
- ClassDecl->addDecl(CopyConstructor);
- }
+ if (!ClassDecl->hasUserDeclaredCopyConstructor())
+ ++ASTContext::NumImplicitCopyConstructors;
if (!ClassDecl->hasUserDeclaredCopyAssignment()) {
- // Note: The following rules are largely analoguous to the copy
- // constructor rules. Note that virtual bases are not taken into account
- // for determining the argument type of the operator. Note also that
- // operators taking an object instead of a reference are allowed.
- //
- // C++ [class.copy]p10:
- // If the class definition does not explicitly declare a copy
- // assignment operator, one is declared implicitly.
- // The implicitly-defined copy assignment operator for a class X
- // will have the form
- //
- // X& X::operator=(const X&)
- //
- // if
- bool HasConstCopyAssignment = true;
-
- // -- each direct base class B of X has a copy assignment operator
- // whose parameter is of type const B&, const volatile B& or B,
- // and
- for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin();
- HasConstCopyAssignment && Base != ClassDecl->bases_end(); ++Base) {
- assert(!Base->getType()->isDependentType() &&
- "Cannot generate implicit members for class with dependent bases.");
- const CXXRecordDecl *BaseClassDecl
- = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
- const CXXMethodDecl *MD = 0;
- HasConstCopyAssignment = BaseClassDecl->hasConstCopyAssignment(Context,
- MD);
- }
-
- // -- for all the nonstatic data members of X that are of a class
- // type M (or array thereof), each such class type has a copy
- // assignment operator whose parameter is of type const M&,
- // const volatile M& or M.
- for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin();
- HasConstCopyAssignment && Field != ClassDecl->field_end();
- ++Field) {
- QualType FieldType = (*Field)->getType();
- if (const ArrayType *Array = Context.getAsArrayType(FieldType))
- FieldType = Array->getElementType();
- if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
- const CXXRecordDecl *FieldClassDecl
- = cast<CXXRecordDecl>(FieldClassType->getDecl());
- const CXXMethodDecl *MD = 0;
- HasConstCopyAssignment
- = FieldClassDecl->hasConstCopyAssignment(Context, MD);
- }
- }
-
- // Otherwise, the implicitly declared copy assignment operator will
- // have the form
- //
- // X& X::operator=(X&)
- QualType ArgType = ClassType;
- QualType RetType = Context.getLValueReferenceType(ArgType);
- if (HasConstCopyAssignment)
- ArgType = ArgType.withConst();
- ArgType = Context.getLValueReferenceType(ArgType);
-
- // An implicitly-declared copy assignment operator is an inline public
- // member of its class.
- DeclarationName Name =
- Context.DeclarationNames.getCXXOperatorName(OO_Equal);
- CXXMethodDecl *CopyAssignment =
- CXXMethodDecl::Create(Context, ClassDecl, ClassDecl->getLocation(), Name,
- Context.getFunctionType(RetType, &ArgType, 1,
- false, 0,
- /*FIXME: hasExceptionSpec*/false,
- false, 0, 0,
- FunctionType::ExtInfo()),
- /*TInfo=*/0, /*isStatic=*/false,
- /*StorageClassAsWritten=*/FunctionDecl::None,
- /*isInline=*/true);
- CopyAssignment->setAccess(AS_public);
- CopyAssignment->setImplicit();
- CopyAssignment->setTrivial(ClassDecl->hasTrivialCopyAssignment());
- CopyAssignment->setCopyAssignment(true);
-
- // Add the parameter to the operator.
- ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment,
- ClassDecl->getLocation(),
- /*Id=*/0,
- ArgType, /*TInfo=*/0,
- VarDecl::None,
- VarDecl::None, 0);
- CopyAssignment->setParams(&FromParam, 1);
-
- // Don't call addedAssignmentOperator. There is no way to distinguish an
- // implicit from an explicit assignment operator.
- if (S)
- PushOnScopeChains(CopyAssignment, S, true);
- else
- ClassDecl->addDecl(CopyAssignment);
- AddOverriddenMethods(ClassDecl, CopyAssignment);
+ ++ASTContext::NumImplicitCopyAssignmentOperators;
+
+ // If we have a dynamic class, then the copy assignment operator may be
+ // virtual, so we have to declare it immediately. This ensures that, e.g.,
+ // it shows up in the right place in the vtable and that we diagnose
+ // problems with the implicit exception specification.
+ if (ClassDecl->isDynamicClass())
+ DeclareImplicitCopyAssignment(ClassDecl);
}
if (!ClassDecl->hasUserDeclaredDestructor()) {
- // C++ [class.dtor]p2:
- // If a class has no user-declared destructor, a destructor is
- // declared implicitly. An implicitly-declared destructor is an
- // inline public member of its class.
- QualType Ty = Context.getFunctionType(Context.VoidTy,
- 0, 0, false, 0,
- /*FIXME: hasExceptionSpec*/false,
- false, 0, 0, FunctionType::ExtInfo());
-
- DeclarationName Name
- = Context.DeclarationNames.getCXXDestructorName(ClassType);
- CXXDestructorDecl *Destructor
- = CXXDestructorDecl::Create(Context, ClassDecl,
- ClassDecl->getLocation(), Name, Ty,
- /*isInline=*/true,
- /*isImplicitlyDeclared=*/true);
- Destructor->setAccess(AS_public);
- Destructor->setImplicit();
- Destructor->setTrivial(ClassDecl->hasTrivialDestructor());
- if (S)
- PushOnScopeChains(Destructor, S, true);
- else
- ClassDecl->addDecl(Destructor);
-
- // This could be uniqued if it ever proves significant.
- Destructor->setTypeSourceInfo(Context.getTrivialTypeSourceInfo(Ty));
+ ++ASTContext::NumImplicitDestructors;
- AddOverriddenMethods(ClassDecl, Destructor);
+ // If we have a dynamic class, then the destructor may be virtual, so we
+ // have to declare the destructor immediately. This ensures that, e.g., it
+ // shows up in the right place in the vtable and that we diagnose problems
+ // with the implicit exception specification.
+ if (ClassDecl->isDynamicClass())
+ DeclareImplicitDestructor(ClassDecl);
}
}
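The dynamic-class carve-outs above exist because an implicit member can
override a virtual function; a contrived sketch:

    struct Derived;
    struct Base {
      virtual ~Base();
      virtual Base &operator=(const Derived &);
    };
    struct Derived : Base { };
    // Implicit ~Derived() and Derived::operator=(const Derived&) are overrides,
    // so both are declared eagerly for vtable layout, not lazily on first use.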
@@ -2952,9 +2823,7 @@ QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
// Rebuild the function type "R" without any type qualifiers (in
// case any of the errors above fired) and with "void" as the
- // return type, since constructors don't have return types. We
- // *always* have to do this, because GetTypeForDeclarator will
- // put in a result type of "int" when none was specified.
+ // return type, since constructors don't have return types.
const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
return Context.getFunctionType(Context.VoidTy, Proto->arg_type_begin(),
Proto->getNumArgs(),
@@ -2990,8 +2859,11 @@ void Sema::CheckConstructor(CXXConstructorDecl *Constructor) {
QualType ClassTy = Context.getTagDeclType(ClassDecl);
if (Context.getCanonicalType(ParamType).getUnqualifiedType() == ClassTy) {
SourceLocation ParamLoc = Constructor->getParamDecl(0)->getLocation();
+ const char *ConstRef
+ = Constructor->getParamDecl(0)->getIdentifier() ? "const &"
+ : " const &";
Diag(ParamLoc, diag::err_constructor_byvalue_arg)
- << FixItHint::CreateInsertion(ParamLoc, " const &");
+ << FixItHint::CreateInsertion(ParamLoc, ConstRef);
// FIXME: Rather than making the constructor invalid, we should endeavor
// to fix the type.
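The ConstRef choice above only keeps the fix-it's spacing right for both
spellings of the parameter:

    struct X {
      X(X);        // fix-it appends " const &"            ->  X(X const &);
      X(X other);  // fix-it inserts "const &" at 'other'  ->  X(X const &other);
    };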
@@ -3026,6 +2898,8 @@ bool Sema::CheckDestructor(CXXDestructorDecl *Destructor) {
Context.DeclarationNames.getCXXOperatorName(OO_Delete);
if (FindDeallocationFunction(Loc, RD, Name, OperatorDelete))
return true;
+
+ MarkDeclarationReferenced(Loc, OperatorDelete);
Destructor->setOperatorDelete(OperatorDelete);
}
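Marking the deallocation function referenced matters chiefly for virtual
destructors, whose deleting-destructor vtable entry calls it:

    struct D {
      static void operator delete(void *);
      virtual ~D();   // checking this destructor marks D::operator delete as used
    };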
@@ -3046,7 +2920,7 @@ FTIHasSingleVoidArgument(DeclaratorChunk::FunctionTypeInfo &FTI) {
/// emit diagnostics and set the declarator to invalid. Even if this happens,
/// R will be updated to reflect a well-formed type for the destructor and
/// returned.
-QualType Sema::CheckDestructorDeclarator(Declarator &D,
+QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
FunctionDecl::StorageClass& SC) {
// C++ [class.dtor]p1:
// [...] A typedef-name that names a class is a class-name
@@ -3054,11 +2928,9 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D,
// be used as the identifier in the declarator for a destructor
// declaration.
QualType DeclaratorType = GetTypeFromParser(D.getName().DestructorName);
- if (isa<TypedefType>(DeclaratorType)) {
+ if (isa<TypedefType>(DeclaratorType))
Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name)
<< DeclaratorType;
- D.setInvalidType();
- }
// C++ [class.dtor]p2:
// A destructor is used to destroy objects of its class type. A
@@ -3072,9 +2944,10 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D,
if (!D.isInvalidType())
Diag(D.getIdentifierLoc(), diag::err_destructor_cannot_be)
<< "static" << SourceRange(D.getDeclSpec().getStorageClassSpecLoc())
- << SourceRange(D.getIdentifierLoc());
+ << SourceRange(D.getIdentifierLoc())
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+
SC = FunctionDecl::None;
- D.setInvalidType();
}
if (D.getDeclSpec().hasTypeSpecifier() && !D.isInvalidType()) {
// Destructors don't have return types, but the parser will
@@ -3122,11 +2995,17 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D,
// Rebuild the function type "R" without any type qualifiers or
// parameters (in case any of the errors above fired) and with
// "void" as the return type, since destructors don't have return
- // types. We *always* have to do this, because GetTypeForDeclarator
- // will put in a result type of "int" when none was specified.
- // FIXME: Exceptions!
+ // types.
+ const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
+ if (!Proto)
+ return QualType();
+
return Context.getFunctionType(Context.VoidTy, 0, 0, false, 0,
- false, false, 0, 0, FunctionType::ExtInfo());
+ Proto->hasExceptionSpec(),
+ Proto->hasAnyExceptionSpec(),
+ Proto->getNumExceptions(),
+ Proto->exception_begin(),
+ Proto->getExtInfo());
}
/// CheckConversionDeclarator - Called by ActOnDeclarator to check the
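Two of the destructor-declarator changes above, sketched (note also that the
rebuilt 'void' function type now preserves the declarator's exception
specification instead of dropping it):

    typedef struct T TAlias;
    struct T {
      ~TAlias();    // err_destructor_typedef_name, but the decl stays valid
    };
    struct U {
      static ~U();  // error, now with a fix-it that removes 'static'
    };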
@@ -3434,6 +3313,21 @@ void Sema::ActOnFinishNamespaceDef(DeclPtrTy D, SourceLocation RBrace) {
PopDeclContext();
}
+/// \brief Retrieve the special "std" namespace, which may require us to
+/// implicitly define the namespace.
+NamespaceDecl *Sema::getStdNamespace() {
+ if (!StdNamespace) {
+ // The "std" namespace has not yet been defined, so build one implicitly.
+ StdNamespace = NamespaceDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ &PP.getIdentifierTable().get("std"));
+ StdNamespace->setImplicit(true);
+ }
+
+ return StdNamespace;
+}
+
Sema::DeclPtrTy Sema::ActOnUsingDirective(Scope *S,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
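The implicitly-built std namespace feeds the GCC-compatibility path in the
next hunk:

    // No declaration of 'std' has been seen yet:
    using namespace std;     // accepted with ext_using_undefined_std
    using namespace ::std;   // the globally-qualified form is accepted too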
@@ -3447,13 +3341,49 @@ Sema::DeclPtrTy Sema::ActOnUsingDirective(Scope *S,
assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
UsingDirectiveDecl *UDir = 0;
-
+ NestedNameSpecifier *Qualifier = 0;
+ if (SS.isSet())
+ Qualifier = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+
// Lookup namespace name.
LookupResult R(*this, NamespcName, IdentLoc, LookupNamespaceName);
LookupParsedName(R, S, &SS);
if (R.isAmbiguous())
return DeclPtrTy();
+ if (R.empty()) {
+ // Allow "using namespace std;" or "using namespace ::std;" even if
+ // "std" hasn't been defined yet, for GCC compatibility.
+ if ((!Qualifier || Qualifier->getKind() == NestedNameSpecifier::Global) &&
+ NamespcName->isStr("std")) {
+ Diag(IdentLoc, diag::ext_using_undefined_std);
+ R.addDecl(getStdNamespace());
+ R.resolveKind();
+ }
+ // Otherwise, attempt typo correction.
+ else if (DeclarationName Corrected = CorrectTypo(R, S, &SS, 0, false,
+ CTC_NoKeywords, 0)) {
+ if (R.getAsSingle<NamespaceDecl>() ||
+ R.getAsSingle<NamespaceAliasDecl>()) {
+ if (DeclContext *DC = computeDeclContext(SS, false))
+ Diag(IdentLoc, diag::err_using_directive_member_suggest)
+ << NamespcName << DC << Corrected << SS.getRange()
+ << FixItHint::CreateReplacement(IdentLoc, Corrected.getAsString());
+ else
+ Diag(IdentLoc, diag::err_using_directive_suggest)
+ << NamespcName << Corrected
+ << FixItHint::CreateReplacement(IdentLoc, Corrected.getAsString());
+ Diag(R.getFoundDecl()->getLocation(), diag::note_namespace_defined_here)
+ << Corrected;
+
+ NamespcName = Corrected.getAsIdentifierInfo();
+ } else {
+ R.clear();
+ R.setLookupName(NamespcName);
+ }
+ }
+ }
+
if (!R.empty()) {
NamedDecl *Named = R.getFoundDecl();
assert((isa<NamespaceDecl>(Named) || isa<NamespaceAliasDecl>(Named))
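For names other than "std", the directive now goes through typo correction
before failing; illustrative:

    namespace util { }
    using namespace utill;   // err_using_directive_suggest: did you mean 'util'?
                             // a fix-it replaces 'utill' with 'util'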
@@ -3566,6 +3496,28 @@ Sema::DeclPtrTy Sema::ActOnUsingDeclaration(Scope *S,
return DeclPtrTy::make(UD);
}
+/// \brief Determine whether a using declaration considers the given
+/// declarations as "equivalent", e.g., if they are redeclarations of
+/// the same entity or are both typedefs of the same type.
+static bool
+IsEquivalentForUsingDecl(ASTContext &Context, NamedDecl *D1, NamedDecl *D2,
+ bool &SuppressRedeclaration) {
+ if (D1->getCanonicalDecl() == D2->getCanonicalDecl()) {
+ SuppressRedeclaration = false;
+ return true;
+ }
+
+ if (TypedefDecl *TD1 = dyn_cast<TypedefDecl>(D1))
+ if (TypedefDecl *TD2 = dyn_cast<TypedefDecl>(D2)) {
+ SuppressRedeclaration = true;
+ return Context.hasSameType(TD1->getUnderlyingType(),
+ TD2->getUnderlyingType());
+ }
+
+ return false;
+}
+
/// Determines whether to create a using shadow decl for a particular
/// decl, given the set of decls existing prior to this using lookup.
bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
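IsEquivalentForUsingDecl lets a using declaration coexist silently with a
typedef for the same type; a sketch:

    typedef int Integer;
    namespace N { typedef int Integer; }
    using N::Integer;   // OK: both typedefs name 'int', so no shadow decl is built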
@@ -3632,8 +3584,9 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
I != E; ++I) {
NamedDecl *D = (*I)->getUnderlyingDecl();
- if (D->getCanonicalDecl() == Target->getCanonicalDecl())
- return false;
+ bool Result;
+ if (IsEquivalentForUsingDecl(Context, D, Target, Result))
+ return Result;
(isa<TagDecl>(D) ? Tag : NonTag) = D;
}
@@ -3646,7 +3599,7 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
FD = cast<FunctionDecl>(Target);
NamedDecl *OldDecl = 0;
- switch (CheckOverload(FD, Previous, OldDecl)) {
+ switch (CheckOverload(0, FD, Previous, OldDecl, /*IsForUsingDecl*/ true)) {
case Ovl_Overload:
return false;
@@ -3656,11 +3609,6 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
// We found a decl with the exact signature.
case Ovl_Match:
- if (isa<UsingShadowDecl>(OldDecl)) {
- // Silently ignore the possible conflict.
- return false;
- }
-
// If we're in a record, we want to hide the target, so we
// return true (without a diagnostic) to tell the caller not to
// build a shadow decl.
@@ -4162,8 +4110,33 @@ Sema::DeclPtrTy Sema::ActOnNamespaceAliasDef(Scope *S,
return DeclPtrTy();
if (R.empty()) {
- Diag(NamespaceLoc, diag::err_expected_namespace_name) << SS.getRange();
- return DeclPtrTy();
+ if (DeclarationName Corrected = CorrectTypo(R, S, &SS, 0, false,
+ CTC_NoKeywords, 0)) {
+ if (R.getAsSingle<NamespaceDecl>() ||
+ R.getAsSingle<NamespaceAliasDecl>()) {
+ if (DeclContext *DC = computeDeclContext(SS, false))
+ Diag(IdentLoc, diag::err_using_directive_member_suggest)
+ << Ident << DC << Corrected << SS.getRange()
+ << FixItHint::CreateReplacement(IdentLoc, Corrected.getAsString());
+ else
+ Diag(IdentLoc, diag::err_using_directive_suggest)
+ << Ident << Corrected
+ << FixItHint::CreateReplacement(IdentLoc, Corrected.getAsString());
+
+ Diag(R.getFoundDecl()->getLocation(), diag::note_namespace_defined_here)
+ << Corrected;
+
+ Ident = Corrected.getAsIdentifierInfo();
+ } else {
+ R.clear();
+ R.setLookupName(Ident);
+ }
+ }
+
+ if (R.empty()) {
+ Diag(NamespaceLoc, diag::err_expected_namespace_name) << SS.getRange();
+ return DeclPtrTy();
+ }
}
NamespaceAliasDecl *AliasDecl =
@@ -4200,10 +4173,108 @@ namespace {
};
}
+CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
+ CXXRecordDecl *ClassDecl) {
+ // C++ [class.ctor]p5:
+ // A default constructor for a class X is a constructor of class X
+ // that can be called without an argument. If there is no
+ // user-declared constructor for class X, a default constructor is
+ // implicitly declared. An implicitly-declared default constructor
+ // is an inline public member of its class.
+ assert(!ClassDecl->hasUserDeclaredConstructor() &&
+ "Should not build implicit default constructor!");
+
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
+ ImplicitExceptionSpecification ExceptSpec(Context);
+
+  // Direct base-class constructors.
+ for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
+ BEnd = ClassDecl->bases_end();
+ B != BEnd; ++B) {
+ if (B->isVirtual()) // Handled below.
+ continue;
+
+ if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ if (!BaseClassDecl->hasDeclaredDefaultConstructor())
+ ExceptSpec.CalledDecl(DeclareImplicitDefaultConstructor(BaseClassDecl));
+ else if (CXXConstructorDecl *Constructor
+ = BaseClassDecl->getDefaultConstructor())
+ ExceptSpec.CalledDecl(Constructor);
+ }
+ }
+
+  // Virtual base-class constructors.
+ for (CXXRecordDecl::base_class_iterator B = ClassDecl->vbases_begin(),
+ BEnd = ClassDecl->vbases_end();
+ B != BEnd; ++B) {
+ if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ if (!BaseClassDecl->hasDeclaredDefaultConstructor())
+ ExceptSpec.CalledDecl(DeclareImplicitDefaultConstructor(BaseClassDecl));
+ else if (CXXConstructorDecl *Constructor
+ = BaseClassDecl->getDefaultConstructor())
+ ExceptSpec.CalledDecl(Constructor);
+ }
+ }
+
+  // Field constructors.
+ for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
+ FEnd = ClassDecl->field_end();
+ F != FEnd; ++F) {
+ if (const RecordType *RecordTy
+ = Context.getBaseElementType(F->getType())->getAs<RecordType>()) {
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!FieldClassDecl->hasDeclaredDefaultConstructor())
+ ExceptSpec.CalledDecl(
+ DeclareImplicitDefaultConstructor(FieldClassDecl));
+ else if (CXXConstructorDecl *Constructor
+ = FieldClassDecl->getDefaultConstructor())
+ ExceptSpec.CalledDecl(Constructor);
+ }
+ }
+
+ // Create the actual constructor declaration.
+ CanQualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXConstructorName(ClassType);
+ CXXConstructorDecl *DefaultCon
+ = CXXConstructorDecl::Create(Context, ClassDecl,
+ ClassDecl->getLocation(), Name,
+ Context.getFunctionType(Context.VoidTy,
+ 0, 0, false, 0,
+ ExceptSpec.hasExceptionSpecification(),
+ ExceptSpec.hasAnyExceptionSpecification(),
+ ExceptSpec.size(),
+ ExceptSpec.data(),
+ FunctionType::ExtInfo()),
+ /*TInfo=*/0,
+ /*isExplicit=*/false,
+ /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true);
+ DefaultCon->setAccess(AS_public);
+ DefaultCon->setImplicit();
+ DefaultCon->setTrivial(ClassDecl->hasTrivialConstructor());
+
+ // Note that we have declared this constructor.
+ ClassDecl->setDeclaredDefaultConstructor(true);
+ ++ASTContext::NumImplicitDefaultConstructorsDeclared;
+
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(DefaultCon, S, false);
+ ClassDecl->addDecl(DefaultCon);
+
+ return DefaultCon;
+}
+
void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor) {
assert((Constructor->isImplicit() && Constructor->isDefaultConstructor() &&
- !Constructor->isUsed()) &&
+ !Constructor->isUsed(false)) &&
"DefineImplicitDefaultConstructor - call it for implicit default ctor");
CXXRecordDecl *ClassDecl = Constructor->getParent();
@@ -4222,9 +4293,90 @@ void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
}
}
+CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
+ // C++ [class.dtor]p2:
+ // If a class has no user-declared destructor, a destructor is
+ // declared implicitly. An implicitly-declared destructor is an
+ // inline public member of its class.
+
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have
+ // an exception-specification.
+ ImplicitExceptionSpecification ExceptSpec(Context);
+
+ // Direct base-class destructors.
+ for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
+ BEnd = ClassDecl->bases_end();
+ B != BEnd; ++B) {
+ if (B->isVirtual()) // Handled below.
+ continue;
+
+ if (const RecordType *BaseType = B->getType()->getAs<RecordType>())
+ ExceptSpec.CalledDecl(
+ LookupDestructor(cast<CXXRecordDecl>(BaseType->getDecl())));
+ }
+
+ // Virtual base-class destructors.
+ for (CXXRecordDecl::base_class_iterator B = ClassDecl->vbases_begin(),
+ BEnd = ClassDecl->vbases_end();
+ B != BEnd; ++B) {
+ if (const RecordType *BaseType = B->getType()->getAs<RecordType>())
+ ExceptSpec.CalledDecl(
+ LookupDestructor(cast<CXXRecordDecl>(BaseType->getDecl())));
+ }
+
+ // Field destructors.
+ for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
+ FEnd = ClassDecl->field_end();
+ F != FEnd; ++F) {
+ if (const RecordType *RecordTy
+ = Context.getBaseElementType(F->getType())->getAs<RecordType>())
+ ExceptSpec.CalledDecl(
+ LookupDestructor(cast<CXXRecordDecl>(RecordTy->getDecl())));
+ }
+
+ // Create the actual destructor declaration.
+ QualType Ty = Context.getFunctionType(Context.VoidTy,
+ 0, 0, false, 0,
+ ExceptSpec.hasExceptionSpecification(),
+ ExceptSpec.hasAnyExceptionSpecification(),
+ ExceptSpec.size(),
+ ExceptSpec.data(),
+ FunctionType::ExtInfo());
+
+ CanQualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXDestructorName(ClassType);
+ CXXDestructorDecl *Destructor
+ = CXXDestructorDecl::Create(Context, ClassDecl,
+ ClassDecl->getLocation(), Name, Ty,
+ /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true);
+ Destructor->setAccess(AS_public);
+ Destructor->setImplicit();
+ Destructor->setTrivial(ClassDecl->hasTrivialDestructor());
+
+ // Note that we have declared this destructor.
+ ClassDecl->setDeclaredDestructor(true);
+ ++ASTContext::NumImplicitDestructorsDeclared;
+
+ // Introduce this destructor into its scope.
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(Destructor, S, false);
+ ClassDecl->addDecl(Destructor);
+
+ // This could be uniqued if it ever proves significant.
+ Destructor->setTypeSourceInfo(Context.getTrivialTypeSourceInfo(Ty));
+
+ AddOverriddenMethods(ClassDecl, Destructor);
+
+ return Destructor;
+}
+
void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor) {
- assert((Destructor->isImplicit() && !Destructor->isUsed()) &&
+ assert((Destructor->isImplicit() && !Destructor->isUsed(false)) &&
"DefineImplicitDestructor - call it for implicit default dtor");
CXXRecordDecl *ClassDecl = Destructor->getParent();
assert(ClassDecl && "DefineImplicitDestructor - invalid destructor");
@@ -4448,12 +4600,197 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
Loc, move(Copy));
}
+/// \brief Determine whether the given class has a copy assignment operator
+/// that accepts a const-qualified argument.
+static bool hasConstCopyAssignment(Sema &S, const CXXRecordDecl *CClass) {
+ CXXRecordDecl *Class = const_cast<CXXRecordDecl *>(CClass);
+
+ if (!Class->hasDeclaredCopyAssignment())
+ S.DeclareImplicitCopyAssignment(Class);
+
+  QualType ClassType
+    = S.Context.getCanonicalType(S.Context.getTypeDeclType(Class));
+ DeclarationName OpName
+ = S.Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+
+ DeclContext::lookup_const_iterator Op, OpEnd;
+ for (llvm::tie(Op, OpEnd) = Class->lookup(OpName); Op != OpEnd; ++Op) {
+ // C++ [class.copy]p9:
+ // A user-declared copy assignment operator is a non-static non-template
+ // member function of class X with exactly one parameter of type X, X&,
+ // const X&, volatile X& or const volatile X&.
+ const CXXMethodDecl* Method = dyn_cast<CXXMethodDecl>(*Op);
+ if (!Method)
+ continue;
+
+ if (Method->isStatic())
+ continue;
+ if (Method->getPrimaryTemplate())
+ continue;
+ const FunctionProtoType *FnType =
+ Method->getType()->getAs<FunctionProtoType>();
+ assert(FnType && "Overloaded operator has no prototype.");
+ // Don't assert on this; an invalid decl might have been left in the AST.
+ if (FnType->getNumArgs() != 1 || FnType->isVariadic())
+ continue;
+ bool AcceptsConst = true;
+ QualType ArgType = FnType->getArgType(0);
+ if (const LValueReferenceType *Ref = ArgType->getAs<LValueReferenceType>()){
+ ArgType = Ref->getPointeeType();
+ // Is it a non-const lvalue reference?
+ if (!ArgType.isConstQualified())
+ AcceptsConst = false;
+ }
+ if (!S.Context.hasSameUnqualifiedType(ArgType, ClassType))
+ continue;
+
+ // We have a single argument of type cv X or cv X&, i.e. we've found the
+ // copy assignment operator. Return whether it accepts const arguments.
+ return AcceptsConst;
+ }
+ assert(Class->isInvalidDecl() &&
+ "No copy assignment operator declared in valid code.");
+ return false;
+}
+
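The helper above decides whether the implicit copy assignment takes const X&
or plain X&:

    struct A { A &operator=(A &); };   // parameter is non-const
    struct HasA { A a; };              // implicit: HasA &operator=(HasA &)
    struct Plain { int x; };           // implicit: Plain &operator=(const Plain &)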
+CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
+  // Note: The following rules are largely analogous to the copy
+ // constructor rules. Note that virtual bases are not taken into account
+ // for determining the argument type of the operator. Note also that
+ // operators taking an object instead of a reference are allowed.
+
+ // C++ [class.copy]p10:
+ // If the class definition does not explicitly declare a copy
+ // assignment operator, one is declared implicitly.
+ // The implicitly-defined copy assignment operator for a class X
+ // will have the form
+ //
+ // X& X::operator=(const X&)
+ //
+ // if
+ bool HasConstCopyAssignment = true;
+
+ // -- each direct base class B of X has a copy assignment operator
+ // whose parameter is of type const B&, const volatile B& or B,
+ // and
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ BaseEnd = ClassDecl->bases_end();
+ HasConstCopyAssignment && Base != BaseEnd; ++Base) {
+ assert(!Base->getType()->isDependentType() &&
+ "Cannot generate implicit members for class with dependent bases.");
+ const CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ HasConstCopyAssignment = hasConstCopyAssignment(*this, BaseClassDecl);
+ }
+
+ // -- for all the nonstatic data members of X that are of a class
+ // type M (or array thereof), each such class type has a copy
+ // assignment operator whose parameter is of type const M&,
+ // const volatile M& or M.
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ HasConstCopyAssignment && Field != FieldEnd;
+ ++Field) {
+ QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
+ const CXXRecordDecl *FieldClassDecl
+ = cast<CXXRecordDecl>(FieldClassType->getDecl());
+ HasConstCopyAssignment = hasConstCopyAssignment(*this, FieldClassDecl);
+ }
+ }
+
+ // Otherwise, the implicitly declared copy assignment operator will
+ // have the form
+ //
+ // X& X::operator=(X&)
+ QualType ArgType = Context.getTypeDeclType(ClassDecl);
+ QualType RetType = Context.getLValueReferenceType(ArgType);
+ if (HasConstCopyAssignment)
+ ArgType = ArgType.withConst();
+ ArgType = Context.getLValueReferenceType(ArgType);
+
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
+ ImplicitExceptionSpecification ExceptSpec(Context);
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ BaseEnd = ClassDecl->bases_end();
+ Base != BaseEnd; ++Base) {
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ if (!BaseClassDecl->hasDeclaredCopyAssignment())
+ DeclareImplicitCopyAssignment(BaseClassDecl);
+
+ if (CXXMethodDecl *CopyAssign
+ = BaseClassDecl->getCopyAssignmentOperator(HasConstCopyAssignment))
+ ExceptSpec.CalledDecl(CopyAssign);
+ }
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd;
+ ++Field) {
+ QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
+ CXXRecordDecl *FieldClassDecl
+ = cast<CXXRecordDecl>(FieldClassType->getDecl());
+
+ if (!FieldClassDecl->hasDeclaredCopyAssignment())
+ DeclareImplicitCopyAssignment(FieldClassDecl);
+
+ if (CXXMethodDecl *CopyAssign
+ = FieldClassDecl->getCopyAssignmentOperator(HasConstCopyAssignment))
+ ExceptSpec.CalledDecl(CopyAssign);
+ }
+ }
+
+ // An implicitly-declared copy assignment operator is an inline public
+ // member of its class.
+ DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+ CXXMethodDecl *CopyAssignment
+ = CXXMethodDecl::Create(Context, ClassDecl, ClassDecl->getLocation(), Name,
+ Context.getFunctionType(RetType, &ArgType, 1,
+ false, 0,
+ ExceptSpec.hasExceptionSpecification(),
+ ExceptSpec.hasAnyExceptionSpecification(),
+ ExceptSpec.size(),
+ ExceptSpec.data(),
+ FunctionType::ExtInfo()),
+ /*TInfo=*/0, /*isStatic=*/false,
+ /*StorageClassAsWritten=*/FunctionDecl::None,
+ /*isInline=*/true);
+ CopyAssignment->setAccess(AS_public);
+ CopyAssignment->setImplicit();
+ CopyAssignment->setTrivial(ClassDecl->hasTrivialCopyAssignment());
+ CopyAssignment->setCopyAssignment(true);
+
+ // Add the parameter to the operator.
+ ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment,
+ ClassDecl->getLocation(),
+ /*Id=*/0,
+ ArgType, /*TInfo=*/0,
+ VarDecl::None,
+ VarDecl::None, 0);
+ CopyAssignment->setParams(&FromParam, 1);
+
+ // Note that we have added this copy-assignment operator.
+ ClassDecl->setDeclaredCopyAssignment(true);
+ ++ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
+
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(CopyAssignment, S, false);
+ ClassDecl->addDecl(CopyAssignment);
+
+ AddOverriddenMethods(ClassDecl, CopyAssignment);
+ return CopyAssignment;
+}
+
void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *CopyAssignOperator) {
assert((CopyAssignOperator->isImplicit() &&
CopyAssignOperator->isOverloadedOperator() &&
CopyAssignOperator->getOverloadedOperator() == OO_Equal &&
- !CopyAssignOperator->isUsed()) &&
+ !CopyAssignOperator->isUsed(false)) &&
"DefineImplicitCopyAssignment called for wrong function");
CXXRecordDecl *ClassDecl = CopyAssignOperator->getParent();
@@ -4554,6 +4891,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// \brief Reference to the __builtin_memcpy function.
Expr *BuiltinMemCpyRef = 0;
+ // \brief Reference to the __builtin_objc_memmove_collectable function.
+ Expr *CollectableMemCpyRef = 0;
// Assign non-static members.
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
@@ -4630,9 +4969,35 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// Take the address of the field references for "from" and "to".
From = CreateBuiltinUnaryOp(Loc, UnaryOperator::AddrOf, move(From));
To = CreateBuiltinUnaryOp(Loc, UnaryOperator::AddrOf, move(To));
-
+
+ bool NeedsCollectableMemCpy =
+ (BaseType->isRecordType() &&
+ BaseType->getAs<RecordType>()->getDecl()->hasObjectMember());
+
+ if (NeedsCollectableMemCpy) {
+ if (!CollectableMemCpyRef) {
+ // Create a reference to the __builtin_objc_memmove_collectable function.
+ LookupResult R(*this,
+ &Context.Idents.get("__builtin_objc_memmove_collectable"),
+ Loc, LookupOrdinaryName);
+ LookupName(R, TUScope, true);
+
+ FunctionDecl *CollectableMemCpy = R.getAsSingle<FunctionDecl>();
+ if (!CollectableMemCpy) {
+ // Something went horribly wrong earlier, and we will have
+ // complained about it.
+ Invalid = true;
+ continue;
+ }
+
+ CollectableMemCpyRef = BuildDeclRefExpr(CollectableMemCpy,
+ CollectableMemCpy->getType(),
+ Loc, 0).takeAs<Expr>();
+ assert(CollectableMemCpyRef && "Builtin reference cannot fail");
+ }
+ }
// Create a reference to the __builtin_memcpy builtin function.
- if (!BuiltinMemCpyRef) {
+ else if (!BuiltinMemCpyRef) {
LookupResult R(*this, &Context.Idents.get("__builtin_memcpy"), Loc,
LookupOrdinaryName);
LookupName(R, TUScope, true);
@@ -4658,10 +5023,18 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
llvm::SmallVector<SourceLocation, 4> Commas; // FIXME: Silly
Commas.push_back(Loc);
Commas.push_back(Loc);
- OwningExprResult Call = ActOnCallExpr(/*Scope=*/0,
- Owned(BuiltinMemCpyRef->Retain()),
- Loc, move_arg(CallArgs),
- Commas.data(), Loc);
+ OwningExprResult Call = ExprError();
+ if (NeedsCollectableMemCpy)
+ Call = ActOnCallExpr(/*Scope=*/0,
+ Owned(CollectableMemCpyRef->Retain()),
+ Loc, move_arg(CallArgs),
+ Commas.data(), Loc);
+ else
+ Call = ActOnCallExpr(/*Scope=*/0,
+ Owned(BuiltinMemCpyRef->Retain()),
+ Loc, move_arg(CallArgs),
+ Commas.data(), Loc);
+
assert(!Call.isInvalid() && "Call to __builtin_memcpy cannot fail!");
Statements.push_back(Call.takeAs<Expr>());
continue;
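Under Objective-C garbage collection, a POD member that transitively contains
object pointers (hasObjectMember()) must be copied through the collector so
write barriers fire; an Objective-C++ sketch, assuming -fobjc-gc:

    struct Inner { id obj; };   // hasObjectMember() is true
    struct Holder {
      Inner data;   // the implicit operator= copies this member with
    };              // __builtin_objc_memmove_collectable instead of memcpy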
@@ -4712,12 +5085,185 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CopyAssignOperator->setBody(Body.takeAs<Stmt>());
}
+CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
+ CXXRecordDecl *ClassDecl) {
+ // C++ [class.copy]p4:
+ // If the class definition does not explicitly declare a copy
+ // constructor, one is declared implicitly.
+
+ // C++ [class.copy]p5:
+ // The implicitly-declared copy constructor for a class X will
+ // have the form
+ //
+ // X::X(const X&)
+ //
+ // if
+ bool HasConstCopyConstructor = true;
+
+ // -- each direct or virtual base class B of X has a copy
+ // constructor whose first parameter is of type const B& or
+ // const volatile B&, and
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ BaseEnd = ClassDecl->bases_end();
+ HasConstCopyConstructor && Base != BaseEnd;
+ ++Base) {
+ // Virtual bases are handled below.
+ if (Base->isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (!BaseClassDecl->hasDeclaredCopyConstructor())
+ DeclareImplicitCopyConstructor(BaseClassDecl);
+
+ HasConstCopyConstructor
+ = BaseClassDecl->hasConstCopyConstructor(Context);
+ }
+
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ HasConstCopyConstructor && Base != BaseEnd;
+ ++Base) {
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (!BaseClassDecl->hasDeclaredCopyConstructor())
+ DeclareImplicitCopyConstructor(BaseClassDecl);
+
+ HasConstCopyConstructor
+ = BaseClassDecl->hasConstCopyConstructor(Context);
+ }
+
+ // -- for all the nonstatic data members of X that are of a
+ // class type M (or array thereof), each such class type
+ // has a copy constructor whose first parameter is of type
+ // const M& or const volatile M&.
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ HasConstCopyConstructor && Field != FieldEnd;
+ ++Field) {
+ QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
+ CXXRecordDecl *FieldClassDecl
+ = cast<CXXRecordDecl>(FieldClassType->getDecl());
+ if (!FieldClassDecl->hasDeclaredCopyConstructor())
+ DeclareImplicitCopyConstructor(FieldClassDecl);
+
+ HasConstCopyConstructor
+ = FieldClassDecl->hasConstCopyConstructor(Context);
+ }
+ }
+
+ // Otherwise, the implicitly declared copy constructor will have
+ // the form
+ //
+ // X::X(X&)
+ QualType ClassType = Context.getTypeDeclType(ClassDecl);
+ QualType ArgType = ClassType;
+ if (HasConstCopyConstructor)
+ ArgType = ArgType.withConst();
+ ArgType = Context.getLValueReferenceType(ArgType);
+
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
+ ImplicitExceptionSpecification ExceptSpec(Context);
+ unsigned Quals = HasConstCopyConstructor? Qualifiers::Const : 0;
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ BaseEnd = ClassDecl->bases_end();
+ Base != BaseEnd;
+ ++Base) {
+ // Virtual bases are handled below.
+ if (Base->isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (!BaseClassDecl->hasDeclaredCopyConstructor())
+ DeclareImplicitCopyConstructor(BaseClassDecl);
+
+ if (CXXConstructorDecl *CopyConstructor
+ = BaseClassDecl->getCopyConstructor(Context, Quals))
+ ExceptSpec.CalledDecl(CopyConstructor);
+ }
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ Base != BaseEnd;
+ ++Base) {
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (!BaseClassDecl->hasDeclaredCopyConstructor())
+ DeclareImplicitCopyConstructor(BaseClassDecl);
+
+ if (CXXConstructorDecl *CopyConstructor
+ = BaseClassDecl->getCopyConstructor(Context, Quals))
+ ExceptSpec.CalledDecl(CopyConstructor);
+ }
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd;
+ ++Field) {
+ QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
+ CXXRecordDecl *FieldClassDecl
+ = cast<CXXRecordDecl>(FieldClassType->getDecl());
+ if (!FieldClassDecl->hasDeclaredCopyConstructor())
+ DeclareImplicitCopyConstructor(FieldClassDecl);
+
+ if (CXXConstructorDecl *CopyConstructor
+ = FieldClassDecl->getCopyConstructor(Context, Quals))
+ ExceptSpec.CalledDecl(CopyConstructor);
+ }
+ }
+
+ // An implicitly-declared copy constructor is an inline public
+ // member of its class.
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(ClassType));
+ CXXConstructorDecl *CopyConstructor
+ = CXXConstructorDecl::Create(Context, ClassDecl,
+ ClassDecl->getLocation(), Name,
+ Context.getFunctionType(Context.VoidTy,
+ &ArgType, 1,
+ false, 0,
+ ExceptSpec.hasExceptionSpecification(),
+ ExceptSpec.hasAnyExceptionSpecification(),
+ ExceptSpec.size(),
+ ExceptSpec.data(),
+ FunctionType::ExtInfo()),
+ /*TInfo=*/0,
+ /*isExplicit=*/false,
+ /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true);
+ CopyConstructor->setAccess(AS_public);
+ CopyConstructor->setImplicit();
+ CopyConstructor->setTrivial(ClassDecl->hasTrivialCopyConstructor());
+
+ // Note that we have declared this constructor.
+ ClassDecl->setDeclaredCopyConstructor(true);
+ ++ASTContext::NumImplicitCopyConstructorsDeclared;
+
+ // Add the parameter to the constructor.
+ ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyConstructor,
+ ClassDecl->getLocation(),
+ /*IdentifierInfo=*/0,
+ ArgType, /*TInfo=*/0,
+ VarDecl::None,
+ VarDecl::None, 0);
+ CopyConstructor->setParams(&FromParam, 1);
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(CopyConstructor, S, false);
+ ClassDecl->addDecl(CopyConstructor);
+
+ return CopyConstructor;
+}
+
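A minimal sketch (not part of the patch) of the C++ [class.copy]p5 rule that DeclareImplicitCopyConstructor implements above:

    struct M { M(M&); };   // copy constructor takes a non-const reference
    struct A { M m; };     // implicit copy constructor is declared as A::A(A&)
    struct B { int i; };   // implicit copy constructor is B::B(const B&)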
void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *CopyConstructor,
unsigned TypeQuals) {
assert((CopyConstructor->isImplicit() &&
CopyConstructor->isCopyConstructor(TypeQuals) &&
- !CopyConstructor->isUsed()) &&
+ !CopyConstructor->isUsed(false)) &&
"DefineImplicitCopyConstructor - call it for implicit copy ctor");
CXXRecordDecl *ClassDecl = CopyConstructor->getParent();
@@ -4810,7 +5356,7 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Record->getDecl());
if (!ClassDecl->isInvalidDecl() && !VD->isInvalidDecl() &&
!ClassDecl->hasTrivialDestructor() && !ClassDecl->isDependentContext()) {
- CXXDestructorDecl *Destructor = ClassDecl->getDestructor(Context);
+ CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
MarkDeclarationReferenced(VD->getLocation(), Destructor);
CheckDestructorAccess(VD->getLocation(), Destructor,
PDiag(diag::err_access_dtor_var)
@@ -5477,8 +6023,8 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S, QualType ExDeclType,
/// ActOnExceptionDeclarator - Parsed the exception-declarator in a C++ catch
/// handler.
Sema::DeclPtrTy Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
- TypeSourceInfo *TInfo = 0;
- QualType ExDeclType = GetTypeForDeclarator(D, S, &TInfo);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType ExDeclType = TInfo->getType();
bool Invalid = D.isInvalidType();
IdentifierInfo *II = D.getIdentifier();
@@ -5632,14 +6178,11 @@ Sema::DeclPtrTy Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
// friend templates because ActOnTag never produces a ClassTemplateDecl
// for a TUK_Friend.
Declarator TheDeclarator(DS, Declarator::MemberContext);
- TypeSourceInfo *TSI;
- QualType T = GetTypeForDeclarator(TheDeclarator, S, &TSI);
+ TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator, S);
+ QualType T = TSI->getType();
if (TheDeclarator.isInvalidType())
return DeclPtrTy();
- if (!TSI)
- TSI = Context.getTrivialTypeSourceInfo(T, DS.getSourceRange().getBegin());
-
// This is definitely an error in C++98. It's probably meant to
// be forbidden in C++0x, too, but the specification is just
// poorly written.
@@ -5701,8 +6244,8 @@ Sema::ActOnFriendFunctionDecl(Scope *S,
assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified);
SourceLocation Loc = D.getIdentifierLoc();
- TypeSourceInfo *TInfo = 0;
- QualType T = GetTypeForDeclarator(D, S, &TInfo);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
// C++ [class.friend]p1
// A friend of a class is a function or class....
@@ -5759,13 +6302,18 @@ Sema::ActOnFriendFunctionDecl(Scope *S,
LookupQualifiedName(Previous, DC);
- // If searching in that context implicitly found a declaration in
- // a different context, treat it like it wasn't found at all.
+ // Ignore things found implicitly in the wrong scope.
// TODO: better diagnostics for this case. Suggesting the right
// qualified scope would be nice...
- // FIXME: getRepresentativeDecl() is not right here at all
- if (Previous.empty() ||
- !Previous.getRepresentativeDecl()->getDeclContext()->Equals(DC)) {
+ LookupResult::Filter F = Previous.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next();
+ if (!D->getDeclContext()->getLookupContext()->Equals(DC))
+ F.erase();
+ }
+ F.done();
+
+ if (Previous.empty()) {
D.setInvalidType();
Diag(Loc, diag::err_qualified_friend_not_found) << Name << T;
return DeclPtrTy();
@@ -6061,9 +6609,9 @@ Sema::ActOnCXXConditionDeclaration(Scope *S, Declarator &D) {
assert(D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
"Parser allowed 'typedef' as storage class of condition decl.");
- TypeSourceInfo *TInfo = 0;
TagDecl *OwnedTag = 0;
- QualType Ty = GetTypeForDeclarator(D, S, &TInfo, &OwnedTag);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedTag);
+ QualType Ty = TInfo->getType();
if (Ty->isFunctionType()) { // The declarator shall not specify a function...
// We exit without creating a CXXConditionDeclExpr because a FunctionDecl
@@ -6127,7 +6675,7 @@ bool Sema::DefineUsedVTables() {
if (const CXXMethodDecl *KeyFunction
= Context.getKeyFunction(DynamicClasses[I])) {
const FunctionDecl *Definition = 0;
- if (KeyFunction->getBody(Definition))
+ if (KeyFunction->hasBody(Definition))
MarkVTableUsed(Definition->getLocation(), DynamicClasses[I], true);
}
}
@@ -6150,7 +6698,7 @@ bool Sema::DefineUsedVTables() {
// defined in another translation unit, we don't need to emit the
// vtable even though we're using it.
const CXXMethodDecl *KeyFunction = Context.getKeyFunction(Class);
- if (KeyFunction && !KeyFunction->getBody()) {
+ if (KeyFunction && !KeyFunction->hasBody()) {
switch (KeyFunction->getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
@@ -6198,7 +6746,7 @@ bool Sema::DefineUsedVTables() {
// Optionally warn if we're emitting a weak vtable.
if (Class->getLinkage() == ExternalLinkage &&
Class->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) {
- if (!KeyFunction || (KeyFunction->getBody() && KeyFunction->isInlined()))
+ if (!KeyFunction || (KeyFunction->hasBody() && KeyFunction->isInlined()))
Diag(Class->getLocation(), diag::warn_weak_vtable) << Class;
}
}
@@ -6279,8 +6827,7 @@ void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
= Context.getBaseElementType(Field->getType())
->getAs<RecordType>()) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
- if (CXXDestructorDecl *Destructor
- = const_cast<CXXDestructorDecl*>(RD->getDestructor(Context))) {
+ if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) {
MarkDeclarationReferenced(Field->getLocation(), Destructor);
CheckDestructorAccess(Field->getLocation(), Destructor,
PDiag(diag::err_access_dtor_ivar)
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
index 3b05f5a..21aeb59 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
@@ -414,7 +414,7 @@ ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc) {
- ObjCCategoryDecl *CDecl = 0;
+ ObjCCategoryDecl *CDecl;
ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
/// Check that class of this category is already completely declared.
@@ -429,28 +429,21 @@ ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
return DeclPtrTy::make(CDecl);
}
- if (!CategoryName) {
- // Class extensions require a special treatment. Use an existing one.
- // Note that 'getClassExtension()' can return NULL.
- CDecl = IDecl->getClassExtension();
- if (IDecl->getImplementation()) {
- Diag(ClassLoc, diag::err_class_extension_after_impl) << ClassName;
- Diag(IDecl->getImplementation()->getLocation(),
- diag::note_implementation_declared);
- }
+ if (!CategoryName && IDecl->getImplementation()) {
+ Diag(ClassLoc, diag::err_class_extension_after_impl) << ClassName;
+ Diag(IDecl->getImplementation()->getLocation(),
+ diag::note_implementation_declared);
}
- if (!CDecl) {
- CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc,
- ClassLoc, CategoryLoc, CategoryName);
- // FIXME: PushOnScopeChains?
- CurContext->addDecl(CDecl);
+ CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc,
+ ClassLoc, CategoryLoc, CategoryName);
+ // FIXME: PushOnScopeChains?
+ CurContext->addDecl(CDecl);
- CDecl->setClassInterface(IDecl);
- // Insert first use of class extension to the list of class's categories.
- if (!CategoryName)
- CDecl->insertNextClassCategory();
- }
+ CDecl->setClassInterface(IDecl);
+ // Insert the class extension into the class's list of categories.
+ if (!CategoryName)
+ CDecl->insertNextClassCategory();
// If the interface is deprecated, warn about it.
(void)DiagnoseUseOfDecl(IDecl, ClassLoc);
@@ -969,13 +962,11 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
CheckProtocolMethodDefs(IMPDecl->getLocation(), *PI, IncompleteImpl,
InsMap, ClsMap, I);
// Check class extensions (unnamed categories)
- for (ObjCCategoryDecl *Categories = I->getCategoryList();
- Categories; Categories = Categories->getNextClassCategory()) {
- if (Categories->IsClassExtension()) {
- ImplMethodsVsClassMethods(S, IMPDecl, Categories, IncompleteImpl);
- break;
- }
- }
+ for (const ObjCCategoryDecl *Categories = I->getFirstClassExtension();
+ Categories; Categories = Categories->getNextClassExtension())
+ ImplMethodsVsClassMethods(S, IMPDecl,
+ const_cast<ObjCCategoryDecl*>(Categories),
+ IncompleteImpl);
} else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) {
// For extended class, unimplemented methods in its protocols will
// be reported in the primary class.
@@ -1775,9 +1766,9 @@ Sema::DeclPtrTy Sema::ActOnObjCExceptionDecl(Scope *S, Declarator &D) {
if (getLangOptions().CPlusPlus)
CheckExtraCXXDefaultArguments(D);
- TypeSourceInfo *TInfo = 0;
TagDecl *OwnedDecl = 0;
- QualType ExceptionType = GetTypeForDeclarator(D, S, &TInfo, &OwnedDecl);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedDecl);
+ QualType ExceptionType = TInfo->getType();
if (getLangOptions().CPlusPlus && OwnedDecl && OwnedDecl->isDefinition()) {
// Objective-C++: Types shall not be defined in exception types.
@@ -1821,7 +1812,8 @@ void Sema::CollectIvarsToConstructOrDestruct(const ObjCInterfaceDecl *OI,
}
// Find ivars to construct/destruct in class extension.
- if (const ObjCCategoryDecl *CDecl = OI->getClassExtension()) {
+ for (const ObjCCategoryDecl *CDecl = OI->getFirstClassExtension(); CDecl;
+ CDecl = CDecl->getNextClassExtension()) {
for (ObjCCategoryDecl::ivar_iterator I = CDecl->ivar_begin(),
E = CDecl->ivar_end(); I != E; ++I) {
ObjCIvarDecl *Iv = (*I);
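The SemaDeclObjC changes above move from a single getClassExtension() slot to a chain traversed with getFirstClassExtension()/getNextClassExtension(). A hypothetical example (not from the patch) with more than one class extension, which the new loops now visit in full:

    @interface Foo
    @end

    @interface Foo ()   // first class extension
    - (void)a;
    @end

    @interface Foo ()   // second class extension; also checked against
    - (void)b;          // the @implementation now
    @end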
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
index 7d73fe4..34a479a 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -249,6 +249,10 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
SourceLocation NewLoc,
bool *MissingExceptionSpecification,
bool *MissingEmptyExceptionSpecification) {
+ // Just completely ignore this under -fno-exceptions.
+ if (!getLangOptions().Exceptions)
+ return false;
+
if (MissingExceptionSpecification)
*MissingExceptionSpecification = false;
@@ -318,6 +322,11 @@ bool Sema::CheckExceptionSpecSubset(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Superset, SourceLocation SuperLoc,
const FunctionProtoType *Subset, SourceLocation SubLoc) {
+
+ // Just auto-succeed under -fno-exceptions.
+ if (!getLangOptions().Exceptions)
+ return false;
+
// FIXME: As usual, we could be more specific in our error messages, but
// that had better wait until we've got types with source locations.
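A hedged sketch (not from the patch) of what the two early returns permit: under -fno-exceptions, exception specifications are simply not checked, so redeclarations such as the following are no longer diagnosed.

    void f() throw(int);
    void f() throw();   // accepted under -fno-exceptions; an error otherwise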
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
index f745352..5f46a97 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
@@ -164,7 +164,7 @@ void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
if (!sentinelExpr) return;
if (sentinelExpr->isTypeDependent()) return;
if (sentinelExpr->isValueDependent()) return;
- if (sentinelExpr->getType()->isPointerType() &&
+ if (sentinelExpr->getType()->isAnyPointerType() &&
sentinelExpr->IgnoreParenCasts()->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNull))
return;
@@ -388,7 +388,7 @@ Sema::ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks) {
if (Literal.Pascal) StrTy = Context.UnsignedCharTy;
// A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
- if (getLangOptions().CPlusPlus || getLangOptions().ConstStrings )
+ if (getLangOptions().CPlusPlus || getLangOptions().ConstStrings)
StrTy.addConst();
// Get an array type for the string, according to C99 6.4.5. This includes
@@ -475,6 +475,7 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, SourceLocation Loc,
if (isa<NonTypeTemplateParmDecl>(VD)) {
// Non-type template parameters can be referenced anywhere they are
// visible.
+ Ty = Ty.getNonLValueExprType(Context);
} else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(CurContext)) {
if (const FunctionDecl *FD = MD->getParent()->isLocalClass()) {
if (VD->hasLocalStorage() && VD->getDeclContext() != CurContext) {
@@ -677,26 +678,6 @@ static void DecomposeUnqualifiedId(Sema &SemaRef,
}
}
-/// Decompose the given template name into a list of lookup results.
-///
-/// The unqualified ID must name a non-dependent template, which can
-/// be more easily tested by checking whether DecomposeUnqualifiedId
-/// found template arguments.
-static void DecomposeTemplateName(LookupResult &R, const UnqualifiedId &Id) {
- assert(Id.getKind() == UnqualifiedId::IK_TemplateId);
- TemplateName TName =
- Sema::TemplateTy::make(Id.TemplateId->Template).getAsVal<TemplateName>();
-
- if (TemplateDecl *TD = TName.getAsTemplateDecl())
- R.addDecl(TD);
- else if (OverloadedTemplateStorage *OT = TName.getAsOverloadedTemplate())
- for (OverloadedTemplateStorage::iterator I = OT->begin(), E = OT->end();
- I != E; ++I)
- R.addDecl(*I);
-
- R.resolveKind();
-}
-
/// Determines whether the given record is "fully-formed" at the given
/// location, i.e. whether a qualified lookup into it is assured of
/// getting consistent results already.
@@ -889,8 +870,8 @@ static void DiagnoseInstanceReference(Sema &SemaRef,
/// Diagnose an empty lookup.
///
/// \return false if new lookup candidates were found
-bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS,
- LookupResult &R, CorrectTypoContext CTC) {
+bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
+ CorrectTypoContext CTC) {
DeclarationName Name = R.getLookupName();
unsigned diagnostic = diag::err_undeclared_var_use;
@@ -906,7 +887,7 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS,
// unqualified lookup. This is useful when (for example) the
// original lookup would not have found something because it was a
// dependent name.
- for (DeclContext *DC = SS.isEmpty()? CurContext : 0;
+ for (DeclContext *DC = SS.isEmpty() ? CurContext : 0;
DC; DC = DC->getParent()) {
if (isa<CXXRecordDecl>(DC)) {
LookupQualifiedName(R, DC);
@@ -923,11 +904,29 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS,
// Give a code modification hint to insert 'this->'.
// TODO: fixit for inserting 'Base<T>::' in the other cases.
// Actually quite difficult!
- if (isInstance)
+ if (isInstance) {
Diag(R.getNameLoc(), diagnostic) << Name
<< FixItHint::CreateInsertion(R.getNameLoc(), "this->");
- else
+
+ UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(
+ CallsUndergoingInstantiation.back()->getCallee());
+ CXXMethodDecl *DepMethod = cast<CXXMethodDecl>(
+ CurMethod->getInstantiatedFromMemberFunction());
+ QualType DepThisType = DepMethod->getThisType(Context);
+ CXXThisExpr *DepThis = new (Context) CXXThisExpr(R.getNameLoc(),
+ DepThisType, false);
+ TemplateArgumentListInfo TList;
+ if (ULE->hasExplicitTemplateArgs())
+ ULE->copyTemplateArgumentsInto(TList);
+ CXXDependentScopeMemberExpr *DepExpr =
+ CXXDependentScopeMemberExpr::Create(
+ Context, DepThis, DepThisType, true, SourceLocation(),
+ ULE->getQualifier(), ULE->getQualifierRange(), NULL, Name,
+ R.getNameLoc(), &TList);
+ CallsUndergoingInstantiation.back()->setCallee(DepExpr);
+ } else {
Diag(R.getNameLoc(), diagnostic) << Name;
+ }
// Do we really want to note all of these?
for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
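A sketch of the motivating case, as far as it can be inferred from the code above (names are hypothetical): a member of a dependent base is not found by unqualified lookup, so after emitting the 'this->' fixit the callee is rebuilt as a CXXDependentScopeMemberExpr and the call can continue through instantiation.

    template <typename T> struct Base { void member(); };
    template <typename T> struct Derived : Base<T> {
      void call() { member(); }   // diagnosed with a 'this->' fixit; the
                                  // callee is then rewritten as a dependent
                                  // member access for recovery
    };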
@@ -941,7 +940,7 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS,
// We didn't find anything, so try to correct for a typo.
DeclarationName Corrected;
- if (S && (Corrected = CorrectTypo(R, S, &SS, false, CTC))) {
+ if (S && (Corrected = CorrectTypo(R, S, &SS, 0, false, CTC))) {
if (!R.empty()) {
if (isa<ValueDecl>(*R.begin()) || isa<FunctionTemplateDecl>(*R.begin())) {
if (SS.isEmpty())
@@ -1746,8 +1745,29 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
// Variable will be bound by-copy, make it const within the closure.
ExprTy.addConst();
- return Owned(new (Context) BlockDeclRefExpr(VD, ExprTy, Loc, false,
- constAdded));
+ QualType T = VD->getType();
+ BlockDeclRefExpr *BDRE = new (Context) BlockDeclRefExpr(VD,
+ ExprTy, Loc, false,
+ constAdded);
+ if (getLangOptions().CPlusPlus) {
+ if (!T->isDependentType() && !T->isReferenceType()) {
+ Expr *E = new (Context)
+ DeclRefExpr(const_cast<ValueDecl*>(BDRE->getDecl()), T,
+ SourceLocation());
+
+ OwningExprResult Res = PerformCopyInitialization(
+ InitializedEntity::InitializeBlock(VD->getLocation(),
+ T, false),
+ SourceLocation(),
+ Owned(E));
+ if (!Res.isInvalid()) {
+ Res = MaybeCreateCXXExprWithTemporaries(move(Res));
+ Expr *Init = Res.takeAs<Expr>();
+ BDRE->setCopyConstructorExpr(Init);
+ }
+ }
+ }
+ return Owned(BDRE);
}
// If this reference is not in a block or if the referenced variable is
// within the block, create a normal DeclRefExpr.
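A minimal sketch (not part of the patch) of the capture case the new code handles: in C++, a variable captured by copy into a block is now copy-initialized, and the resulting copy-constructor expression is stored on the BlockDeclRefExpr.

    struct S { S(); S(const S &); };    // non-trivial copy constructor
    void g() {
      S s;
      void (^b)(void) = ^{ (void)s; };  // capturing 's' by copy invokes
                                        // S(const S&) in C++ mode
    }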
@@ -2560,13 +2580,23 @@ bool Sema::CheckQualifiedMemberReference(Expr *BaseExpr,
static bool
LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
SourceRange BaseRange, const RecordType *RTy,
- SourceLocation OpLoc, CXXScopeSpec &SS) {
+ SourceLocation OpLoc, CXXScopeSpec &SS,
+ bool HasTemplateArgs) {
RecordDecl *RDecl = RTy->getDecl();
if (SemaRef.RequireCompleteType(OpLoc, QualType(RTy, 0),
SemaRef.PDiag(diag::err_typecheck_incomplete_tag)
<< BaseRange))
return true;
+ if (HasTemplateArgs) {
+ // LookupTemplateName doesn't expect these both to exist simultaneously.
+ QualType ObjectType = SS.isSet() ? QualType() : QualType(RTy, 0);
+
+ bool MOUS;
+ SemaRef.LookupTemplateName(R, 0, SS, ObjectType, false, MOUS);
+ return false;
+ }
+
DeclContext *DC = RDecl;
if (SS.isSet()) {
// If the member name was a qualified-id, look into the
@@ -2610,6 +2640,7 @@ LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
return false;
} else {
R.clear();
+ R.setLookupName(Name);
}
return false;
@@ -2640,14 +2671,14 @@ Sema::BuildMemberReferenceExpr(ExprArg BaseArg, QualType BaseType,
if (IsArrow) RecordTy = RecordTy->getAs<PointerType>()->getPointeeType();
if (LookupMemberExprInRecord(*this, R, SourceRange(),
RecordTy->getAs<RecordType>(),
- OpLoc, SS))
+ OpLoc, SS, TemplateArgs != 0))
return ExprError();
// Explicit member accesses.
} else {
OwningExprResult Result =
LookupMemberExpr(R, Base, IsArrow, OpLoc,
- SS, /*ObjCImpDecl*/ DeclPtrTy());
+ SS, /*ObjCImpDecl*/ DeclPtrTy(), TemplateArgs != 0);
if (Result.isInvalid()) {
Owned(Base);
@@ -2860,7 +2891,7 @@ Sema::OwningExprResult
Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr,
bool &IsArrow, SourceLocation OpLoc,
CXXScopeSpec &SS,
- DeclPtrTy ObjCImpDecl) {
+ DeclPtrTy ObjCImpDecl, bool HasTemplateArgs) {
assert(BaseExpr && "no base expression");
// Perform default conversions.
@@ -2893,6 +2924,7 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr,
OwningExprResult NewBase
= ActOnCallExpr(0, ExprArg(*this, BaseExpr), Loc,
MultiExprArg(*this, 0, 0), 0, Loc);
+ BaseExpr = 0;
if (NewBase.isInvalid())
return ExprError();
@@ -2973,7 +3005,7 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr,
QualType PType;
if (Getter)
- PType = Getter->getResultType();
+ PType = Getter->getSendResultType();
else
// Get the expression type from Setter's incoming parameter.
PType = (*(Setter->param_end() -1))->getType();
@@ -3037,7 +3069,7 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr,
// Handle field access to simple records.
if (const RecordType *RTy = BaseType->getAs<RecordType>()) {
if (LookupMemberExprInRecord(*this, R, BaseExpr->getSourceRange(),
- RTy, OpLoc, SS))
+ RTy, OpLoc, SS, HasTemplateArgs))
return ExprError();
return Owned((Expr*) 0);
}
@@ -3069,6 +3101,9 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr,
IV->getNameAsString());
Diag(IV->getLocation(), diag::note_previous_decl)
<< IV->getDeclName();
+ } else {
+ Res.clear();
+ Res.setLookupName(Member);
}
}
@@ -3146,7 +3181,7 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr,
return ExprError();
return Owned(ObjCMessageExpr::Create(Context,
- OMD->getResultType().getNonReferenceType(),
+ OMD->getSendResultType(),
OpLoc, BaseExpr, Sel,
OMD, NULL, 0, MemberLoc));
}
@@ -3239,44 +3274,24 @@ Sema::OwningExprResult Sema::ActOnMemberAccessExpr(Scope *S, ExprArg BaseArg,
TemplateArgs);
} else {
LookupResult R(*this, Name, NameLoc, LookupMemberName);
- if (TemplateArgs) {
- // Re-use the lookup done for the template name.
- DecomposeTemplateName(R, Id);
-
- // Re-derive the naming class.
- if (SS.isSet()) {
- NestedNameSpecifier *Qualifier
- = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
- if (const Type *Ty = Qualifier->getAsType())
- if (CXXRecordDecl *NamingClass = Ty->getAsCXXRecordDecl())
- R.setNamingClass(NamingClass);
- } else {
- QualType BaseType = Base->getType();
- if (const PointerType *Ptr = BaseType->getAs<PointerType>())
- BaseType = Ptr->getPointeeType();
- if (CXXRecordDecl *NamingClass = BaseType->getAsCXXRecordDecl())
- R.setNamingClass(NamingClass);
- }
- } else {
- Result = LookupMemberExpr(R, Base, IsArrow, OpLoc,
- SS, ObjCImpDecl);
+ Result = LookupMemberExpr(R, Base, IsArrow, OpLoc,
+ SS, ObjCImpDecl, TemplateArgs != 0);
- if (Result.isInvalid()) {
- Owned(Base);
- return ExprError();
- }
+ if (Result.isInvalid()) {
+ Owned(Base);
+ return ExprError();
+ }
- if (Result.get()) {
- // The only way a reference to a destructor can be used is to
- // immediately call it, which falls into this case. If the
- // next token is not a '(', produce a diagnostic and build the
- // call now.
- if (!HasTrailingLParen &&
- Id.getKind() == UnqualifiedId::IK_DestructorName)
- return DiagnoseDtorReference(NameLoc, move(Result));
+ if (Result.get()) {
+ // The only way a reference to a destructor can be used is to
+ // immediately call it, which falls into this case. If the
+ // next token is not a '(', produce a diagnostic and build the
+ // call now.
+ if (!HasTrailingLParen &&
+ Id.getKind() == UnqualifiedId::IK_DestructorName)
+ return DiagnoseDtorReference(NameLoc, move(Result));
- return move(Result);
- }
+ return move(Result);
}
Result = BuildMemberReferenceExpr(ExprArg(*this, Base), Base->getType(),
@@ -3304,9 +3319,10 @@ Sema::OwningExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
MultiLevelTemplateArgumentList ArgList
= getTemplateInstantiationArgs(FD, 0, /*RelativeToPrimary=*/true);
- InstantiatingTemplate Inst(*this, CallLoc, Param,
- ArgList.getInnermost().getFlatArgumentList(),
- ArgList.getInnermost().flat_size());
+ std::pair<const TemplateArgument *, unsigned> Innermost
+ = ArgList.getInnermost();
+ InstantiatingTemplate Inst(*this, CallLoc, Param, Innermost.first,
+ Innermost.second);
OwningExprResult Result = SubstExpr(UninstExpr, ArgList);
if (Result.isInvalid())
@@ -3560,7 +3576,7 @@ Sema::ActOnCallExpr(Scope *S, ExprArg fn, SourceLocation LParenLoc,
BO->getOpcode() == BinaryOperator::PtrMemI) {
if (const FunctionProtoType *FPT
= BO->getType()->getAs<FunctionProtoType>()) {
- QualType ResultTy = FPT->getResultType().getNonReferenceType();
+ QualType ResultTy = FPT->getCallResultType(Context);
ExprOwningPtr<CXXMemberCallExpr>
TheCall(this, new (Context) CXXMemberCallExpr(Context, BO, Args,
@@ -3650,7 +3666,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
return ExprError();
// We know the result type of the call, set it.
- TheCall->setType(FuncT->getResultType().getNonReferenceType());
+ TheCall->setType(FuncT->getCallResultType(Context));
if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FuncT)) {
if (ConvertArgumentsForCall(&*TheCall, Fn, FDecl, Proto, Args, NumArgs,
@@ -3663,7 +3679,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// Check if we have too few/too many template arguments, based
// on our knowledge of the function definition.
const FunctionDecl *Def = 0;
- if (FDecl->getBody(Def) && NumArgs != Def->param_size()) {
+ if (FDecl->hasBody(Def) && NumArgs != Def->param_size()) {
const FunctionProtoType *Proto =
Def->getType()->getAs<FunctionProtoType>();
if (!Proto || !(Proto->isVariadic() && NumArgs >= Def->param_size())) {
@@ -3893,12 +3909,13 @@ bool Sema::CheckCastTypes(SourceRange TyR, QualType castType, Expr *&castExpr,
if (!castType->isArithmeticType()) {
QualType castExprType = castExpr->getType();
- if (!castExprType->isIntegralType() && castExprType->isArithmeticType())
+ if (!castExprType->isIntegralType(Context) &&
+ castExprType->isArithmeticType())
return Diag(castExpr->getLocStart(),
diag::err_cast_pointer_from_non_pointer_int)
<< castExprType << castExpr->getSourceRange();
} else if (!castExpr->getType()->isArithmeticType()) {
- if (!castType->isIntegralType() && castType->isArithmeticType())
+ if (!castType->isIntegralType(Context) && castType->isArithmeticType())
return Diag(castExpr->getLocStart(),
diag::err_cast_pointer_to_non_pointer_int)
<< castType << castExpr->getSourceRange();
@@ -3992,7 +4009,8 @@ Sema::BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
return ExprError();
Op.release();
- return Owned(new (Context) CStyleCastExpr(Ty->getType().getNonReferenceType(),
+ return Owned(new (Context) CStyleCastExpr(
+ Ty->getType().getNonLValueExprType(Context),
Kind, castExpr, BasePath, Ty,
LParenLoc, RParenLoc));
}
@@ -4021,15 +4039,26 @@ Sema::ActOnCastOfParenListExpr(Scope *S, SourceLocation LParenLoc,
TypeSourceInfo *TInfo) {
ParenListExpr *PE = (ParenListExpr *)Op.get();
QualType Ty = TInfo->getType();
+ bool isAltiVecLiteral = false;
- // If this is an altivec initializer, '(' type ')' '(' init, ..., init ')'
- // then handle it as such.
+ // Check for an altivec literal, i.e. a parenthesized list of scalar
+ // elements rather than a single vector value.
if (getLangOptions().AltiVec && Ty->isVectorType()) {
if (PE->getNumExprs() == 0) {
Diag(PE->getExprLoc(), diag::err_altivec_empty_initializer);
return ExprError();
}
+ if (PE->getNumExprs() == 1) {
+ if (!PE->getExpr(0)->getType()->isVectorType())
+ isAltiVecLiteral = true;
+ }
+ else
+ isAltiVecLiteral = true;
+ }
+ // If this is an altivec initializer, '(' type ')' '(' init, ..., init ')'
+ // then handle it as such.
+ if (isAltiVecLiteral) {
llvm::SmallVector<Expr *, 8> initExprs;
for (unsigned i = 0, e = PE->getNumExprs(); i != e; ++i)
initExprs.push_back(PE->getExpr(i));
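A sketch of the initializer forms the new isAltiVecLiteral flag separates, assuming -faltivec (not from the patch):

    vector int a = (vector int)(1);           // one scalar: splatted literal
    vector int b = (vector int)(1, 2, 3, 4);  // several elements: literal
    vector int c = (vector int)(b);           // one vector operand: an
                                              // ordinary cast, not a literal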
@@ -4634,7 +4663,7 @@ Sema::CheckAssignmentConstraints(QualType lhsType, QualType rhsType) {
if (lhsType->isExtVectorType()) {
if (rhsType->isExtVectorType())
return lhsType == rhsType ? Compatible : Incompatible;
- if (!rhsType->isVectorType() && rhsType->isArithmeticType())
+ if (rhsType->isArithmeticType())
return Compatible;
}
@@ -4877,7 +4906,7 @@ Sema::CheckSingleAssignmentConstraints(QualType lhsType, Expr *&rExpr) {
// The getNonReferenceType() call makes sure that the resulting expression
// does not have reference type.
if (result != Incompatible && rExpr->getType() != lhsType)
- ImpCastExprToType(rExpr, lhsType.getNonReferenceType(),
+ ImpCastExprToType(rExpr, lhsType.getNonLValueExprType(Context),
CastExpr::CK_Unknown);
return result;
}
@@ -4932,7 +4961,7 @@ QualType Sema::CheckVectorOperands(SourceLocation Loc, Expr *&lex, Expr *&rex) {
// Handle the case of an ext vector and scalar.
if (const ExtVectorType *LV = lhsType->getAs<ExtVectorType>()) {
QualType EltTy = LV->getElementType();
- if (EltTy->isIntegralType() && rhsType->isIntegralType()) {
+ if (EltTy->isIntegralType(Context) && rhsType->isIntegralType(Context)) {
if (Context.getIntegerTypeOrder(EltTy, rhsType) >= 0) {
ImpCastExprToType(rex, lhsType, CastExpr::CK_IntegralCast);
if (swapped) std::swap(rex, lex);
@@ -5263,6 +5292,16 @@ QualType Sema::CheckShiftOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
return LHSTy;
}
+static bool IsWithinTemplateSpecialization(Decl *D) {
+ if (DeclContext *DC = D->getDeclContext()) {
+ if (isa<ClassTemplateSpecializationDecl>(DC))
+ return true;
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(DC))
+ return FD->isFunctionTemplateSpecialization();
+ }
+ return false;
+}
+
// C99 6.5.8, C++ [expr.rel]
QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
unsigned OpaqueOpc, bool isRelational) {
@@ -5272,30 +5311,55 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
if (lex->getType()->isVectorType() || rex->getType()->isVectorType())
return CheckVectorCompareOperands(lex, rex, Loc, isRelational);
- // C99 6.5.8p3 / C99 6.5.9p4
- if (lex->getType()->isArithmeticType() && rex->getType()->isArithmeticType())
- UsualArithmeticConversions(lex, rex);
- else {
- UsualUnaryConversions(lex);
- UsualUnaryConversions(rex);
- }
QualType lType = lex->getType();
QualType rType = rex->getType();
- if (!lType->isFloatingType()
- && !(lType->isBlockPointerType() && isRelational)) {
+ if (!lType->hasFloatingRepresentation() &&
+ !(lType->isBlockPointerType() && isRelational)) {
// For non-floating point types, check for self-comparisons of the form
// x == x, x != x, x < x, etc. These always evaluate to a constant, and
// often indicate logic errors in the program.
- // NOTE: Don't warn about comparisons of enum constants. These can arise
- // from macro expansions, and are usually quite deliberate.
+ //
+ // NOTE: Don't warn about comparison expressions resulting from macro
+ // expansion. Also don't warn about comparisons which are only self
+ // comparisons within a template specialization. The warnings should catch
+ // obvious cases in the definition of the template anyway. The idea is to
+ // warn when the typed comparison operator will always evaluate to the same
+ // result.
Expr *LHSStripped = lex->IgnoreParens();
Expr *RHSStripped = rex->IgnoreParens();
- if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LHSStripped))
- if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RHSStripped))
- if (DRL->getDecl() == DRR->getDecl() &&
- !isa<EnumConstantDecl>(DRL->getDecl()))
- DiagRuntimeBehavior(Loc, PDiag(diag::warn_selfcomparison));
+ if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LHSStripped)) {
+ if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RHSStripped)) {
+ if (DRL->getDecl() == DRR->getDecl() && !Loc.isMacroID() &&
+ !IsWithinTemplateSpecialization(DRL->getDecl())) {
+ DiagRuntimeBehavior(Loc, PDiag(diag::warn_comparison_always)
+ << 0 // self-
+ << (Opc == BinaryOperator::EQ
+ || Opc == BinaryOperator::LE
+ || Opc == BinaryOperator::GE));
+ } else if (lType->isArrayType() && rType->isArrayType() &&
+ !DRL->getDecl()->getType()->isReferenceType() &&
+ !DRR->getDecl()->getType()->isReferenceType()) {
+ // What is the comparison always going to evaluate to?
+ char always_evals_to;
+ switch(Opc) {
+ case BinaryOperator::EQ: // e.g. array1 == array2
+ always_evals_to = 0; // false
+ break;
+ case BinaryOperator::NE: // e.g. array1 != array2
+ always_evals_to = 1; // true
+ break;
+ default:
+ // best we can say is 'a constant'
+ always_evals_to = 2; // e.g. array1 <= array2
+ break;
+ }
+ DiagRuntimeBehavior(Loc, PDiag(diag::warn_comparison_always)
+ << 1 // array
+ << always_evals_to);
+ }
+ }
+ }
if (isa<CastExpr>(LHSStripped))
LHSStripped = LHSStripped->IgnoreParenCasts();
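A sketch (not from the patch) of comparisons the reworked warn_comparison_always diagnostic flags:

    int x, a[4], b[4];
    if (x == x) { }   // self-comparison: always true for ==, <=, >=
    if (a == b) { }   // distinct arrays decay to distinct pointers: false
    if (a != b) { }   // always true
    if (a <  b) { }   // best the warning can say: 'a constant'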
@@ -5338,6 +5402,17 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
}
}
+ // C99 6.5.8p3 / C99 6.5.9p4
+ if (lex->getType()->isArithmeticType() && rex->getType()->isArithmeticType())
+ UsualArithmeticConversions(lex, rex);
+ else {
+ UsualUnaryConversions(lex);
+ UsualUnaryConversions(rex);
+ }
+
+ lType = lex->getType();
+ rType = rex->getType();
+
// The result of comparisons is 'bool' in C++, 'int' in C.
QualType ResultTy = getLangOptions().CPlusPlus ? Context.BoolTy:Context.IntTy;
@@ -5346,7 +5421,7 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
return ResultTy;
} else {
// Check for comparisons of floating point operands using != and ==.
- if (lType->isFloatingType() && rType->isFloatingType())
+ if (lType->hasFloatingRepresentation())
CheckFloatComparison(Loc,lex,rex);
if (lType->isArithmeticType() && rType->isArithmeticType())
@@ -5358,9 +5433,8 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
bool RHSIsNull = rex->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNull);
- // All of the following pointer related warnings are GCC extensions, except
- // when handling null pointer constants. One day, we can consider making them
- // errors (when -pedantic-errors is enabled).
+ // All of the following pointer-related warnings are GCC extensions, except
+ // when handling null pointer constants.
if (lType->isPointerType() && rType->isPointerType()) { // C99 6.5.8p2
QualType LCanPointeeTy =
Context.getCanonicalType(lType->getAs<PointerType>()->getPointeeType());
@@ -5374,10 +5448,19 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
(LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) {
// Valid unless comparison between non-null pointer and function pointer
// This is a gcc extension compatibility comparison.
+ // In an SFINAE context, we treat this as a hard error to maintain
+ // conformance with the C++ standard.
if ((LCanPointeeTy->isFunctionType() || RCanPointeeTy->isFunctionType())
&& !LHSIsNull && !RHSIsNull) {
- Diag(Loc, diag::ext_typecheck_comparison_of_fptr_to_void)
+ Diag(Loc,
+ isSFINAEContext()?
+ diag::err_typecheck_comparison_of_fptr_to_void
+ : diag::ext_typecheck_comparison_of_fptr_to_void)
<< lType << rType << lex->getSourceRange() << rex->getSourceRange();
+
+ if (isSFINAEContext())
+ return QualType();
+
ImpCastExprToType(rex, lType, CastExpr::CK_BitCast);
return ResultTy;
}
@@ -5541,40 +5624,36 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
return ResultTy;
}
}
- if (lType->isAnyPointerType() && rType->isIntegerType()) {
+ if ((lType->isAnyPointerType() && rType->isIntegerType()) ||
+ (lType->isIntegerType() && rType->isAnyPointerType())) {
unsigned DiagID = 0;
- if (RHSIsNull) {
- if (isRelational)
+ bool isError = false;
+ if ((LHSIsNull && lType->isIntegerType()) ||
+ (RHSIsNull && rType->isIntegerType())) {
+ if (isRelational && !getLangOptions().CPlusPlus)
DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_and_zero;
- } else if (isRelational)
+ } else if (isRelational && !getLangOptions().CPlusPlus)
DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_integer;
- else
+ else if (getLangOptions().CPlusPlus) {
+ DiagID = diag::err_typecheck_comparison_of_pointer_integer;
+ isError = true;
+ } else
DiagID = diag::ext_typecheck_comparison_of_pointer_integer;
if (DiagID) {
Diag(Loc, DiagID)
<< lType << rType << lex->getSourceRange() << rex->getSourceRange();
+ if (isError)
+ return QualType();
}
- ImpCastExprToType(rex, lType, CastExpr::CK_IntegralToPointer);
- return ResultTy;
- }
- if (lType->isIntegerType() && rType->isAnyPointerType()) {
- unsigned DiagID = 0;
- if (LHSIsNull) {
- if (isRelational)
- DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_and_zero;
- } else if (isRelational)
- DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_integer;
+
+ if (lType->isIntegerType())
+ ImpCastExprToType(lex, rType, CastExpr::CK_IntegralToPointer);
else
- DiagID = diag::ext_typecheck_comparison_of_pointer_integer;
-
- if (DiagID) {
- Diag(Loc, DiagID)
- << lType << rType << lex->getSourceRange() << rex->getSourceRange();
- }
- ImpCastExprToType(lex, rType, CastExpr::CK_IntegralToPointer);
+ ImpCastExprToType(rex, lType, CastExpr::CK_IntegralToPointer);
return ResultTy;
}
+
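A sketch (not from the patch) of the pointer/integer comparisons the merged code path now classifies:

    int *p;
    if (p == 0) { }   // null pointer constant: fine in both C and C++
    if (p == 4) { }   // C: extension warning; C++: now a hard error
                      // (err_typecheck_comparison_of_pointer_integer)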
// Handle block pointers.
if (!isRelational && RHSIsNull
&& lType->isBlockPointerType() && rType->isIntegerType()) {
@@ -5608,16 +5687,20 @@ QualType Sema::CheckVectorCompareOperands(Expr *&lex, Expr *&rex,
// For non-floating point types, check for self-comparisons of the form
// x == x, x != x, x < x, etc. These always evaluate to a constant, and
// often indicate logic errors in the program.
- if (!lType->isFloatingType()) {
+ if (!lType->hasFloatingRepresentation()) {
if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(lex->IgnoreParens()))
if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(rex->IgnoreParens()))
if (DRL->getDecl() == DRR->getDecl())
- DiagRuntimeBehavior(Loc, PDiag(diag::warn_selfcomparison));
+ DiagRuntimeBehavior(Loc,
+ PDiag(diag::warn_comparison_always)
+ << 0 // self-
+ << 2 // "a constant"
+ );
}
// Check for comparisons of floating point operands using != and ==.
- if (!isRelational && lType->isFloatingType()) {
- assert (rType->isFloatingType());
+ if (!isRelational && lType->hasFloatingRepresentation()) {
+ assert (rType->hasFloatingRepresentation());
CheckFloatComparison(Loc,lex,rex);
}
@@ -5652,7 +5735,25 @@ inline QualType Sema::CheckBitwiseOperands(
}
inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
- Expr *&lex, Expr *&rex, SourceLocation Loc) {
+ Expr *&lex, Expr *&rex, SourceLocation Loc, unsigned Opc) {
+
+ // Diagnose cases where the user writes a logical and/or but probably meant a
+ // bitwise one. We do this when the LHS is a non-bool integer and the RHS
+ // is a constant.
+ if (lex->getType()->isIntegerType() && !lex->getType()->isBooleanType() &&
+ rex->getType()->isIntegerType() && rex->isEvaluatable(Context) &&
+ // Don't warn if the RHS is a (constant folded) boolean expression like
+ // "sizeof(int) == 4".
+ !rex->isKnownToHaveBooleanValue() &&
+ // Don't warn in macros.
+ !Loc.isMacroID())
+ Diag(Loc, diag::warn_logical_instead_of_bitwise)
+ << rex->getSourceRange()
+ << (Opc == BinaryOperator::LAnd ? "&&" : "||")
+ << (Opc == BinaryOperator::LAnd ? "&" : "|");
+
+
if (!Context.getLangOptions().CPlusPlus) {
UsualUnaryConversions(lex);
UsualUnaryConversions(rex);
@@ -5663,25 +5764,14 @@ inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
return Context.IntTy;
}
+ // The following is safe because we only use this method for
+ // non-overloadable operands.
+
// C++ [expr.log.and]p1
// C++ [expr.log.or]p1
- // The operands are both implicitly converted to type bool (clause 4).
- StandardConversionSequence LHS;
- if (!IsStandardConversion(lex, Context.BoolTy,
- /*InOverloadResolution=*/false, LHS))
- return InvalidOperands(Loc, lex, rex);
-
- if (PerformImplicitConversion(lex, Context.BoolTy, LHS,
- AA_Passing, /*IgnoreBaseAccess=*/false))
- return InvalidOperands(Loc, lex, rex);
-
- StandardConversionSequence RHS;
- if (!IsStandardConversion(rex, Context.BoolTy,
- /*InOverloadResolution=*/false, RHS))
- return InvalidOperands(Loc, lex, rex);
-
- if (PerformImplicitConversion(rex, Context.BoolTy, RHS,
- AA_Passing, /*IgnoreBaseAccess=*/false))
+ // The operands are both contextually converted to type bool.
+ if (PerformContextuallyConvertToBool(lex) ||
+ PerformContextuallyConvertToBool(rex))
return InvalidOperands(Loc, lex, rex);
// C++ [expr.log.and]p2
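A sketch (not from the patch) of what the new warn_logical_instead_of_bitwise check fires on:

    int flags = 6;
    if (flags && 0x4) { }   // warns: constant RHS with '&&'; the
                            // diagnostic suggests '&' instead
    if (flags && (sizeof(int) == 4)) { }  // no warning: the RHS is known
                                          // to be boolean-valued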
@@ -5786,11 +5876,22 @@ QualType Sema::CheckAssignmentOperands(Expr *LHS, Expr *&RHS,
QualType LHSType = LHS->getType();
QualType RHSType = CompoundType.isNull() ? RHS->getType() : CompoundType;
-
AssignConvertType ConvTy;
if (CompoundType.isNull()) {
+ QualType LHSTy(LHSType);
// Simple assignment "x = y".
- ConvTy = CheckSingleAssignmentConstraints(LHSType, RHS);
+ if (const ObjCImplicitSetterGetterRefExpr *OISGE =
+ dyn_cast<ObjCImplicitSetterGetterRefExpr>(LHS)) {
+ // If property-dot syntax is used for the assignment and there is a
+ // setter, the RHS expression is passed as the setter's argument. So
+ // type conversion (and comparison) is RHS to the setter's parameter type.
+ if (const ObjCMethodDecl *SetterMD = OISGE->getSetterMethod()) {
+ ObjCMethodDecl::param_iterator P = SetterMD->param_begin();
+ LHSTy = (*P)->getType();
+ }
+ }
+
+ ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
// Special case of NSObject attributes on c-style pointer types.
if (ConvTy == IncompatiblePointer &&
((Context.isObjCNSObjectType(LHSType) &&
@@ -5829,6 +5930,23 @@ QualType Sema::CheckAssignmentOperands(Expr *LHS, Expr *&RHS,
RHS, AA_Assigning))
return QualType();
+
+ // Check to see if the destination operand is a dereferenced null pointer. If
+ // so, and if not volatile-qualified, this is undefined behavior that the
+ // optimizer will delete, so warn about it. People sometimes try to use this
+ // to get a deterministic trap and are surprised by clang's behavior. This
+ // only handles the pattern "*null = whatever", which is a very syntactic
+ // check.
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(LHS->IgnoreParenCasts()))
+ if (UO->getOpcode() == UnaryOperator::Deref &&
+ UO->getSubExpr()->IgnoreParenCasts()->
+ isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull) &&
+ !UO->getType().isVolatileQualified()) {
+ Diag(UO->getOperatorLoc(), diag::warn_indirection_through_null)
+ << UO->getSubExpr()->getSourceRange();
+ Diag(UO->getOperatorLoc(), diag::note_indirection_through_null);
+ }
+
// C99 6.5.16p3: The type of an assignment expression is the type of the
// left operand unless the left operand has qualified type, in which case
// it is the unqualified version of the type of the left operand.
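A sketch (not from the patch) of the store-through-null pattern the new check warns about:

    *(int *)0 = 42;            // warned: undefined behavior that the
                               // optimizer may delete; not a reliable trap
    *(volatile int *)0 = 42;   // volatile-qualified: not warned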
@@ -5841,6 +5959,8 @@ QualType Sema::CheckAssignmentOperands(Expr *LHS, Expr *&RHS,
// C99 6.5.17
QualType Sema::CheckCommaOperands(Expr *LHS, Expr *&RHS, SourceLocation Loc) {
+ DiagnoseUnusedExprResult(LHS);
+
// Comma performs lvalue conversion (C99 6.3.2.1), but not unary conversions.
// C++ does not perform this conversion (C++ [expr.comma]p1).
if (!getLangOptions().CPlusPlus)
@@ -6025,13 +6145,17 @@ QualType Sema::CheckAddressOfOperand(Expr *op, SourceLocation OpLoc) {
return Context.getMemberPointerType(op->getType(),
Context.getTypeDeclType(cast<RecordDecl>(dcl->getDeclContext()))
.getTypePtr());
- } else if (lval == Expr::LV_ClassTemporary) {
+ }
+
+ if (lval == Expr::LV_ClassTemporary) {
Diag(OpLoc, isSFINAEContext()? diag::err_typecheck_addrof_class_temporary
: diag::ext_typecheck_addrof_class_temporary)
<< op->getType() << op->getSourceRange();
if (isSFINAEContext())
return QualType();
- } else if (lval != Expr::LV_Valid && lval != Expr::LV_IncompleteVoidType) {
+ } else if (isa<ObjCSelectorExpr>(op))
+ return Context.getPointerType(op->getType());
+ else if (lval != Expr::LV_Valid && lval != Expr::LV_IncompleteVoidType) {
// C99 6.5.3.2p1
// The operand must be either an l-value or a function designator
if (!op->getType()->isFunctionType()) {
@@ -6112,26 +6236,32 @@ QualType Sema::CheckAddressOfOperand(Expr *op, SourceLocation OpLoc) {
return Context.getPointerType(op->getType());
}
+/// CheckIndirectionOperand - Type check unary indirection (prefix '*').
QualType Sema::CheckIndirectionOperand(Expr *Op, SourceLocation OpLoc) {
if (Op->isTypeDependent())
return Context.DependentTy;
UsualUnaryConversions(Op);
- QualType Ty = Op->getType();
-
- // Note that per both C89 and C99, this is always legal, even if ptype is an
- // incomplete type or void. It would be possible to warn about dereferencing
- // a void pointer, but it's completely well-defined, and such a warning is
- // unlikely to catch any mistakes.
- if (const PointerType *PT = Ty->getAs<PointerType>())
- return PT->getPointeeType();
-
- if (const ObjCObjectPointerType *OPT = Ty->getAs<ObjCObjectPointerType>())
- return OPT->getPointeeType();
-
- Diag(OpLoc, diag::err_typecheck_indirection_requires_pointer)
- << Ty << Op->getSourceRange();
- return QualType();
+ QualType OpTy = Op->getType();
+ QualType Result;
+
+ // Note that per both C89 and C99, indirection is always legal, even if OpTy
+ // is an incomplete type or void. It would be possible to warn about
+ // dereferencing a void pointer, but it's completely well-defined, and such a
+ // warning is unlikely to catch any mistakes.
+ if (const PointerType *PT = OpTy->getAs<PointerType>())
+ Result = PT->getPointeeType();
+ else if (const ObjCObjectPointerType *OPT =
+ OpTy->getAs<ObjCObjectPointerType>())
+ Result = OPT->getPointeeType();
+
+ if (Result.isNull()) {
+ Diag(OpLoc, diag::err_typecheck_indirection_requires_pointer)
+ << OpTy << Op->getSourceRange();
+ return QualType();
+ }
+
+ return Result;
}
static inline BinaryOperator::Opcode ConvertTokenKindToBinaryOpcode(
@@ -6251,7 +6381,7 @@ Action::OwningExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
break;
case BinaryOperator::LAnd:
case BinaryOperator::LOr:
- ResultTy = CheckLogicalOperands(lhs, rhs, OpLoc);
+ ResultTy = CheckLogicalOperands(lhs, rhs, OpLoc, Opc);
break;
case BinaryOperator::MulAssign:
case BinaryOperator::DivAssign:
@@ -6479,7 +6609,8 @@ Action::OwningExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
resultType = Input->getType();
if (resultType->isDependentType())
break;
- if (resultType->isArithmeticType()) // C99 6.5.3.3p1
+ if (resultType->isArithmeticType() || // C99 6.5.3.3p1
+ resultType->isVectorType())
break;
else if (getLangOptions().CPlusPlus && // C++ [expr.unary.op]p6-7
resultType->isEnumeralType())
@@ -6980,83 +7111,99 @@ void Sema::ActOnBlockStart(SourceLocation CaretLoc, Scope *BlockScope) {
BlockDecl *Block = BlockDecl::Create(Context, CurContext, CaretLoc);
PushBlockScope(BlockScope, Block);
CurContext->addDecl(Block);
- PushDeclContext(BlockScope, Block);
+ if (BlockScope)
+ PushDeclContext(BlockScope, Block);
+ else
+ CurContext = Block;
}
void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
assert(ParamInfo.getIdentifier()==0 && "block-id should have no identifier!");
BlockScopeInfo *CurBlock = getCurBlock();
- if (ParamInfo.getNumTypeObjects() == 0
- || ParamInfo.getTypeObject(0).Kind != DeclaratorChunk::Function) {
- ProcessDeclAttributes(CurScope, CurBlock->TheDecl, ParamInfo);
- QualType T = GetTypeForDeclarator(ParamInfo, CurScope);
-
- if (T->isArrayType()) {
- Diag(ParamInfo.getSourceRange().getBegin(),
- diag::err_block_returns_array);
- return;
- }
-
- // The parameter list is optional, if there was none, assume ().
- if (!T->isFunctionType())
- T = Context.getFunctionType(T, 0, 0, false, 0, false, false, 0, 0,
- FunctionType::ExtInfo());
+ TypeSourceInfo *Sig = GetTypeForDeclarator(ParamInfo, CurScope);
+ CurBlock->TheDecl->setSignatureAsWritten(Sig);
+ QualType T = Sig->getType();
+
+ bool isVariadic;
+ QualType RetTy;
+ if (const FunctionType *Fn = T->getAs<FunctionType>()) {
+ CurBlock->FunctionType = T;
+ RetTy = Fn->getResultType();
+ isVariadic =
+ !isa<FunctionProtoType>(Fn) || cast<FunctionProtoType>(Fn)->isVariadic();
+ } else {
+ RetTy = T;
+ isVariadic = false;
+ }
- CurBlock->hasPrototype = true;
- CurBlock->isVariadic = false;
- // Check for a valid sentinel attribute on this block.
- if (CurBlock->TheDecl->getAttr<SentinelAttr>()) {
- Diag(ParamInfo.getAttributes()->getLoc(),
- diag::warn_attribute_sentinel_not_variadic) << 1;
- // FIXME: remove the attribute.
- }
- QualType RetTy = T.getTypePtr()->getAs<FunctionType>()->getResultType();
+ CurBlock->TheDecl->setIsVariadic(isVariadic);
- // Do not allow returning a objc interface by-value.
- if (RetTy->isObjCObjectType()) {
- Diag(ParamInfo.getSourceRange().getBegin(),
- diag::err_object_cannot_be_passed_returned_by_value) << 0 << RetTy;
- return;
- }
+ // Don't allow returning an array by value.
+ if (RetTy->isArrayType()) {
+ Diag(ParamInfo.getSourceRange().getBegin(), diag::err_block_returns_array);
+ return;
+ }
- CurBlock->ReturnType = RetTy;
+ // Don't allow returning an Objective-C interface by value.
+ if (RetTy->isObjCObjectType()) {
+ Diag(ParamInfo.getSourceRange().getBegin(),
+ diag::err_object_cannot_be_passed_returned_by_value) << 0 << RetTy;
return;
}
- // Analyze arguments to block.
- assert(ParamInfo.getTypeObject(0).Kind == DeclaratorChunk::Function &&
- "Not a function declarator!");
- DeclaratorChunk::FunctionTypeInfo &FTI = ParamInfo.getTypeObject(0).Fun;
-
- CurBlock->hasPrototype = FTI.hasPrototype;
- CurBlock->isVariadic = true;
-
- // Check for C99 6.7.5.3p10 - foo(void) is a non-varargs function that takes
- // no arguments, not a function that takes a single void argument.
- if (FTI.hasPrototype &&
- FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 &&
- (!FTI.ArgInfo[0].Param.getAs<ParmVarDecl>()->getType().getCVRQualifiers()&&
- FTI.ArgInfo[0].Param.getAs<ParmVarDecl>()->getType()->isVoidType())) {
- // empty arg list, don't push any params.
- CurBlock->isVariadic = false;
- } else if (FTI.hasPrototype) {
- for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) {
- ParmVarDecl *Param = FTI.ArgInfo[i].Param.getAs<ParmVarDecl>();
+ // Context.DependentTy is used as a placeholder for a missing block
+ // return type. TODO: what should we do with declarators like:
+ // ^ * { ... }
+ // If the answer is "apply template argument deduction"....
+ if (RetTy != Context.DependentTy)
+ CurBlock->ReturnType = RetTy;
+
+ // Push block parameters from the declarator if we had them.
+ llvm::SmallVector<ParmVarDecl*, 8> Params;
+ if (isa<FunctionProtoType>(T)) {
+ FunctionProtoTypeLoc TL = cast<FunctionProtoTypeLoc>(Sig->getTypeLoc());
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ ParmVarDecl *Param = TL.getArg(I);
if (Param->getIdentifier() == 0 &&
!Param->isImplicit() &&
!Param->isInvalidDecl() &&
!getLangOptions().CPlusPlus)
Diag(Param->getLocation(), diag::err_parameter_name_omitted);
- CurBlock->Params.push_back(Param);
+ Params.push_back(Param);
+ }
+
+ // Fake up parameter variables if we have a typedef, like
+ // ^ fntype { ... }
+ } else if (const FunctionProtoType *Fn = T->getAs<FunctionProtoType>()) {
+ for (FunctionProtoType::arg_type_iterator
+ I = Fn->arg_type_begin(), E = Fn->arg_type_end(); I != E; ++I) {
+ ParmVarDecl *Param =
+ BuildParmVarDeclForTypedef(CurBlock->TheDecl,
+ ParamInfo.getSourceRange().getBegin(),
+ *I);
+ Params.push_back(Param);
}
- CurBlock->isVariadic = FTI.isVariadic;
}
- CurBlock->TheDecl->setParams(CurBlock->Params.data(),
- CurBlock->Params.size());
- CurBlock->TheDecl->setIsVariadic(CurBlock->isVariadic);
+
+ // Set the parameters on the block decl.
+ if (!Params.empty())
+ CurBlock->TheDecl->setParams(Params.data(), Params.size());
+
+ // Finally we can process decl attributes.
ProcessDeclAttributes(CurScope, CurBlock->TheDecl, ParamInfo);
+ if (!isVariadic && CurBlock->TheDecl->getAttr<SentinelAttr>()) {
+ Diag(ParamInfo.getAttributes()->getLoc(),
+ diag::warn_attribute_sentinel_not_variadic) << 1;
+ // FIXME: remove the attribute.
+ }
+
+ // Put the parameter variables in scope. We can bail out immediately
+ // if we don't have any.
+ if (Params.empty())
+ return;
+
bool ShouldCheckShadow =
Diags.getDiagnosticLevel(diag::warn_decl_shadow) != Diagnostic::Ignored;
@@ -7072,25 +7219,6 @@ void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
PushOnScopeChains(*AI, CurBlock->TheScope);
}
}
-
- // Check for a valid sentinel attribute on this block.
- if (!CurBlock->isVariadic &&
- CurBlock->TheDecl->getAttr<SentinelAttr>()) {
- Diag(ParamInfo.getAttributes()->getLoc(),
- diag::warn_attribute_sentinel_not_variadic) << 1;
- // FIXME: remove the attribute.
- }
-
- // Analyze the return type.
- QualType T = GetTypeForDeclarator(ParamInfo, CurScope);
- QualType RetTy = T->getAs<FunctionType>()->getResultType();
-
- // Do not allow returning a objc interface by-value.
- if (RetTy->isObjCObjectType()) {
- Diag(ParamInfo.getSourceRange().getBegin(),
- diag::err_object_cannot_be_passed_returned_by_value) << 0 << RetTy;
- } else if (!RetTy->isDependentType())
- CurBlock->ReturnType = RetTy;
}
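Per the "fake up parameter variables" comment above, a hedged sketch of the typedef'd-signature block (the ^fn_t syntax is taken from that comment; names are hypothetical):

    typedef void fn_t(int);
    void (^b)(int) = ^fn_t {
      // the signature comes from the typedef, so no declarator supplies
      // ParmVarDecls; Sema builds unnamed placeholders via
      // BuildParmVarDeclForTypedef
    };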
/// ActOnBlockError - If there is an error parsing a block, this callback
@@ -7111,29 +7239,59 @@ Sema::OwningExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
Diag(CaretLoc, diag::err_blocks_disable);
BlockScopeInfo *BSI = cast<BlockScopeInfo>(FunctionScopes.back());
-
+
PopDeclContext();
QualType RetTy = Context.VoidTy;
if (!BSI->ReturnType.isNull())
RetTy = BSI->ReturnType;
- llvm::SmallVector<QualType, 8> ArgTypes;
- for (unsigned i = 0, e = BSI->Params.size(); i != e; ++i)
- ArgTypes.push_back(BSI->Params[i]->getType());
-
bool NoReturn = BSI->TheDecl->getAttr<NoReturnAttr>();
QualType BlockTy;
- if (!BSI->hasPrototype)
- BlockTy = Context.getFunctionType(RetTy, 0, 0, false, 0, false, false, 0, 0,
- FunctionType::ExtInfo(NoReturn, 0, CC_Default));
- else
- BlockTy = Context.getFunctionType(RetTy, ArgTypes.data(), ArgTypes.size(),
- BSI->isVariadic, 0, false, false, 0, 0,
- FunctionType::ExtInfo(NoReturn, 0, CC_Default));
+
+ // If the user wrote a function type in some form, try to use that.
+ if (!BSI->FunctionType.isNull()) {
+ const FunctionType *FTy = BSI->FunctionType->getAs<FunctionType>();
+
+ FunctionType::ExtInfo Ext = FTy->getExtInfo();
+ if (NoReturn && !Ext.getNoReturn()) Ext = Ext.withNoReturn(true);
+
+ // Turn protoless block types into nullary block types.
+ if (isa<FunctionNoProtoType>(FTy)) {
+ BlockTy = Context.getFunctionType(RetTy, 0, 0, false, 0,
+ false, false, 0, 0, Ext);
+
+ // Otherwise, if we don't need to change anything about the function type,
+ // preserve its sugar structure.
+ } else if (FTy->getResultType() == RetTy &&
+ (!NoReturn || FTy->getNoReturnAttr())) {
+ BlockTy = BSI->FunctionType;
+
+ // Otherwise, make the minimal modifications to the function type.
+ } else {
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy);
+ BlockTy = Context.getFunctionType(RetTy,
+ FPT->arg_type_begin(),
+ FPT->getNumArgs(),
+ FPT->isVariadic(),
+ /*quals*/ 0,
+ FPT->hasExceptionSpec(),
+ FPT->hasAnyExceptionSpec(),
+ FPT->getNumExceptions(),
+ FPT->exception_begin(),
+ Ext);
+ }
+
+ // If we don't have a function type, just build one from nothing.
+ } else {
+ BlockTy = Context.getFunctionType(RetTy, 0, 0, false, 0,
+ false, false, 0, 0,
+ FunctionType::ExtInfo(NoReturn, 0, CC_Default));
+ }
// FIXME: Check that return/parameter types are complete/non-abstract
- DiagnoseUnusedParameters(BSI->Params.begin(), BSI->Params.end());
+ DiagnoseUnusedParameters(BSI->TheDecl->param_begin(),
+ BSI->TheDecl->param_end());
BlockTy = Context.getBlockPointerType(BlockTy);
// If needed, diagnose invalid gotos and switches in the block.
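
A quick sketch of two of the cases the new block-type computation
distinguishes (blocks extension assumed; names illustrative):

    void (^a)(void) = ^{ };                       // protoless literal: gets a
                                                  // nullary block type
    int  (^b)(int)  = ^int(int x) { return x; };  // written prototype whose
                                                  // sugar is preserved
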
@@ -7210,7 +7368,8 @@ Sema::OwningExprResult Sema::ActOnVAArg(SourceLocation BuiltinLoc,
// FIXME: Warn if a non-POD type is passed in.
expr.release();
- return Owned(new (Context) VAArgExpr(BuiltinLoc, E, T.getNonReferenceType(),
+ return Owned(new (Context) VAArgExpr(BuiltinLoc, E,
+ T.getNonLValueExprType(Context),
RPLoc));
}
@@ -7445,7 +7604,7 @@ Sema::PopExpressionEvaluationContext() {
void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) {
assert(D && "No declaration?");
- if (D->isUsed())
+ if (D->isUsed(false))
return;
// Mark a parameter or variable declaration "used", regardless of whether we're in a
@@ -7488,24 +7647,24 @@ void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) {
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
unsigned TypeQuals;
if (Constructor->isImplicit() && Constructor->isDefaultConstructor()) {
- if (!Constructor->isUsed())
+ if (!Constructor->isUsed(false))
DefineImplicitDefaultConstructor(Loc, Constructor);
} else if (Constructor->isImplicit() &&
Constructor->isCopyConstructor(TypeQuals)) {
- if (!Constructor->isUsed())
+ if (!Constructor->isUsed(false))
DefineImplicitCopyConstructor(Loc, Constructor, TypeQuals);
}
MarkVTableUsed(Loc, Constructor->getParent());
} else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) {
- if (Destructor->isImplicit() && !Destructor->isUsed())
+ if (Destructor->isImplicit() && !Destructor->isUsed(false))
DefineImplicitDestructor(Loc, Destructor);
if (Destructor->isVirtual())
MarkVTableUsed(Loc, Destructor->getParent());
} else if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(D)) {
if (MethodDecl->isImplicit() && MethodDecl->isOverloadedOperator() &&
MethodDecl->getOverloadedOperator() == OO_Equal) {
- if (!MethodDecl->isUsed())
+ if (!MethodDecl->isUsed(false))
DefineImplicitCopyAssignment(Loc, MethodDecl);
} else if (MethodDecl->isVirtual())
MarkVTableUsed(Loc, MethodDecl->getParent());
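
For reference, the user-level constructs that reach these DefineImplicit*
calls (illustrative sketch):

    struct S { int x; };
    void f() {
      S a;      // defines the implicit default constructor on first use
      S b(a);   // implicit copy constructor
      b = a;    // implicit copy-assignment operator
    }           // end of scope: implicit destructor
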
@@ -7569,45 +7728,46 @@ void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) {
}
namespace {
- // Mark all of the declarations referenced
+ // Mark all of the declarations referenced
// FIXME: Not fully implemented yet! We need to have a better understanding
- // of when we're entering
+ // of when we're entering
class MarkReferencedDecls : public RecursiveASTVisitor<MarkReferencedDecls> {
Sema &S;
SourceLocation Loc;
-
+
public:
typedef RecursiveASTVisitor<MarkReferencedDecls> Inherited;
-
+
MarkReferencedDecls(Sema &S, SourceLocation Loc) : S(S), Loc(Loc) { }
-
- bool VisitTemplateArgument(const TemplateArgument &Arg);
- bool VisitRecordType(RecordType *T);
+
+ bool TraverseTemplateArgument(const TemplateArgument &Arg);
+ bool TraverseRecordType(RecordType *T);
};
}
-bool MarkReferencedDecls::VisitTemplateArgument(const TemplateArgument &Arg) {
+bool MarkReferencedDecls::TraverseTemplateArgument(
+ const TemplateArgument &Arg) {
if (Arg.getKind() == TemplateArgument::Declaration) {
S.MarkDeclarationReferenced(Loc, Arg.getAsDecl());
}
-
- return Inherited::VisitTemplateArgument(Arg);
+
+ return Inherited::TraverseTemplateArgument(Arg);
}
-bool MarkReferencedDecls::VisitRecordType(RecordType *T) {
+bool MarkReferencedDecls::TraverseRecordType(RecordType *T) {
if (ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl())) {
const TemplateArgumentList &Args = Spec->getTemplateArgs();
- return VisitTemplateArguments(Args.getFlatArgumentList(),
- Args.flat_size());
+ return TraverseTemplateArguments(Args.getFlatArgumentList(),
+ Args.flat_size());
}
- return false;
+ return true;
}
void Sema::MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T) {
MarkReferencedDecls Marker(*this, Loc);
- Marker.Visit(Context.getCanonicalType(T));
+ Marker.TraverseType(Context.getCanonicalType(T));
}
/// \brief Emit a diagnostic that describes an effect on the run-time behavior
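
The Visit*-to-Traverse* renames above follow RecursiveASTVisitor's protocol:
Traverse* methods drive the recursion themselves and return true to continue
(false aborts the whole traversal, which is why the old "return false" had to
become "return true"). A minimal sketch of the same pattern, under the same
assumptions as the code above:

    class FindRecords : public RecursiveASTVisitor<FindRecords> {
    public:
      bool TraverseRecordType(RecordType *T) {
        // ... inspect T->getDecl() here ...
        return true;  // keep walking; returning false stops the traversal
      }
    };
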
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
index 97de96a..090400f 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
@@ -52,6 +53,8 @@ Action::TypeTy *Sema::getDestructorName(SourceLocation TildeLoc,
// }
//
// See also PR6358 and PR6359.
+ // For this reason, we're currently only doing the C++03 version of this
+ // code; the C++0x version has to wait until we get a proper spec.
QualType SearchType;
DeclContext *LookupCtx = 0;
bool isDependent = false;
@@ -68,50 +71,33 @@ Action::TypeTy *Sema::getDestructorName(SourceLocation TildeLoc,
bool AlreadySearched = false;
bool LookAtPrefix = true;
- if (!getLangOptions().CPlusPlus0x) {
- // C++ [basic.lookup.qual]p6:
- // If a pseudo-destructor-name (5.2.4) contains a nested-name-specifier,
- // the type-names are looked up as types in the scope designated by the
- // nested-name-specifier. In a qualified-id of the form:
- //
- // ::[opt] nested-name-specifier ~ class-name
- //
- // where the nested-name-specifier designates a namespace scope, and in
- // a qualified-id of the form:
- //
- // ::opt nested-name-specifier class-name :: ~ class-name
- //
- // the class-names are looked up as types in the scope designated by
- // the nested-name-specifier.
- //
- // Here, we check the first case (completely) and determine whether the
- // code below is permitted to look at the prefix of the
- // nested-name-specifier (as we do in C++0x).
- DeclContext *DC = computeDeclContext(SS, EnteringContext);
- if (DC && DC->isFileContext()) {
- AlreadySearched = true;
- LookupCtx = DC;
- isDependent = false;
- } else if (DC && isa<CXXRecordDecl>(DC))
- LookAtPrefix = false;
- }
-
- // C++0x [basic.lookup.qual]p6:
- // If a pseudo-destructor-name (5.2.4) contains a
- // nested-name-specifier, the type-names are looked up as types
- // in the scope designated by the nested-name-specifier. Similarly, in
- // a qualified-id of the form:
+ // C++ [basic.lookup.qual]p6:
+ // If a pseudo-destructor-name (5.2.4) contains a nested-name-specifier,
+ // the type-names are looked up as types in the scope designated by the
+ // nested-name-specifier. In a qualified-id of the form:
+ //
+ // ::[opt] nested-name-specifier ~ class-name
//
- // :: [opt] nested-name-specifier[opt] class-name :: ~class-name
+ // where the nested-name-specifier designates a namespace scope, and in
+ // a qualified-id of the form:
//
- // the second class-name is looked up in the same scope as the first.
+ // ::opt nested-name-specifier class-name :: ~ class-name
//
- // To implement this, we look at the prefix of the
- // nested-name-specifier we were given, and determine the lookup
- // context from that.
+ // the class-names are looked up as types in the scope designated by
+ // the nested-name-specifier.
//
- // We also fold in the second case from the C++03 rules quoted further
- // above.
+ // Here, we check the first case (completely) and determine whether the
+ // code below is permitted to look at the prefix of the
+ // nested-name-specifier.
+ DeclContext *DC = computeDeclContext(SS, EnteringContext);
+ if (DC && DC->isFileContext()) {
+ AlreadySearched = true;
+ LookupCtx = DC;
+ isDependent = false;
+ } else if (DC && isa<CXXRecordDecl>(DC))
+ LookAtPrefix = false;
+
+ // The second case from the C++03 rules quoted further above.
NestedNameSpecifier *Prefix = 0;
if (AlreadySearched) {
// Nothing left to do.
@@ -120,11 +106,6 @@ Action::TypeTy *Sema::getDestructorName(SourceLocation TildeLoc,
PrefixSS.setScopeRep(Prefix);
LookupCtx = computeDeclContext(PrefixSS, EnteringContext);
isDependent = isDependentScopeSpecifier(PrefixSS);
- } else if (getLangOptions().CPlusPlus0x &&
- (LookupCtx = computeDeclContext(SS, EnteringContext))) {
- if (!LookupCtx->isTranslationUnit())
- LookupCtx = LookupCtx->getParent();
- isDependent = LookupCtx && LookupCtx->isDependentContext();
} else if (ObjectTypePtr) {
LookupCtx = computeDeclContext(SearchType);
isDependent = SearchType->isDependentType();
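
The two C++03 lookup cases being kept, in source form (illustrative; names
hypothetical):

    namespace N { struct A { }; }
    void f(N::A *p) {
      p->N::~A();      // ~A looked up as a type in the namespace scope N
      p->N::A::~A();   // both class-names looked up in the scope designated
    }                  // by the nested-name-specifier
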
@@ -284,7 +265,10 @@ Sema::OwningExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
// that is the operand of typeid are always ignored.
// If the type of the type-id is a class type or a reference to a class
// type, the class shall be completely-defined.
- QualType T = Operand->getType().getNonReferenceType();
+ Qualifiers Quals;
+ QualType T
+ = Context.getUnqualifiedArrayType(Operand->getType().getNonReferenceType(),
+ Quals);
if (T->getAs<RecordType>() &&
RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
return ExprError();
@@ -328,9 +312,11 @@ Sema::OwningExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
// cv-qualified type, the result of the typeid expression refers to a
// std::type_info object representing the cv-unqualified referenced
// type.
- if (T.hasQualifiers()) {
- ImpCastExprToType(E, T.getUnqualifiedType(), CastExpr::CK_NoOp,
- E->isLvalue(Context));
+ Qualifiers Quals;
+ QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals);
+ if (!Context.hasSameType(T, UnqualT)) {
+ T = UnqualT;
+ ImpCastExprToType(E, UnqualT, CastExpr::CK_NoOp, E->isLvalue(Context));
Operand.release();
Operand = Owned(E);
}
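
The practical effect of switching to getUnqualifiedArrayType (illustrative
sketch):

    #include <typeinfo>
    void f() {
      const int arr[4] = { };
      // cv-qualifiers are now stripped even through array types, so both
      // operands refer to the same std::type_info:
      bool same = typeid(arr) == typeid(int[4]);   // true
    }
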
@@ -453,11 +439,28 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *&E) {
return true;
E = Res.takeAs<Expr>();
+ // If the exception has class type, we need additional handling.
+ const RecordType *RecordTy = Ty->getAs<RecordType>();
+ if (!RecordTy)
+ return false;
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+
// If we are throwing a polymorphic class type or pointer thereof,
// exception handling will make use of the vtable.
- if (const RecordType *RecordTy = Ty->getAs<RecordType>())
- MarkVTableUsed(ThrowLoc, cast<CXXRecordDecl>(RecordTy->getDecl()));
-
+ MarkVTableUsed(ThrowLoc, RD);
+
+ // If the class has a non-trivial destructor, we must be able to call it.
+ if (RD->hasTrivialDestructor())
+ return false;
+
+ CXXDestructorDecl *Destructor
+ = const_cast<CXXDestructorDecl*>(LookupDestructor(RD));
+ if (!Destructor)
+ return false;
+
+ MarkDeclarationReferenced(E->getExprLoc(), Destructor);
+ CheckDestructorAccess(E->getExprLoc(), Destructor,
+ PDiag(diag::err_access_dtor_exception) << Ty);
return false;
}
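
A sketch of what the added destructor checks catch (names hypothetical):

    class Guarded {
      ~Guarded();          // private destructor
    public:
      Guarded();
    };
    void f() {
      throw Guarded();     // now diagnosed: the EH machinery must be able to
    }                      // destroy the exception object
                           // (err_access_dtor_exception)
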
@@ -537,33 +540,26 @@ Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, TypeTy *TypeRep,
exprs.release();
- return Owned(new (Context) CXXFunctionalCastExpr(Ty.getNonReferenceType(),
+ return Owned(new (Context) CXXFunctionalCastExpr(
+ Ty.getNonLValueExprType(Context),
TInfo, TyBeginLoc, Kind,
Exprs[0], BasePath,
RParenLoc));
}
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
-
- if (NumExprs > 1 || !Record->hasTrivialConstructor() ||
- !Record->hasTrivialDestructor()) {
- InitializedEntity Entity = InitializedEntity::InitializeTemporary(Ty);
- InitializationKind Kind
- = NumExprs ? InitializationKind::CreateDirect(TypeRange.getBegin(),
- LParenLoc, RParenLoc)
- : InitializationKind::CreateValue(TypeRange.getBegin(),
- LParenLoc, RParenLoc);
- InitializationSequence InitSeq(*this, Entity, Kind, Exprs, NumExprs);
- OwningExprResult Result = InitSeq.Perform(*this, Entity, Kind,
- move(exprs));
-
- // FIXME: Improve AST representation?
- return move(Result);
- }
-
- // Fall through to value-initialize an object of class type that
- // doesn't have a user-declared default constructor.
+ if (Ty->isRecordType()) {
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(Ty);
+ InitializationKind Kind
+ = NumExprs ? InitializationKind::CreateDirect(TypeRange.getBegin(),
+ LParenLoc, RParenLoc)
+ : InitializationKind::CreateValue(TypeRange.getBegin(),
+ LParenLoc, RParenLoc);
+ InitializationSequence InitSeq(*this, Entity, Kind, Exprs, NumExprs);
+ OwningExprResult Result = InitSeq.Perform(*this, Entity, Kind,
+ move(exprs));
+
+ // FIXME: Improve AST representation?
+ return move(Result);
}
// C++ [expr.type.conv]p1:
@@ -582,7 +578,7 @@ Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, TypeTy *TypeRep,
// rvalue of the specified type, which is value-initialized.
//
exprs.release();
- return Owned(new (Context) CXXZeroInitValueExpr(Ty, TyBeginLoc, RParenLoc));
+ return Owned(new (Context) CXXScalarValueInitExpr(Ty, TyBeginLoc, RParenLoc));
}
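
The expression form the renamed node represents (sketch):

    int    i = int();      // value-initialized scalar: 0
    double d = double();   // likewise: 0.0
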
@@ -594,7 +590,7 @@ Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, TypeTy *TypeRep,
Action::OwningExprResult
Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
- SourceLocation PlacementRParen, bool ParenTypeId,
+ SourceLocation PlacementRParen, SourceRange TypeIdParens,
Declarator &D, SourceLocation ConstructorLParen,
MultiExprArg ConstructorArgs,
SourceLocation ConstructorRParen) {
@@ -610,17 +606,6 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
return ExprError(Diag(Chunk.Loc, diag::err_array_new_needs_size)
<< D.getSourceRange());
- if (ParenTypeId) {
- // Can't have dynamic array size when the type-id is in parentheses.
- Expr *NumElts = (Expr *)Chunk.Arr.NumElts;
- if (!NumElts->isTypeDependent() && !NumElts->isValueDependent() &&
- !NumElts->isIntegerConstantExpr(Context)) {
- Diag(D.getTypeObject(0).Loc, diag::err_new_paren_array_nonconst)
- << NumElts->getSourceRange();
- return ExprError();
- }
- }
-
ArraySize = static_cast<Expr*>(Chunk.Arr.NumElts);
D.DropFirstTypeObject();
}
@@ -644,19 +629,20 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
}
//FIXME: Store TypeSourceInfo in CXXNew expression.
- TypeSourceInfo *TInfo = 0;
- QualType AllocType = GetTypeForDeclarator(D, /*Scope=*/0, &TInfo);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, /*Scope=*/0);
+ QualType AllocType = TInfo->getType();
if (D.isInvalidType())
return ExprError();
-
+
+ SourceRange R = TInfo->getTypeLoc().getSourceRange();
return BuildCXXNew(StartLoc, UseGlobal,
PlacementLParen,
move(PlacementArgs),
PlacementRParen,
- ParenTypeId,
+ TypeIdParens,
AllocType,
D.getSourceRange().getBegin(),
- D.getSourceRange(),
+ R,
Owned(ArraySize),
ConstructorLParen,
move(ConstructorArgs),
@@ -668,7 +654,7 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
- bool ParenTypeId,
+ SourceRange TypeIdParens,
QualType AllocType,
SourceLocation TypeLoc,
SourceRange TypeRange,
@@ -697,11 +683,29 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
// or enumeration type with a non-negative value."
Expr *ArraySize = (Expr *)ArraySizeE.get();
if (ArraySize && !ArraySize->isTypeDependent()) {
+
QualType SizeType = ArraySize->getType();
- if (!SizeType->isIntegralType() && !SizeType->isEnumeralType())
- return ExprError(Diag(ArraySize->getSourceRange().getBegin(),
- diag::err_array_size_not_integral)
- << SizeType << ArraySize->getSourceRange());
+
+ OwningExprResult ConvertedSize
+ = ConvertToIntegralOrEnumerationType(StartLoc, move(ArraySizeE),
+ PDiag(diag::err_array_size_not_integral),
+ PDiag(diag::err_array_size_incomplete_type)
+ << ArraySize->getSourceRange(),
+ PDiag(diag::err_array_size_explicit_conversion),
+ PDiag(diag::note_array_size_conversion),
+ PDiag(diag::err_array_size_ambiguous_conversion),
+ PDiag(diag::note_array_size_conversion),
+ PDiag(getLangOptions().CPlusPlus0x? 0
+ : diag::ext_array_size_conversion));
+ if (ConvertedSize.isInvalid())
+ return ExprError();
+
+ ArraySize = ConvertedSize.takeAs<Expr>();
+ ArraySizeE = Owned(ArraySize);
+ SizeType = ArraySize->getType();
+ if (!SizeType->isIntegralOrEnumerationType())
+ return ExprError();
+
// Let's see if this is a constant < 0. If so, we reject it out of hand.
// We don't care about special rules, so we tell the machinery it's not
// evaluated - it gives us a result in more cases.
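
A sketch of the conversion now routed through
ConvertToIntegralOrEnumerationType (accepted outright in C++0x; an extension
warning, ext_array_size_conversion, in C++03; the Size type is hypothetical):

    struct Size { operator int() const { return 8; } };
    int *p = new int[Size()];   // array size converted via operator int()
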
@@ -714,6 +718,14 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
return ExprError(Diag(ArraySize->getSourceRange().getBegin(),
diag::err_typecheck_negative_array_size)
<< ArraySize->getSourceRange());
+ } else if (TypeIdParens.isValid()) {
+ // Can't have dynamic array size when the type-id is in parentheses.
+ Diag(ArraySize->getLocStart(), diag::ext_new_paren_array_nonconst)
+ << ArraySize->getSourceRange()
+ << FixItHint::CreateRemoval(TypeIdParens.getBegin())
+ << FixItHint::CreateRemoval(TypeIdParens.getEnd());
+
+ TypeIdParens = SourceRange();
}
}
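
The construct downgraded here from hard error to extension warning, with a
fix-it that removes the parentheses (sketch):

    void f(unsigned n) {
      int *p = new (int[n]);   // ext_new_paren_array_nonconst; fix-it
    }                          // suggests "new int[n]"
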
@@ -828,13 +840,15 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
PlacementArgs.release();
ConstructorArgs.release();
ArraySizeE.release();
+
+ // FIXME: The TypeSourceInfo should also be included in CXXNewExpr.
return Owned(new (Context) CXXNewExpr(Context, UseGlobal, OperatorNew,
- PlaceArgs, NumPlaceArgs, ParenTypeId,
+ PlaceArgs, NumPlaceArgs, TypeIdParens,
ArraySize, Constructor, Init,
ConsArgs, NumConsArgs, OperatorDelete,
ResultType, StartLoc,
Init ? ConstructorRParen :
- SourceLocation()));
+ TypeRange.getEnd()));
}
/// CheckAllocatedType - Checks that a type is suitable as the allocated type
@@ -1181,20 +1195,11 @@ void Sema::DeclareGlobalNewDelete() {
// "std" or "bad_alloc" as necessary to form the exception specification.
// However, we do not make these implicit declarations visible to name
// lookup.
- if (!StdNamespace) {
- // The "std" namespace has not yet been defined, so build one implicitly.
- StdNamespace = NamespaceDecl::Create(Context,
- Context.getTranslationUnitDecl(),
- SourceLocation(),
- &PP.getIdentifierTable().get("std"));
- StdNamespace->setImplicit(true);
- }
-
if (!StdBadAlloc) {
// The "std::bad_alloc" class has not yet been declared, so build it
// implicitly.
StdBadAlloc = CXXRecordDecl::Create(Context, TTK_Class,
- StdNamespace,
+ getStdNamespace(),
SourceLocation(),
&PP.getIdentifierTable().get("bad_alloc"),
SourceLocation(), 0);
@@ -1291,11 +1296,15 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
if (Found.isAmbiguous())
return true;
+ Found.suppressDiagnostics();
+
for (LookupResult::iterator F = Found.begin(), FEnd = Found.end();
F != FEnd; ++F) {
if (CXXMethodDecl *Delete = dyn_cast<CXXMethodDecl>(*F))
if (Delete->isUsualDeallocationFunction()) {
Operator = Delete;
+ CheckAllocationAccess(StartLoc, SourceRange(), Found.getNamingClass(),
+ F.getPair());
return false;
}
}
@@ -1436,7 +1445,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
return ExprError();
if (!RD->hasTrivialDestructor())
- if (const CXXDestructorDecl *Dtor = RD->getDestructor(Context))
+ if (const CXXDestructorDecl *Dtor = LookupDestructor(RD))
MarkDeclarationReferenced(StartLoc,
const_cast<CXXDestructorDecl*>(Dtor));
}
@@ -1517,7 +1526,7 @@ Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
// be converted to an rvalue of type "pointer to char"; a wide
// string literal can be converted to an rvalue of type "pointer
// to wchar_t" (C++ 4.2p2).
- if (StringLiteral *StrLit = dyn_cast<StringLiteral>(From))
+ if (StringLiteral *StrLit = dyn_cast<StringLiteral>(From->IgnoreParens()))
if (const PointerType *ToPtrType = ToType->getAs<PointerType>())
if (const BuiltinType *ToPointeeType
= ToPtrType->getPointeeType()->getAs<BuiltinType>()) {
@@ -1776,7 +1785,7 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
break;
case ICK_Floating_Integral:
- if (ToType->isFloatingType())
+ if (ToType->isRealFloatingType())
ImpCastExprToType(From, ToType, CastExpr::CK_IntegralToFloating);
else
ImpCastExprToType(From, ToType, CastExpr::CK_FloatingToIntegral);
@@ -1871,7 +1880,7 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
case ICK_Qualification:
// FIXME: Not sure about lvalue vs rvalue here in the presence of rvalue
// references.
- ImpCastExprToType(From, ToType.getNonReferenceType(),
+ ImpCastExprToType(From, ToType.getNonLValueExprType(Context),
CastExpr::CK_NoOp, ToType->isLValueReferenceType());
if (SCS.DeprecatedStringLiteralToCharPtr)
@@ -1973,7 +1982,7 @@ QualType Sema::CheckPointerToMemberOperands(
BasePath);
}
- if (isa<CXXZeroInitValueExpr>(rex->IgnoreParens())) {
+ if (isa<CXXScalarValueInitExpr>(rex->IgnoreParens())) {
// Diagnose use of pointer-to-member type which when used as
// the functional cast in a pointer-to-member expression.
Diag(Loc, diag::err_pointer_to_member_type) << isIndirect;
@@ -2583,6 +2592,16 @@ Sema::OwningExprResult Sema::MaybeBindToTemporary(Expr *E) {
if (FTy->getResultType()->isReferenceType())
return Owned(E);
}
+ else if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) {
+ QualType Ty = ME->getType();
+ if (const PointerType *PT = Ty->getAs<PointerType>())
+ Ty = PT->getPointeeType();
+ else if (const BlockPointerType *BPT = Ty->getAs<BlockPointerType>())
+ Ty = BPT->getPointeeType();
+ if (Ty->isReferenceType())
+ return Owned(E);
+ }
+
// That should be enough to guarantee that this type is complete.
// If it has a trivial destructor, we can avoid the extra copy.
@@ -2590,11 +2609,9 @@ Sema::OwningExprResult Sema::MaybeBindToTemporary(Expr *E) {
if (RD->hasTrivialDestructor())
return Owned(E);
- CXXTemporary *Temp = CXXTemporary::Create(Context,
- RD->getDestructor(Context));
+ CXXTemporary *Temp = CXXTemporary::Create(Context, LookupDestructor(RD));
ExprTemporaries.push_back(Temp);
- if (CXXDestructorDecl *Destructor =
- const_cast<CXXDestructorDecl*>(RD->getDestructor(Context))) {
+ if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) {
MarkDeclarationReferenced(E->getExprLoc(), Destructor);
CheckDestructorAccess(E->getExprLoc(), Destructor,
PDiag(diag::err_access_dtor_temp)
@@ -2819,7 +2836,7 @@ Sema::OwningExprResult Sema::BuildPseudoDestructorExpr(ExprArg Base,
if (ScopeTypeInfo) {
QualType ScopeType = ScopeTypeInfo->getType();
if (!ScopeType->isDependentType() && !ObjectType->isDependentType() &&
- !Context.hasSameType(ScopeType, ObjectType)) {
+ !Context.hasSameUnqualifiedType(ScopeType, ObjectType)) {
Diag(ScopeTypeInfo->getTypeLoc().getLocalSourceRange().getBegin(),
diag::err_pseudo_dtor_type_mismatch)
@@ -2891,7 +2908,8 @@ Sema::OwningExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, ExprArg Base,
// record types and dependent types matter.
void *ObjectTypePtrForLookup = 0;
if (!SS.isSet()) {
- ObjectTypePtrForLookup = (void *)ObjectType->getAs<RecordType>();
+ ObjectTypePtrForLookup = const_cast<RecordType*>(
+ ObjectType->getAs<RecordType>());
if (!ObjectTypePtrForLookup && ObjectType->isDependentType())
ObjectTypePtrForLookup = Context.DependentTy.getAsOpaquePtr();
}
@@ -3012,7 +3030,7 @@ CXXMemberCallExpr *Sema::BuildCXXMemberCallExpr(Expr *Exp,
MemberExpr *ME =
new (Context) MemberExpr(Exp, /*IsArrow=*/false, Method,
SourceLocation(), Method->getType());
- QualType ResultType = Method->getResultType().getNonReferenceType();
+ QualType ResultType = Method->getCallResultType();
MarkDeclarationReferenced(Exp->getLocStart(), Method);
CXXMemberCallExpr *CE =
new (Context) CXXMemberCallExpr(Context, ME, 0, 0, ResultType,
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
index 695a1be..9f43471 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
@@ -207,7 +207,7 @@ bool Sema::CheckMessageArgumentTypes(Expr **Args, unsigned NumArgs,
return false;
}
- ReturnType = Method->getResultType().getNonReferenceType();
+ ReturnType = Method->getSendResultType();
unsigned NumNamedArgs = Sel.getNumArgs();
// Method might have more arguments than selector indicates. This is due
@@ -346,7 +346,7 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
ObjCMethodDecl *Getter = IFace->lookupInstanceMethod(Sel);
if (DiagnosePropertyAccessorMismatch(PD, Getter, MemberLoc))
- ResTy = Getter->getResultType();
+ ResTy = Getter->getSendResultType();
return Owned(new (Context) ObjCPropertyRefExpr(PD, ResTy,
MemberLoc, BaseExpr));
}
@@ -402,7 +402,7 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
if (Getter) {
QualType PType;
- PType = Getter->getResultType();
+ PType = Getter->getSendResultType();
return Owned(new (Context) ObjCImplicitSetterGetterRefExpr(Getter, PType,
Setter, MemberLoc, BaseExpr));
}
@@ -510,7 +510,7 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
QualType PType;
if (Getter)
- PType = Getter->getResultType();
+ PType = Getter->getSendResultType();
else {
for (ObjCMethodDecl::param_iterator PI = Setter->param_begin(),
E = Setter->param_end(); PI != E; ++PI)
@@ -1007,6 +1007,12 @@ Sema::OwningExprResult Sema::BuildInstanceMessage(ExprArg ReceiverE,
if (CheckMessageArgumentTypes(Args, NumArgs, Sel, Method, false,
LBracLoc, RBracLoc, ReturnType))
return ExprError();
+
+ if (!ReturnType->isVoidType()) {
+ if (RequireCompleteType(LBracLoc, ReturnType,
+ diag::err_illegal_message_expr_incomplete_type))
+ return ExprError();
+ }
// Construct the appropriate ObjCMessageExpr instance.
Expr *Result;
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
index 20f0c79..7ad1775 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
@@ -523,8 +523,9 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
StructuredList->setSyntacticForm(IList);
CheckListElementTypes(Entity, IList, T, /*SubobjectIsDesignatorContext=*/true,
Index, StructuredList, StructuredIndex, TopLevelObject);
- IList->setType(T.getNonReferenceType());
- StructuredList->setType(T.getNonReferenceType());
+ QualType ExprTy = T.getNonLValueExprType(SemaRef.Context);
+ IList->setType(ExprTy);
+ StructuredList->setType(ExprTy);
if (hadError)
return;
@@ -877,10 +878,15 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
StructuredList, StructuredIndex);
++numEltsInit;
} else {
+ QualType VecType;
const VectorType *IVT = IType->getAs<VectorType>();
unsigned numIElts = IVT->getNumElements();
- QualType VecType = SemaRef.Context.getExtVectorType(elementType,
- numIElts);
+
+ if (IType->isExtVectorType())
+ VecType = SemaRef.Context.getExtVectorType(elementType, numIElts);
+ else
+ VecType = SemaRef.Context.getVectorType(elementType, numIElts,
+ IVT->getAltiVecSpecific());
CheckSubElementType(ElementEntity, IList, VecType, Index,
StructuredList, StructuredIndex);
numEltsInit += numIElts;
@@ -1114,7 +1120,7 @@ void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity,
}
// Emit warnings for missing struct field initializers.
- if (CheckForMissingFields && Field != FieldEnd &&
+ if (InitializedSomething && CheckForMissingFields && Field != FieldEnd &&
!Field->getType()->isIncompleteArrayType() && !DeclType->isUnionType()) {
// It is possible we have one or more unnamed bitfields remaining.
// Find first (if any) named field and emit warning.
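
A sketch of the warning's new gating on InitializedSomething (names
hypothetical):

    struct P { int x, y; };
    P a = { 1 };   // still warns: missing initializer for field 'y'
    P b = { };     // no longer warns: nothing was explicitly initialized
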
@@ -1711,7 +1717,7 @@ InitListChecker::getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
InitRange.getBegin(), 0, 0,
InitRange.getEnd());
- Result->setType(CurrentObjectType.getNonReferenceType());
+ Result->setType(CurrentObjectType.getNonLValueExprType(SemaRef.Context));
// Pre-allocate storage for the structured initializer list.
unsigned NumElements = 0;
@@ -1956,6 +1962,7 @@ DeclarationName InitializedEntity::getName() const {
case EK_Base:
case EK_ArrayElement:
case EK_VectorElement:
+ case EK_BlockElement:
return DeclarationName();
}
@@ -1977,6 +1984,7 @@ DeclaratorDecl *InitializedEntity::getDecl() const {
case EK_Base:
case EK_ArrayElement:
case EK_VectorElement:
+ case EK_BlockElement:
return 0;
}
@@ -1998,6 +2006,7 @@ bool InitializedEntity::allowsNRVO() const {
case EK_Base:
case EK_ArrayElement:
case EK_VectorElement:
+ case EK_BlockElement:
break;
}
@@ -2195,7 +2204,7 @@ static void TryListInitialization(Sema &S,
// FIXME: We only perform rudimentary checking of list
// initializations at this point, then assume that any list
// initialization of an array, aggregate, or scalar will be
- // well-formed. We we actually "perform" list initialization, we'll
+ // well-formed. When we actually "perform" list initialization, we'll
// do all of the necessary checking. C++0x initializer lists will
// force us to perform more checking here.
Sequence.setSequenceKind(InitializationSequence::ListInitialization);
@@ -2236,8 +2245,6 @@ static void TryListInitialization(Sema &S,
/// \brief Try a reference initialization that involves calling a conversion
/// function.
-///
-/// FIXME: look intos DRs 656, 896
static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -2271,11 +2278,8 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
// The type we're converting to is a class type. Enumerate its constructors
// to see if there is a suitable conversion.
CXXRecordDecl *T1RecordDecl = cast<CXXRecordDecl>(T1RecordType->getDecl());
- DeclarationName ConstructorName
- = S.Context.DeclarationNames.getCXXConstructorName(
- S.Context.getCanonicalType(T1).getUnqualifiedType());
DeclContext::lookup_iterator Con, ConEnd;
- for (llvm::tie(Con, ConEnd) = T1RecordDecl->lookup(ConstructorName);
+ for (llvm::tie(Con, ConEnd) = S.LookupConstructors(T1RecordDecl);
Con != ConEnd; ++Con) {
NamedDecl *D = *Con;
DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
@@ -2328,7 +2332,7 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
if (ConvTemplate)
Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
else
- Conv = cast<CXXConversionDecl>(*I);
+ Conv = cast<CXXConversionDecl>(D);
// If the conversion function doesn't return a reference type,
// it can't be considered for this conversion unless we're allowed to
@@ -2367,13 +2371,14 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
// Add the user-defined conversion step.
Sequence.AddUserConversionStep(Function, Best->FoundDecl,
- T2.getNonReferenceType());
+ T2.getNonLValueExprType(S.Context));
// Determine whether we need to perform derived-to-base or
// cv-qualification adjustments.
bool NewDerivedToBase = false;
Sema::ReferenceCompareResult NewRefRelationship
- = S.CompareReferenceRelationship(DeclLoc, T1, T2.getNonReferenceType(),
+ = S.CompareReferenceRelationship(DeclLoc, T1,
+ T2.getNonLValueExprType(S.Context),
NewDerivedToBase);
if (NewRefRelationship == Sema::Ref_Incompatible) {
// If the type we've converted to is not reference-related to the
@@ -2398,14 +2403,14 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
return OR_Success;
}
-/// \brief Attempt reference initialization (C++0x [dcl.init.list])
+/// \brief Attempt reference initialization (C++0x [dcl.init.ref])
static void TryReferenceInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
Expr *Initializer,
InitializationSequence &Sequence) {
Sequence.setSequenceKind(InitializationSequence::ReferenceBinding);
-
+
QualType DestType = Entity.getType();
QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
Qualifiers T1Quals;
@@ -2414,7 +2419,7 @@ static void TryReferenceInitialization(Sema &S,
Qualifiers T2Quals;
QualType T2 = S.Context.getUnqualifiedArrayType(cv2T2, T2Quals);
SourceLocation DeclLoc = Initializer->getLocStart();
-
+
// If the initializer is the address of an overloaded function, try
// to resolve the overloaded function. If all goes well, T2 is the
// type of the resulting function.
@@ -2428,29 +2433,33 @@ static void TryReferenceInitialization(Sema &S,
Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
return;
}
-
+
Sequence.AddAddressOverloadResolutionStep(Fn, Found);
cv2T2 = Fn->getType();
T2 = cv2T2.getUnqualifiedType();
}
-
+
// Compute some basic properties of the types and the initializer.
bool isLValueRef = DestType->isLValueReferenceType();
bool isRValueRef = !isLValueRef;
bool DerivedToBase = false;
- Expr::isLvalueResult InitLvalue = Initializer->isLvalue(S.Context);
+ Expr::Classification InitCategory = Initializer->Classify(S.Context);
Sema::ReferenceCompareResult RefRelationship
= S.CompareReferenceRelationship(DeclLoc, cv1T1, cv2T2, DerivedToBase);
-
+
// C++0x [dcl.init.ref]p5:
// A reference to type "cv1 T1" is initialized by an expression of type
// "cv2 T2" as follows:
//
// - If the reference is an lvalue reference and the initializer
// expression
+ // Note the analogous bullet points for rvalue refs to functions. Because
+ // there are no function rvalues in C++, rvalue refs to functions are treated
+ // like lvalue refs.
OverloadingResult ConvOvlResult = OR_Success;
- if (isLValueRef) {
- if (InitLvalue == Expr::LV_Valid &&
+ bool T1Function = T1->isFunctionType();
+ if (isLValueRef || T1Function) {
+ if (InitCategory.isLValue() &&
RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) {
// - is an lvalue (but is not a bit-field), and "cv1 T1" is
// reference-compatible with "cv2 T2," or
@@ -2478,10 +2487,13 @@ static void TryReferenceInitialization(Sema &S,
// with "cv3 T3" (this conversion is selected by enumerating the
// applicable conversion functions (13.3.1.6) and choosing the best
// one through overload resolution (13.3)),
- if (RefRelationship == Sema::Ref_Incompatible && T2->isRecordType()) {
+ // If we have an rvalue ref to function type here, the rhs must be
+ // an rvalue.
+ if (RefRelationship == Sema::Ref_Incompatible && T2->isRecordType() &&
+ (isLValueRef || InitCategory.isRValue())) {
ConvOvlResult = TryRefInitWithConversionFunction(S, Entity, Kind,
Initializer,
- /*AllowRValues=*/false,
+ /*AllowRValues=*/isRValueRef,
Sequence);
if (ConvOvlResult == OR_Success)
return;
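
Sketches of the C++0x cases this and the surrounding hunks now handle
(requires -std=c++0x; names hypothetical):

    int f();
    int (&&rf)() = f;    // rvalue ref to function bound like an lvalue ref,
                         // since there are no function rvalues

    struct S { operator int(); };
    int &&ri = S();      // class rvalue converted via a conversion function;
                         // allowed because AllowRValues is now isRValueRef
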
@@ -2492,19 +2504,20 @@ static void TryReferenceInitialization(Sema &S,
}
}
}
-
+
// - Otherwise, the reference shall be an lvalue reference to a
// non-volatile const type (i.e., cv1 shall be const), or the reference
// shall be an rvalue reference and the initializer expression shall
- // be an rvalue.
+ // be an rvalue or have a function type.
+ // We handled the function type stuff above.
if (!((isLValueRef && T1Quals.hasConst() && !T1Quals.hasVolatile()) ||
- (isRValueRef && InitLvalue != Expr::LV_Valid))) {
+ (isRValueRef && InitCategory.isRValue()))) {
if (ConvOvlResult && !Sequence.getFailedCandidateSet().empty())
Sequence.SetOverloadFailure(
InitializationSequence::FK_ReferenceInitOverloadFailed,
ConvOvlResult);
else if (isLValueRef)
- Sequence.SetFailed(InitLvalue == Expr::LV_Valid
+ Sequence.SetFailed(InitCategory.isLValue()
? (RefRelationship == Sema::Ref_Related
? InitializationSequence::FK_ReferenceInitDropsQualifiers
: InitializationSequence::FK_NonConstLValueReferenceBindingToUnrelated)
@@ -2512,15 +2525,15 @@ static void TryReferenceInitialization(Sema &S,
else
Sequence.SetFailed(
InitializationSequence::FK_RValueReferenceBindingToLValue);
-
+
return;
}
-
- // - If T1 and T2 are class types and
- if (T1->isRecordType() && T2->isRecordType()) {
+
+ // - [If T1 is not a function type], if T2 is a class type and
+ if (!T1Function && T2->isRecordType()) {
// - the initializer expression is an rvalue and "cv1 T1" is
// reference-compatible with "cv2 T2", or
- if (InitLvalue != Expr::LV_Valid &&
+ if (InitCategory.isRValue() &&
RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) {
// The corresponding bullet in C++03 [dcl.init.ref]p5 gives the
// compiler the freedom to perform a copy here or bind to the
@@ -2543,7 +2556,7 @@ static void TryReferenceInitialization(Sema &S,
Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
return;
}
-
+
// - T1 is not reference-related to T2 and the initializer expression
// can be implicitly converted to an rvalue of type "cv3 T3" (this
// conversion is selected by enumerating the applicable conversion
@@ -2576,15 +2589,17 @@ static void TryReferenceInitialization(Sema &S,
// from the initializer expression using the rules for a non-reference
// copy initialization (8.5). The reference is then bound to the
// temporary. [...]
+
// Determine whether we are allowed to call explicit constructors or
// explicit conversion operators.
bool AllowExplicit = (Kind.getKind() == InitializationKind::IK_Direct);
- ImplicitConversionSequence ICS
- = S.TryImplicitConversion(Initializer, cv1T1,
- /*SuppressUserConversions=*/false, AllowExplicit,
- /*FIXME:InOverloadResolution=*/false);
-
- if (ICS.isBad()) {
+
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1);
+
+ if (S.TryImplicitConversion(Sequence, TempEntity, Initializer,
+ /*SuppressUserConversions*/ false,
+ AllowExplicit,
+ /*FIXME:InOverloadResolution=*/false)) {
// FIXME: Use the conversion function set stored in ICS to turn
// this into an overloading ambiguity diagnostic. However, we need
// to keep that set as an OverloadCandidateSet rather than as some
@@ -2609,8 +2624,6 @@ static void TryReferenceInitialization(Sema &S,
return;
}
- // Perform the actual conversion.
- Sequence.AddConversionSequenceStep(ICS, cv1T1);
Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
return;
}
@@ -2661,11 +2674,8 @@ static void TryConstructorInitialization(Sema &S,
CXXRecordDecl *DestRecordDecl
= cast<CXXRecordDecl>(DestRecordType->getDecl());
- DeclarationName ConstructorName
- = S.Context.DeclarationNames.getCXXConstructorName(
- S.Context.getCanonicalType(DestType).getUnqualifiedType());
DeclContext::lookup_iterator Con, ConEnd;
- for (llvm::tie(Con, ConEnd) = DestRecordDecl->lookup(ConstructorName);
+ for (llvm::tie(Con, ConEnd) = S.LookupConstructors(DestRecordDecl);
Con != ConEnd; ++Con) {
NamedDecl *D = *Con;
DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
@@ -2764,8 +2774,7 @@ static void TryValueInitialization(Sema &S,
// zero-initialized and, if T’s implicitly-declared default
// constructor is non-trivial, that constructor is called.
if ((ClassDecl->getTagKind() == TTK_Class ||
- ClassDecl->getTagKind() == TTK_Struct) &&
- !ClassDecl->hasTrivialConstructor()) {
+ ClassDecl->getTagKind() == TTK_Struct)) {
Sequence.AddZeroInitializationStep(Entity.getType());
return TryConstructorInitialization(S, Entity, Kind, 0, 0, T, Sequence);
}
@@ -2841,15 +2850,11 @@ static void TryUserDefinedConversion(Sema &S,
// Try to complete the type we're converting to.
if (!S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
- DeclarationName ConstructorName
- = S.Context.DeclarationNames.getCXXConstructorName(
- S.Context.getCanonicalType(DestType).getUnqualifiedType());
DeclContext::lookup_iterator Con, ConEnd;
- for (llvm::tie(Con, ConEnd) = DestRecordDecl->lookup(ConstructorName);
+ for (llvm::tie(Con, ConEnd) = S.LookupConstructors(DestRecordDecl);
Con != ConEnd; ++Con) {
NamedDecl *D = *Con;
DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
- bool SuppressUserConversions = false;
// Find the constructor (which may be a template).
CXXConstructorDecl *Constructor = 0;
@@ -2858,17 +2863,8 @@ static void TryUserDefinedConversion(Sema &S,
if (ConstructorTmpl)
Constructor = cast<CXXConstructorDecl>(
ConstructorTmpl->getTemplatedDecl());
- else {
+ else
Constructor = cast<CXXConstructorDecl>(D);
-
- // If we're performing copy initialization using a copy constructor,
- // we suppress user-defined conversions on the arguments.
- // FIXME: Move constructors?
- if (Kind.getKind() == InitializationKind::IK_Copy &&
- Constructor->isCopyConstructor())
- SuppressUserConversions = true;
-
- }
if (!Constructor->isInvalidDecl() &&
Constructor->isConvertingConstructor(AllowExplicit)) {
@@ -2876,11 +2872,11 @@ static void TryUserDefinedConversion(Sema &S,
S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
/*ExplicitArgs*/ 0,
&Initializer, 1, CandidateSet,
- SuppressUserConversions);
+ /*SuppressUserConversions=*/true);
else
S.AddOverloadCandidate(Constructor, FoundDecl,
&Initializer, 1, CandidateSet,
- SuppressUserConversions);
+ /*SuppressUserConversions=*/true);
}
}
}
@@ -2948,7 +2944,7 @@ static void TryUserDefinedConversion(Sema &S,
}
// Add the user-defined conversion step that calls the conversion function.
- QualType ConvType = Function->getResultType().getNonReferenceType();
+ QualType ConvType = Function->getCallResultType();
if (ConvType->getAs<RecordType>()) {
// If we're converting to a class type, there may be an copy if
// the resulting temporary object (possible to create an object of
@@ -2973,25 +2969,22 @@ static void TryUserDefinedConversion(Sema &S,
}
}
-/// \brief Attempt an implicit conversion (C++ [conv]) converting from one
-/// non-class type to another.
-static void TryImplicitConversion(Sema &S,
- const InitializedEntity &Entity,
- const InitializationKind &Kind,
- Expr *Initializer,
- InitializationSequence &Sequence) {
+bool Sema::TryImplicitConversion(InitializationSequence &Sequence,
+ const InitializedEntity &Entity,
+ Expr *Initializer,
+ bool SuppressUserConversions,
+ bool AllowExplicitConversions,
+ bool InOverloadResolution) {
ImplicitConversionSequence ICS
- = S.TryImplicitConversion(Initializer, Entity.getType(),
- /*SuppressUserConversions=*/true,
- /*AllowExplicit=*/false,
- /*InOverloadResolution=*/false);
-
- if (ICS.isBad()) {
- Sequence.SetFailed(InitializationSequence::FK_ConversionFailed);
- return;
- }
-
+ = TryImplicitConversion(Initializer, Entity.getType(),
+ SuppressUserConversions,
+ AllowExplicitConversions,
+ InOverloadResolution);
+ if (ICS.isBad()) return true;
+
+ // Perform the actual conversion.
Sequence.AddConversionSequenceStep(ICS, Entity.getType());
+ return false;
}
InitializationSequence::InitializationSequence(Sema &S,
@@ -3125,8 +3118,13 @@ InitializationSequence::InitializationSequence(Sema &S,
// conversions (Clause 4) will be used, if necessary, to convert the
// initializer expression to the cv-unqualified version of the
// destination type; no user-defined conversions are considered.
- setSequenceKind(StandardConversion);
- TryImplicitConversion(S, Entity, Kind, Initializer, *this);
+ if (S.TryImplicitConversion(*this, Entity, Initializer,
+ /*SuppressUserConversions*/ true,
+ /*AllowExplicitConversions*/ false,
+ /*InOverloadResolution*/ false))
+ SetFailed(InitializationSequence::FK_ConversionFailed);
+ else
+ setSequenceKind(StandardConversion);
}
InitializationSequence::~InitializationSequence() {
@@ -3168,6 +3166,7 @@ getAssignmentAction(const InitializedEntity &Entity) {
case InitializedEntity::EK_Member:
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_BlockElement:
return Sema::AA_Initializing;
}
@@ -3186,6 +3185,7 @@ static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
case InitializedEntity::EK_Base:
case InitializedEntity::EK_VectorElement:
case InitializedEntity::EK_Exception:
+ case InitializedEntity::EK_BlockElement:
return false;
case InitializedEntity::EK_Parameter:
@@ -3205,6 +3205,7 @@ static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
case InitializedEntity::EK_New:
case InitializedEntity::EK_Base:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_BlockElement:
return false;
case InitializedEntity::EK_Variable:
@@ -3289,6 +3290,7 @@ static Sema::OwningExprResult CopyObject(Sema &S,
case InitializedEntity::EK_New:
case InitializedEntity::EK_Base:
case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_BlockElement:
Loc = CurInitExpr->getLocStart();
break;
}
@@ -3298,12 +3300,9 @@ static Sema::OwningExprResult CopyObject(Sema &S,
return move(CurInit);
// Perform overload resolution using the class's copy constructors.
- DeclarationName ConstructorName
- = S.Context.DeclarationNames.getCXXConstructorName(
- S.Context.getCanonicalType(S.Context.getTypeDeclType(Class)));
DeclContext::lookup_iterator Con, ConEnd;
OverloadCandidateSet CandidateSet(Loc);
- for (llvm::tie(Con, ConEnd) = Class->lookup(ConstructorName);
+ for (llvm::tie(Con, ConEnd) = S.LookupConstructors(Class);
Con != ConEnd; ++Con) {
// Only consider copy constructors.
CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(*Con);
@@ -3324,12 +3323,16 @@ static Sema::OwningExprResult CopyObject(Sema &S,
break;
case OR_No_Viable_Function:
- S.Diag(Loc, diag::err_temp_copy_no_viable)
+ S.Diag(Loc, IsExtraneousCopy && !S.isSFINAEContext()
+ ? diag::ext_rvalue_to_reference_temp_copy_no_viable
+ : diag::err_temp_copy_no_viable)
<< (int)Entity.getKind() << CurInitExpr->getType()
<< CurInitExpr->getSourceRange();
S.PrintOverloadCandidates(CandidateSet, Sema::OCD_AllCandidates,
&CurInitExpr, 1);
- return S.ExprError();
+ if (!IsExtraneousCopy || S.isSFINAEContext())
+ return S.ExprError();
+ return move(CurInit);
case OR_Ambiguous:
S.Diag(Loc, diag::err_temp_copy_ambiguous)
@@ -3353,7 +3356,7 @@ static Sema::OwningExprResult CopyObject(Sema &S,
CurInit.release(); // Ownership transferred into MultiExprArg, below.
S.CheckConstructorAccess(Loc, Constructor, Entity,
- Best->FoundDecl.getAccess());
+ Best->FoundDecl.getAccess(), IsExtraneousCopy);
if (IsExtraneousCopy) {
// If this is a totally extraneous copy for C++03 reference
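
A sketch of the C++03 extraneous-copy case now downgraded to a warning
outside SFINAE contexts (names hypothetical):

    struct NC { NC(); NC(NC&); };   // copy ctor takes a non-const reference
    const NC &r = NC();             // the extraneous C++03 temporary copy has
                                    // no viable copy constructor; now
                                    // ext_rvalue_to_reference_temp_copy_no_viable
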
@@ -3699,8 +3702,8 @@ InitializationSequence::Perform(Sema &S,
CurInitExpr = static_cast<Expr *>(CurInit.get());
QualType T = CurInitExpr->getType();
if (const RecordType *Record = T->getAs<RecordType>()) {
- CXXDestructorDecl *Destructor
- = cast<CXXRecordDecl>(Record->getDecl())->getDestructor(S.Context);
+ CXXDestructorDecl *Destructor
+ = S.LookupDestructor(cast<CXXRecordDecl>(Record->getDecl()));
S.CheckDestructorAccess(CurInitExpr->getLocStart(), Destructor,
S.PDiag(diag::err_access_dtor_temp) << T);
S.MarkDeclarationReferenced(CurInitExpr->getLocStart(), Destructor);
@@ -3836,7 +3839,7 @@ InitializationSequence::Perform(Sema &S,
} else if (Kind.getKind() == InitializationKind::IK_Value &&
S.getLangOptions().CPlusPlus &&
!Kind.isImplicitValueInit()) {
- CurInit = S.Owned(new (S.Context) CXXZeroInitValueExpr(Step->Type,
+ CurInit = S.Owned(new (S.Context) CXXScalarValueInitExpr(Step->Type,
Kind.getRange().getBegin(),
Kind.getRange().getEnd()));
} else {
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaInit.h b/contrib/llvm/tools/clang/lib/Sema/SemaInit.h
index a9064ed..44c36a7 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaInit.h
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaInit.h
@@ -66,7 +66,10 @@ public:
EK_Base,
/// \brief The entity being initialized is an element of a vector.
- EK_VectorElement
+ EK_VectorElement,
+ /// \brief The entity being initialized is a field of the block descriptor
+ /// for the copied-in C++ object.
+ EK_BlockElement
};
private:
@@ -166,6 +169,11 @@ public:
return InitializedEntity(EK_Result, ReturnLoc, Type, NRVO);
}
+ static InitializedEntity InitializeBlock(SourceLocation BlockVarLoc,
+ QualType Type, bool NRVO) {
+ return InitializedEntity(EK_BlockElement, BlockVarLoc, Type, NRVO);
+ }
+
/// \brief Create the initialization entity for an exception object.
static InitializedEntity InitializeException(SourceLocation ThrowLoc,
QualType Type, bool NRVO) {
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
index 4555a86..2e65183 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
@@ -447,11 +447,114 @@ static bool LookupBuiltin(Sema &S, LookupResult &R) {
return false;
}
+/// \brief Determine whether we can declare a special member function within
+/// the class at this point.
+static bool CanDeclareSpecialMemberFunction(ASTContext &Context,
+ const CXXRecordDecl *Class) {
+ // We need to have a definition for the class.
+ if (!Class->getDefinition() || Class->isDependentContext())
+ return false;
+
+ // We can't be in the middle of defining the class.
+ if (const RecordType *RecordTy
+ = Context.getTypeDeclType(Class)->getAs<RecordType>())
+ return !RecordTy->isBeingDefined();
+
+ return false;
+}
+
+void Sema::ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class) {
+ if (!CanDeclareSpecialMemberFunction(Context, Class))
+ return;
+
+ // If the default constructor has not yet been declared, do so now.
+ if (!Class->hasDeclaredDefaultConstructor())
+ DeclareImplicitDefaultConstructor(Class);
+
+ // If the copy constructor has not yet been declared, do so now.
+ if (!Class->hasDeclaredCopyConstructor())
+ DeclareImplicitCopyConstructor(Class);
+
+ // If the copy assignment operator has not yet been declared, do so now.
+ if (!Class->hasDeclaredCopyAssignment())
+ DeclareImplicitCopyAssignment(Class);
+
+ // If the destructor has not yet been declared, do so now.
+ if (!Class->hasDeclaredDestructor())
+ DeclareImplicitDestructor(Class);
+}
+
+/// \brief Determine whether this is the name of an implicitly-declared
+/// special member function.
+static bool isImplicitlyDeclaredMemberFunctionName(DeclarationName Name) {
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ return true;
+
+ case DeclarationName::CXXOperatorName:
+ return Name.getCXXOverloadedOperator() == OO_Equal;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/// \brief If there are any implicit member functions with the given name
+/// that need to be declared in the given declaration context, do so.
+static void DeclareImplicitMemberFunctionsWithName(Sema &S,
+ DeclarationName Name,
+ const DeclContext *DC) {
+ if (!DC)
+ return;
+
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
+ if (Record->getDefinition() &&
+ CanDeclareSpecialMemberFunction(S.Context, Record)) {
+ if (!Record->hasDeclaredDefaultConstructor())
+ S.DeclareImplicitDefaultConstructor(
+ const_cast<CXXRecordDecl *>(Record));
+ if (!Record->hasDeclaredCopyConstructor())
+ S.DeclareImplicitCopyConstructor(const_cast<CXXRecordDecl *>(Record));
+ }
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
+ if (Record->getDefinition() && !Record->hasDeclaredDestructor() &&
+ CanDeclareSpecialMemberFunction(S.Context, Record))
+ S.DeclareImplicitDestructor(const_cast<CXXRecordDecl *>(Record));
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ if (Name.getCXXOverloadedOperator() != OO_Equal)
+ break;
+
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
+ if (Record->getDefinition() && !Record->hasDeclaredCopyAssignment() &&
+ CanDeclareSpecialMemberFunction(S.Context, Record))
+ S.DeclareImplicitCopyAssignment(const_cast<CXXRecordDecl *>(Record));
+ break;
+
+ default:
+ break;
+ }
+}
+
// Adds all qualifying matches for a name within a decl context to the
// given lookup result. Returns true if any matches were found.
static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
bool Found = false;
+ // Lazily declare C++ special member functions.
+ if (S.getLangOptions().CPlusPlus)
+ DeclareImplicitMemberFunctionsWithName(S, R.getLookupName(), DC);
+
+ // Perform lookup into this declaration context.
DeclContext::lookup_const_iterator I, E;
for (llvm::tie(I, E) = DC->lookup(R.getLookupName()); I != E; ++I) {
NamedDecl *D = *I;
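
The laziness introduced above, seen from the user's side (illustrative
sketch):

    struct S { int v; };
    // None of S's implicit members are declared when the definition closes;
    // each one appears when name lookup first asks for its name:
    S a;        // constructor-name lookup -> DeclareImplicitDefaultConstructor
    S b(a);     //                          -> DeclareImplicitCopyConstructor
    b = a;      // operator= lookup         -> DeclareImplicitCopyAssignment
    // destructor-name lookup (e.g. ~S at end of scope)
    //                                      -> DeclareImplicitDestructor
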
@@ -640,6 +743,17 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
DeclarationName Name = R.getLookupName();
+ // If this is the name of an implicitly-declared special member function,
+ // go through the scope stack to implicitly declare
+ if (isImplicitlyDeclaredMemberFunctionName(Name)) {
+ for (Scope *PreS = S; PreS; PreS = PreS->getParent())
+ if (DeclContext *DC = static_cast<DeclContext *>(PreS->getEntity()))
+ DeclareImplicitMemberFunctionsWithName(*this, Name, DC);
+ }
+
+ // Implicitly declare member functions with the name we're looking for, if in
+ // fact we are in a scope where it matters.
+
Scope *Initial = S;
IdentifierResolver::iterator
I = IdResolver.begin(Name),
@@ -1127,7 +1241,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
// If this isn't a C++ class, we aren't allowed to look into base
// classes, we're done.
CXXRecordDecl *LookupRec = dyn_cast<CXXRecordDecl>(LookupCtx);
- if (!LookupRec)
+ if (!LookupRec || !LookupRec->getDefinition())
return false;
// If we're performing qualified name lookup into a dependent class,
@@ -1416,11 +1530,22 @@ bool Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
return true;
}
+namespace {
+ struct AssociatedLookup {
+ AssociatedLookup(Sema &S,
+ Sema::AssociatedNamespaceSet &Namespaces,
+ Sema::AssociatedClassSet &Classes)
+ : S(S), Namespaces(Namespaces), Classes(Classes) {
+ }
+
+ Sema &S;
+ Sema::AssociatedNamespaceSet &Namespaces;
+ Sema::AssociatedClassSet &Classes;
+ };
+}
+
static void
-addAssociatedClassesAndNamespaces(QualType T,
- ASTContext &Context,
- Sema::AssociatedNamespaceSet &AssociatedNamespaces,
- Sema::AssociatedClassSet &AssociatedClasses);
+addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType T);
static void CollectEnclosingNamespace(Sema::AssociatedNamespaceSet &Namespaces,
DeclContext *Ctx) {
@@ -1439,10 +1564,8 @@ static void CollectEnclosingNamespace(Sema::AssociatedNamespaceSet &Namespaces,
// \brief Add the associated classes and namespaces for argument-dependent
// lookup that involves a template argument (C++ [basic.lookup.koenig]p2).
static void
-addAssociatedClassesAndNamespaces(const TemplateArgument &Arg,
- ASTContext &Context,
- Sema::AssociatedNamespaceSet &AssociatedNamespaces,
- Sema::AssociatedClassSet &AssociatedClasses) {
+addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
+ const TemplateArgument &Arg) {
// C++ [basic.lookup.koenig]p2, last bullet:
// -- [...] ;
switch (Arg.getKind()) {
@@ -1453,9 +1576,7 @@ addAssociatedClassesAndNamespaces(const TemplateArgument &Arg,
// [...] the namespaces and classes associated with the types of the
// template arguments provided for template type parameters (excluding
// template template parameters)
- addAssociatedClassesAndNamespaces(Arg.getAsType(), Context,
- AssociatedNamespaces,
- AssociatedClasses);
+ addAssociatedClassesAndNamespaces(Result, Arg.getAsType());
break;
case TemplateArgument::Template: {
@@ -1467,9 +1588,9 @@ addAssociatedClassesAndNamespaces(const TemplateArgument &Arg,
= dyn_cast<ClassTemplateDecl>(Template.getAsTemplateDecl())) {
DeclContext *Ctx = ClassTemplate->getDeclContext();
if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
- AssociatedClasses.insert(EnclosingClass);
+ Result.Classes.insert(EnclosingClass);
// Add the associated namespace for this class.
- CollectEnclosingNamespace(AssociatedNamespaces, Ctx);
+ CollectEnclosingNamespace(Result.Namespaces, Ctx);
}
break;
}
@@ -1485,9 +1606,7 @@ addAssociatedClassesAndNamespaces(const TemplateArgument &Arg,
for (TemplateArgument::pack_iterator P = Arg.pack_begin(),
PEnd = Arg.pack_end();
P != PEnd; ++P)
- addAssociatedClassesAndNamespaces(*P, Context,
- AssociatedNamespaces,
- AssociatedClasses);
+ addAssociatedClassesAndNamespaces(Result, *P);
break;
}
}
@@ -1496,10 +1615,13 @@ addAssociatedClassesAndNamespaces(const TemplateArgument &Arg,
// argument-dependent lookup with an argument of class type
// (C++ [basic.lookup.koenig]p2).
static void
-addAssociatedClassesAndNamespaces(CXXRecordDecl *Class,
- ASTContext &Context,
- Sema::AssociatedNamespaceSet &AssociatedNamespaces,
- Sema::AssociatedClassSet &AssociatedClasses) {
+addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
+ CXXRecordDecl *Class) {
+
+ // Just silently ignore anything whose name is __va_list_tag.
+ if (Class->getDeclName() == Result.S.VAListTagName)
+ return;
+
// C++ [basic.lookup.koenig]p2:
// [...]
// -- If T is a class type (including unions), its associated
@@ -1511,13 +1633,13 @@ addAssociatedClassesAndNamespaces(CXXRecordDecl *Class,
// Add the class of which it is a member, if any.
DeclContext *Ctx = Class->getDeclContext();
if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
- AssociatedClasses.insert(EnclosingClass);
+ Result.Classes.insert(EnclosingClass);
// Add the associated namespace for this class.
- CollectEnclosingNamespace(AssociatedNamespaces, Ctx);
+ CollectEnclosingNamespace(Result.Namespaces, Ctx);
// Add the class itself. If we've already seen this class, we don't
// need to visit base classes.
- if (!AssociatedClasses.insert(Class))
+ if (!Result.Classes.insert(Class))
return;
// -- If T is a template-id, its associated namespaces and classes are
@@ -1533,15 +1655,13 @@ addAssociatedClassesAndNamespaces(CXXRecordDecl *Class,
= dyn_cast<ClassTemplateSpecializationDecl>(Class)) {
DeclContext *Ctx = Spec->getSpecializedTemplate()->getDeclContext();
if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
- AssociatedClasses.insert(EnclosingClass);
+ Result.Classes.insert(EnclosingClass);
// Add the associated namespace for this class.
- CollectEnclosingNamespace(AssociatedNamespaces, Ctx);
+ CollectEnclosingNamespace(Result.Namespaces, Ctx);
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
- addAssociatedClassesAndNamespaces(TemplateArgs[I], Context,
- AssociatedNamespaces,
- AssociatedClasses);
+ addAssociatedClassesAndNamespaces(Result, TemplateArgs[I]);
}
// Only recurse into base classes for complete types.
@@ -1573,10 +1693,10 @@ addAssociatedClassesAndNamespaces(CXXRecordDecl *Class,
if (!BaseType)
continue;
CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(BaseType->getDecl());
- if (AssociatedClasses.insert(BaseDecl)) {
+ if (Result.Classes.insert(BaseDecl)) {
// Find the associated namespace for this base class.
DeclContext *BaseCtx = BaseDecl->getDeclContext();
- CollectEnclosingNamespace(AssociatedNamespaces, BaseCtx);
+ CollectEnclosingNamespace(Result.Namespaces, BaseCtx);
// Make sure we visit the bases of this base class.
if (BaseDecl->bases_begin() != BaseDecl->bases_end())
@@ -1590,10 +1710,7 @@ addAssociatedClassesAndNamespaces(CXXRecordDecl *Class,
// argument-dependent lookup with an argument of type T
// (C++ [basic.lookup.koenig]p2).
static void
-addAssociatedClassesAndNamespaces(QualType T,
- ASTContext &Context,
- Sema::AssociatedNamespaceSet &AssociatedNamespaces,
- Sema::AssociatedClassSet &AssociatedClasses) {
+addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
// C++ [basic.lookup.koenig]p2:
//
// For each argument type T in the function call, there is a set
@@ -1604,109 +1721,137 @@ addAssociatedClassesAndNamespaces(QualType T,
// argument). Typedef names and using-declarations used to specify
// the types do not contribute to this set. The sets of namespaces
// and classes are determined in the following way:
- T = Context.getCanonicalType(T).getUnqualifiedType();
- // -- If T is a pointer to U or an array of U, its associated
- // namespaces and classes are those associated with U.
- //
- // We handle this by unwrapping pointer and array types immediately,
- // to avoid unnecessary recursion.
+ llvm::SmallVector<const Type *, 16> Queue;
+ const Type *T = Ty->getCanonicalTypeInternal().getTypePtr();
+
while (true) {
- if (const PointerType *Ptr = T->getAs<PointerType>())
- T = Ptr->getPointeeType();
- else if (const ArrayType *Ptr = Context.getAsArrayType(T))
- T = Ptr->getElementType();
- else
+ switch (T->getTypeClass()) {
+
+#define TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ // T is canonical. We can also ignore dependent types because
+ // we don't need to do ADL at the definition point, but if we
+ // wanted to implement template export (or if we find some other
+ // use for associated classes and namespaces...) this would be
+ // wrong.
break;
- }
- // -- If T is a fundamental type, its associated sets of
- // namespaces and classes are both empty.
- if (T->getAs<BuiltinType>())
- return;
+ // -- If T is a pointer to U or an array of U, its associated
+ // namespaces and classes are those associated with U.
+ case Type::Pointer:
+ T = cast<PointerType>(T)->getPointeeType().getTypePtr();
+ continue;
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ T = cast<ArrayType>(T)->getElementType().getTypePtr();
+ continue;
- // -- If T is a class type (including unions), its associated
- // classes are: the class itself; the class of which it is a
- // member, if any; and its direct and indirect base
- // classes. Its associated namespaces are the namespaces in
- // which its associated classes are defined.
- if (const RecordType *ClassType = T->getAs<RecordType>())
- if (CXXRecordDecl *ClassDecl
- = dyn_cast<CXXRecordDecl>(ClassType->getDecl())) {
- addAssociatedClassesAndNamespaces(ClassDecl, Context,
- AssociatedNamespaces,
- AssociatedClasses);
- return;
+ // -- If T is a fundamental type, its associated sets of
+ // namespaces and classes are both empty.
+ case Type::Builtin:
+ break;
+
+ // -- If T is a class type (including unions), its associated
+ // classes are: the class itself; the class of which it is a
+ // member, if any; and its direct and indirect base
+ // classes. Its associated namespaces are the namespaces in
+ // which its associated classes are defined.
+ case Type::Record: {
+ CXXRecordDecl *Class
+ = cast<CXXRecordDecl>(cast<RecordType>(T)->getDecl());
+ addAssociatedClassesAndNamespaces(Result, Class);
+ break;
}
- // -- If T is an enumeration type, its associated namespace is
- // the namespace in which it is defined. If it is class
- // member, its associated class is the member’s class; else
- // it has no associated class.
- if (const EnumType *EnumT = T->getAs<EnumType>()) {
- EnumDecl *Enum = EnumT->getDecl();
+ // -- If T is an enumeration type, its associated namespace is
+ // the namespace in which it is defined. If it is a class
+ // member, its associated class is the member’s class; else
+ // it has no associated class.
+ case Type::Enum: {
+ EnumDecl *Enum = cast<EnumType>(T)->getDecl();
- DeclContext *Ctx = Enum->getDeclContext();
- if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
- AssociatedClasses.insert(EnclosingClass);
+ DeclContext *Ctx = Enum->getDeclContext();
+ if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
+ Result.Classes.insert(EnclosingClass);
- // Add the associated namespace for this class.
- CollectEnclosingNamespace(AssociatedNamespaces, Ctx);
+ // Add the associated namespace for this class.
+ CollectEnclosingNamespace(Result.Namespaces, Ctx);
- return;
- }
+ break;
+ }
- // -- If T is a function type, its associated namespaces and
- // classes are those associated with the function parameter
- // types and those associated with the return type.
- if (const FunctionType *FnType = T->getAs<FunctionType>()) {
- // Return type
- addAssociatedClassesAndNamespaces(FnType->getResultType(),
- Context,
- AssociatedNamespaces, AssociatedClasses);
+ // -- If T is a function type, its associated namespaces and
+ // classes are those associated with the function parameter
+ // types and those associated with the return type.
+ case Type::FunctionProto: {
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ Queue.push_back(Arg->getTypePtr());
+ // fallthrough
+ }
+ case Type::FunctionNoProto: {
+ const FunctionType *FnType = cast<FunctionType>(T);
+ T = FnType->getResultType().getTypePtr();
+ continue;
+ }
- const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FnType);
- if (!Proto)
- return;
+ // -- If T is a pointer to a member function of a class X, its
+ // associated namespaces and classes are those associated
+ // with the function parameter types and return type,
+ // together with those associated with X.
+ //
+ // -- If T is a pointer to a data member of class X, its
+ // associated namespaces and classes are those associated
+ // with the member type together with those associated with
+ // X.
+ case Type::MemberPointer: {
+ const MemberPointerType *MemberPtr = cast<MemberPointerType>(T);
+
+ // Queue up the class type into which this points.
+ Queue.push_back(MemberPtr->getClass());
+
+ // And directly continue with the pointee type.
+ T = MemberPtr->getPointeeType().getTypePtr();
+ continue;
+ }
- // Argument types
- for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
- ArgEnd = Proto->arg_type_end();
- Arg != ArgEnd; ++Arg)
- addAssociatedClassesAndNamespaces(*Arg, Context,
- AssociatedNamespaces, AssociatedClasses);
+ // As an extension, treat this like a normal pointer.
+ case Type::BlockPointer:
+ T = cast<BlockPointerType>(T)->getPointeeType().getTypePtr();
+ continue;
- return;
- }
+ // References aren't covered by the standard, but that's such an
+ // obvious defect that we cover them anyway.
+ case Type::LValueReference:
+ case Type::RValueReference:
+ T = cast<ReferenceType>(T)->getPointeeType().getTypePtr();
+ continue;
- // -- If T is a pointer to a member function of a class X, its
- // associated namespaces and classes are those associated
- // with the function parameter types and return type,
- // together with those associated with X.
- //
- // -- If T is a pointer to a data member of class X, its
- // associated namespaces and classes are those associated
- // with the member type together with those associated with
- // X.
- if (const MemberPointerType *MemberPtr = T->getAs<MemberPointerType>()) {
- // Handle the type that the pointer to member points to.
- addAssociatedClassesAndNamespaces(MemberPtr->getPointeeType(),
- Context,
- AssociatedNamespaces,
- AssociatedClasses);
-
- // Handle the class type into which this points.
- if (const RecordType *Class = MemberPtr->getClass()->getAs<RecordType>())
- addAssociatedClassesAndNamespaces(cast<CXXRecordDecl>(Class->getDecl()),
- Context,
- AssociatedNamespaces,
- AssociatedClasses);
+ // These are treated like fundamental types: no associated sets.
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Complex:
+ break;
- return;
- }
+ // These are ignored by ADL.
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ break;
+ }
- // FIXME: What about block pointers?
- // FIXME: What about Objective-C message sends?
+ if (Queue.empty()) break;
+ T = Queue.back();
+ Queue.pop_back();
+ }
}
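
The rewrite above replaces deep recursion over compound types with an
explicit worklist: single-constituent types (pointers, arrays, references)
are unwrapped in place by reassigning T and continuing, while types with
more than one constituent (member pointers, function prototypes) push the
extra types onto Queue. A stripped-down sketch of the same pattern, using a
hypothetical stand-in for Clang's Type hierarchy:

    #include <vector>

    // Hypothetical, simplified stand-in for Clang's Type hierarchy.
    struct Type {
      enum Kind { Builtin, Pointer, Function } kind;
      const Type *pointee;  // Pointer: pointee type
      const Type *ret;      // Function: return type
      const Type *param;    // Function: single parameter (for brevity)
    };

    // Walk every constituent type reachable from T without recursion,
    // mirroring the Queue/continue structure of the rewritten ADL walk.
    void walkConstituents(const Type *T, std::vector<const Type *> &leaves) {
      std::vector<const Type *> queue;   // plays the role of Queue above
      while (true) {
        switch (T->kind) {
        case Type::Builtin:
          leaves.push_back(T);           // leaf: nothing to unwrap
          break;
        case Type::Pointer:
          T = T->pointee;                // unwrap in place ('continue')
          continue;
        case Type::Function:
          queue.push_back(T->param);     // defer the parameter type
          T = T->ret;                    // continue with the return type
          continue;
        }
        if (queue.empty()) break;        // same exit as the code above
        T = queue.back();
        queue.pop_back();
      }
    }
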
/// \brief Find the associated classes and namespaces for
@@ -1723,6 +1868,8 @@ Sema::FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs,
AssociatedNamespaces.clear();
AssociatedClasses.clear();
+ AssociatedLookup Result(*this, AssociatedNamespaces, AssociatedClasses);
+
// C++ [basic.lookup.koenig]p2:
// For each argument type T in the function call, there is a set
// of zero or more associated namespaces and a set of zero or more
@@ -1734,9 +1881,7 @@ Sema::FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs,
Expr *Arg = Args[ArgIdx];
if (Arg->getType() != Context.OverloadTy) {
- addAssociatedClassesAndNamespaces(Arg->getType(), Context,
- AssociatedNamespaces,
- AssociatedClasses);
+ addAssociatedClassesAndNamespaces(Result, Arg->getType());
continue;
}
@@ -1752,17 +1897,11 @@ Sema::FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs,
if (unaryOp->getOpcode() == UnaryOperator::AddrOf)
Arg = unaryOp->getSubExpr();
- // TODO: avoid the copies. This should be easy when the cases
- // share a storage implementation.
- llvm::SmallVector<NamedDecl*, 8> Functions;
+ UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Arg);
+ if (!ULE) continue;
- if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Arg))
- Functions.append(ULE->decls_begin(), ULE->decls_end());
- else
- continue;
-
- for (llvm::SmallVectorImpl<NamedDecl*>::iterator I = Functions.begin(),
- E = Functions.end(); I != E; ++I) {
+ for (UnresolvedSetIterator I = ULE->decls_begin(), E = ULE->decls_end();
+ I != E; ++I) {
// Look through any using declarations to find the underlying function.
NamedDecl *Fn = (*I)->getUnderlyingDecl();
@@ -1772,9 +1911,7 @@ Sema::FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs,
// Add the classes and namespaces associated with the parameter
// types and return type of this function.
- addAssociatedClassesAndNamespaces(FDecl->getType(), Context,
- AssociatedNamespaces,
- AssociatedClasses);
+ addAssociatedClassesAndNamespaces(Result, FDecl->getType());
}
}
}
@@ -1874,6 +2011,36 @@ void Sema::LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
}
}
+/// \brief Look up the constructors for the given class.
+DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
+ // If the implicit constructors have not yet been declared, do so now.
+ if (CanDeclareSpecialMemberFunction(Context, Class)) {
+ if (!Class->hasDeclaredDefaultConstructor())
+ DeclareImplicitDefaultConstructor(Class);
+ if (!Class->hasDeclaredCopyConstructor())
+ DeclareImplicitCopyConstructor(Class);
+ }
+
+ CanQualType T = Context.getCanonicalType(Context.getTypeDeclType(Class));
+ DeclarationName Name = Context.DeclarationNames.getCXXConstructorName(T);
+ return Class->lookup(Name);
+}
+
+/// \brief Look for the destructor of the given class.
+///
+/// During semantic analysis, this routine should be used in lieu of
+/// CXXRecordDecl::getDestructor().
+///
+/// \returns The destructor for this class.
+CXXDestructorDecl *Sema::LookupDestructor(CXXRecordDecl *Class) {
+ // If the destructor has not yet been declared, do so now.
+ if (CanDeclareSpecialMemberFunction(Context, Class) &&
+ !Class->hasDeclaredDestructor())
+ DeclareImplicitDestructor(Class);
+
+ return Class->getDestructor();
+}
+
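
LookupConstructors and LookupDestructor exist so that implicit special
members are declared lazily, only when lookup actually needs them. In user
code, the implicit declarations must spring into existence the moment
something asks for them, e.g. (hypothetical example):

    struct Lazy {      // no constructors or destructor written by the user
      int x;
    };

    void use() {
      Lazy a;          // requires the implicit default constructor
      Lazy b(a);       // requires the implicit copy constructor
    }                  // destroying a and b requires the implicit destructor,
                       // which Sema::LookupDestructor declares on demand
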
void ADLResult::insert(NamedDecl *New) {
NamedDecl *&Old = Decls[cast<NamedDecl>(New->getCanonicalDecl())];
@@ -2172,6 +2339,9 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
if (Visited.visitedContext(Ctx->getPrimaryContext()))
return;
+ if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx))
+ Result.getSema().ForceDeclarationOfImplicitMembers(Class);
+
// Enumerate all of the results in this context.
for (DeclContext *CurCtx = Ctx->getPrimaryContext(); CurCtx;
CurCtx = CurCtx->getNextContext()) {
@@ -2556,7 +2726,7 @@ DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS,
bool EnteringContext,
CorrectTypoContext CTC,
const ObjCObjectPointerType *OPT) {
- if (Diags.hasFatalErrorOccurred())
+ if (Diags.hasFatalErrorOccurred() || !getLangOptions().SpellChecking)
return DeclarationName();
// Provide a stop gap for files that are just seriously broken. Trying
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
index 4c89a11..ff60599 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
@@ -41,7 +41,8 @@ Sema::DeclPtrTy Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
!(Attributes & ObjCDeclSpec::DQ_PR_retain) &&
!(Attributes & ObjCDeclSpec::DQ_PR_copy)));
- QualType T = GetTypeForDeclarator(FD.D, S);
+ TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S);
+ QualType T = TSI->getType();
if (T->isReferenceType()) {
Diag(AtLoc, diag::error_reference_property);
return DeclPtrTy();
@@ -51,18 +52,22 @@ Sema::DeclPtrTy Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
cast<ObjCContainerDecl>(ClassCategory.getAs<Decl>());
if (ObjCCategoryDecl *CDecl = dyn_cast<ObjCCategoryDecl>(ClassDecl))
- if (CDecl->IsClassExtension())
- return HandlePropertyInClassExtension(S, CDecl, AtLoc,
- FD, GetterSel, SetterSel,
- isAssign, isReadWrite,
- Attributes,
- isOverridingProperty, T,
- MethodImplKind);
-
+ if (CDecl->IsClassExtension()) {
+ DeclPtrTy Res = HandlePropertyInClassExtension(S, CDecl, AtLoc,
+ FD, GetterSel, SetterSel,
+ isAssign, isReadWrite,
+ Attributes,
+ isOverridingProperty, TSI,
+ MethodImplKind);
+ if (Res)
+ CheckObjCPropertyAttributes(Res, AtLoc, Attributes);
+ return Res;
+ }
+
DeclPtrTy Res = DeclPtrTy::make(CreatePropertyDecl(S, ClassDecl, AtLoc, FD,
- GetterSel, SetterSel,
- isAssign, isReadWrite,
- Attributes, T, MethodImplKind));
+ GetterSel, SetterSel,
+ isAssign, isReadWrite,
+ Attributes, TSI, MethodImplKind));
// Validate the attributes on the @property.
CheckObjCPropertyAttributes(Res, AtLoc, Attributes);
return Res;
@@ -76,7 +81,7 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl,
const bool isReadWrite,
const unsigned Attributes,
bool *isOverridingProperty,
- QualType T,
+ TypeSourceInfo *T,
tok::ObjCKeywordKind MethodImplKind) {
// Diagnose if this property is already in continuation class.
@@ -122,6 +127,10 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl,
CreatePropertyDecl(S, CCPrimary, AtLoc,
FD, GetterSel, SetterSel, isAssign, isReadWrite,
Attributes, T, MethodImplKind, DC);
+ // Mark the as-written attributes as empty, because this is not a
+ // user-written property declaration in the primary class.
+ PDecl->setPropertyAttributesAsWritten(ObjCPropertyDecl::OBJC_PR_noattr);
// This is a case of a continuation class adding a new property to the
// class. That is not what continuation classes were meant for, but gcc
// supports it and so must we.
@@ -133,7 +142,7 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl,
// The readonly attribute of property 'PIDecl' will be overridden
// by the continuation class's readwrite property attribute!
- unsigned PIkind = PIDecl->getPropertyAttributes();
+ unsigned PIkind = PIDecl->getPropertyAttributesAsWritten();
if (isReadWrite && (PIkind & ObjCPropertyDecl::OBJC_PR_readonly)) {
unsigned retainCopyNonatomic =
(ObjCPropertyDecl::OBJC_PR_retain |
@@ -190,11 +199,11 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
- QualType T,
+ TypeSourceInfo *TInfo,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC){
-
IdentifierInfo *PropertyId = FD.D.getIdentifier();
+ QualType T = TInfo->getType();
// Issue a warning if property is 'assign' as default and its object, which is
// gc'able conforms to NSCopying protocol
@@ -215,7 +224,7 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
DeclContext *DC = cast<DeclContext>(CDecl);
ObjCPropertyDecl *PDecl = ObjCPropertyDecl::Create(Context, DC,
FD.D.getIdentifierLoc(),
- PropertyId, AtLoc, T);
+ PropertyId, AtLoc, TInfo);
if (ObjCPropertyDecl *prevDecl =
ObjCPropertyDecl::findPropertyDecl(DC, PropertyId)) {
@@ -265,6 +274,8 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nonatomic);
+ PDecl->setPropertyAttributesAsWritten(PDecl->getPropertyAttributes());
+
if (MethodImplKind == tok::objc_required)
PDecl->setPropertyImplementation(ObjCPropertyDecl::Required);
else if (MethodImplKind == tok::objc_optional)
@@ -771,7 +782,8 @@ bool Sema::isPropertyReadonly(ObjCPropertyDecl *PDecl,
/// CollectImmediateProperties - This routine collects all properties in
/// the class and its conforming protocols, but not those in its super class.
void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl,
- llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap) {
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap,
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap) {
if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(),
E = IDecl->prop_end(); P != E; ++P) {
@@ -781,10 +793,7 @@ void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl,
// scan through class's protocols.
for (ObjCInterfaceDecl::protocol_iterator PI = IDecl->protocol_begin(),
E = IDecl->protocol_end(); PI != E; ++PI)
- // Exclude property for protocols which conform to class's super-class,
- // as super-class has to implement the property.
- if (!ProtocolConformsToSuperClass(IDecl, (*PI)))
- CollectImmediateProperties((*PI), PropMap);
+ CollectImmediateProperties((*PI), PropMap, SuperPropMap);
}
if (ObjCCategoryDecl *CATDecl = dyn_cast<ObjCCategoryDecl>(CDecl)) {
if (!CATDecl->IsClassExtension())
@@ -796,20 +805,25 @@ void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl,
// scan through class's protocols.
for (ObjCInterfaceDecl::protocol_iterator PI = CATDecl->protocol_begin(),
E = CATDecl->protocol_end(); PI != E; ++PI)
- CollectImmediateProperties((*PI), PropMap);
+ CollectImmediateProperties((*PI), PropMap, SuperPropMap);
}
else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(CDecl)) {
for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
E = PDecl->prop_end(); P != E; ++P) {
ObjCPropertyDecl *Prop = (*P);
- ObjCPropertyDecl *&PropEntry = PropMap[Prop->getIdentifier()];
- if (!PropEntry)
- PropEntry = Prop;
+ ObjCPropertyDecl *PropertyFromSuper = SuperPropMap[Prop->getIdentifier()];
+ // Exclude properties declared in protocols to which the class's
+ // super-class conforms, since the super-class has to implement them.
+ if (!PropertyFromSuper || PropertyFromSuper != Prop) {
+ ObjCPropertyDecl *&PropEntry = PropMap[Prop->getIdentifier()];
+ if (!PropEntry)
+ PropEntry = Prop;
+ }
}
// scan through protocol's protocols.
for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
E = PDecl->protocol_end(); PI != E; ++PI)
- CollectImmediateProperties((*PI), PropMap);
+ CollectImmediateProperties((*PI), PropMap, SuperPropMap);
}
}
@@ -854,33 +868,6 @@ static void CollectSuperClassPropertyImplementations(ObjCInterfaceDecl *CDecl,
}
}
-/// ProtocolConformsToSuperClass - Returns true if class's given protocol
-/// conforms to one of its super class's protocols.
-bool Sema::ProtocolConformsToSuperClass(const ObjCInterfaceDecl *IDecl,
- const ObjCProtocolDecl *PDecl) {
- if (const ObjCInterfaceDecl *CDecl = IDecl->getSuperClass()) {
- for (ObjCInterfaceDecl::protocol_iterator PI = CDecl->protocol_begin(),
- E = CDecl->protocol_end(); PI != E; ++PI) {
- if (ProtocolConformsToProtocol((*PI), PDecl))
- return true;
- return ProtocolConformsToSuperClass(CDecl, PDecl);
- }
- }
- return false;
-}
-
-bool Sema::ProtocolConformsToProtocol(const ObjCProtocolDecl *NestedProtocol,
- const ObjCProtocolDecl *PDecl) {
- if (PDecl->getIdentifier() == NestedProtocol->getIdentifier())
- return true;
- // scan through protocol's protocols.
- for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
- E = PDecl->protocol_end(); PI != E; ++PI)
- if (ProtocolConformsToProtocol(NestedProtocol, (*PI)))
- return true;
- return false;
-}
-
/// LookupPropertyDecl - Looks up a property in the current class and all
/// its protocols.
ObjCPropertyDecl *Sema::LookupPropertyDecl(const ObjCContainerDecl *CDecl,
@@ -943,6 +930,10 @@ void Sema::DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
Prop->getPropertyImplementation() == ObjCPropertyDecl::Optional ||
IMPDecl->FindPropertyImplIvarDecl(Prop->getIdentifier()))
continue;
+ // The property may already have been synthesized by the user.
+ if (IMPDecl->FindPropertyImplDecl(Prop->getIdentifier()))
+ continue;
+
ActOnPropertyImplDecl(S, IMPDecl->getLocation(), IMPDecl->getLocation(),
true, DeclPtrTy::make(IMPDecl),
Prop->getIdentifier(), Prop->getIdentifier());
@@ -952,8 +943,12 @@ void Sema::DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
const llvm::DenseSet<Selector>& InsMap) {
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> SuperPropMap;
+ if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl))
+ CollectSuperClassPropertyImplementations(IDecl, SuperPropMap);
+
llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> PropMap;
- CollectImmediateProperties(CDecl, PropMap);
+ CollectImmediateProperties(CDecl, PropMap, SuperPropMap);
if (PropMap.empty())
return;
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
index 2754d44..c4ab906 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
@@ -155,7 +155,9 @@ bool StandardConversionSequence::isPointerConversionToBool() const {
// check for their presence as well as checking whether FromType is
// a pointer.
if (getToType(1)->isBooleanType() &&
- (getFromType()->isPointerType() || getFromType()->isBlockPointerType() ||
+ (getFromType()->isPointerType() ||
+ getFromType()->isObjCObjectPointerType() ||
+ getFromType()->isBlockPointerType() ||
First == ICK_Array_To_Pointer || First == ICK_Function_To_Pointer))
return true;
@@ -498,19 +500,54 @@ void OverloadCandidateSet::clear() {
// identical (return types of functions are not part of the
// signature), IsOverload returns false and MatchedDecl will be set to
// point to the FunctionDecl for #2.
+//
+// 'NewIsUsingDecl' indicates that 'New' is being introduced
+// into a class by a using declaration. The rules for whether to hide
+// shadow declarations ignore some properties which otherwise figure
+// into a function template's signature.
Sema::OverloadKind
-Sema::CheckOverload(FunctionDecl *New, const LookupResult &Old,
- NamedDecl *&Match) {
+Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
+ NamedDecl *&Match, bool NewIsUsingDecl) {
for (LookupResult::iterator I = Old.begin(), E = Old.end();
I != E; ++I) {
- NamedDecl *OldD = (*I)->getUnderlyingDecl();
+ NamedDecl *OldD = *I;
+
+ bool OldIsUsingDecl = false;
+ if (isa<UsingShadowDecl>(OldD)) {
+ OldIsUsingDecl = true;
+
+ // We can always introduce two using declarations into the same
+ // context, even if they have identical signatures.
+ if (NewIsUsingDecl) continue;
+
+ OldD = cast<UsingShadowDecl>(OldD)->getTargetDecl();
+ }
+
+ // If either declaration was introduced by a using declaration,
+ // we'll need to use slightly different rules for matching.
+ // Essentially, these rules are the normal rules, except that
+ // function templates hide function templates with different
+ // return types or template parameter lists.
+ bool UseMemberUsingDeclRules =
+ (OldIsUsingDecl || NewIsUsingDecl) && CurContext->isRecord();
+
if (FunctionTemplateDecl *OldT = dyn_cast<FunctionTemplateDecl>(OldD)) {
- if (!IsOverload(New, OldT->getTemplatedDecl())) {
+ if (!IsOverload(New, OldT->getTemplatedDecl(), UseMemberUsingDeclRules)) {
+ if (UseMemberUsingDeclRules && OldIsUsingDecl) {
+ HideUsingShadowDecl(S, cast<UsingShadowDecl>(*I));
+ continue;
+ }
+
Match = *I;
return Ovl_Match;
}
} else if (FunctionDecl *OldF = dyn_cast<FunctionDecl>(OldD)) {
- if (!IsOverload(New, OldF)) {
+ if (!IsOverload(New, OldF, UseMemberUsingDeclRules)) {
+ if (UseMemberUsingDeclRules && OldIsUsingDecl) {
+ HideUsingShadowDecl(S, cast<UsingShadowDecl>(*I));
+ continue;
+ }
+
Match = *I;
return Ovl_Match;
}
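
The UseMemberUsingDeclRules path implements the C++ rule that a member
function declared in a class hides a function of the same signature brought
in by a using declaration, rather than conflicting with it. For example
(ordinary user code, not part of the patch):

    struct Base {
      void f(int);
    };

    struct Derived : Base {
      using Base::f;   // introduces a shadow declaration of Base::f(int)
      void f(int);     // same signature: hides the shadow declaration
                       // (HideUsingShadowDecl) instead of yielding Ovl_Match
    };
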
@@ -534,7 +571,8 @@ Sema::CheckOverload(FunctionDecl *New, const LookupResult &Old,
return Ovl_Overload;
}
-bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old) {
+bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
+ bool UseUsingDeclRules) {
FunctionTemplateDecl *OldTemplate = Old->getDescribedFunctionTemplate();
FunctionTemplateDecl *NewTemplate = New->getDescribedFunctionTemplate();
@@ -579,7 +617,10 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old) {
//
// We check the return type and template parameter lists for function
// templates first; the remaining checks follow.
- if (NewTemplate &&
+ //
+ // However, we don't consider either of these when deciding whether
+ // a member introduced by a shadow declaration is hidden.
+ if (!UseUsingDeclRules && NewTemplate &&
(!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
OldTemplate->getTemplateParameters(),
false, TPL_TemplateMatch) ||
@@ -804,7 +845,7 @@ static bool IsVectorConversion(ASTContext &Context, QualType FromType,
return false;
// Vector splat from any arithmetic type to a vector.
- if (!FromType->isVectorType() && FromType->isArithmeticType()) {
+ if (FromType->isArithmeticType()) {
ICK = ICK_Vector_Splat;
return true;
}
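
ICK_Vector_Splat models converting an arithmetic scalar to a vector by
replicating it into every element. With Clang's ext_vector_type extension, a
splat conversion looks like:

    typedef float float4 __attribute__((ext_vector_type(4)));

    float4 broadcast(float s) {
      float4 v = s;   // ICK_Vector_Splat: v becomes { s, s, s, s }
      return v;
    }
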
@@ -960,8 +1001,8 @@ Sema::IsStandardConversion(Expr* From, QualType ToType,
// Complex promotion (Clang extension)
SCS.Second = ICK_Complex_Promotion;
FromType = ToType.getUnqualifiedType();
- } else if ((FromType->isIntegralType() || FromType->isEnumeralType()) &&
- (ToType->isIntegralType() && !ToType->isEnumeralType())) {
+ } else if (FromType->isIntegralOrEnumerationType() &&
+ ToType->isIntegralType(Context)) {
// Integral conversions (C++ 4.7).
SCS.Second = ICK_Integral_Conversion;
FromType = ToType.getUnqualifiedType();
@@ -974,15 +1015,14 @@ Sema::IsStandardConversion(Expr* From, QualType ToType,
// Complex-real conversions (C99 6.3.1.7)
SCS.Second = ICK_Complex_Real;
FromType = ToType.getUnqualifiedType();
- } else if (FromType->isFloatingType() && ToType->isFloatingType()) {
+ } else if (FromType->isRealFloatingType() && ToType->isRealFloatingType()) {
// Floating point conversions (C++ 4.8).
SCS.Second = ICK_Floating_Conversion;
FromType = ToType.getUnqualifiedType();
- } else if ((FromType->isFloatingType() &&
- ToType->isIntegralType() && (!ToType->isBooleanType() &&
- !ToType->isEnumeralType())) ||
- ((FromType->isIntegralType() || FromType->isEnumeralType()) &&
- ToType->isFloatingType())) {
+ } else if ((FromType->isRealFloatingType() &&
+ ToType->isIntegralType(Context) && !ToType->isBooleanType()) ||
+ (FromType->isIntegralOrEnumerationType() &&
+ ToType->isRealFloatingType())) {
// Floating-integral conversions (C++ 4.9).
SCS.Second = ICK_Floating_Integral;
FromType = ToType.getUnqualifiedType();
@@ -1141,7 +1181,7 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
if (From)
if (FieldDecl *MemberDecl = From->getBitField()) {
APSInt BitWidth;
- if (FromType->isIntegralType() && !FromType->isEnumeralType() &&
+ if (FromType->isIntegralType(Context) &&
MemberDecl->getBitWidth()->isIntegerConstantExpr(BitWidth, Context)) {
APSInt ToSize(BitWidth.getBitWidth(), BitWidth.isUnsigned());
ToSize = Context.getTypeSize(ToType);
@@ -1271,7 +1311,7 @@ static bool isNullPointerConstantForConversion(Expr *Expr,
// Handle value-dependent integral null pointer constants correctly.
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#903
if (Expr->isValueDependent() && !Expr->isTypeDependent() &&
- Expr->getType()->isIntegralType())
+ Expr->getType()->isIntegerType() && !Expr->getType()->isEnumeralType())
return !InOverloadResolution;
return Expr->isNullPointerConstant(Context,
@@ -1622,6 +1662,12 @@ bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
bool IgnoreBaseAccess) {
QualType FromType = From->getType();
+ if (CXXBoolLiteralExpr* LitBool
+ = dyn_cast<CXXBoolLiteralExpr>(From->IgnoreParens()))
+ if (LitBool->getValue() == false)
+ Diag(LitBool->getExprLoc(), diag::warn_init_pointer_from_false)
+ << ToType;
+
if (const PointerType *FromPtrType = FromType->getAs<PointerType>())
if (const PointerType *ToPtrType = ToType->getAs<PointerType>()) {
QualType FromPointeeType = FromPtrType->getPointeeType(),
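
The new check flags the easy-to-make bug of using the literal 'false' as a
pointer value: in C++03, 'false' is an integral constant expression equal to
zero, so it quietly converts to a null pointer. Code that now draws the
warning:

    int *p = false;   // accepted as a null pointer constant, but now warns
                      // that a pointer is being initialized from 'false'
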
@@ -1779,7 +1825,7 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType) {
// in multi-level pointers, subject to the following rules: [...]
bool PreviousToQualsIncludeConst = true;
bool UnwrappedAnyPointer = false;
- while (UnwrapSimilarPointerTypes(FromType, ToType)) {
+ while (Context.UnwrapSimilarPointerTypes(FromType, ToType)) {
// Within each iteration of the loop, we check the qualifiers to
// determine if this still looks like a qualification
// conversion. Then, if all is well, we unwrap one more level of
@@ -1850,12 +1896,8 @@ OverloadingResult Sema::IsUserDefinedConversion(Expr *From, QualType ToType,
// We're not going to find any constructors.
} else if (CXXRecordDecl *ToRecordDecl
= dyn_cast<CXXRecordDecl>(ToRecordType->getDecl())) {
- DeclarationName ConstructorName
- = Context.DeclarationNames.getCXXConstructorName(
- Context.getCanonicalType(ToType).getUnqualifiedType());
DeclContext::lookup_iterator Con, ConEnd;
- for (llvm::tie(Con, ConEnd)
- = ToRecordDecl->lookup(ConstructorName);
+ for (llvm::tie(Con, ConEnd) = LookupConstructors(ToRecordDecl);
Con != ConEnd; ++Con) {
NamedDecl *D = *Con;
DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
@@ -2067,6 +2109,16 @@ Sema::CompareImplicitConversionSequences(const ImplicitConversionSequence& ICS1,
return ImplicitConversionSequence::Indistinguishable;
}
+static bool hasSimilarType(ASTContext &Context, QualType T1, QualType T2) {
+ while (Context.UnwrapSimilarPointerTypes(T1, T2)) {
+ Qualifiers Quals;
+ T1 = Context.getUnqualifiedArrayType(T1, Quals);
+ T2 = Context.getUnqualifiedArrayType(T2, Quals);
+ }
+
+ return Context.hasSameUnqualifiedType(T1, T2);
+}
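
hasSimilarType loosens the comparison from "same unqualified type" to
"similar type" in the C++ [conv.qual] sense: two types are similar if they
differ only in cv-qualifiers at some level of a multi-level pointer, which is
exactly what UnwrapSimilarPointerTypes peels away level by level. For
instance:

    int **pp = 0;
    const int *const *cpp = pp;   // qualification conversion only:
                                  // int** and const int* const* are similar
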
+
// Per 13.3.3.2p3, compare the given standard conversion sequences to
// determine if one is a proper subset of the other.
static ImplicitConversionSequence::CompareKind
@@ -2092,7 +2144,7 @@ compareStandardConversionSubsets(ASTContext &Context,
Result = ImplicitConversionSequence::Worse;
else
return ImplicitConversionSequence::Indistinguishable;
- } else if (!Context.hasSameType(SCS1.getToType(1), SCS2.getToType(1)))
+ } else if (!hasSimilarType(Context, SCS1.getToType(1), SCS2.getToType(1)))
return ImplicitConversionSequence::Indistinguishable;
if (SCS1.Third == SCS2.Third) {
@@ -2299,7 +2351,7 @@ Sema::CompareQualificationConversions(const StandardConversionSequence& SCS1,
ImplicitConversionSequence::CompareKind Result
= ImplicitConversionSequence::Indistinguishable;
- while (UnwrapSimilarPointerTypes(T1, T2)) {
+ while (Context.UnwrapSimilarPointerTypes(T1, T2)) {
// Within each iteration of the loop, we check the qualifiers to
// determine if this still looks like a qualification
// conversion. Then, if all is well, we unwrap one more level of
@@ -2566,6 +2618,95 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
return Ref_Related;
}
+/// \brief Look for a user-defined conversion to an lvalue that is
+/// reference-compatible with DeclType. Return true if something definite is
+/// found.
+static bool
+FindConversionToLValue(Sema &S, ImplicitConversionSequence &ICS,
+ QualType DeclType, SourceLocation DeclLoc,
+ Expr *Init, QualType T2, bool AllowExplicit) {
+ assert(T2->isRecordType() && "Can only find conversions of record types.");
+ CXXRecordDecl *T2RecordDecl
+ = dyn_cast<CXXRecordDecl>(T2->getAs<RecordType>()->getDecl());
+
+ OverloadCandidateSet CandidateSet(DeclLoc);
+ const UnresolvedSetImpl *Conversions
+ = T2RecordDecl->getVisibleConversionFunctions();
+ for (UnresolvedSetImpl::iterator I = Conversions->begin(),
+ E = Conversions->end(); I != E; ++I) {
+ NamedDecl *D = *I;
+ CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ FunctionTemplateDecl *ConvTemplate
+ = dyn_cast<FunctionTemplateDecl>(D);
+ CXXConversionDecl *Conv;
+ if (ConvTemplate)
+ Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
+ else
+ Conv = cast<CXXConversionDecl>(D);
+
+ // If the conversion function doesn't return a reference type,
+ // it can't be considered for this conversion. An rvalue reference
+ // is only acceptable if its referent is a function type.
+ const ReferenceType *RefType =
+ Conv->getConversionType()->getAs<ReferenceType>();
+ if (RefType && (RefType->isLValueReferenceType() ||
+ RefType->getPointeeType()->isFunctionType()) &&
+ (AllowExplicit || !Conv->isExplicit())) {
+ if (ConvTemplate)
+ S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(), ActingDC,
+ Init, DeclType, CandidateSet);
+ else
+ S.AddConversionCandidate(Conv, I.getPair(), ActingDC, Init,
+ DeclType, CandidateSet);
+ }
+ }
+
+ OverloadCandidateSet::iterator Best;
+ switch (S.BestViableFunction(CandidateSet, DeclLoc, Best)) {
+ case OR_Success:
+ // C++ [over.ics.ref]p1:
+ //
+ // [...] If the parameter binds directly to the result of
+ // applying a conversion function to the argument
+ // expression, the implicit conversion sequence is a
+ // user-defined conversion sequence (13.3.3.1.2), with the
+ // second standard conversion sequence either an identity
+ // conversion or, if the conversion function returns an
+ // entity of a type that is a derived class of the parameter
+ // type, a derived-to-base Conversion.
+ if (!Best->FinalConversion.DirectBinding)
+ return false;
+
+ ICS.setUserDefined();
+ ICS.UserDefined.Before = Best->Conversions[0].Standard;
+ ICS.UserDefined.After = Best->FinalConversion;
+ ICS.UserDefined.ConversionFunction = Best->Function;
+ ICS.UserDefined.EllipsisConversion = false;
+ assert(ICS.UserDefined.After.ReferenceBinding &&
+ ICS.UserDefined.After.DirectBinding &&
+ "Expected a direct reference binding!");
+ return true;
+
+ case OR_Ambiguous:
+ ICS.setAmbiguous();
+ for (OverloadCandidateSet::iterator Cand = CandidateSet.begin();
+ Cand != CandidateSet.end(); ++Cand)
+ if (Cand->Viable)
+ ICS.Ambiguous.addConversion(Cand->Function);
+ return true;
+
+ case OR_No_Viable_Function:
+ case OR_Deleted:
+ // There was no suitable conversion, or we found a deleted
+ // conversion; continue with other checks.
+ return false;
+ }
+
+ return false;
+}
+
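
FindConversionToLValue factors the conversion-function search out of
TryReferenceInit. The case it handles is a reference initialized from a
class object whose conversion operator yields a reference-compatible lvalue,
e.g. (plain user code):

    struct Wrapper {
      int value;
      operator int&() { return value; }   // yields an lvalue of type int
    };

    void use() {
      Wrapper w;
      int &r = w;   // user-defined conversion sequence whose second half is
                    // a direct reference binding (the OR_Success path above)
    }
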
/// \brief Compute an implicit conversion sequence for reference
/// initialization.
static ImplicitConversionSequence
@@ -2595,149 +2736,72 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
// Compute some basic properties of the types and the initializer.
bool isRValRef = DeclType->isRValueReferenceType();
bool DerivedToBase = false;
- Expr::isLvalueResult InitLvalue = Init->isLvalue(S.Context);
+ Expr::Classification InitCategory = Init->Classify(S.Context);
Sema::ReferenceCompareResult RefRelationship
= S.CompareReferenceRelationship(DeclLoc, T1, T2, DerivedToBase);
- // C++ [over.ics.ref]p3:
- // Except for an implicit object parameter, for which see 13.3.1,
- // a standard conversion sequence cannot be formed if it requires
- // binding an lvalue reference to non-const to an rvalue or
- // binding an rvalue reference to an lvalue.
- //
- // FIXME: DPG doesn't trust this code. It seems far too early to
- // abort because of a binding of an rvalue reference to an lvalue.
- if (isRValRef && InitLvalue == Expr::LV_Valid)
- return ICS;
-
- // C++0x [dcl.init.ref]p16:
+ // C++0x [dcl.init.ref]p5:
// A reference to type "cv1 T1" is initialized by an expression
// of type "cv2 T2" as follows:
- // -- If the initializer expression
- // -- is an lvalue (but is not a bit-field), and "cv1 T1" is
- // reference-compatible with "cv2 T2," or
- //
- // Per C++ [over.ics.ref]p4, we don't check the bit-field property here.
- if (InitLvalue == Expr::LV_Valid &&
- RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) {
- // C++ [over.ics.ref]p1:
- // When a parameter of reference type binds directly (8.5.3)
- // to an argument expression, the implicit conversion sequence
- // is the identity conversion, unless the argument expression
- // has a type that is a derived class of the parameter type,
- // in which case the implicit conversion sequence is a
- // derived-to-base Conversion (13.3.3.1).
- ICS.setStandard();
- ICS.Standard.First = ICK_Identity;
- ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base : ICK_Identity;
- ICS.Standard.Third = ICK_Identity;
- ICS.Standard.FromTypePtr = T2.getAsOpaquePtr();
- ICS.Standard.setToType(0, T2);
- ICS.Standard.setToType(1, T1);
- ICS.Standard.setToType(2, T1);
- ICS.Standard.ReferenceBinding = true;
- ICS.Standard.DirectBinding = true;
- ICS.Standard.RRefBinding = false;
- ICS.Standard.CopyConstructor = 0;
-
- // Nothing more to do: the inaccessibility/ambiguity check for
- // derived-to-base conversions is suppressed when we're
- // computing the implicit conversion sequence (C++
- // [over.best.ics]p2).
- return ICS;
- }
-
- // -- has a class type (i.e., T2 is a class type), where T1 is
- // not reference-related to T2, and can be implicitly
- // converted to an lvalue of type "cv3 T3," where "cv1 T1"
- // is reference-compatible with "cv3 T3" 92) (this
- // conversion is selected by enumerating the applicable
- // conversion functions (13.3.1.6) and choosing the best
- // one through overload resolution (13.3)),
- if (!isRValRef && !SuppressUserConversions && T2->isRecordType() &&
- !S.RequireCompleteType(DeclLoc, T2, 0) &&
- RefRelationship == Sema::Ref_Incompatible) {
- CXXRecordDecl *T2RecordDecl
- = dyn_cast<CXXRecordDecl>(T2->getAs<RecordType>()->getDecl());
-
- OverloadCandidateSet CandidateSet(DeclLoc);
- const UnresolvedSetImpl *Conversions
- = T2RecordDecl->getVisibleConversionFunctions();
- for (UnresolvedSetImpl::iterator I = Conversions->begin(),
- E = Conversions->end(); I != E; ++I) {
- NamedDecl *D = *I;
- CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
- if (isa<UsingShadowDecl>(D))
- D = cast<UsingShadowDecl>(D)->getTargetDecl();
-
- FunctionTemplateDecl *ConvTemplate
- = dyn_cast<FunctionTemplateDecl>(D);
- CXXConversionDecl *Conv;
- if (ConvTemplate)
- Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
- else
- Conv = cast<CXXConversionDecl>(D);
-
- // If the conversion function doesn't return a reference type,
- // it can't be considered for this conversion.
- if (Conv->getConversionType()->isLValueReferenceType() &&
- (AllowExplicit || !Conv->isExplicit())) {
- if (ConvTemplate)
- S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(), ActingDC,
- Init, DeclType, CandidateSet);
- else
- S.AddConversionCandidate(Conv, I.getPair(), ActingDC, Init,
- DeclType, CandidateSet);
- }
- }
-
- OverloadCandidateSet::iterator Best;
- switch (S.BestViableFunction(CandidateSet, DeclLoc, Best)) {
- case OR_Success:
+ // -- If the reference is an lvalue reference and the initializer expression
+ // The next bullet point (T1 is a function) is pretty much equivalent to this
+ // one, so it's handled here.
+ if (!isRValRef || T1->isFunctionType()) {
+ // -- is an lvalue (but is not a bit-field), and "cv1 T1" is
+ // reference-compatible with "cv2 T2," or
+ //
+ // Per C++ [over.ics.ref]p4, we don't check the bit-field property here.
+ if (InitCategory.isLValue() &&
+ RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) {
// C++ [over.ics.ref]p1:
- //
- // [...] If the parameter binds directly to the result of
- // applying a conversion function to the argument
- // expression, the implicit conversion sequence is a
- // user-defined conversion sequence (13.3.3.1.2), with the
- // second standard conversion sequence either an identity
- // conversion or, if the conversion function returns an
- // entity of a type that is a derived class of the parameter
- // type, a derived-to-base Conversion.
- if (!Best->FinalConversion.DirectBinding)
- break;
-
- ICS.setUserDefined();
- ICS.UserDefined.Before = Best->Conversions[0].Standard;
- ICS.UserDefined.After = Best->FinalConversion;
- ICS.UserDefined.ConversionFunction = Best->Function;
- ICS.UserDefined.EllipsisConversion = false;
- assert(ICS.UserDefined.After.ReferenceBinding &&
- ICS.UserDefined.After.DirectBinding &&
- "Expected a direct reference binding!");
- return ICS;
-
- case OR_Ambiguous:
- ICS.setAmbiguous();
- for (OverloadCandidateSet::iterator Cand = CandidateSet.begin();
- Cand != CandidateSet.end(); ++Cand)
- if (Cand->Viable)
- ICS.Ambiguous.addConversion(Cand->Function);
+ // When a parameter of reference type binds directly (8.5.3)
+ // to an argument expression, the implicit conversion sequence
+ // is the identity conversion, unless the argument expression
+ // has a type that is a derived class of the parameter type,
+ // in which case the implicit conversion sequence is a
+ // derived-to-base Conversion (13.3.3.1).
+ ICS.setStandard();
+ ICS.Standard.First = ICK_Identity;
+ ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base : ICK_Identity;
+ ICS.Standard.Third = ICK_Identity;
+ ICS.Standard.FromTypePtr = T2.getAsOpaquePtr();
+ ICS.Standard.setToType(0, T2);
+ ICS.Standard.setToType(1, T1);
+ ICS.Standard.setToType(2, T1);
+ ICS.Standard.ReferenceBinding = true;
+ ICS.Standard.DirectBinding = true;
+ ICS.Standard.RRefBinding = isRValRef;
+ ICS.Standard.CopyConstructor = 0;
+
+ // Nothing more to do: the inaccessibility/ambiguity check for
+ // derived-to-base conversions is suppressed when we're
+ // computing the implicit conversion sequence (C++
+ // [over.best.ics]p2).
return ICS;
+ }
- case OR_No_Viable_Function:
- case OR_Deleted:
- // There was no suitable conversion, or we found a deleted
- // conversion; continue with other checks.
- break;
+ // -- has a class type (i.e., T2 is a class type), where T1 is
+ // not reference-related to T2, and can be implicitly
+ // converted to an lvalue of type "cv3 T3," where "cv1 T1"
+ // is reference-compatible with "cv3 T3" 92) (this
+ // conversion is selected by enumerating the applicable
+ // conversion functions (13.3.1.6) and choosing the best
+ // one through overload resolution (13.3)),
+ if (!SuppressUserConversions && T2->isRecordType() &&
+ !S.RequireCompleteType(DeclLoc, T2, 0) &&
+ RefRelationship == Sema::Ref_Incompatible) {
+ if (FindConversionToLValue(S, ICS, DeclType, DeclLoc,
+ Init, T2, AllowExplicit))
+ return ICS;
}
}
- // -- Otherwise, the reference shall be to a non-volatile const
- // type (i.e., cv1 shall be const), or the reference shall be an
- // rvalue reference and the initializer expression shall be an rvalue.
+ // -- Otherwise, the reference shall be an lvalue reference to a
+ // non-volatile const type (i.e., cv1 shall be const), or the reference
+ // shall be an rvalue reference and the initializer expression shall be
+ // an rvalue or have a function type.
//
// We actually handle one oddity of C++ [over.ics.ref] at this
// point, which is that, due to p2 (which short-circuits reference
@@ -2746,10 +2810,26 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
// reference to bind to an rvalue. Hence the check for the presence
// of "const" rather than checking for "const" being the only
// qualifier.
- if (!isRValRef && !T1.isConstQualified())
+ // This is also the point where rvalue references and lvalue inits no longer
+ // go together.
+ if ((!isRValRef && !T1.isConstQualified()) ||
+ (isRValRef && InitCategory.isLValue()))
+ return ICS;
+
+ // -- If T1 is a function type, then
+ // -- if T2 is the same type as T1, the reference is bound to the
+ // initializer expression lvalue;
+ // -- if T2 is a class type and the initializer expression can be
+ // implicitly converted to an lvalue of type T1 [...], the
+ // reference is bound to the function lvalue that is the result
+ // of the conversion;
+ // This is the same as for the lvalue case above, so it was handled there.
+ // -- otherwise, the program is ill-formed.
+ // This is the one difference to the lvalue case.
+ if (T1->isFunctionType())
return ICS;
- // -- if T2 is a class type and
+ // -- Otherwise, if T2 is a class type and
// -- the initializer expression is an rvalue and "cv1 T1"
// is reference-compatible with "cv2 T2," or
//
@@ -2768,7 +2848,7 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType,
//
// We're only checking the first case here, which is a direct
// binding in C++0x but not in C++03.
- if (InitLvalue != Expr::LV_Valid && T2->isRecordType() &&
+ if (InitCategory.isRValue() && T2->isRecordType() &&
RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) {
ICS.setStandard();
ICS.Standard.First = ICK_Identity;
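
The restructured logic follows C++0x [dcl.init.ref]p5: lvalue references
(and references to function types) take the first branch, while rvalue
references may bind only to rvalues, with function lvalues as the single
sanctioned exception. Concretely, under -std=c++0x:

    void g();

    int  &&a = 42;       // OK: rvalue reference binds to an rvalue
    void (&&fr)() = g;   // OK: a function lvalue may bind to an rvalue
                         // reference (T1 is a function type)
    int n = 0;
    int &&b = n;         // ill-formed: rvalue reference bound to an lvalue
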
@@ -3012,6 +3092,177 @@ bool Sema::PerformContextuallyConvertToObjCId(Expr *&From) {
return true;
}
+/// \brief Attempt to convert the given expression to an integral or
+/// enumeration type.
+///
+/// This routine will attempt to convert an expression of class type to an
+/// integral or enumeration type, if that class type only has a single
+/// conversion to an integral or enumeration type.
+///
+/// \param Loc The source location of the construct that requires the
+/// conversion.
+///
+/// \param FromE The expression we're converting from.
+///
+/// \param NotIntDiag The diagnostic to be emitted if the expression does not
+/// have integral or enumeration type.
+///
+/// \param IncompleteDiag The diagnostic to be emitted if the expression has
+/// incomplete class type.
+///
+/// \param ExplicitConvDiag The diagnostic to be emitted if we're calling an
+/// explicit conversion function (because no implicit conversion functions
+/// were available). This is a recovery mode.
+///
+/// \param ExplicitConvNote The note to be emitted with \p ExplicitConvDiag,
+/// showing which conversion was picked.
+///
+/// \param AmbigDiag The diagnostic to be emitted if there is more than one
+/// conversion function that could convert to integral or enumeration type.
+///
+/// \param AmbigNote The note to be emitted with \p AmbigDiag for each
+/// usable conversion function.
+///
+/// \param ConvDiag The diagnostic to be emitted if we are calling a conversion
+/// function, which may be an extension in this case.
+///
+/// \returns The expression, converted to an integral or enumeration type if
+/// successful.
+Sema::OwningExprResult
+Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, ExprArg FromE,
+ const PartialDiagnostic &NotIntDiag,
+ const PartialDiagnostic &IncompleteDiag,
+ const PartialDiagnostic &ExplicitConvDiag,
+ const PartialDiagnostic &ExplicitConvNote,
+ const PartialDiagnostic &AmbigDiag,
+ const PartialDiagnostic &AmbigNote,
+ const PartialDiagnostic &ConvDiag) {
+ Expr *From = static_cast<Expr *>(FromE.get());
+
+ // We can't perform any more checking for type-dependent expressions.
+ if (From->isTypeDependent())
+ return move(FromE);
+
+ // If the expression already has integral or enumeration type, we're golden.
+ QualType T = From->getType();
+ if (T->isIntegralOrEnumerationType())
+ return move(FromE);
+
+ // FIXME: Check for missing '()' if T is a function type?
+
+ // If we don't have a class type in C++, there's no way we can get an
+ // expression of integral or enumeration type.
+ const RecordType *RecordTy = T->getAs<RecordType>();
+ if (!RecordTy || !getLangOptions().CPlusPlus) {
+ Diag(Loc, NotIntDiag)
+ << T << From->getSourceRange();
+ return move(FromE);
+ }
+
+ // We must have a complete class type.
+ if (RequireCompleteType(Loc, T, IncompleteDiag))
+ return move(FromE);
+
+ // Look for a conversion to an integral or enumeration type.
+ UnresolvedSet<4> ViableConversions;
+ UnresolvedSet<4> ExplicitConversions;
+ const UnresolvedSetImpl *Conversions
+ = cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions();
+
+ for (UnresolvedSetImpl::iterator I = Conversions->begin(),
+ E = Conversions->end();
+ I != E;
+ ++I) {
+ if (CXXConversionDecl *Conversion
+ = dyn_cast<CXXConversionDecl>((*I)->getUnderlyingDecl()))
+ if (Conversion->getConversionType().getNonReferenceType()
+ ->isIntegralOrEnumerationType()) {
+ if (Conversion->isExplicit())
+ ExplicitConversions.addDecl(I.getDecl(), I.getAccess());
+ else
+ ViableConversions.addDecl(I.getDecl(), I.getAccess());
+ }
+ }
+
+ switch (ViableConversions.size()) {
+ case 0:
+ if (ExplicitConversions.size() == 1) {
+ DeclAccessPair Found = ExplicitConversions[0];
+ CXXConversionDecl *Conversion
+ = cast<CXXConversionDecl>(Found->getUnderlyingDecl());
+
+ // The user probably meant to invoke the given explicit
+ // conversion; use it.
+ QualType ConvTy
+ = Conversion->getConversionType().getNonReferenceType();
+ std::string TypeStr;
+ ConvTy.getAsStringInternal(TypeStr, Context.PrintingPolicy);
+
+ Diag(Loc, ExplicitConvDiag)
+ << T << ConvTy
+ << FixItHint::CreateInsertion(From->getLocStart(),
+ "static_cast<" + TypeStr + ">(")
+ << FixItHint::CreateInsertion(PP.getLocForEndOfToken(From->getLocEnd()),
+ ")");
+ Diag(Conversion->getLocation(), ExplicitConvNote)
+ << ConvTy->isEnumeralType() << ConvTy;
+
+ // If we aren't in a SFINAE context, build a call to the
+ // explicit conversion function.
+ if (isSFINAEContext())
+ return ExprError();
+
+ CheckMemberOperatorAccess(From->getExprLoc(), From, 0, Found);
+ From = BuildCXXMemberCallExpr(FromE.takeAs<Expr>(), Found, Conversion);
+ FromE = Owned(From);
+ }
+
+ // We'll complain below about a non-integral condition type.
+ break;
+
+ case 1: {
+ // Apply this conversion.
+ DeclAccessPair Found = ViableConversions[0];
+ CheckMemberOperatorAccess(From->getExprLoc(), From, 0, Found);
+
+ CXXConversionDecl *Conversion
+ = cast<CXXConversionDecl>(Found->getUnderlyingDecl());
+ QualType ConvTy
+ = Conversion->getConversionType().getNonReferenceType();
+ if (ConvDiag.getDiagID()) {
+ if (isSFINAEContext())
+ return ExprError();
+
+ Diag(Loc, ConvDiag)
+ << T << ConvTy->isEnumeralType() << ConvTy << From->getSourceRange();
+ }
+
+ From = BuildCXXMemberCallExpr(FromE.takeAs<Expr>(), Found,
+ cast<CXXConversionDecl>(Found->getUnderlyingDecl()));
+ FromE = Owned(From);
+ break;
+ }
+
+ default:
+ Diag(Loc, AmbigDiag)
+ << T << From->getSourceRange();
+ for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) {
+ CXXConversionDecl *Conv
+ = cast<CXXConversionDecl>(ViableConversions[I]->getUnderlyingDecl());
+ QualType ConvTy = Conv->getConversionType().getNonReferenceType();
+ Diag(Conv->getLocation(), AmbigNote)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+ return move(FromE);
+ }
+
+ if (!From->getType()->isIntegralOrEnumerationType())
+ Diag(Loc, NotIntDiag)
+ << From->getType() << From->getSourceRange();
+
+ return move(FromE);
+}
+
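
This routine implements the contextual conversion required by constructs
such as switch conditions: a class-typed operand is usable only if it has
exactly one viable conversion to an integral or enumeration type. A case
that takes the single-conversion path (hypothetical user code):

    struct Size {
      operator unsigned() const { return 4; }   // the one viable conversion
    };

    int classify(Size s) {
      switch (s) {      // converted through Size::operator unsigned()
      case 4:  return 1;
      default: return 0;
      }
    }

Two such operators would instead trigger the AmbigDiag/AmbigNote pair, and a
lone explicit operator takes the recovery path that suggests wrapping the
operand in a static_cast via the FixItHint above.
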
/// AddOverloadCandidate - Adds the given function to the set of
/// candidate functions, using the given function call arguments. If
/// @p SuppressUserConversions, then don't allow user-defined
@@ -3476,7 +3727,7 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
// there are 0 arguments (i.e., nothing is allocated using ASTContext's
// allocator).
CallExpr Call(Context, &ConversionFn, 0, 0,
- Conversion->getConversionType().getNonReferenceType(),
+ Conversion->getConversionType().getNonLValueExprType(Context),
From->getLocStart());
ImplicitConversionSequence ICS =
TryCopyInitialization(*this, &Call, ToType,
@@ -4949,7 +5200,7 @@ Sema::isBetterOverloadCandidate(const OverloadCandidate& Cand1,
// - F1 is a non-template function and F2 is a function template
// specialization, or, if not that,
- if (Cand1.Function && !Cand1.Function->getPrimaryTemplate() &&
+ if ((!Cand1.Function || !Cand1.Function->getPrimaryTemplate()) &&
Cand2.Function && Cand2.Function->getPrimaryTemplate())
return true;
@@ -5230,6 +5481,46 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
return;
}
+ // Diagnose base -> derived pointer conversions.
+ unsigned BaseToDerivedConversion = 0;
+ if (const PointerType *FromPtrTy = FromTy->getAs<PointerType>()) {
+ if (const PointerType *ToPtrTy = ToTy->getAs<PointerType>()) {
+ if (ToPtrTy->getPointeeType().isAtLeastAsQualifiedAs(
+ FromPtrTy->getPointeeType()) &&
+ !FromPtrTy->getPointeeType()->isIncompleteType() &&
+ !ToPtrTy->getPointeeType()->isIncompleteType() &&
+ S.IsDerivedFrom(ToPtrTy->getPointeeType(),
+ FromPtrTy->getPointeeType()))
+ BaseToDerivedConversion = 1;
+ }
+ } else if (const ObjCObjectPointerType *FromPtrTy
+ = FromTy->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *ToPtrTy
+ = ToTy->getAs<ObjCObjectPointerType>())
+ if (const ObjCInterfaceDecl *FromIface = FromPtrTy->getInterfaceDecl())
+ if (const ObjCInterfaceDecl *ToIface = ToPtrTy->getInterfaceDecl())
+ if (ToPtrTy->getPointeeType().isAtLeastAsQualifiedAs(
+ FromPtrTy->getPointeeType()) &&
+ FromIface->isSuperClassOf(ToIface))
+ BaseToDerivedConversion = 2;
+ } else if (const ReferenceType *ToRefTy = ToTy->getAs<ReferenceType>()) {
+ if (ToRefTy->getPointeeType().isAtLeastAsQualifiedAs(FromTy) &&
+ !FromTy->isIncompleteType() &&
+ !ToRefTy->getPointeeType()->isIncompleteType() &&
+ S.IsDerivedFrom(ToRefTy->getPointeeType(), FromTy))
+ BaseToDerivedConversion = 3;
+ }
+
+ if (BaseToDerivedConversion) {
+ S.Diag(Fn->getLocation(),
+ diag::note_ovl_candidate_bad_base_to_derived_conv)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << (BaseToDerivedConversion - 1)
+ << FromTy << ToTy << I+1;
+ return;
+ }
+
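The new note covers three base-to-derived flavors, encoded as 1 through 3: ordinary pointers, Objective-C object pointers, and references. A small sketch of the pointer case (names hypothetical):

  struct Base { };
  struct Derived : Base { };
  void take(Derived *d);
  void g(Base *bp) {
    take(bp);   // candidate note: no implicit base-to-derived pointer conversion
  }
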
// TODO: specialize more based on the kind of mismatch
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_conv)
<< (unsigned) FnKind << FnDesc
@@ -5673,7 +5964,10 @@ Sema::PrintOverloadCandidates(OverloadCandidateSet& CandidateSet,
Cands.push_back(Cand);
else if (OCD == OCD_AllCandidates) {
CompleteNonViableCandidate(*this, Cand, Args, NumArgs);
- Cands.push_back(Cand);
+ if (Cand->Function || Cand->IsSurrogate)
+ Cands.push_back(Cand);
+      // Otherwise, this is a non-viable builtin candidate. We do not, in general,
+ // want to list every possible builtin candidate.
}
}
@@ -5683,17 +5977,26 @@ Sema::PrintOverloadCandidates(OverloadCandidateSet& CandidateSet,
bool ReportedAmbiguousConversions = false;
llvm::SmallVectorImpl<OverloadCandidate*>::iterator I, E;
+ const Diagnostic::OverloadsShown ShowOverloads = Diags.getShowOverloads();
+ unsigned CandsShown = 0;
for (I = Cands.begin(), E = Cands.end(); I != E; ++I) {
OverloadCandidate *Cand = *I;
+ // Set an arbitrary limit on the number of candidate functions we'll spam
+ // the user with. FIXME: This limit should depend on details of the
+ // candidate list.
+ if (CandsShown >= 4 && ShowOverloads == Diagnostic::Ovl_Best) {
+ break;
+ }
+ ++CandsShown;
+
if (Cand->Function)
NoteFunctionCandidate(*this, Cand, Args, NumArgs);
else if (Cand->IsSurrogate)
NoteSurrogateCandidate(*this, Cand);
-
- // This a builtin candidate. We do not, in general, want to list
- // every possible builtin candidate.
- else if (Cand->Viable) {
+ else {
+ assert(Cand->Viable &&
+ "Non-viable built-in candidates are not added to Cands.");
// Generally we only see ambiguities including viable builtin
// operators if overload resolution got screwed up by an
// ambiguous user-defined conversion.
@@ -5709,6 +6012,9 @@ Sema::PrintOverloadCandidates(OverloadCandidateSet& CandidateSet,
NoteBuiltinOperatorCandidate(*this, Opc, OpLoc, Cand);
}
}
+
+ if (I != E)
+ Diag(OpLoc, diag::note_ovl_too_many_candidates) << int(E - I);
}
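With the limit above, only the first four candidates are noted unless the full list is requested (the Ovl_Best/Ovl_All setting is presumably surfaced by a -fshow-overloads-style driver option), and the trailing note reports how many were suppressed. A sketch of the triggering pattern:

  void h(int, int);       // plus several more overloads...
  void h(long, long);
  void h(char, char);
  void h(float, float);
  void h(double, double);
  void call() {
    h(&call);             // no viable overload: at most 4 candidate notes, then
  }                       // note_ovl_too_many_candidates with the remainder
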
static bool CheckUnresolvedAccess(Sema &S, OverloadExpr *E, DeclAccessPair D) {
@@ -5981,7 +6287,8 @@ FunctionDecl *Sema::ResolveSingleFunctionTemplateSpecialization(Expr *From) {
// specified and it, along with any default template arguments,
// identifies a single function template specialization, then the
// template-id is an lvalue for the function template specialization.
- FunctionTemplateDecl *FunctionTemplate = cast<FunctionTemplateDecl>(*I);
+ FunctionTemplateDecl *FunctionTemplate
+ = cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl());
// C++ [over.over]p2:
// If the name is a function template, template argument deduction is
@@ -6159,7 +6466,7 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
Sema::MultiExprArg(SemaRef, (void**) Args, NumArgs),
CommaLocs, RParenLoc);
}
-
+
/// ResolveOverloadedCallFn - Given the call expression that calls Fn
/// (which eventually refers to the declaration Func) and the call
/// arguments Args/NumArgs, attempt to resolve the function call down
@@ -6290,6 +6597,12 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
}
if (Input->isTypeDependent()) {
+ if (Fns.empty())
+ return Owned(new (Context) UnaryOperator(input.takeAs<Expr>(),
+ Opc,
+ Context.DependentTy,
+ OpLoc));
+
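The early exit above avoids building an UnresolvedLookupExpr when the operand is type-dependent but operator-name lookup found nothing at all; a plain dependent UnaryOperator suffices. Sketch:

  template <typename T>
  void neg(T t) {
    -t;   // T is dependent and no operator- overloads are visible (Fns empty),
  }       // so a dependent UnaryOperator is built directly
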
CXXRecordDecl *NamingClass = 0; // because lookup ignores member operators
UnresolvedLookupExpr *Fn
= UnresolvedLookupExpr::Create(Context, /*Dependent*/ true, NamingClass,
@@ -6356,7 +6669,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
DiagnoseUseOfDecl(Best->FoundDecl, OpLoc);
// Determine the result type
- QualType ResultTy = FnDecl->getResultType().getNonReferenceType();
+ QualType ResultTy = FnDecl->getCallResultType();
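getCallResultType() is presumably getResultType().getNonLValueExprType(Context): like getNonReferenceType() it strips references, but it additionally drops cv-qualifiers from non-class rvalue results, matching the C++ rule that non-class prvalues are cv-unqualified. A sketch of the difference (hypothetical declaration):

  const int f();
  // the type of the call expression f() is 'int', not 'const int'
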
// Build the actual expression node.
Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(),
@@ -6563,8 +6876,8 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// Determine the result type
QualType ResultTy
- = FnDecl->getType()->getAs<FunctionType>()->getResultType();
- ResultTy = ResultTy.getNonReferenceType();
+ = FnDecl->getType()->getAs<FunctionType>()
+ ->getCallResultType(Context);
// Build the actual expression node.
Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(),
@@ -6720,8 +7033,8 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
// Determine the result type
QualType ResultTy
- = FnDecl->getType()->getAs<FunctionType>()->getResultType();
- ResultTy = ResultTy.getNonReferenceType();
+ = FnDecl->getType()->getAs<FunctionType>()
+ ->getCallResultType(Context);
// Build the actual expression node.
Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(),
@@ -6909,7 +7222,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
ExprOwningPtr<CXXMemberCallExpr>
TheCall(this, new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
NumArgs,
- Method->getResultType().getNonReferenceType(),
+ Method->getCallResultType(),
RParenLoc));
// Check for a valid return type.
@@ -7124,7 +7437,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
// Once we've built TheCall, all of the expressions are properly
// owned.
- QualType ResultTy = Method->getResultType().getNonReferenceType();
+ QualType ResultTy = Method->getCallResultType();
ExprOwningPtr<CXXOperatorCallExpr>
TheCall(this, new (Context) CXXOperatorCallExpr(Context, OO_Call, NewFn,
MethodArgs, NumArgs + 1,
@@ -7280,7 +7593,7 @@ Sema::BuildOverloadedArrowExpr(Scope *S, ExprArg BaseIn, SourceLocation OpLoc) {
SourceLocation());
UsualUnaryConversions(FnExpr);
- QualType ResultTy = Method->getResultType().getNonReferenceType();
+ QualType ResultTy = Method->getCallResultType();
ExprOwningPtr<CXXOperatorCallExpr>
TheCall(this, new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr,
&Base, 1, ResultTy, OpLoc));
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
index 875b160..9c8f48b 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
@@ -92,12 +92,6 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
if (const CXXExprWithTemporaries *Temps = dyn_cast<CXXExprWithTemporaries>(E))
E = Temps->getSubExpr();
- if (const CXXZeroInitValueExpr *Zero = dyn_cast<CXXZeroInitValueExpr>(E)) {
- if (const RecordType *RecordT = Zero->getType()->getAs<RecordType>())
- if (CXXRecordDecl *RecordD = dyn_cast<CXXRecordDecl>(RecordT->getDecl()))
- if (!RecordD->hasTrivialDestructor())
- return;
- }
if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
if (E->getType()->isVoidType())
@@ -304,7 +298,7 @@ Sema::ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, DeclPtrTy CondVar,
DiagnoseUnusedExprResult(elseStmt);
CondResult.release();
- return Owned(new (Context) IfStmt(IfLoc, ConditionVar, ConditionExpr,
+ return Owned(new (Context) IfStmt(Context, IfLoc, ConditionVar, ConditionExpr,
thenStmt, ElseLoc, elseStmt));
}
@@ -400,124 +394,16 @@ static bool EqEnumVals(const std::pair<llvm::APSInt, EnumConstantDecl*>& lhs,
/// GetTypeBeforeIntegralPromotion - Returns the pre-promotion type of
/// potentially integral-promoted expression @p expr.
static QualType GetTypeBeforeIntegralPromotion(const Expr* expr) {
- const ImplicitCastExpr *ImplicitCast =
- dyn_cast_or_null<ImplicitCastExpr>(expr);
- if (ImplicitCast != NULL) {
+ if (const CastExpr *ImplicitCast = dyn_cast<ImplicitCastExpr>(expr)) {
const Expr *ExprBeforePromotion = ImplicitCast->getSubExpr();
QualType TypeBeforePromotion = ExprBeforePromotion->getType();
- if (TypeBeforePromotion->isIntegralType()) {
+ if (TypeBeforePromotion->isIntegralOrEnumerationType()) {
return TypeBeforePromotion;
}
}
return expr->getType();
}
-/// \brief Check (and possibly convert) the condition in a switch
-/// statement in C++.
-static bool CheckCXXSwitchCondition(Sema &S, SourceLocation SwitchLoc,
- Expr *&CondExpr) {
- if (CondExpr->isTypeDependent())
- return false;
-
- QualType CondType = CondExpr->getType();
-
- // C++ 6.4.2.p2:
- // The condition shall be of integral type, enumeration type, or of a class
- // type for which a single conversion function to integral or enumeration
- // type exists (12.3). If the condition is of class type, the condition is
- // converted by calling that conversion function, and the result of the
- // conversion is used in place of the original condition for the remainder
- // of this section. Integral promotions are performed.
-
- // Make sure that the condition expression has a complete type,
- // otherwise we'll never find any conversions.
- if (S.RequireCompleteType(SwitchLoc, CondType,
- S.PDiag(diag::err_switch_incomplete_class_type)
- << CondExpr->getSourceRange()))
- return true;
-
- UnresolvedSet<4> ViableConversions;
- UnresolvedSet<4> ExplicitConversions;
- if (const RecordType *RecordTy = CondType->getAs<RecordType>()) {
- const UnresolvedSetImpl *Conversions
- = cast<CXXRecordDecl>(RecordTy->getDecl())
- ->getVisibleConversionFunctions();
- for (UnresolvedSetImpl::iterator I = Conversions->begin(),
- E = Conversions->end(); I != E; ++I) {
- if (CXXConversionDecl *Conversion
- = dyn_cast<CXXConversionDecl>((*I)->getUnderlyingDecl()))
- if (Conversion->getConversionType().getNonReferenceType()
- ->isIntegralType()) {
- if (Conversion->isExplicit())
- ExplicitConversions.addDecl(I.getDecl(), I.getAccess());
- else
- ViableConversions.addDecl(I.getDecl(), I.getAccess());
- }
- }
-
- switch (ViableConversions.size()) {
- case 0:
- if (ExplicitConversions.size() == 1) {
- DeclAccessPair Found = ExplicitConversions[0];
- CXXConversionDecl *Conversion =
- cast<CXXConversionDecl>(Found->getUnderlyingDecl());
- // The user probably meant to invoke the given explicit
- // conversion; use it.
- QualType ConvTy
- = Conversion->getConversionType().getNonReferenceType();
- std::string TypeStr;
- ConvTy.getAsStringInternal(TypeStr, S.Context.PrintingPolicy);
-
- S.Diag(SwitchLoc, diag::err_switch_explicit_conversion)
- << CondType << ConvTy << CondExpr->getSourceRange()
- << FixItHint::CreateInsertion(CondExpr->getLocStart(),
- "static_cast<" + TypeStr + ">(")
- << FixItHint::CreateInsertion(
- S.PP.getLocForEndOfToken(CondExpr->getLocEnd()),
- ")");
- S.Diag(Conversion->getLocation(), diag::note_switch_conversion)
- << ConvTy->isEnumeralType() << ConvTy;
-
- // If we aren't in a SFINAE context, build a call to the
- // explicit conversion function.
- if (S.isSFINAEContext())
- return true;
-
- S.CheckMemberOperatorAccess(CondExpr->getExprLoc(),
- CondExpr, 0, Found);
- CondExpr = S.BuildCXXMemberCallExpr(CondExpr, Found, Conversion);
- }
-
- // We'll complain below about a non-integral condition type.
- break;
-
- case 1: {
- // Apply this conversion.
- DeclAccessPair Found = ViableConversions[0];
- S.CheckMemberOperatorAccess(CondExpr->getExprLoc(),
- CondExpr, 0, Found);
- CondExpr = S.BuildCXXMemberCallExpr(CondExpr, Found,
- cast<CXXConversionDecl>(Found->getUnderlyingDecl()));
- break;
- }
-
- default:
- S.Diag(SwitchLoc, diag::err_switch_multiple_conversions)
- << CondType << CondExpr->getSourceRange();
- for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) {
- CXXConversionDecl *Conv
- = cast<CXXConversionDecl>(ViableConversions[I]->getUnderlyingDecl());
- QualType ConvTy = Conv->getConversionType().getNonReferenceType();
- S.Diag(Conv->getLocation(), diag::note_switch_conversion)
- << ConvTy->isEnumeralType() << ConvTy;
- }
- return true;
- }
- }
-
- return false;
-}
-
Action::OwningStmtResult
Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, ExprArg Cond,
DeclPtrTy CondVar) {
@@ -531,21 +417,32 @@ Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, ExprArg Cond,
Cond = move(CondE);
}
- Expr *CondExpr = Cond.takeAs<Expr>();
- if (!CondExpr)
+ if (!Cond.get())
return StmtError();
- if (getLangOptions().CPlusPlus &&
- CheckCXXSwitchCondition(*this, SwitchLoc, CondExpr))
- return StmtError();
-
+ Expr *CondExpr = static_cast<Expr *>(Cond.get());
+ OwningExprResult ConvertedCond
+ = ConvertToIntegralOrEnumerationType(SwitchLoc, move(Cond),
+ PDiag(diag::err_typecheck_statement_requires_integer),
+ PDiag(diag::err_switch_incomplete_class_type)
+ << CondExpr->getSourceRange(),
+ PDiag(diag::err_switch_explicit_conversion),
+ PDiag(diag::note_switch_conversion),
+ PDiag(diag::err_switch_multiple_conversions),
+ PDiag(diag::note_switch_conversion),
+ PDiag(0));
+ if (ConvertedCond.isInvalid())
+ return StmtError();
+
+ CondExpr = ConvertedCond.takeAs<Expr>();
+
if (!CondVar.get()) {
CondExpr = MaybeCreateCXXExprWithTemporaries(CondExpr);
if (!CondExpr)
return StmtError();
}
- SwitchStmt *SS = new (Context) SwitchStmt(ConditionVar, CondExpr);
+ SwitchStmt *SS = new (Context) SwitchStmt(Context, ConditionVar, CondExpr);
getSwitchStack().push_back(SS);
return Owned(SS);
}
@@ -584,11 +481,11 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, StmtArg Switch,
// be represented by the promoted type. Therefore we need to find
// the pre-promotion type of the switch condition.
if (!CondExpr->isTypeDependent()) {
- if (!CondType->isIntegerType()) { // C99 6.8.4.2p1
- Diag(SwitchLoc, diag::err_typecheck_statement_requires_integer)
- << CondType << CondExpr->getSourceRange();
+ // We have already converted the expression to an integral or enumeration
+ // type, when we started the switch statement. If we don't have an
+ // appropriate type now, just return an error.
+ if (!CondType->isIntegralOrEnumerationType())
return StmtError();
- }
if (CondExpr->isKnownToHaveBooleanValue()) {
// switch(bool_expr) {...} is often a programmer error, e.g.
@@ -838,6 +735,8 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, StmtArg Switch,
llvm::APSInt Val = (*EDI)->getInitVal();
if(Val.getBitWidth() < CondWidth)
Val.extend(CondWidth);
+ else if (Val.getBitWidth() > CondWidth)
+ Val.trunc(CondWidth);
Val.setIsSigned(CondIsSigned);
EnumVals.push_back(std::make_pair(Val, (*EDI)));
}
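Previously only widening was handled here; an enumerator whose APSInt was stored at a greater bit width than the condition would reach later comparisons with mismatched widths. A schematic of the normalization, mirroring the patch's in-place APSInt API rather than the surrounding Sema code:

  llvm::APSInt Val(/*BitWidth=*/64, /*isUnsigned=*/false);
  unsigned CondWidth = 32;
  if (Val.getBitWidth() < CondWidth)
    Val.extend(CondWidth);
  else if (Val.getBitWidth() > CondWidth)
    Val.trunc(CondWidth);    // the new case: normalize down as well
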
@@ -929,8 +828,8 @@ Sema::ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond,
DiagnoseUnusedExprResult(bodyStmt);
CondResult.release();
- return Owned(new (Context) WhileStmt(ConditionVar, ConditionExpr, bodyStmt,
- WhileLoc));
+ return Owned(new (Context) WhileStmt(Context, ConditionVar, ConditionExpr,
+ bodyStmt, WhileLoc));
}
Action::OwningStmtResult
@@ -999,9 +898,10 @@ Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
first.release();
body.release();
- return Owned(new (Context) ForStmt(First, SecondResult.takeAs<Expr>(),
- ConditionVar, Third, Body,
- ForLoc, LParenLoc, RParenLoc));
+ return Owned(new (Context) ForStmt(Context, First,
+ SecondResult.takeAs<Expr>(), ConditionVar,
+ Third, Body, ForLoc, LParenLoc,
+ RParenLoc));
}
Action::OwningStmtResult
@@ -1517,14 +1417,14 @@ Sema::OwningStmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc,
if (InTy->isIntegerType() || InTy->isPointerType())
InputDomain = AD_Int;
- else if (InTy->isFloatingType())
+ else if (InTy->isRealFloatingType())
InputDomain = AD_FP;
else
InputDomain = AD_Other;
if (OutTy->isIntegerType() || OutTy->isPointerType())
OutputDomain = AD_Int;
- else if (OutTy->isFloatingType())
+ else if (OutTy->isRealFloatingType())
OutputDomain = AD_FP;
else
OutputDomain = AD_Other;
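isFloatingType() is true for _Complex float and friends, so complex asm operands were classified into the floating-point domain; with isRealFloatingType() they fall into AD_Other, which affects when tying an input to an output of a different size is permitted. Schematic of the classification:

  double d;            // AD_FP
  _Complex double cd;  // now AD_Other rather than AD_FP
  int *p;              // AD_Int
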
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
index 307be9d..f121954 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
@@ -27,12 +27,12 @@ using namespace clang;
/// \brief Determine whether the declaration found is acceptable as the name
/// of a template and, if so, return that template declaration. Otherwise,
/// returns NULL.
-static NamedDecl *isAcceptableTemplateName(ASTContext &Context, NamedDecl *D) {
- if (!D)
- return 0;
+static NamedDecl *isAcceptableTemplateName(ASTContext &Context,
+ NamedDecl *Orig) {
+ NamedDecl *D = Orig->getUnderlyingDecl();
if (isa<TemplateDecl>(D))
- return D;
+ return Orig;
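Unwrapping the underlying declaration here, rather than at the caller as before, means a using-declaration shadow that refers to a template is itself returned as the acceptable name, so the using declaration is preserved in the lookup results. Sketch:

  namespace N { template <typename T> struct X { }; }
  using N::X;
  X<int> xi;   // lookup finds the shadow decl; its underlying TemplateDecl qualifies
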
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
// C++ [temp.local]p1:
@@ -68,7 +68,7 @@ static void FilterAcceptableTemplateNames(ASTContext &C, LookupResult &R) {
LookupResult::Filter filter = R.makeFilter();
while (filter.hasNext()) {
NamedDecl *Orig = filter.next();
- NamedDecl *Repl = isAcceptableTemplateName(C, Orig->getUnderlyingDecl());
+ NamedDecl *Repl = isAcceptableTemplateName(C, Orig);
if (!Repl)
filter.erase();
else if (Repl != Orig) {
@@ -258,9 +258,9 @@ void Sema::LookupTemplateName(LookupResult &Found,
// If we did not find any names, attempt to correct any typos.
DeclarationName Name = Found.getLookupName();
if (DeclarationName Corrected = CorrectTypo(Found, S, &SS, LookupCtx,
- false, CTC_CXXCasts)) {
+ false, CTC_CXXCasts)) {
FilterAcceptableTemplateNames(Context, Found);
- if (!Found.empty() && isa<TemplateDecl>(*Found.begin())) {
+ if (!Found.empty()) {
if (LookupCtx)
Diag(Found.getNameLoc(), diag::err_no_member_template_suggest)
<< Name << LookupCtx << Found.getLookupName() << SS.getRange()
@@ -274,10 +274,10 @@ void Sema::LookupTemplateName(LookupResult &Found,
if (TemplateDecl *Template = Found.getAsSingle<TemplateDecl>())
Diag(Template->getLocation(), diag::note_previous_decl)
<< Template->getDeclName();
- } else
- Found.clear();
+ }
} else {
Found.clear();
+ Found.setLookupName(Name);
}
}
@@ -303,7 +303,7 @@ void Sema::LookupTemplateName(LookupResult &Found,
// - if the name is found in the context of the entire
// postfix-expression and does not name a class template, the name
// found in the class of the object expression is used, otherwise
- } else {
+ } else if (!Found.isSuppressingDiagnostics()) {
// - if the name found is a class template, it must refer to the same
// entity as the one found in the class of the object expression,
// otherwise the program is ill-formed.
@@ -311,8 +311,9 @@ void Sema::LookupTemplateName(LookupResult &Found,
Found.getFoundDecl()->getCanonicalDecl()
!= FoundOuter.getFoundDecl()->getCanonicalDecl()) {
Diag(Found.getNameLoc(),
- diag::err_nested_name_member_ref_lookup_ambiguous)
- << Found.getLookupName();
+ diag::ext_nested_name_member_ref_lookup_ambiguous)
+ << Found.getLookupName()
+ << ObjectType;
Diag(Found.getRepresentativeDecl()->getLocation(),
diag::note_ambig_member_ref_object_type)
<< ObjectType;
@@ -458,7 +459,9 @@ Sema::DeclPtrTy Sema::ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
- unsigned Depth, unsigned Position) {
+ unsigned Depth, unsigned Position,
+ SourceLocation EqualLoc,
+ TypeTy *DefaultArg) {
assert(S->isTemplateParamScope() &&
"Template type parameter not in template parameter scope!");
bool Invalid = false;
@@ -489,42 +492,31 @@ Sema::DeclPtrTy Sema::ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis,
IdResolver.AddDecl(Param);
}
- return DeclPtrTy::make(Param);
-}
-
-/// ActOnTypeParameterDefault - Adds a default argument (the type
-/// Default) to the given template type parameter (TypeParam).
-void Sema::ActOnTypeParameterDefault(DeclPtrTy TypeParam,
- SourceLocation EqualLoc,
- SourceLocation DefaultLoc,
- TypeTy *DefaultT) {
- TemplateTypeParmDecl *Parm
- = cast<TemplateTypeParmDecl>(TypeParam.getAs<Decl>());
-
- TypeSourceInfo *DefaultTInfo;
- GetTypeFromParser(DefaultT, &DefaultTInfo);
-
- assert(DefaultTInfo && "expected source information for type");
-
- // C++0x [temp.param]p9:
- // A default template-argument may be specified for any kind of
- // template-parameter that is not a template parameter pack.
- if (Parm->isParameterPack()) {
- Diag(DefaultLoc, diag::err_template_param_pack_default_arg);
- return;
- }
-
- // C++ [temp.param]p14:
- // A template-parameter shall not be used in its own default argument.
- // FIXME: Implement this check! Needs a recursive walk over the types.
-
- // Check the template argument itself.
- if (CheckTemplateArgument(Parm, DefaultTInfo)) {
- Parm->setInvalidDecl();
- return;
+ // Handle the default argument, if provided.
+ if (DefaultArg) {
+ TypeSourceInfo *DefaultTInfo;
+ GetTypeFromParser(DefaultArg, &DefaultTInfo);
+
+ assert(DefaultTInfo && "expected source information for type");
+
+ // C++0x [temp.param]p9:
+ // A default template-argument may be specified for any kind of
+ // template-parameter that is not a template parameter pack.
+ if (Ellipsis) {
+ Diag(EqualLoc, diag::err_template_param_pack_default_arg);
+ return DeclPtrTy::make(Param);
+ }
+
+ // Check the template argument itself.
+ if (CheckTemplateArgument(Param, DefaultTInfo)) {
+ Param->setInvalidDecl();
+      return DeclPtrTy::make(Param);
+ }
+
+ Param->setDefaultArgument(DefaultTInfo, false);
}
-
- Parm->setDefaultArgument(DefaultTInfo, false);
+
+ return DeclPtrTy::make(Param);
}
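Default arguments are now processed inside ActOnTypeParameter itself instead of a separate ActOnTypeParameterDefault callback, and a default on a parameter pack is rejected up front per C++0x [temp.param]p9. Sketch:

  template <typename T = int> struct Vec;       // default handled at parameter creation
  template <typename ...Ts = int> struct Bad;   // error: default on a template parameter pack
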
/// \brief Check that the type of a non-type template parameter is
@@ -548,7 +540,7 @@ Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) {
// (optionally cv-qualified) types:
//
// -- integral or enumeration type,
- if (T->isIntegralType() || T->isEnumeralType() ||
+ if (T->isIntegralOrEnumerationType() ||
// -- pointer to object or pointer to function,
(T->isPointerType() &&
(T->getAs<PointerType>()->getPointeeType()->isObjectType() ||
@@ -579,15 +571,13 @@ Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) {
return QualType();
}
-/// ActOnNonTypeTemplateParameter - Called when a C++ non-type
-/// template parameter (e.g., "int Size" in "template<int Size>
-/// class Array") has been parsed. S is the current scope and D is
-/// the parsed declarator.
Sema::DeclPtrTy Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
- unsigned Position) {
- TypeSourceInfo *TInfo = 0;
- QualType T = GetTypeForDeclarator(D, S, &TInfo);
+ unsigned Position,
+ SourceLocation EqualLoc,
+ ExprArg DefaultArg) {
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
assert(S->isTemplateParamScope() &&
"Non-type template parameter not in template parameter scope!");
@@ -621,34 +611,21 @@ Sema::DeclPtrTy Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
S->AddDecl(DeclPtrTy::make(Param));
IdResolver.AddDecl(Param);
}
- return DeclPtrTy::make(Param);
-}
-
-/// \brief Adds a default argument to the given non-type template
-/// parameter.
-void Sema::ActOnNonTypeTemplateParameterDefault(DeclPtrTy TemplateParamD,
- SourceLocation EqualLoc,
- ExprArg DefaultE) {
- NonTypeTemplateParmDecl *TemplateParm
- = cast<NonTypeTemplateParmDecl>(TemplateParamD.getAs<Decl>());
- Expr *Default = static_cast<Expr *>(DefaultE.get());
-
- // C++ [temp.param]p14:
- // A template-parameter shall not be used in its own default argument.
- // FIXME: Implement this check! Needs a recursive walk over the types.
-
- // Check the well-formedness of the default template argument.
- TemplateArgument Converted;
- if (CheckTemplateArgument(TemplateParm, TemplateParm->getType(), Default,
- Converted)) {
- TemplateParm->setInvalidDecl();
- return;
+
+ // Check the well-formedness of the default template argument, if provided.
+ if (Expr *Default = static_cast<Expr *>(DefaultArg.get())) {
+ TemplateArgument Converted;
+ if (CheckTemplateArgument(Param, Param->getType(), Default, Converted)) {
+ Param->setInvalidDecl();
+      return DeclPtrTy::make(Param);
+ }
+
+ Param->setDefaultArgument(DefaultArg.takeAs<Expr>(), false);
}
-
- TemplateParm->setDefaultArgument(DefaultE.takeAs<Expr>());
+
+ return DeclPtrTy::make(Param);
}
-
/// ActOnTemplateTemplateParameter - Called when a C++ template template
/// parameter (e.g. T in template <template <typename> class T> class array)
/// has been parsed. S is the current scope.
@@ -658,7 +635,9 @@ Sema::DeclPtrTy Sema::ActOnTemplateTemplateParameter(Scope* S,
IdentifierInfo *Name,
SourceLocation NameLoc,
unsigned Depth,
- unsigned Position) {
+ unsigned Position,
+ SourceLocation EqualLoc,
+ const ParsedTemplateArgument &Default) {
assert(S->isTemplateParamScope() &&
"Template template parameter not in template parameter scope!");
@@ -668,53 +647,33 @@ Sema::DeclPtrTy Sema::ActOnTemplateTemplateParameter(Scope* S,
TmpLoc, Depth, Position, Name,
(TemplateParameterList*)Params);
- // Make sure the parameter is valid.
- // FIXME: Decl object is not currently invalidated anywhere so this doesn't
- // do anything yet. However, if the template parameter list or (eventual)
- // default value is ever invalidated, that will propagate here.
- bool Invalid = false;
- if (Invalid) {
- Param->setInvalidDecl();
- }
-
- // If the tt-param has a name, then link the identifier into the scope
- // and lookup mechanisms.
+ // If the template template parameter has a name, then link the identifier
+ // into the scope and lookup mechanisms.
if (Name) {
S->AddDecl(DeclPtrTy::make(Param));
IdResolver.AddDecl(Param);
}
- return DeclPtrTy::make(Param);
-}
-
-/// \brief Adds a default argument to the given template template
-/// parameter.
-void Sema::ActOnTemplateTemplateParameterDefault(DeclPtrTy TemplateParamD,
- SourceLocation EqualLoc,
- const ParsedTemplateArgument &Default) {
- TemplateTemplateParmDecl *TemplateParm
- = cast<TemplateTemplateParmDecl>(TemplateParamD.getAs<Decl>());
-
- // C++ [temp.param]p14:
- // A template-parameter shall not be used in its own default argument.
- // FIXME: Implement this check! Needs a recursive walk over the types.
-
- // Check only that we have a template template argument. We don't want to
- // try to check well-formedness now, because our template template parameter
- // might have dependent types in its template parameters, which we wouldn't
- // be able to match now.
- //
- // If none of the template template parameter's template arguments mention
- // other template parameters, we could actually perform more checking here.
- // However, it isn't worth doing.
- TemplateArgumentLoc DefaultArg = translateTemplateArgument(*this, Default);
- if (DefaultArg.getArgument().getAsTemplate().isNull()) {
- Diag(DefaultArg.getLocation(), diag::err_template_arg_not_class_template)
- << DefaultArg.getSourceRange();
- return;
+ if (!Default.isInvalid()) {
+ // Check only that we have a template template argument. We don't want to
+ // try to check well-formedness now, because our template template parameter
+ // might have dependent types in its template parameters, which we wouldn't
+ // be able to match now.
+ //
+ // If none of the template template parameter's template arguments mention
+ // other template parameters, we could actually perform more checking here.
+ // However, it isn't worth doing.
+ TemplateArgumentLoc DefaultArg = translateTemplateArgument(*this, Default);
+ if (DefaultArg.getArgument().getAsTemplate().isNull()) {
+ Diag(DefaultArg.getLocation(), diag::err_template_arg_not_class_template)
+ << DefaultArg.getSourceRange();
+ return DeclPtrTy::make(Param);
+ }
+
+ Param->setDefaultArgument(DefaultArg, false);
}
- TemplateParm->setDefaultArgument(DefaultArg);
+ return DeclPtrTy::make(Param);
}
/// ActOnTemplateParameterList - Builds a TemplateParameterList that
@@ -925,7 +884,7 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
NewClass->setDescribedClassTemplate(NewTemplate);
// Build the type for the class template declaration now.
- QualType T = NewTemplate->getInjectedClassNameSpecialization(Context);
+ QualType T = NewTemplate->getInjectedClassNameSpecialization();
T = Context.getInjectedClassNameType(NewClass, T);
assert(T->isDependentType() && "Class template type is not dependent?");
(void)T;
@@ -1144,7 +1103,7 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
NewNonTypeParm->getLocation(),
NewNonTypeParm->getDefaultArgument()->getSourceRange())) {
NewNonTypeParm->getDefaultArgument()->Destroy(Context);
- NewNonTypeParm->setDefaultArgument(0);
+ NewNonTypeParm->removeDefaultArgument();
}
// Merge default arguments for non-type template parameters
@@ -1165,7 +1124,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
// expression that points to a previous template template
// parameter.
NewNonTypeParm->setDefaultArgument(
- OldNonTypeParm->getDefaultArgument());
+ OldNonTypeParm->getDefaultArgument(),
+ /*Inherited=*/ true);
PreviousDefaultArgLoc = OldNonTypeParm->getDefaultArgumentLoc();
} else if (NewNonTypeParm->hasDefaultArgument()) {
SawDefaultArgument = true;
@@ -1180,7 +1140,7 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
DiagnoseDefaultTemplateArgument(*this, TPC,
NewTemplateParm->getLocation(),
NewTemplateParm->getDefaultArgument().getSourceRange()))
- NewTemplateParm->setDefaultArgument(TemplateArgumentLoc());
+ NewTemplateParm->removeDefaultArgument();
// Merge default arguments for template template parameters
TemplateTemplateParmDecl *OldTemplateParm
@@ -1199,7 +1159,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
// FIXME: We need to create a new kind of "default argument" expression
// that points to a previous template template parameter.
NewTemplateParm->setDefaultArgument(
- OldTemplateParm->getDefaultArgument());
+ OldTemplateParm->getDefaultArgument(),
+ /*Inherited=*/ true);
PreviousDefaultArgLoc
= OldTemplateParm->getDefaultArgument().getLocation();
} else if (NewTemplateParm->hasDefaultArgument()) {
@@ -1272,7 +1233,8 @@ Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
TemplateParameterList **ParamLists,
unsigned NumParamLists,
bool IsFriend,
- bool &IsExplicitSpecialization) {
+ bool &IsExplicitSpecialization,
+ bool &Invalid) {
IsExplicitSpecialization = false;
// Find the template-ids that occur within the nested-name-specifier. These
@@ -1350,6 +1312,7 @@ Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
diag::err_template_spec_needs_template_parameters)
<< TemplateId
<< SS.getRange();
+ Invalid = true;
} else {
Diag(SS.getRange().getBegin(), diag::err_template_spec_needs_header)
<< SS.getRange()
@@ -1412,7 +1375,13 @@ Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
<< ExplicitSpecializationsInSpecifier.back();
ExplicitSpecializationsInSpecifier.pop_back();
}
-
+
+ // We have a template parameter list with no corresponding scope, which
+ // means that the resulting template declaration can't be instantiated
+ // properly (we'll end up with dependent nodes when we shouldn't).
+ if (!isExplicitSpecHeader)
+ Invalid = true;
+
++Idx;
}
}
@@ -1445,7 +1414,6 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
"Converted template argument list is too short!");
QualType CanonType;
- bool IsCurrentInstantiation = false;
if (Name.isDependent() ||
TemplateSpecializationType::anyDependentTemplateArguments(
@@ -1502,7 +1470,6 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// class name type of the record we just found.
assert(ICNT.isCanonical());
CanonType = ICNT;
- IsCurrentInstantiation = true;
break;
}
}
@@ -1540,8 +1507,7 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// Build the fully-sugared type for this class template
// specialization, which refers back to the class template
// specialization we created or found.
- return Context.getTemplateSpecializationType(Name, TemplateArgs, CanonType,
- IsCurrentInstantiation);
+ return Context.getTemplateSpecializationType(Name, TemplateArgs, CanonType);
}
Action::TypeResult
@@ -1687,12 +1653,18 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
/// example, given "MetaFun::template apply", the scope specifier \p
/// SS will be "MetaFun::", \p TemplateKWLoc contains the location
/// of the "template" keyword, and "apply" is the \p Name.
-Sema::TemplateTy
-Sema::ActOnDependentTemplateName(SourceLocation TemplateKWLoc,
- CXXScopeSpec &SS,
- UnqualifiedId &Name,
- TypeTy *ObjectType,
- bool EnteringContext) {
+TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
+ SourceLocation TemplateKWLoc,
+ CXXScopeSpec &SS,
+ UnqualifiedId &Name,
+ TypeTy *ObjectType,
+ bool EnteringContext,
+ TemplateTy &Result) {
+ if (TemplateKWLoc.isValid() && S && !S->getTemplateParamParent() &&
+ !getLangOptions().CPlusPlus0x)
+ Diag(TemplateKWLoc, diag::ext_template_outside_of_template)
+ << FixItHint::CreateRemoval(TemplateKWLoc);
+
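The new warning fires for C++03 code that uses the 'template' disambiguation keyword where no template parameter scope encloses it (C++0x permits this, following DR468), and the fix-it removes the keyword. Sketch, names hypothetical:

  struct A { template <typename T> static int f(); };
  int n = A::template f<int>();   // outside any template: warning plus removal fix-it
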
DeclContext *LookupCtx = 0;
if (SS.isSet())
LookupCtx = computeDeclContext(SS, EnteringContext);
@@ -1714,26 +1686,25 @@ Sema::ActOnDependentTemplateName(SourceLocation TemplateKWLoc,
// the "template" keyword prior to a template-name that was not a
// dependent name. C++ DR468 relaxed this requirement (the
// "template" keyword is now permitted). We follow the C++0x
- // rules, even in C++03 mode, retroactively applying the DR.
- TemplateTy Template;
+ // rules, even in C++03 mode with a warning, retroactively applying the DR.
bool MemberOfUnknownSpecialization;
TemplateNameKind TNK = isTemplateName(0, SS, Name, ObjectType,
- EnteringContext, Template,
+ EnteringContext, Result,
MemberOfUnknownSpecialization);
if (TNK == TNK_Non_template && LookupCtx->isDependentContext() &&
isa<CXXRecordDecl>(LookupCtx) &&
cast<CXXRecordDecl>(LookupCtx)->hasAnyDependentBases()) {
- // This is a dependent template.
+ // This is a dependent template. Handle it below.
} else if (TNK == TNK_Non_template) {
Diag(Name.getSourceRange().getBegin(),
diag::err_template_kw_refers_to_non_template)
<< GetNameFromUnqualifiedId(Name)
<< Name.getSourceRange()
<< TemplateKWLoc;
- return TemplateTy();
+ return TNK_Non_template;
} else {
// We found something; return it.
- return Template;
+ return TNK;
}
}
@@ -1742,12 +1713,14 @@ Sema::ActOnDependentTemplateName(SourceLocation TemplateKWLoc,
switch (Name.getKind()) {
case UnqualifiedId::IK_Identifier:
- return TemplateTy::make(Context.getDependentTemplateName(Qualifier,
- Name.Identifier));
+ Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
+ Name.Identifier));
+ return TNK_Dependent_template_name;
case UnqualifiedId::IK_OperatorFunctionId:
- return TemplateTy::make(Context.getDependentTemplateName(Qualifier,
+ Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
Name.OperatorFunctionId.Operator));
+ return TNK_Dependent_template_name;
case UnqualifiedId::IK_LiteralOperatorId:
assert(false && "We don't support these; Parse shouldn't have allowed propagation");
@@ -1761,7 +1734,7 @@ Sema::ActOnDependentTemplateName(SourceLocation TemplateKWLoc,
<< GetNameFromUnqualifiedId(Name)
<< Name.getSourceRange()
<< TemplateKWLoc;
- return TemplateTy();
+ return TNK_Non_template;
}
bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
@@ -2768,7 +2741,7 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// conversions (4.7) are applied.
QualType ParamType = InstantiatedParamType;
QualType ArgType = Arg->getType();
- if (ParamType->isIntegralType() || ParamType->isEnumeralType()) {
+ if (ParamType->isIntegralOrEnumerationType()) {
// C++ [temp.arg.nontype]p1:
// A template-argument for a non-type, non-template
// template-parameter shall be one of:
@@ -2778,7 +2751,7 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// -- the name of a non-type template-parameter; or
SourceLocation NonConstantLoc;
llvm::APSInt Value;
- if (!ArgType->isIntegralType() && !ArgType->isEnumeralType()) {
+ if (!ArgType->isIntegralOrEnumerationType()) {
Diag(Arg->getSourceRange().getBegin(),
diag::err_template_arg_not_integral_or_enumeral)
<< ArgType << Arg->getSourceRange();
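Both checks now go through the unified isIntegralOrEnumerationType() predicate rather than separate isIntegralType()/isEnumeralType() tests; behavior for valid code is unchanged. Sketch of arguments accepted on this path:

  enum Color { Red, Green };
  template <int N> struct A { };
  template <Color C> struct P { };
  A<3> a;      // integral argument
  P<Green> p;  // enumeration argument
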
@@ -3237,9 +3210,32 @@ Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
return false;
}
- if (isa<TemplateTypeParmDecl>(*OldParm)) {
- // Okay; all template type parameters are equivalent (since we
- // know we're at the same index).
+ if (TemplateTypeParmDecl *OldTTP
+ = dyn_cast<TemplateTypeParmDecl>(*OldParm)) {
+ // Template type parameters are equivalent if either both are template
+ // type parameter packs or neither are (since we know we're at the same
+ // index).
+ TemplateTypeParmDecl *NewTTP = cast<TemplateTypeParmDecl>(*NewParm);
+ if (OldTTP->isParameterPack() != NewTTP->isParameterPack()) {
+ // FIXME: Implement the rules in C++0x [temp.arg.template]p5 that
+ // allow one to match a template parameter pack in the template
+ // parameter list of a template template parameter to one or more
+ // template parameters in the template parameter list of the
+ // corresponding template template argument.
+ if (Complain) {
+ unsigned NextDiag = diag::err_template_parameter_pack_non_pack;
+ if (TemplateArgLoc.isValid()) {
+ Diag(TemplateArgLoc,
+ diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_parameter_pack_non_pack;
+ }
+ Diag(NewTTP->getLocation(), NextDiag)
+ << 0 << NewTTP->isParameterPack();
+ Diag(OldTTP->getLocation(), diag::note_template_parameter_pack_here)
+ << 0 << OldTTP->isParameterPack();
+ }
+ return false;
+ }
} else if (NonTypeTemplateParmDecl *OldNTTP
= dyn_cast<NonTypeTemplateParmDecl>(*OldParm)) {
// The types of non-type template parameters must agree.
@@ -3634,12 +3630,21 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
// template.
// FIXME: We probably shouldn't complain about these headers for
// friend declarations.
+ bool Invalid = false;
TemplateParameterList *TemplateParams
= MatchTemplateParametersToScopeSpecifier(TemplateNameLoc, SS,
(TemplateParameterList**)TemplateParameterLists.get(),
TemplateParameterLists.size(),
TUK == TUK_Friend,
- isExplicitSpecialization);
+ isExplicitSpecialization,
+ Invalid);
+ if (Invalid)
+ return true;
+
+ unsigned NumMatchedTemplateParamLists = TemplateParameterLists.size();
+ if (TemplateParams)
+ --NumMatchedTemplateParamLists;
+
if (TemplateParams && TemplateParams->size() > 0) {
isPartialSpecialization = true;
@@ -3660,7 +3665,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
Diag(NTTP->getDefaultArgumentLoc(),
diag::err_default_arg_in_partial_spec)
<< DefArg->getSourceRange();
- NTTP->setDefaultArgument(0);
+ NTTP->removeDefaultArgument();
DefArg->Destroy(Context);
}
} else {
@@ -3669,7 +3674,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
Diag(TTP->getDefaultArgument().getLocation(),
diag::err_default_arg_in_partial_spec)
<< TTP->getDefaultArgument().getSourceRange();
- TTP->setDefaultArgument(TemplateArgumentLoc());
+ TTP->removeDefaultArgument();
}
}
}
@@ -3831,6 +3836,11 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
PrevPartial,
SequenceNumber);
SetNestedNameSpecifier(Partial, SS);
+ if (NumMatchedTemplateParamLists > 0) {
+ Partial->setTemplateParameterListsInfo(Context,
+ NumMatchedTemplateParamLists,
+ (TemplateParameterList**) TemplateParameterLists.release());
+ }
if (PrevPartial) {
ClassTemplate->getPartialSpecializations().RemoveNode(PrevPartial);
@@ -3888,6 +3898,11 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
Converted,
PrevDecl);
SetNestedNameSpecifier(Specialization, SS);
+ if (NumMatchedTemplateParamLists > 0) {
+ Specialization->setTemplateParameterListsInfo(Context,
+ NumMatchedTemplateParamLists,
+ (TemplateParameterList**) TemplateParameterLists.release());
+ }
if (PrevDecl) {
ClassTemplate->getSpecializations().RemoveNode(PrevDecl);
@@ -3955,8 +3970,11 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
TypeSourceInfo *WrittenTy
= Context.getTemplateSpecializationTypeInfo(Name, TemplateNameLoc,
TemplateArgs, CanonType);
- if (TUK != TUK_Friend)
+ if (TUK != TUK_Friend) {
Specialization->setTypeAsWritten(WrittenTy);
+ if (TemplateParams)
+ Specialization->setTemplateKeywordLoc(TemplateParams->getTemplateLoc());
+ }
TemplateArgsIn.release();
// C++ [temp.expl.spec]p9:
@@ -4050,7 +4068,7 @@ static void StripImplicitInstantiation(NamedDecl *D) {
/// \param PrevPointOfInstantiation if valid, indicates where the previus
/// declaration was instantiated (either implicitly or explicitly).
///
-/// \param SuppressNew will be set to true to indicate that the new
+/// \param HasNoEffect will be set to true to indicate that the new
/// specialization or instantiation has no effect and should be ignored.
///
/// \returns true if there was an error that should prevent the introduction of
@@ -4061,8 +4079,8 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPointOfInstantiation,
- bool &SuppressNew) {
- SuppressNew = false;
+ bool &HasNoEffect) {
+ HasNoEffect = false;
switch (NewTSK) {
case TSK_Undeclared:
@@ -4119,7 +4137,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
switch (PrevTSK) {
case TSK_ExplicitInstantiationDeclaration:
// This explicit instantiation declaration is redundant (that's okay).
- SuppressNew = true;
+ HasNoEffect = true;
return false;
case TSK_Undeclared:
@@ -4134,7 +4152,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
// of a template appears after a declaration of an explicit
// specialization for that template, the explicit instantiation has no
// effect.
- SuppressNew = true;
+ HasNoEffect = true;
return false;
case TSK_ExplicitInstantiationDefinition:
@@ -4148,7 +4166,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
diag::note_explicit_instantiation_definition_here);
assert(PrevPointOfInstantiation.isValid() &&
"Explicit instantiation without point of instantiation?");
- SuppressNew = true;
+ HasNoEffect = true;
return false;
}
break;
@@ -4177,7 +4195,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
Diag(PrevDecl->getLocation(),
diag::note_previous_template_specialization);
}
- SuppressNew = true;
+ HasNoEffect = true;
return false;
case TSK_ExplicitInstantiationDeclaration:
@@ -4194,7 +4212,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
<< PrevDecl;
Diag(PrevPointOfInstantiation,
diag::note_previous_explicit_instantiation);
- SuppressNew = true;
+ HasNoEffect = true;
return false;
}
break;
@@ -4343,14 +4361,14 @@ Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD,
= Specialization->getTemplateSpecializationInfo();
assert(SpecInfo && "Function template specialization info missing?");
- bool SuppressNew = false;
+ bool HasNoEffect = false;
if (!isFriend &&
CheckSpecializationInstantiationRedecl(FD->getLocation(),
TSK_ExplicitSpecialization,
Specialization,
SpecInfo->getTemplateSpecializationKind(),
SpecInfo->getPointOfInstantiation(),
- SuppressNew))
+ HasNoEffect))
return true;
// Mark the prior declaration as an explicit specialization, so that later
@@ -4477,13 +4495,13 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
// use occurs; no diagnostic is required.
assert(MSInfo && "Member specialization info missing?");
- bool SuppressNew = false;
+ bool HasNoEffect = false;
if (CheckSpecializationInstantiationRedecl(Member->getLocation(),
TSK_ExplicitSpecialization,
Instantiation,
MSInfo->getTemplateSpecializationKind(),
MSInfo->getPointOfInstantiation(),
- SuppressNew))
+ HasNoEffect))
return true;
// Check the scope of this explicit specialization.
@@ -4544,13 +4562,21 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
}
/// \brief Check the scope of an explicit instantiation.
-static void CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
+///
+/// \returns true if a serious error occurs, false otherwise.
+static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
SourceLocation InstLoc,
bool WasQualifiedName) {
DeclContext *ExpectedContext
= D->getDeclContext()->getEnclosingNamespaceContext()->getLookupContext();
DeclContext *CurContext = S.CurContext->getLookupContext();
+ if (CurContext->isRecord()) {
+ S.Diag(InstLoc, diag::err_explicit_instantiation_in_class)
+ << D;
+ return true;
+ }
+
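Explicit instantiations written at class scope are now rejected outright rather than slipping through to the later namespace checks. A sketch of the rejected form:

  template <typename T> struct X { void f() { } };
  struct Y {
    template void X<int>::f();   // error: explicit instantiation in class scope
  };
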
// C++0x [temp.explicit]p2:
// An explicit instantiation shall appear in an enclosing namespace of its
// template.
@@ -4571,7 +4597,7 @@ static void CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
: diag::warn_explicit_instantiation_out_of_scope_0x)
<< D;
S.Diag(D->getLocation(), diag::note_explicit_instantiation_here);
- return;
+ return false;
}
// C++0x [temp.explicit]p2:
@@ -4580,10 +4606,10 @@ static void CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
// its template is declared or, if that namespace is inline (7.3.1), any
// namespace from its enclosing namespace set.
if (WasQualifiedName)
- return;
+ return false;
if (CurContext->Equals(ExpectedContext))
- return;
+ return false;
S.Diag(InstLoc,
S.getLangOptions().CPlusPlus0x?
@@ -4591,6 +4617,7 @@ static void CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
: diag::warn_explicit_instantiation_unqualified_wrong_namespace_0x)
<< D << ExpectedContext;
S.Diag(D->getLocation(), diag::note_explicit_instantiation_here);
+ return false;
}
/// \brief Determine whether the given scope specifier has a template-id in it.
@@ -4685,42 +4712,46 @@ Sema::ActOnExplicitInstantiation(Scope *S,
ClassTemplateSpecializationDecl *PrevDecl
= ClassTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos);
+ TemplateSpecializationKind PrevDecl_TSK
+ = PrevDecl ? PrevDecl->getTemplateSpecializationKind() : TSK_Undeclared;
+
// C++0x [temp.explicit]p2:
// [...] An explicit instantiation shall appear in an enclosing
// namespace of its template. [...]
//
// This is C++ DR 275.
- CheckExplicitInstantiationScope(*this, ClassTemplate, TemplateNameLoc,
- SS.isSet());
+ if (CheckExplicitInstantiationScope(*this, ClassTemplate, TemplateNameLoc,
+ SS.isSet()))
+ return true;
ClassTemplateSpecializationDecl *Specialization = 0;
bool ReusedDecl = false;
+ bool HasNoEffect = false;
if (PrevDecl) {
- bool SuppressNew = false;
if (CheckSpecializationInstantiationRedecl(TemplateNameLoc, TSK,
- PrevDecl,
- PrevDecl->getSpecializationKind(),
+ PrevDecl, PrevDecl_TSK,
PrevDecl->getPointOfInstantiation(),
- SuppressNew))
+ HasNoEffect))
return DeclPtrTy::make(PrevDecl);
- if (SuppressNew)
- return DeclPtrTy::make(PrevDecl);
-
- if (PrevDecl->getSpecializationKind() == TSK_ImplicitInstantiation ||
- PrevDecl->getSpecializationKind() == TSK_Undeclared) {
+ // Even though HasNoEffect == true means that this explicit instantiation
+ // has no effect on semantics, we go on to put its syntax in the AST.
+
+ if (PrevDecl_TSK == TSK_ImplicitInstantiation ||
+ PrevDecl_TSK == TSK_Undeclared) {
// Since the only prior class template specialization with these
// arguments was referenced but not declared, reuse that
- // declaration node as our own, updating its source location to
- // reflect our new declaration.
+ // declaration node as our own, updating the source location
+ // for the template name to reflect our new declaration.
+ // (Other source locations will be updated later.)
Specialization = PrevDecl;
Specialization->setLocation(TemplateNameLoc);
PrevDecl = 0;
ReusedDecl = true;
}
}
-
+
if (!Specialization) {
// Create a new class template specialization declaration node for
// this explicit specialization.
@@ -4732,15 +4763,16 @@ Sema::ActOnExplicitInstantiation(Scope *S,
Converted, PrevDecl);
SetNestedNameSpecifier(Specialization, SS);
- if (PrevDecl) {
- // Remove the previous declaration from the folding set, since we want
- // to introduce a new declaration.
- ClassTemplate->getSpecializations().RemoveNode(PrevDecl);
- ClassTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos);
- }
-
- // Insert the new specialization.
- ClassTemplate->getSpecializations().InsertNode(Specialization, InsertPos);
+ if (!HasNoEffect) {
+ if (PrevDecl) {
+ // Remove the previous declaration from the folding set, since we want
+ // to introduce a new declaration.
+ ClassTemplate->getSpecializations().RemoveNode(PrevDecl);
+ ClassTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos);
+ }
+ // Insert the new specialization.
+ ClassTemplate->getSpecializations().InsertNode(Specialization, InsertPos);
+ }
}
// Build the fully-sugared type for this explicit instantiation as
@@ -4757,12 +4789,21 @@ Sema::ActOnExplicitInstantiation(Scope *S,
Specialization->setTypeAsWritten(WrittenTy);
TemplateArgsIn.release();
- if (!ReusedDecl) {
- // Add the explicit instantiation into its lexical context. However,
- // since explicit instantiations are never found by name lookup, we
- // just put it into the declaration context directly.
- Specialization->setLexicalDeclContext(CurContext);
- CurContext->addDecl(Specialization);
+ // Set source locations for keywords.
+ Specialization->setExternLoc(ExternLoc);
+ Specialization->setTemplateKeywordLoc(TemplateLoc);
+
+ // Add the explicit instantiation into its lexical context. However,
+ // since explicit instantiations are never found by name lookup, we
+ // just put it into the declaration context directly.
+ Specialization->setLexicalDeclContext(CurContext);
+ CurContext->addDecl(Specialization);
+
+ // Syntax is now OK, so return if it has no other effect on semantics.
+ if (HasNoEffect) {
+ // Set the template specialization kind.
+ Specialization->setTemplateSpecializationKind(TSK);
+ return DeclPtrTy::make(Specialization);
}
// C++ [temp.explicit]p3:
@@ -4777,8 +4818,10 @@ Sema::ActOnExplicitInstantiation(Scope *S,
Specialization->getDefinition());
if (!Def)
InstantiateClassTemplateSpecialization(TemplateNameLoc, Specialization, TSK);
- else if (TSK == TSK_ExplicitInstantiationDefinition)
+ else if (TSK == TSK_ExplicitInstantiationDefinition) {
MarkVTableUsed(TemplateNameLoc, Specialization, true);
+ Specialization->setPointOfInstantiation(Def->getPointOfInstantiation());
+ }
// Instantiate the members of this class template specialization.
Def = cast_or_null<ClassTemplateSpecializationDecl>(
@@ -4795,6 +4838,8 @@ Sema::ActOnExplicitInstantiation(Scope *S,
InstantiateClassTemplateSpecializationMembers(TemplateNameLoc, Def, TSK);
}
+ // Set the template specialization kind.
+ Specialization->setTemplateSpecializationKind(TSK);
return DeclPtrTy::make(Specialization);
}
@@ -4847,7 +4892,7 @@ Sema::ActOnExplicitInstantiation(Scope *S,
//
// C++98 has the same restriction, just worded differently.
if (!ScopeSpecifierHasTemplateId(SS))
- Diag(TemplateLoc, diag::err_explicit_instantiation_without_qualified_id)
+ Diag(TemplateLoc, diag::ext_explicit_instantiation_without_qualified_id)
<< Record << SS.getRange();
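The diagnostic is downgraded from a hard error to an extension warning (err_ becomes ext_), so explicit instantiations whose qualifier lacks a template-id, for example one spelled through a typedef, are now accepted with a warning. Sketch, assuming that reading of ScopeSpecifierHasTemplateId:

  template <typename T> struct Outer { struct Inner { }; };
  typedef Outer<int> OuterInt;
  template struct OuterInt::Inner;   // previously an error, now a warning
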
// C++0x [temp.explicit]p2:
@@ -4872,15 +4917,15 @@ Sema::ActOnExplicitInstantiation(Scope *S,
PrevDecl = Record;
if (PrevDecl) {
MemberSpecializationInfo *MSInfo = PrevDecl->getMemberSpecializationInfo();
- bool SuppressNew = false;
+ bool HasNoEffect = false;
assert(MSInfo && "No member specialization information?");
if (CheckSpecializationInstantiationRedecl(TemplateLoc, TSK,
PrevDecl,
MSInfo->getTemplateSpecializationKind(),
MSInfo->getPointOfInstantiation(),
- SuppressNew))
+ HasNoEffect))
return true;
- if (SuppressNew)
+ if (HasNoEffect)
return TagD;
}
@@ -4947,7 +4992,8 @@ Sema::DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
S = S->getParent();
// Determine the type of the declaration.
- QualType R = GetTypeForDeclarator(D, S, 0);
+ TypeSourceInfo *T = GetTypeForDeclarator(D, S);
+ QualType R = T->getType();
if (R.isNull())
return true;
@@ -5019,7 +5065,7 @@ Sema::DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// C++98 has the same restriction, just worded differently.
if (!ScopeSpecifierHasTemplateId(D.getCXXScopeSpec()))
Diag(D.getIdentifierLoc(),
- diag::err_explicit_instantiation_without_qualified_id)
+ diag::ext_explicit_instantiation_without_qualified_id)
<< Prev << D.getCXXScopeSpec().getRange();
// Check the scope of this explicit instantiation.
@@ -5028,13 +5074,13 @@ Sema::DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// Verify that it is okay to explicitly instantiate here.
MemberSpecializationInfo *MSInfo = Prev->getMemberSpecializationInfo();
assert(MSInfo && "Missing static data member specialization info?");
- bool SuppressNew = false;
+ bool HasNoEffect = false;
if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK, Prev,
MSInfo->getTemplateSpecializationKind(),
MSInfo->getPointOfInstantiation(),
- SuppressNew))
+ HasNoEffect))
return true;
- if (SuppressNew)
+ if (HasNoEffect)
return DeclPtrTy();
// Instantiate static data member.
@@ -5131,17 +5177,17 @@ Sema::DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
PrevDecl = Specialization;
if (PrevDecl) {
- bool SuppressNew = false;
+ bool HasNoEffect = false;
if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK,
PrevDecl,
PrevDecl->getTemplateSpecializationKind(),
PrevDecl->getPointOfInstantiation(),
- SuppressNew))
+ HasNoEffect))
return true;
// FIXME: We may still want to build some representation of this
// explicit specialization.
- if (SuppressNew)
+ if (HasNoEffect)
return DeclPtrTy();
}
@@ -5163,7 +5209,7 @@ Sema::DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
D.getCXXScopeSpec().isSet() &&
!ScopeSpecifierHasTemplateId(D.getCXXScopeSpec()))
Diag(D.getIdentifierLoc(),
- diag::err_explicit_instantiation_without_qualified_id)
+ diag::ext_explicit_instantiation_without_qualified_id)
<< Specialization << D.getCXXScopeSpec().getRange();
CheckExplicitInstantiationScope(*this,
@@ -5200,31 +5246,20 @@ Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
return Context.getDependentNameType(Kwd, NNS, Name).getAsOpaquePtr();
}
-static void FillTypeLoc(DependentNameTypeLoc TL,
- SourceLocation TypenameLoc,
- SourceRange QualifierRange,
- SourceLocation NameLoc) {
- TL.setKeywordLoc(TypenameLoc);
- TL.setQualifierRange(QualifierRange);
- TL.setNameLoc(NameLoc);
-}
-
-static void FillTypeLoc(ElaboratedTypeLoc TL,
- SourceLocation TypenameLoc,
- SourceRange QualifierRange) {
- // FIXME: inner locations.
- TL.setKeywordLoc(TypenameLoc);
- TL.setQualifierRange(QualifierRange);
-}
-
Sema::TypeResult
-Sema::ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS,
- const IdentifierInfo &II, SourceLocation IdLoc) {
+Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, const IdentifierInfo &II,
+ SourceLocation IdLoc) {
NestedNameSpecifier *NNS
= static_cast<NestedNameSpecifier *>(SS.getScopeRep());
if (!NNS)
return true;
+ if (TypenameLoc.isValid() && S && !S->getTemplateParamParent() &&
+ !getLangOptions().CPlusPlus0x)
+ Diag(TypenameLoc, diag::ext_typename_outside_of_template)
+ << FixItHint::CreateRemoval(TypenameLoc);
+
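As with the 'template' keyword above, 'typename' used outside any template parameter scope is now an extension warning in C++03 (C++0x allows it, per DR382), with a fix-it removing the keyword. Sketch:

  struct A { typedef int type; };
  typename A::type x = 0;   // outside a template: warning plus removal fix-it
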
QualType T = CheckTypenameType(ETK_Typename, NNS, II,
TypenameLoc, SS.getRange(), IdLoc);
if (T.isNull())
@@ -5233,44 +5268,82 @@ Sema::ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS,
TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
if (isa<DependentNameType>(T)) {
DependentNameTypeLoc TL = cast<DependentNameTypeLoc>(TSI->getTypeLoc());
- // FIXME: fill inner type loc
- FillTypeLoc(TL, TypenameLoc, SS.getRange(), IdLoc);
+ TL.setKeywordLoc(TypenameLoc);
+ TL.setQualifierRange(SS.getRange());
+ TL.setNameLoc(IdLoc);
} else {
ElaboratedTypeLoc TL = cast<ElaboratedTypeLoc>(TSI->getTypeLoc());
- // FIXME: fill inner type loc
- FillTypeLoc(TL, TypenameLoc, SS.getRange());
+ TL.setKeywordLoc(TypenameLoc);
+ TL.setQualifierRange(SS.getRange());
+ cast<TypeSpecTypeLoc>(TL.getNamedTypeLoc()).setNameLoc(IdLoc);
}
return CreateLocInfoType(T, TSI).getAsOpaquePtr();
}
Sema::TypeResult
-Sema::ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS,
- SourceLocation TemplateLoc, TypeTy *Ty) {
- QualType T = GetTypeFromParser(Ty);
+Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, SourceLocation TemplateLoc,
+ TypeTy *Ty) {
+ if (TypenameLoc.isValid() && S && !S->getTemplateParamParent() &&
+ !getLangOptions().CPlusPlus0x)
+ Diag(TypenameLoc, diag::ext_typename_outside_of_template)
+ << FixItHint::CreateRemoval(TypenameLoc);
+
+ TypeSourceInfo *InnerTSI = 0;
+ QualType T = GetTypeFromParser(Ty, &InnerTSI);
NestedNameSpecifier *NNS
= static_cast<NestedNameSpecifier *>(SS.getScopeRep());
- const TemplateSpecializationType *TemplateId
- = T->getAs<TemplateSpecializationType>();
- assert(TemplateId && "Expected a template specialization type");
+
+ assert(isa<TemplateSpecializationType>(T) &&
+ "Expected a template specialization type");
if (computeDeclContext(SS, false)) {
// If we can compute a declaration context, then the "typename"
// keyword was superfluous. Just build an ElaboratedType to keep
// track of the nested-name-specifier.
+
+ // Push the inner type, preserving its source locations if possible.
+ TypeLocBuilder Builder;
+ if (InnerTSI)
+ Builder.pushFullCopy(InnerTSI->getTypeLoc());
+ else
+ Builder.push<TemplateSpecializationTypeLoc>(T).initialize(TemplateLoc);
+
T = Context.getElaboratedType(ETK_Typename, NNS, T);
- TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
- ElaboratedTypeLoc TL = cast<ElaboratedTypeLoc>(TSI->getTypeLoc());
- // FIXME: fill inner type loc
- FillTypeLoc(TL, TypenameLoc, SS.getRange());
+ ElaboratedTypeLoc TL = Builder.push<ElaboratedTypeLoc>(T);
+ TL.setKeywordLoc(TypenameLoc);
+ TL.setQualifierRange(SS.getRange());
+
+ TypeSourceInfo *TSI = Builder.getTypeSourceInfo(Context, T);
return CreateLocInfoType(T, TSI).getAsOpaquePtr();
}
- T = Context.getDependentNameType(ETK_Typename, NNS, TemplateId);
+ // TODO: it's really silly that we make a template specialization
+ // type earlier only to drop it again here.
+ TemplateSpecializationType *TST = cast<TemplateSpecializationType>(T);
+ DependentTemplateName *DTN =
+ TST->getTemplateName().getAsDependentTemplateName();
+ assert(DTN && "dependent template has non-dependent name?");
+ T = Context.getDependentTemplateSpecializationType(ETK_Typename, NNS,
+ DTN->getIdentifier(),
+ TST->getNumArgs(),
+ TST->getArgs());
TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
- DependentNameTypeLoc TL = cast<DependentNameTypeLoc>(TSI->getTypeLoc());
- // FIXME: fill inner type loc
- FillTypeLoc(TL, TypenameLoc, SS.getRange(), TemplateLoc);
+ DependentTemplateSpecializationTypeLoc TL =
+ cast<DependentTemplateSpecializationTypeLoc>(TSI->getTypeLoc());
+ if (InnerTSI) {
+ TemplateSpecializationTypeLoc TSTL =
+ cast<TemplateSpecializationTypeLoc>(InnerTSI->getTypeLoc());
+ TL.setLAngleLoc(TSTL.getLAngleLoc());
+ TL.setRAngleLoc(TSTL.getRAngleLoc());
+ for (unsigned I = 0, E = TST->getNumArgs(); I != E; ++I)
+ TL.setArgLocInfo(I, TSTL.getArgLocInfo(I));
+ } else {
+ TL.initializeLocal(SourceLocation());
+ }
+ TL.setKeywordLoc(TypenameLoc);
+ TL.setQualifierRange(SS.getRange());
return CreateLocInfoType(T, TSI).getAsOpaquePtr();
}
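
The two hunks above change behavior on both paths of ActOnTypenameType: a superfluous 'typename' outside a template is now an extension warning with a removal fix-it rather than an error, and the dependent-template case now builds a DependentTemplateSpecializationType instead of burying a TemplateSpecializationType inside a DependentNameType. A minimal sketch of code exercising both (illustrative only, not part of this patch):

  struct A { typedef int type; };

  // Outside any template, 'typename' is superfluous: now accepted with
  // ext_typename_outside_of_template plus a fix-it removing the keyword
  // (and accepted silently in C++0x mode).
  typename A::type x = 0;

  template<typename T> struct B {
    // Dependent qualifier: this now produces a
    // DependentTemplateSpecializationType.
    typedef typename T::template apply<int> type;
  };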
@@ -5297,7 +5370,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
// the "typename" keyword itself is superfluous. In C++03, the
// program is actually ill-formed. However, DR 382 (in C++0x CD1)
// allows such extraneous "typename" keywords, and we retroactively
- // apply this DR to C++03 code. In any case we continue.
+ // apply this DR to C++03 code with only a warning. In any case we continue.
if (RequireCompleteDeclContext(SS, Ctx))
return QualType();
@@ -5317,7 +5390,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
return Context.getDependentNameType(Keyword, NNS, &II);
case LookupResult::Found:
- if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) {
+ if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) {
// We found a type. Build an ElaboratedType, since the
// typename-specifier was just sugar.
return Context.getElaboratedType(ETK_Typename, NNS,
@@ -5399,87 +5472,9 @@ namespace {
Sema::OwningExprResult TransformExpr(Expr *E) {
return getSema().Owned(E->Retain());
}
-
- /// \brief Transforms a typename type by determining whether the type now
- /// refers to a member of the current instantiation, and then
- /// type-checking and building an ElaboratedType (when possible).
- QualType TransformDependentNameType(TypeLocBuilder &TLB,
- DependentNameTypeLoc TL,
- QualType ObjectType);
};
}
-QualType
-CurrentInstantiationRebuilder::TransformDependentNameType(TypeLocBuilder &TLB,
- DependentNameTypeLoc TL,
- QualType ObjectType) {
- DependentNameType *T = TL.getTypePtr();
-
- NestedNameSpecifier *NNS
- = TransformNestedNameSpecifier(T->getQualifier(),
- TL.getQualifierRange(),
- ObjectType);
- if (!NNS)
- return QualType();
-
- // If the nested-name-specifier did not change, and we cannot compute the
- // context corresponding to the nested-name-specifier, then this
- // typename type will not change; exit early.
- CXXScopeSpec SS;
- SS.setRange(TL.getQualifierRange());
- SS.setScopeRep(NNS);
-
- QualType Result;
- if (NNS == T->getQualifier() && getSema().computeDeclContext(SS) == 0)
- Result = QualType(T, 0);
-
- // Rebuild the typename type, which will probably turn into a
- // ElaboratedType.
- else if (const TemplateSpecializationType *TemplateId = T->getTemplateId()) {
- QualType NewTemplateId
- = TransformType(QualType(TemplateId, 0));
- if (NewTemplateId.isNull())
- return QualType();
-
- if (NNS == T->getQualifier() &&
- NewTemplateId == QualType(TemplateId, 0))
- Result = QualType(T, 0);
- else
- Result = getDerived().RebuildDependentNameType(T->getKeyword(),
- NNS, NewTemplateId);
- } else
- Result = getDerived().RebuildDependentNameType(T->getKeyword(), NNS,
- T->getIdentifier(),
- TL.getKeywordLoc(),
- TL.getQualifierRange(),
- TL.getNameLoc());
-
- if (Result.isNull())
- return QualType();
-
- if (const ElaboratedType* ElabT = Result->getAs<ElaboratedType>()) {
- QualType NamedT = ElabT->getNamedType();
- if (isa<TemplateSpecializationType>(NamedT)) {
- TemplateSpecializationTypeLoc NamedTLoc
- = TLB.push<TemplateSpecializationTypeLoc>(NamedT);
- // FIXME: fill locations
- NamedTLoc.initializeLocal(TL.getNameLoc());
- } else {
- TLB.pushTypeSpec(NamedT).setNameLoc(TL.getNameLoc());
- }
- ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
- NewTL.setKeywordLoc(TL.getKeywordLoc());
- NewTL.setQualifierRange(TL.getQualifierRange());
- }
- else {
- DependentNameTypeLoc NewTL = TLB.push<DependentNameTypeLoc>(Result);
- NewTL.setKeywordLoc(TL.getKeywordLoc());
- NewTL.setQualifierRange(TL.getQualifierRange());
- NewTL.setNameLoc(TL.getNameLoc());
- }
- return Result;
-}
-
/// \brief Rebuilds a type within the context of the current instantiation.
///
/// The type \p T is part of the type of an out-of-line member definition of
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.h b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.h
index ca59e27..b3f4651 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.h
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.h
@@ -36,10 +36,14 @@ namespace clang {
/// When instantiating X<int>::Y<17>::f, the multi-level template argument
/// list will contain a template argument list (int) at depth 0 and a
/// template argument list (17) at depth 1.
- struct MultiLevelTemplateArgumentList {
+ class MultiLevelTemplateArgumentList {
+ public:
+ typedef std::pair<const TemplateArgument *, unsigned> ArgList;
+
+ private:
/// \brief The template argument lists, stored from the innermost template
/// argument list (first) to the outermost template argument list (last).
- llvm::SmallVector<const TemplateArgumentList *, 4> TemplateArgumentLists;
+ llvm::SmallVector<ArgList, 4> TemplateArgumentLists;
public:
/// \brief Construct an empty set of template argument lists.
@@ -48,7 +52,7 @@ namespace clang {
/// \brief Construct a single-level template argument list.
explicit
MultiLevelTemplateArgumentList(const TemplateArgumentList &TemplateArgs) {
- TemplateArgumentLists.push_back(&TemplateArgs);
+ addOuterTemplateArguments(&TemplateArgs);
}
/// \brief Determine the number of levels in this template argument
@@ -58,8 +62,8 @@ namespace clang {
/// \brief Retrieve the template argument at a given depth and index.
const TemplateArgument &operator()(unsigned Depth, unsigned Index) const {
assert(Depth < TemplateArgumentLists.size());
- assert(Index < TemplateArgumentLists[getNumLevels() - Depth - 1]->size());
- return TemplateArgumentLists[getNumLevels() - Depth - 1]->get(Index);
+ assert(Index < TemplateArgumentLists[getNumLevels() - Depth - 1].second);
+ return TemplateArgumentLists[getNumLevels() - Depth - 1].first[Index];
}
/// \brief Determine whether there is a non-NULL template argument at the
@@ -69,7 +73,7 @@ namespace clang {
bool hasTemplateArgument(unsigned Depth, unsigned Index) const {
assert(Depth < TemplateArgumentLists.size());
- if (Index >= TemplateArgumentLists[getNumLevels() - Depth - 1]->size())
+ if (Index >= TemplateArgumentLists[getNumLevels() - Depth - 1].second)
return false;
return !(*this)(Depth, Index).isNull();
@@ -78,12 +82,21 @@ namespace clang {
/// \brief Add a new outermost level to the multi-level template argument
/// list.
void addOuterTemplateArguments(const TemplateArgumentList *TemplateArgs) {
- TemplateArgumentLists.push_back(TemplateArgs);
+ TemplateArgumentLists.push_back(
+ ArgList(TemplateArgs->getFlatArgumentList(),
+ TemplateArgs->flat_size()));
+ }
+
+ /// \brief Add a new outermost level to the multi-level template argument
+ /// list.
+ void addOuterTemplateArguments(const TemplateArgument *Args,
+ unsigned NumArgs) {
+ TemplateArgumentLists.push_back(ArgList(Args, NumArgs));
}
/// \brief Retrieve the innermost template argument list.
- const TemplateArgumentList &getInnermost() const {
- return *TemplateArgumentLists.front();
+ const ArgList &getInnermost() const {
+ return TemplateArgumentLists.front();
}
};
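
The list now stores raw (pointer, length) pairs instead of pointers to TemplateArgumentList objects, so a level can be added from a bare argument array (as the injected-class-name case in SemaTemplateInstantiate.cpp below does). A rough sketch of the addressing scheme for the X<int>::Y<17>::f example from the comment above, where XArgs and YArgs are hypothetical TemplateArgument arrays:

  MultiLevelTemplateArgumentList Args;
  Args.addOuterTemplateArguments(YArgs, 1);  // innermost level (17), added first
  Args.addOuterTemplateArguments(XArgs, 1);  // outermost level (int), added last
  const TemplateArgument &IntArg = Args(0, 0);        // depth 0 -> (int)
  const TemplateArgument &SeventeenArg = Args(1, 0);  // depth 1 -> (17)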
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
index 88ceeca..403d554 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -2624,6 +2624,18 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T,
OnlyDeduced, Depth, Used);
break;
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateSpecializationType *Spec
+ = cast<DependentTemplateSpecializationType>(T);
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(SemaRef, Spec->getQualifier(),
+ OnlyDeduced, Depth, Used);
+ for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
+ MarkUsedTemplateParameters(SemaRef, Spec->getArg(I), OnlyDeduced, Depth,
+ Used);
+ break;
+ }
+
case Type::TypeOf:
if (!OnlyDeduced)
MarkUsedTemplateParameters(SemaRef,
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 1adf594..0cdc8a1 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -63,7 +63,8 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D,
if (ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(Ctx)) {
// We're done when we hit an explicit specialization.
- if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization)
+ if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization &&
+ !isa<ClassTemplatePartialSpecializationDecl>(Spec))
break;
Result.addOuterTemplateArguments(&Spec->getTemplateInstantiationArgs());
@@ -104,6 +105,15 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D,
RelativeToPrimary = false;
continue;
}
+ } else if (CXXRecordDecl *Rec = dyn_cast<CXXRecordDecl>(Ctx)) {
+ if (ClassTemplateDecl *ClassTemplate = Rec->getDescribedClassTemplate()) {
+ QualType T = ClassTemplate->getInjectedClassNameSpecialization();
+ const TemplateSpecializationType *TST
+ = cast<TemplateSpecializationType>(Context.getCanonicalType(T));
+ Result.addOuterTemplateArguments(TST->getArgs(), TST->getNumArgs());
+ if (ClassTemplate->isMemberSpecialization())
+ break;
+ }
}
Ctx = Ctx->getParent();
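
The new CXXRecordDecl branch handles walking out through a class template pattern itself (rather than a specialization of it): the injected-class-name specialization supplies the template's own parameters as the arguments for that level. Illustrative shape of the case it covers (not from the patch):

  template<typename T> struct Outer {
    struct Inner {
      void f();  // collecting instantiation args for members of Inner walks
    };           // through Outer, whose injected-class-name Outer<T>
  };             // contributes <T> at this level; a member specialization
                 // of Outer stops the walk, per the isMemberSpecialization check.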
@@ -620,6 +630,14 @@ namespace {
QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB,
TemplateTypeParmTypeLoc TL,
QualType ObjectType);
+
+ Sema::OwningExprResult TransformCallExpr(CallExpr *CE) {
+ getSema().CallsUndergoingInstantiation.push_back(CE);
+ OwningExprResult Result =
+ TreeTransform<TemplateInstantiator>::TransformCallExpr(CE);
+ getSema().CallsUndergoingInstantiation.pop_back();
+ return move(Result);
+ }
};
}
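
The TransformCallExpr override brackets the rebuild of each call with a push/pop on Sema::CallsUndergoingInstantiation, so diagnostics produced while the callee is being transformed can refer back to the call currently under instantiation. A sketch of the kind of code this helps diagnose (hypothetical example):

  template<typename T> void g(T t) {
    h(t);               // while this CallExpr is rebuilt it sits on the stack,
  }                     // giving lookup diagnostics for 'h' a call to point at
  void h(int);
  int use() { g(0); return 0; }  // 'h' only becomes visible after the template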
@@ -1049,6 +1067,9 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
NewParm->setHasInheritedDefaultArg(OldParm->hasInheritedDefaultArg());
CurrentInstantiationScope->InstantiatedLocal(OldParm, NewParm);
+ // Set DeclContext if inside a Block.
+ NewParm->setDeclContext(CurContext);
+
return NewParm;
}
@@ -1216,7 +1237,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
ActOnFields(0, Instantiation->getLocation(), DeclPtrTy::make(Instantiation),
Fields.data(), Fields.size(), SourceLocation(), SourceLocation(),
0);
- CheckCompletedCXXClass(/*Scope=*/0, Instantiation);
+ CheckCompletedCXXClass(Instantiation);
if (Instantiation->isInvalidDecl())
Invalid = true;
@@ -1434,7 +1455,7 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
SuppressNew)
continue;
- if (Function->getBody())
+ if (Function->hasBody())
continue;
if (TSK == TSK_ExplicitInstantiationDefinition) {
@@ -1444,7 +1465,7 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
// specialization and is only an explicit instantiation definition
// of members whose definition is visible at the point of
// instantiation.
- if (!Pattern->getBody())
+ if (!Pattern->hasBody())
continue;
Function->setTemplateSpecializationKind(TSK, PointOfInstantiation);
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 834b86d..2fd3528 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -42,12 +42,13 @@ namespace {
// FIXME: Once we get closer to completion, replace these manually-written
// declarations with automatically-generated ones from
- // clang/AST/DeclNodes.def.
+ // clang/AST/DeclNodes.inc.
Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
Decl *VisitNamespaceDecl(NamespaceDecl *D);
Decl *VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
Decl *VisitTypedefDecl(TypedefDecl *D);
Decl *VisitVarDecl(VarDecl *D);
+ Decl *VisitAccessSpecDecl(AccessSpecDecl *D);
Decl *VisitFieldDecl(FieldDecl *D);
Decl *VisitStaticAssertDecl(StaticAssertDecl *D);
Decl *VisitEnumDecl(EnumDecl *D);
@@ -142,14 +143,29 @@ bool TemplateDeclInstantiator::SubstQualifier(const TagDecl *OldDecl,
return false;
}
-// FIXME: Is this too simple?
+// FIXME: Is this still too simple?
void TemplateDeclInstantiator::InstantiateAttrs(Decl *Tmpl, Decl *New) {
- for (const Attr *TmplAttr = Tmpl->getAttrs(); TmplAttr;
+ for (const Attr *TmplAttr = Tmpl->getAttrs(); TmplAttr;
TmplAttr = TmplAttr->getNext()) {
-
+ // FIXME: This should be generalized to more than just the AlignedAttr.
+ if (const AlignedAttr *Aligned = dyn_cast<AlignedAttr>(TmplAttr)) {
+ if (Aligned->isDependent()) {
+ // The alignment expression is not potentially evaluated.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Action::Unevaluated);
+
+ OwningExprResult Result = SemaRef.SubstExpr(Aligned->getAlignmentExpr(),
+ TemplateArgs);
+ if (!Result.isInvalid())
+ // FIXME: Is this the correct source location?
+ SemaRef.AddAlignedAttr(Aligned->getAlignmentExpr()->getExprLoc(),
+ New, Result.takeAs<Expr>());
+ continue;
+ }
+ }
+
// FIXME: Is cloning correct for all attributes?
Attr *NewAttr = TmplAttr->clone(SemaRef.Context);
-
New->addAttr(NewAttr);
}
}
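
The AlignedAttr special case exists because the alignment operand may be a dependent expression, which must be substituted (in an unevaluated context) rather than cloned verbatim. For instance (a sketch, not from the patch):

  template<typename T> struct Padded {
    // 'sizeof(T)' is dependent; cloning the attribute would preserve the
    // unresolved expression, so instantiation substitutes it and re-adds
    // the attribute via AddAlignedAttr.
    char buf[64] __attribute__((aligned(sizeof(T))));
  };
  Padded<double> p;  // instantiates with aligned(8) on typical targets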
@@ -360,7 +376,9 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
Var->setLexicalDeclContext(D->getLexicalDeclContext());
Var->setAccess(D->getAccess());
- Var->setUsed(D->isUsed());
+
+ if (!D->isStaticDataMember())
+ Var->setUsed(D->isUsed(false));
// FIXME: In theory, we could have a previous declaration for variables that
// are not static data members.
@@ -373,15 +391,16 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
SemaRef.CheckVariableDeclaration(Var, Previous, Redeclaration);
if (D->isOutOfLine()) {
- D->getLexicalDeclContext()->addDecl(Var);
+ if (!D->isStaticDataMember())
+ D->getLexicalDeclContext()->addDecl(Var);
Owner->makeDeclVisibleInContext(Var);
} else {
Owner->addDecl(Var);
-
if (Owner->isFunctionOrMethod())
SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Var);
}
-
+ InstantiateAttrs(D, Var);
+
// Link instantiations of static data members back to the template from
// which they were instantiated.
if (Var->isStaticDataMember())
@@ -436,6 +455,14 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
return Var;
}
+Decl *TemplateDeclInstantiator::VisitAccessSpecDecl(AccessSpecDecl *D) {
+ AccessSpecDecl* AD
+ = AccessSpecDecl::Create(SemaRef.Context, D->getAccess(), Owner,
+ D->getAccessSpecifierLoc(), D->getColonLoc());
+ Owner->addHiddenDecl(AD);
+ return AD;
+}
+
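
With VisitAccessSpecDecl, instantiated classes now carry their access-specifier declarations too, e.g. (illustrative):

  template<typename T> class Box {
  public:          // this AccessSpecDecl is recreated in Box<int>
    T get() const;
  private:         // ...as is this one, inserted via addHiddenDecl()
    T value;
  };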
Decl *TemplateDeclInstantiator::VisitFieldDecl(FieldDecl *D) {
bool Invalid = false;
TypeSourceInfo *DI = D->getTypeSourceInfo();
@@ -793,7 +820,7 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
// Trigger creation of the type for the instantiation.
SemaRef.Context.getInjectedClassNameType(RecordInst,
- Inst->getInjectedClassNameSpecialization(SemaRef.Context));
+ Inst->getInjectedClassNameSpecialization());
// Finish handling of friends.
if (isFriend) {
@@ -951,9 +978,10 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
void *InsertPos = 0;
if (FunctionTemplate && !TemplateParams) {
llvm::FoldingSetNodeID ID;
- FunctionTemplateSpecializationInfo::Profile(ID,
- TemplateArgs.getInnermost().getFlatArgumentList(),
- TemplateArgs.getInnermost().flat_size(),
+ std::pair<const TemplateArgument *, unsigned> Innermost
+ = TemplateArgs.getInnermost();
+ FunctionTemplateSpecializationInfo::Profile(ID, Innermost.first,
+ Innermost.second,
SemaRef.Context);
FunctionTemplateSpecializationInfo *Info
@@ -1062,8 +1090,12 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
}
} else if (FunctionTemplate) {
// Record this function template specialization.
+ std::pair<const TemplateArgument *, unsigned> Innermost
+ = TemplateArgs.getInnermost();
Function->setFunctionTemplateSpecialization(FunctionTemplate,
- &TemplateArgs.getInnermost(),
+ new (SemaRef.Context) TemplateArgumentList(SemaRef.Context,
+ Innermost.first,
+ Innermost.second),
InsertPos);
} else if (isFriend && D->isThisDeclarationADefinition()) {
// TODO: should we remember this connection regardless of whether
@@ -1154,7 +1186,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
D->isThisDeclarationADefinition()) {
// Check for a function body.
const FunctionDecl *Definition = 0;
- if (Function->getBody(Definition) &&
+ if (Function->hasBody(Definition) &&
Definition->getTemplateSpecializationKind() == TSK_Undeclared) {
SemaRef.Diag(Function->getLocation(), diag::err_redefinition)
<< Function->getDeclName();
@@ -1170,7 +1202,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
((*R)->getFriendObjectKind() != Decl::FOK_None)) {
if (const FunctionDecl *RPattern
= (*R)->getTemplateInstantiationPattern())
- if (RPattern->getBody(RPattern)) {
+ if (RPattern->hasBody(RPattern)) {
SemaRef.Diag(Function->getLocation(), diag::err_redefinition)
<< Function->getDeclName();
SemaRef.Diag((*R)->getLocation(), diag::note_previous_definition);
@@ -1200,9 +1232,10 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
// template. Check whether there is already a function template
// specialization for this particular set of template arguments.
llvm::FoldingSetNodeID ID;
- FunctionTemplateSpecializationInfo::Profile(ID,
- TemplateArgs.getInnermost().getFlatArgumentList(),
- TemplateArgs.getInnermost().flat_size(),
+ std::pair<const TemplateArgument *, unsigned> Innermost
+ = TemplateArgs.getInnermost();
+ FunctionTemplateSpecializationInfo::Profile(ID, Innermost.first,
+ Innermost.second,
SemaRef.Context);
FunctionTemplateSpecializationInfo *Info
@@ -1347,8 +1380,12 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
Method->setDescribedFunctionTemplate(FunctionTemplate);
} else if (FunctionTemplate) {
// Record this function template specialization.
+ std::pair<const TemplateArgument *, unsigned> Innermost
+ = TemplateArgs.getInnermost();
Method->setFunctionTemplateSpecialization(FunctionTemplate,
- &TemplateArgs.getInnermost(),
+ new (SemaRef.Context) TemplateArgumentList(SemaRef.Context,
+ Innermost.first,
+ Innermost.second),
InsertPos);
} else if (!isFriend) {
// Record that this is an instantiation of a member function.
@@ -1485,7 +1522,7 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
if (Invalid)
Param->setInvalidDecl();
- Param->setDefaultArgument(D->getDefaultArgument());
+ Param->setDefaultArgument(D->getDefaultArgument(), false);
// Introduce this template parameter's instantiation into the instantiation
// scope.
@@ -1513,7 +1550,7 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl(
= TemplateTemplateParmDecl::Create(SemaRef.Context, Owner, D->getLocation(),
D->getDepth() - 1, D->getPosition(),
D->getIdentifier(), InstParams);
- Param->setDefaultArgument(D->getDefaultArgument());
+ Param->setDefaultArgument(D->getDefaultArgument(), false);
// Introduce this template parameter's instantiation into the instantiation
// scope.
@@ -1966,6 +2003,8 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
Proto->getExtInfo()));
}
+ InstantiateAttrs(Tmpl, New);
+
return false;
}
@@ -2011,7 +2050,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive,
bool DefinitionRequired) {
- if (Function->isInvalidDecl() || Function->getBody())
+ if (Function->isInvalidDecl() || Function->hasBody())
return;
// Never instantiate an explicit specialization.
@@ -2568,7 +2607,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
ClassTemplateDecl *ClassTemplate = Record->getDescribedClassTemplate();
if (ClassTemplate) {
- T = ClassTemplate->getInjectedClassNameSpecialization(Context);
+ T = ClassTemplate->getInjectedClassNameSpecialization();
} else if (ClassTemplatePartialSpecializationDecl *PartialSpec
= dyn_cast<ClassTemplatePartialSpecializationDecl>(Record)) {
ClassTemplate = PartialSpec->getSpecializedTemplate();
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
index 35efa61..a4fc98c 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
@@ -383,8 +383,12 @@ static QualType ConvertDeclSpecToType(Sema &TheSema,
} else if (DS.isTypeAltiVecVector()) {
unsigned typeSize = static_cast<unsigned>(Context.getTypeSize(Result));
assert(typeSize > 0 && "type size for vector must be greater than 0 bits");
- Result = Context.getVectorType(Result, 128/typeSize, true,
- DS.isTypeAltiVecPixel());
+ VectorType::AltiVecSpecific AltiVecSpec = VectorType::AltiVec;
+ if (DS.isTypeAltiVecPixel())
+ AltiVecSpec = VectorType::Pixel;
+ else if (DS.isTypeAltiVecBool())
+ AltiVecSpec = VectorType::Bool;
+ Result = Context.getVectorType(Result, 128/typeSize, AltiVecSpec);
}
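
The two AltiVec booleans collapse into a single AltiVecSpecific enumerator on VectorType. In AltiVec mode the declarations map roughly as follows (a sketch of the correspondence):

  // __vector float f;                        -> VectorType::AltiVec
  // __vector __pixel p;                      -> VectorType::Pixel
  // __vector __bool int b;                   -> VectorType::Bool
  // int v __attribute__((vector_size(16)));  -> VectorType::NotAltiVec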
assert(DS.getTypeSpecComplex() != DeclSpec::TSC_imaginary &&
@@ -472,12 +476,49 @@ static std::string getPrintableNameForEntity(DeclarationName Entity) {
return "type name";
}
+QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
+ Qualifiers Qs) {
+ // Enforce C99 6.7.3p2: "Types other than pointer types derived from
+ // object or incomplete types shall not be restrict-qualified."
+ if (Qs.hasRestrict()) {
+ unsigned DiagID = 0;
+ QualType ProblemTy;
+
+ const Type *Ty = T->getCanonicalTypeInternal().getTypePtr();
+ if (const ReferenceType *RTy = dyn_cast<ReferenceType>(Ty)) {
+ if (!RTy->getPointeeType()->isIncompleteOrObjectType()) {
+ DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
+ ProblemTy = T->getAs<ReferenceType>()->getPointeeType();
+ }
+ } else if (const PointerType *PTy = dyn_cast<PointerType>(Ty)) {
+ if (!PTy->getPointeeType()->isIncompleteOrObjectType()) {
+ DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
+ ProblemTy = T->getAs<PointerType>()->getPointeeType();
+ }
+ } else if (const MemberPointerType *PTy = dyn_cast<MemberPointerType>(Ty)) {
+ if (!PTy->getPointeeType()->isIncompleteOrObjectType()) {
+ DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
+ ProblemTy = T->getAs<PointerType>()->getPointeeType();
+ }
+ } else if (!Ty->isDependentType()) {
+ // FIXME: this deserves a proper diagnostic
+ DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
+ ProblemTy = T;
+ }
+
+ if (DiagID) {
+ Diag(Loc, DiagID) << ProblemTy;
+ Qs.removeRestrict();
+ }
+ }
+
+ return Context.getQualifiedType(T, Qs);
+}
+
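
BuildQualifiedType centralizes the C99 6.7.3p2 check that the Build*Type routines below previously duplicated: restrict only makes sense on a pointer (or reference) whose pointee is an object or incomplete type. For example (sketch):

  int *__restrict ok;            // pointee is an object type: accepted
  void (*__restrict bad)(void);  // pointee is a function type: diagnosed with
                                 // err_typecheck_invalid_restrict_invalid_pointee
  int __restrict worse;          // not a pointer at all: also diagnosed,
                                 // and the qualifier is dropped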
/// \brief Build a pointer type.
///
/// \param T The type to which we'll be building a pointer.
///
-/// \param Quals The cvr-qualifiers to be applied to the pointer type.
-///
/// \param Loc The location of the entity whose type involves this
/// pointer type or, if there is no such entity, the location of the
/// type that will have pointer type.
@@ -487,7 +528,7 @@ static std::string getPrintableNameForEntity(DeclarationName Entity) {
///
/// \returns A suitable pointer type, if there are no
/// errors. Otherwise, returns a NULL type.
-QualType Sema::BuildPointerType(QualType T, unsigned Quals,
+QualType Sema::BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity) {
if (T->isReferenceType()) {
// C++ 8.3.2p4: There shall be no ... pointers to references ...
@@ -496,28 +537,16 @@ QualType Sema::BuildPointerType(QualType T, unsigned Quals,
return QualType();
}
- Qualifiers Qs = Qualifiers::fromCVRMask(Quals);
-
- // Enforce C99 6.7.3p2: "Types other than pointer types derived from
- // object or incomplete types shall not be restrict-qualified."
- if (Qs.hasRestrict() && !T->isIncompleteOrObjectType()) {
- Diag(Loc, diag::err_typecheck_invalid_restrict_invalid_pointee)
- << T;
- Qs.removeRestrict();
- }
-
assert(!T->isObjCObjectType() && "Should build ObjCObjectPointerType");
// Build the pointer type.
- return Context.getQualifiedType(Context.getPointerType(T), Qs);
+ return Context.getPointerType(T);
}
/// \brief Build a reference type.
///
/// \param T The type to which we'll be building a reference.
///
-/// \param CVR The cvr-qualifiers to be applied to the reference type.
-///
/// \param Loc The location of the entity whose type involves this
/// reference type or, if there is no such entity, the location of the
/// type that will have reference type.
@@ -528,10 +557,8 @@ QualType Sema::BuildPointerType(QualType T, unsigned Quals,
/// \returns A suitable reference type, if there are no
/// errors. Otherwise, returns a NULL type.
QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
- unsigned CVR, SourceLocation Loc,
+ SourceLocation Loc,
DeclarationName Entity) {
- Qualifiers Quals = Qualifiers::fromCVRMask(CVR);
-
bool LValueRef = SpelledAsLValue || T->getAs<LValueReferenceType>();
// C++0x [dcl.typedef]p9: If a typedef TD names a type that is a
@@ -562,31 +589,10 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
return QualType();
}
- // Enforce C99 6.7.3p2: "Types other than pointer types derived from
- // object or incomplete types shall not be restrict-qualified."
- if (Quals.hasRestrict() && !T->isIncompleteOrObjectType()) {
- Diag(Loc, diag::err_typecheck_invalid_restrict_invalid_pointee)
- << T;
- Quals.removeRestrict();
- }
-
- // C++ [dcl.ref]p1:
- // [...] Cv-qualified references are ill-formed except when the
- // cv-qualifiers are introduced through the use of a typedef
- // (7.1.3) or of a template type argument (14.3), in which case
- // the cv-qualifiers are ignored.
- //
- // We diagnose extraneous cv-qualifiers for the non-typedef,
- // non-template type argument case within the parser. Here, we just
- // ignore any extraneous cv-qualifiers.
- Quals.removeConst();
- Quals.removeVolatile();
-
// Handle restrict on references.
if (LValueRef)
- return Context.getQualifiedType(
- Context.getLValueReferenceType(T, SpelledAsLValue), Quals);
- return Context.getQualifiedType(Context.getRValueReferenceType(T), Quals);
+ return Context.getLValueReferenceType(T, SpelledAsLValue);
+ return Context.getRValueReferenceType(T);
}
/// \brief Build an array type.
@@ -597,9 +603,6 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
///
/// \param ArraySize Expression describing the size of the array.
///
-/// \param Quals The cvr-qualifiers to be applied to the array's
-/// element type.
-///
/// \param Loc The location of the entity whose type involves this
/// array type or, if there is no such entity, the location of the
/// type that will have array type.
@@ -815,7 +818,7 @@ QualType Sema::BuildFunctionType(QualType T,
<< T->isFunctionType() << T;
return QualType();
}
-
+
bool Invalid = false;
for (unsigned Idx = 0; Idx < NumParamTypes; ++Idx) {
QualType ParamType = adjustParameterType(ParamTypes[Idx]);
@@ -846,10 +849,8 @@ QualType Sema::BuildFunctionType(QualType T,
/// \returns a member pointer type, if successful, or a NULL type if there was
/// an error.
QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
- unsigned CVR, SourceLocation Loc,
+ SourceLocation Loc,
DeclarationName Entity) {
- Qualifiers Quals = Qualifiers::fromCVRMask(CVR);
-
// Verify that we're not building a pointer to pointer to function with
// exception specification.
if (CheckDistantExceptionSpec(T)) {
@@ -863,7 +864,7 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
T = Context.getCanonicalType(T);
}
- // C++ 8.3.3p3: A pointer to member shall not pointer to ... a member
+ // C++ 8.3.3p3: A pointer to member shall not point to ... a member
// with reference type, or "cv void."
if (T->isReferenceType()) {
Diag(Loc, diag::err_illegal_decl_mempointer_to_reference)
@@ -877,24 +878,12 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
return QualType();
}
- // Enforce C99 6.7.3p2: "Types other than pointer types derived from
- // object or incomplete types shall not be restrict-qualified."
- if (Quals.hasRestrict() && !T->isIncompleteOrObjectType()) {
- Diag(Loc, diag::err_typecheck_invalid_restrict_invalid_pointee)
- << T;
-
- // FIXME: If we're doing this as part of template instantiation,
- // we should return immediately.
- Quals.removeRestrict();
- }
-
if (!Class->isDependentType() && !Class->isRecordType()) {
Diag(Loc, diag::err_mempointer_in_nonclass_type) << Class;
return QualType();
}
- return Context.getQualifiedType(
- Context.getMemberPointerType(T, Class.getTypePtr()), Quals);
+ return Context.getMemberPointerType(T, Class.getTypePtr());
}
/// \brief Build a block pointer type.
@@ -912,7 +901,7 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
///
/// \returns A suitable block pointer type, if there are no
/// errors. Otherwise, returns a NULL type.
-QualType Sema::BuildBlockPointerType(QualType T, unsigned CVR,
+QualType Sema::BuildBlockPointerType(QualType T,
SourceLocation Loc,
DeclarationName Entity) {
if (!T->isFunctionType()) {
@@ -920,8 +909,7 @@ QualType Sema::BuildBlockPointerType(QualType T, unsigned CVR,
return QualType();
}
- Qualifiers Quals = Qualifiers::fromCVRMask(CVR);
- return Context.getQualifiedType(Context.getBlockPointerType(T), Quals);
+ return Context.getBlockPointerType(T);
}
QualType Sema::GetTypeFromParser(TypeTy *Ty, TypeSourceInfo **TInfo) {
@@ -947,9 +935,11 @@ QualType Sema::GetTypeFromParser(TypeTy *Ty, TypeSourceInfo **TInfo) {
/// If OwnedDecl is non-NULL, and this declarator's decl-specifier-seq
/// owns the declaration of a type (e.g., the definition of a struct
/// type), then *OwnedDecl will receive the owned declaration.
-QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
- TypeSourceInfo **TInfo,
- TagDecl **OwnedDecl) {
+///
+/// The result of this call will never be null, but the associated
+/// type may be a null type if there's an unrecoverable error.
+TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
+ TagDecl **OwnedDecl) {
// Determine the type of the declarator. Not all forms of declarator
// have a type.
QualType T;
@@ -980,22 +970,18 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
// Constructors and destructors don't have return types. Use
// "void" instead.
T = Context.VoidTy;
-
- if (TInfo)
- ReturnTypeInfo = Context.getTrivialTypeSourceInfo(T,
- D.getName().StartLocation);
break;
case UnqualifiedId::IK_ConversionFunctionId:
// The result type of a conversion function is the type that it
// converts to.
T = GetTypeFromParser(D.getName().ConversionFunctionId,
- TInfo? &ReturnTypeInfo : 0);
+ &ReturnTypeInfo);
break;
}
if (T.isNull())
- return T;
+ return Context.getNullTypeSourceInfo();
if (T == Context.UndeducedAutoTy) {
int Error = -1;
@@ -1059,8 +1045,9 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
if (!LangOpts.Blocks)
Diag(DeclType.Loc, diag::err_blocks_disable);
- T = BuildBlockPointerType(T, DeclType.Cls.TypeQuals, D.getIdentifierLoc(),
- Name);
+ T = BuildBlockPointerType(T, D.getIdentifierLoc(), Name);
+ if (DeclType.Cls.TypeQuals)
+ T = BuildQualifiedType(T, DeclType.Loc, DeclType.Cls.TypeQuals);
break;
case DeclaratorChunk::Pointer:
// Verify that we're not building a pointer to pointer to function with
@@ -1072,15 +1059,15 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
}
if (getLangOptions().ObjC1 && T->getAs<ObjCObjectType>()) {
T = Context.getObjCObjectPointerType(T);
- T = Context.getCVRQualifiedType(T, DeclType.Ptr.TypeQuals);
+ if (DeclType.Ptr.TypeQuals)
+ T = BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
break;
}
- T = BuildPointerType(T, DeclType.Ptr.TypeQuals, DeclType.Loc, Name);
+ T = BuildPointerType(T, DeclType.Loc, Name);
+ if (DeclType.Ptr.TypeQuals)
+ T = BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
break;
case DeclaratorChunk::Reference: {
- Qualifiers Quals;
- if (DeclType.Ref.HasRestrict) Quals.addRestrict();
-
// Verify that we're not building a reference to pointer to function with
// exception specification.
if (getLangOptions().CPlusPlus && CheckDistantExceptionSpec(T)) {
@@ -1088,8 +1075,11 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
D.setInvalidType(true);
// Build the type anyway.
}
- T = BuildReferenceType(T, DeclType.Ref.LValueRef, Quals,
- DeclType.Loc, Name);
+ T = BuildReferenceType(T, DeclType.Ref.LValueRef, DeclType.Loc, Name);
+
+ Qualifiers Quals;
+ if (DeclType.Ref.HasRestrict)
+ T = BuildQualifiedType(T, DeclType.Loc, Qualifiers::Restrict);
break;
}
case DeclaratorChunk::Array: {
@@ -1139,6 +1129,48 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
D.setInvalidType(true);
}
+ // cv-qualifiers on return types are pointless except when the type is a
+ // class type in C++.
+ if (T.getCVRQualifiers() && D.getDeclSpec().getTypeQualifiers() &&
+ (!getLangOptions().CPlusPlus ||
+ (!T->isDependentType() && !T->isRecordType()))) {
+ unsigned Quals = D.getDeclSpec().getTypeQualifiers();
+ std::string QualStr;
+ unsigned NumQuals = 0;
+ SourceLocation Loc;
+ if (Quals & Qualifiers::Const) {
+ Loc = D.getDeclSpec().getConstSpecLoc();
+ ++NumQuals;
+ QualStr = "const";
+ }
+ if (Quals & Qualifiers::Volatile) {
+ if (NumQuals == 0) {
+ Loc = D.getDeclSpec().getVolatileSpecLoc();
+ QualStr = "volatile";
+ } else
+ QualStr += " volatile";
+ ++NumQuals;
+ }
+ if (Quals & Qualifiers::Restrict) {
+ if (NumQuals == 0) {
+ Loc = D.getDeclSpec().getRestrictSpecLoc();
+ QualStr = "restrict";
+ } else
+ QualStr += " restrict";
+ ++NumQuals;
+ }
+ assert(NumQuals > 0 && "No known qualifiers?");
+
+ SemaDiagnosticBuilder DB = Diag(Loc, diag::warn_qual_return_type);
+ DB << QualStr << NumQuals;
+ if (Quals & Qualifiers::Const)
+ DB << FixItHint::CreateRemoval(D.getDeclSpec().getConstSpecLoc());
+ if (Quals & Qualifiers::Volatile)
+ DB << FixItHint::CreateRemoval(D.getDeclSpec().getVolatileSpecLoc());
+ if (Quals & Qualifiers::Restrict)
+ DB << FixItHint::CreateRemoval(D.getDeclSpec().getRestrictSpecLoc());
+ }
+
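
This new block warns when cv-qualifiers on a function's return type are meaningless, i.e. for non-class, non-dependent types, and offers fix-its removing each written qualifier. For example (sketch):

  struct S {};
  const int f();  // warn_qual_return_type: 'const' has no effect here
  const S g();    // not warned in C++, where const class rvalues differ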
if (getLangOptions().CPlusPlus && D.getDeclSpec().isTypeSpecOwned()) {
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
@@ -1154,29 +1186,14 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
Diag(FTI.getThrowLoc(), diag::err_exception_spec_in_typedef);
- if (FTI.NumArgs == 0) {
- if (getLangOptions().CPlusPlus) {
- // C++ 8.3.5p2: If the parameter-declaration-clause is empty, the
- // function takes no arguments.
- llvm::SmallVector<QualType, 4> Exceptions;
- Exceptions.reserve(FTI.NumExceptions);
- for (unsigned ei = 0, ee = FTI.NumExceptions; ei != ee; ++ei) {
- // FIXME: Preserve type source info.
- QualType ET = GetTypeFromParser(FTI.Exceptions[ei].Ty);
- // Check that the type is valid for an exception spec, and drop it
- // if not.
- if (!CheckSpecifiedExceptionType(ET, FTI.Exceptions[ei].Range))
- Exceptions.push_back(ET);
- }
- T = Context.getFunctionType(T, NULL, 0, FTI.isVariadic, FTI.TypeQuals,
- FTI.hasExceptionSpec,
- FTI.hasAnyExceptionSpec,
- Exceptions.size(), Exceptions.data(),
- FunctionType::ExtInfo());
- } else if (FTI.isVariadic) {
- // We allow a zero-parameter variadic function in C if the
- // function is marked with the "overloadable"
- // attribute. Scan for this attribute now.
+ if (!FTI.NumArgs && !FTI.isVariadic && !getLangOptions().CPlusPlus) {
+ // Simple void foo(), where the incoming T is the result type.
+ T = Context.getFunctionNoProtoType(T);
+ } else {
+ // We allow a zero-parameter variadic function in C if the
+ // function is marked with the "overloadable" attribute. Scan
+ // for this attribute now.
+ if (!FTI.NumArgs && FTI.isVariadic && !getLangOptions().CPlusPlus) {
bool Overloadable = false;
for (const AttributeList *Attrs = D.getAttributes();
Attrs; Attrs = Attrs->getNext()) {
@@ -1188,21 +1205,20 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
if (!Overloadable)
Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_arg);
- T = Context.getFunctionType(T, NULL, 0, FTI.isVariadic, 0,
- false, false, 0, 0,
- FunctionType::ExtInfo());
- } else {
- // Simple void foo(), where the incoming T is the result type.
- T = Context.getFunctionNoProtoType(T);
}
- } else if (FTI.ArgInfo[0].Param == 0) {
- // C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function definition.
- Diag(FTI.ArgInfo[0].IdentLoc, diag::err_ident_list_in_fn_declaration);
- D.setInvalidType(true);
- } else {
+
+ if (FTI.NumArgs && FTI.ArgInfo[0].Param == 0) {
+ // C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function
+ // definition.
+ Diag(FTI.ArgInfo[0].IdentLoc, diag::err_ident_list_in_fn_declaration);
+ D.setInvalidType(true);
+ break;
+ }
+
// Otherwise, we have a function with an argument list that is
// potentially variadic.
llvm::SmallVector<QualType, 16> ArgTys;
+ ArgTys.reserve(FTI.NumArgs);
for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) {
ParmVarDecl *Param =
@@ -1278,13 +1294,6 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
break;
}
case DeclaratorChunk::MemberPointer:
- // Verify that we're not building a pointer to pointer to function with
- // exception specification.
- if (getLangOptions().CPlusPlus && CheckDistantExceptionSpec(T)) {
- Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
- D.setInvalidType(true);
- // Build the type anyway.
- }
// The scope spec must refer to a class, or be dependent.
QualType ClsType;
if (DeclType.Mem.Scope().isInvalid()) {
@@ -1323,11 +1332,12 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
}
if (!ClsType.isNull())
- T = BuildMemberPointerType(T, ClsType, DeclType.Mem.TypeQuals,
- DeclType.Loc, D.getIdentifier());
+ T = BuildMemberPointerType(T, ClsType, DeclType.Loc, D.getIdentifier());
if (T.isNull()) {
T = Context.IntTy;
D.setInvalidType(true);
+ } else if (DeclType.Mem.TypeQuals) {
+ T = BuildQualifiedType(T, DeclType.Loc, DeclType.Mem.TypeQuals);
}
break;
}
@@ -1352,18 +1362,19 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
// for a nonstatic member function, the function type to which a pointer
// to member refers, or the top-level function type of a function typedef
// declaration.
+ bool FreeFunction = (D.getContext() != Declarator::MemberContext &&
+ (!D.getCXXScopeSpec().isSet() ||
+ !computeDeclContext(D.getCXXScopeSpec(), /*FIXME:*/true)->isRecord()));
if (FnTy->getTypeQuals() != 0 &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
- ((D.getContext() != Declarator::MemberContext &&
- (!D.getCXXScopeSpec().isSet() ||
- !computeDeclContext(D.getCXXScopeSpec(), /*FIXME:*/true)
- ->isRecord())) ||
+ (FreeFunction ||
D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static)) {
if (D.isFunctionDeclarator())
Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_function_type);
else
Diag(D.getIdentifierLoc(),
- diag::err_invalid_qualified_typedef_function_type_use);
+ diag::err_invalid_qualified_typedef_function_type_use)
+ << FreeFunction;
// Strip the cv-quals from the type.
T = Context.getFunctionType(FnTy->getResultType(), FnTy->arg_type_begin(),
@@ -1372,6 +1383,11 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
}
}
+ // If there's a constexpr specifier, treat it as a top-level const.
+ if (D.getDeclSpec().isConstexprSpecified()) {
+ T.addConst();
+ }
+
// Process any function attributes we might have delayed from the
// declaration-specifiers.
ProcessDelayedFnAttrs(*this, T, FnAttrsFromDeclSpec);
@@ -1386,14 +1402,11 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S,
DiagnoseDelayedFnAttrs(*this, FnAttrsFromPreviousChunk);
- if (TInfo) {
- if (D.isInvalidType())
- *TInfo = 0;
- else
- *TInfo = GetTypeSourceInfoForDeclarator(D, T, ReturnTypeInfo);
- }
-
- return T;
+ if (T.isNull())
+ return Context.getNullTypeSourceInfo();
+ else if (D.isInvalidType())
+ return Context.getTrivialTypeSourceInfo(T);
+ return GetTypeSourceInfoForDeclarator(D, T, ReturnTypeInfo);
}
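
GetTypeForDeclarator now returns a TypeSourceInfo directly and, per the new documentation above, never returns null; errors surface as a null QualType inside it or through D.isInvalidType(). Callers reduce to the pattern ActOnTypeName adopts below (sketch):

  TagDecl *OwnedTag = 0;
  TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedTag);
  QualType T = TInfo->getType();  // TInfo itself is always non-null
  if (D.isInvalidType())
    return true;                  // T may still be the null type on hard errors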
namespace {
@@ -1527,6 +1540,28 @@ namespace {
// FIXME: load appropriate source location.
TL.setNameLoc(DS.getTypeSpecTypeLoc());
}
+ void VisitDependentTemplateSpecializationTypeLoc(
+ DependentTemplateSpecializationTypeLoc TL) {
+ ElaboratedTypeKeyword Keyword
+ = TypeWithKeyword::getKeywordForTypeSpec(DS.getTypeSpecType());
+ if (Keyword == ETK_Typename) {
+ TypeSourceInfo *TInfo = 0;
+ Sema::GetTypeFromParser(DS.getTypeRep(), &TInfo);
+ if (TInfo) {
+ TL.copy(cast<DependentTemplateSpecializationTypeLoc>(
+ TInfo->getTypeLoc()));
+ return;
+ }
+ }
+ TL.initializeLocal(SourceLocation());
+ TL.setKeywordLoc(Keyword != ETK_None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation());
+ const CXXScopeSpec& SS = DS.getTypeSpecScope();
+ TL.setQualifierRange(SS.isEmpty() ? SourceRange() : SS.getRange());
+ // FIXME: load appropriate source location.
+ TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ }
void VisitTypeLoc(TypeLoc TL) {
// FIXME: add other typespec types and change this to an assert.
@@ -1651,53 +1686,14 @@ void LocInfoType::getAsStringInternal(std::string &Str,
" GetTypeFromParser");
}
-/// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that
-/// may be similar (C++ 4.4), replaces T1 and T2 with the type that
-/// they point to and return true. If T1 and T2 aren't pointer types
-/// or pointer-to-member types, or if they are not similar at this
-/// level, returns false and leaves T1 and T2 unchanged. Top-level
-/// qualifiers on T1 and T2 are ignored. This function will typically
-/// be called in a loop that successively "unwraps" pointer and
-/// pointer-to-member types to compare them at each level.
-bool Sema::UnwrapSimilarPointerTypes(QualType& T1, QualType& T2) {
- const PointerType *T1PtrType = T1->getAs<PointerType>(),
- *T2PtrType = T2->getAs<PointerType>();
- if (T1PtrType && T2PtrType) {
- T1 = T1PtrType->getPointeeType();
- T2 = T2PtrType->getPointeeType();
- return true;
- }
-
- const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
- *T2MPType = T2->getAs<MemberPointerType>();
- if (T1MPType && T2MPType &&
- Context.getCanonicalType(T1MPType->getClass()) ==
- Context.getCanonicalType(T2MPType->getClass())) {
- T1 = T1MPType->getPointeeType();
- T2 = T2MPType->getPointeeType();
- return true;
- }
-
- if (getLangOptions().ObjC1) {
- const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(),
- *T2OPType = T2->getAs<ObjCObjectPointerType>();
- if (T1OPType && T2OPType) {
- T1 = T1OPType->getPointeeType();
- T2 = T2OPType->getPointeeType();
- return true;
- }
- }
- return false;
-}
-
Sema::TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) {
// C99 6.7.6: Type names have no identifier. This is already validated by
// the parser.
assert(D.getIdentifier() == 0 && "Type name should have no identifier!");
- TypeSourceInfo *TInfo = 0;
TagDecl *OwnedTag = 0;
- QualType T = GetTypeForDeclarator(D, S, &TInfo, &OwnedTag);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedTag);
+ QualType T = TInfo->getType();
if (D.isInvalidType())
return true;
@@ -1714,9 +1710,7 @@ Sema::TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) {
<< Context.getTypeDeclType(OwnedTag);
}
- if (TInfo)
- T = CreateLocInfoType(T, TInfo);
-
+ T = CreateLocInfoType(T, TInfo);
return T.getAsOpaquePtr();
}
@@ -1934,7 +1928,8 @@ bool ProcessFnAttr(Sema &S, QualType &Type, const AttributeList &Attr) {
/// The raw attribute should contain precisely 1 argument, the vector size for
/// the variable, measured in bytes. If curType and rawAttr are well formed,
/// this routine will return a new vector type.
-static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr, Sema &S) {
+static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr,
+ Sema &S) {
// Check the attribute arguments.
if (Attr.getNumArgs() != 1) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
@@ -1977,7 +1972,8 @@ static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr, S
// Success! Instantiate the vector type, the number of elements is > 0, and
// not required to be a power of 2, unlike GCC.
- CurType = S.Context.getVectorType(CurType, vectorSize/typeSize, false, false);
+ CurType = S.Context.getVectorType(CurType, vectorSize/typeSize,
+ VectorType::NotAltiVec);
}
void ProcessTypeAttributeList(Sema &S, QualType &Result,
diff --git a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
index a18701e..17103c5 100644
--- a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
@@ -442,7 +442,7 @@ public:
/// By default, performs semantic analysis when building the vector type.
/// Subclasses may override this routine to provide different behavior.
QualType RebuildVectorType(QualType ElementType, unsigned NumElements,
- bool IsAltiVec, bool IsPixel);
+ VectorType::AltiVecSpecific AltiVecSpec);
/// \brief Build a new extended vector type given the element type and
/// number of elements.
@@ -533,16 +533,30 @@ public:
/// By default, builds a new DependentNameType type from the
/// nested-name-specifier and the given type. Subclasses may override
/// this routine to provide different behavior.
- QualType RebuildDependentNameType(ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS, QualType T) {
- if (NNS->isDependent()) {
- // If the name is still dependent, just build a new dependent name type.
- CXXScopeSpec SS;
- SS.setScopeRep(NNS);
- if (!SemaRef.computeDeclContext(SS))
- return SemaRef.Context.getDependentNameType(Keyword, NNS,
- cast<TemplateSpecializationType>(T));
- }
+ QualType RebuildDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ const TemplateArgumentListInfo &Args) {
+ // Rebuild the template name.
+ // TODO: avoid TemplateName abstraction
+ TemplateName InstName =
+ getDerived().RebuildTemplateName(NNS, *Name, QualType());
+
+ if (InstName.isNull())
+ return QualType();
+
+ // If it's still dependent, make a dependent specialization.
+ if (InstName.getAsDependentTemplateName())
+ return SemaRef.Context.getDependentTemplateSpecializationType(
+ Keyword, NNS, Name, Args);
+
+ // Otherwise, make an elaborated type wrapping a non-dependent
+ // specialization.
+ QualType T =
+ getDerived().RebuildTemplateSpecializationType(InstName, NameLoc, Args);
+ if (T.isNull()) return QualType();
return SemaRef.Context.getElaboratedType(Keyword, NNS, T);
}
@@ -1160,7 +1174,9 @@ public:
SS.setScopeRep(Qualifier);
}
- QualType BaseType = ((Expr*) Base.get())->getType();
+ Expr *BaseExpr = Base.takeAs<Expr>();
+ getSema().DefaultFunctionArrayConversion(BaseExpr);
+ QualType BaseType = BaseExpr->getType();
// FIXME: this involves duplicating earlier analysis in a lot of
// cases; we should avoid this when possible.
@@ -1169,8 +1185,8 @@ public:
R.addDecl(FoundDecl);
R.resolveKind();
- return getSema().BuildMemberReferenceExpr(move(Base), BaseType,
- OpLoc, isArrow,
+ return getSema().BuildMemberReferenceExpr(getSema().Owned(BaseExpr),
+ BaseType, OpLoc, isArrow,
SS, FirstQualifierInScope,
R, ExplicitTemplateArgs);
}
@@ -1561,7 +1577,7 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- OwningExprResult RebuildCXXZeroInitValueExpr(SourceLocation TypeStartLoc,
+ OwningExprResult RebuildCXXScalarValueInitExpr(SourceLocation TypeStartLoc,
SourceLocation LParenLoc,
QualType T,
SourceLocation RParenLoc) {
@@ -1580,7 +1596,7 @@ public:
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
- bool ParenTypeId,
+ SourceRange TypeIdParens,
QualType AllocType,
SourceLocation TypeLoc,
SourceRange TypeRange,
@@ -1592,7 +1608,7 @@ public:
PlacementLParen,
move(PlacementArgs),
PlacementRParen,
- ParenTypeId,
+ TypeIdParens,
AllocType,
TypeLoc,
TypeRange,
@@ -1815,7 +1831,8 @@ public:
Sema::LookupMemberName);
OwningExprResult Result = getSema().LookupMemberExpr(R, Base, IsArrow,
/*FIXME:*/IvarLoc,
- SS, DeclPtrTy());
+ SS, DeclPtrTy(),
+ false);
if (Result.isInvalid())
return getSema().ExprError();
@@ -1844,7 +1861,8 @@ public:
bool IsArrow = false;
OwningExprResult Result = getSema().LookupMemberExpr(R, Base, IsArrow,
/*FIXME:*/PropertyLoc,
- SS, DeclPtrTy());
+ SS, DeclPtrTy(),
+ false);
if (Result.isInvalid())
return getSema().ExprError();
@@ -1892,7 +1910,8 @@ public:
Sema::LookupMemberName);
OwningExprResult Result = getSema().LookupMemberExpr(R, Base, IsArrow,
/*FIXME:*/IsaLoc,
- SS, DeclPtrTy());
+ SS, DeclPtrTy(),
+ false);
if (Result.isInvalid())
return getSema().ExprError();
@@ -1933,7 +1952,7 @@ public:
Expr **Subs = (Expr **)SubExprs.release();
CallExpr *TheCall = new (SemaRef.Context) CallExpr(SemaRef.Context, Callee,
Subs, NumSubExprs,
- Builtin->getResultType(),
+ Builtin->getCallResultType(),
RParenLoc);
OwningExprResult OwnedCall(SemaRef.Owned(TheCall));
@@ -2405,11 +2424,11 @@ TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
if (Result->isFunctionType() || Result->isReferenceType())
return Result;
- Result = SemaRef.Context.getQualifiedType(Result, Quals);
-
- TLB.push<QualifiedTypeLoc>(Result);
-
- // No location information to preserve.
+ if (!Quals.empty()) {
+ Result = SemaRef.BuildQualifiedType(Result, T.getBeginLoc(), Quals);
+ TLB.push<QualifiedTypeLoc>(Result);
+ // No location information to preserve.
+ }
return Result;
}
@@ -2792,7 +2811,7 @@ QualType TreeTransform<Derived>::TransformVectorType(TypeLocBuilder &TLB,
if (getDerived().AlwaysRebuild() ||
ElementType != T->getElementType()) {
Result = getDerived().RebuildVectorType(ElementType, T->getNumElements(),
- T->isAltiVec(), T->isPixel());
+ T->getAltiVecSpecific());
if (Result.isNull())
return QualType();
}
@@ -3298,46 +3317,23 @@ QualType TreeTransform<Derived>::TransformDependentNameType(TypeLocBuilder &TLB,
if (!NNS)
return QualType();
- QualType Result;
-
- if (const TemplateSpecializationType *TemplateId = T->getTemplateId()) {
- QualType NewTemplateId
- = getDerived().TransformType(QualType(TemplateId, 0));
- if (NewTemplateId.isNull())
- return QualType();
-
- if (!getDerived().AlwaysRebuild() &&
- NNS == T->getQualifier() &&
- NewTemplateId == QualType(TemplateId, 0))
- return QualType(T, 0);
-
- Result = getDerived().RebuildDependentNameType(T->getKeyword(), NNS,
- NewTemplateId);
- } else {
- Result = getDerived().RebuildDependentNameType(T->getKeyword(), NNS,
- T->getIdentifier(),
- TL.getKeywordLoc(),
- TL.getQualifierRange(),
- TL.getNameLoc());
- }
+ QualType Result
+ = getDerived().RebuildDependentNameType(T->getKeyword(), NNS,
+ T->getIdentifier(),
+ TL.getKeywordLoc(),
+ TL.getQualifierRange(),
+ TL.getNameLoc());
if (Result.isNull())
return QualType();
if (const ElaboratedType* ElabT = Result->getAs<ElaboratedType>()) {
QualType NamedT = ElabT->getNamedType();
- if (isa<TemplateSpecializationType>(NamedT)) {
- TemplateSpecializationTypeLoc NamedTLoc
- = TLB.push<TemplateSpecializationTypeLoc>(NamedT);
- // FIXME: fill locations
- NamedTLoc.initializeLocal(TL.getNameLoc());
- } else {
- TLB.pushTypeSpec(NamedT).setNameLoc(TL.getNameLoc());
- }
+ TLB.pushTypeSpec(NamedT).setNameLoc(TL.getNameLoc());
+
ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
NewTL.setKeywordLoc(TL.getKeywordLoc());
NewTL.setQualifierRange(TL.getQualifierRange());
- }
- else {
+ } else {
DependentNameTypeLoc NewTL = TLB.push<DependentNameTypeLoc>(Result);
NewTL.setKeywordLoc(TL.getKeywordLoc());
NewTL.setQualifierRange(TL.getQualifierRange());
@@ -3347,6 +3343,62 @@ QualType TreeTransform<Derived>::TransformDependentNameType(TypeLocBuilder &TLB,
}
template<typename Derived>
+QualType TreeTransform<Derived>::
+ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
+ DependentTemplateSpecializationTypeLoc TL,
+ QualType ObjectType) {
+ DependentTemplateSpecializationType *T = TL.getTypePtr();
+
+ NestedNameSpecifier *NNS
+ = getDerived().TransformNestedNameSpecifier(T->getQualifier(),
+ TL.getQualifierRange(),
+ ObjectType);
+ if (!NNS)
+ return QualType();
+
+ TemplateArgumentListInfo NewTemplateArgs;
+ NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
+ NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
+
+ for (unsigned I = 0, E = T->getNumArgs(); I != E; ++I) {
+ TemplateArgumentLoc Loc;
+ if (getDerived().TransformTemplateArgument(TL.getArgLoc(I), Loc))
+ return QualType();
+ NewTemplateArgs.addArgument(Loc);
+ }
+
+ QualType Result = getDerived().RebuildDependentTemplateSpecializationType(
+ T->getKeyword(),
+ NNS,
+ T->getIdentifier(),
+ TL.getNameLoc(),
+ NewTemplateArgs);
+ if (Result.isNull())
+ return QualType();
+
+ if (const ElaboratedType *ElabT = dyn_cast<ElaboratedType>(Result)) {
+ QualType NamedT = ElabT->getNamedType();
+
+ // Copy information relevant to the template specialization.
+ TemplateSpecializationTypeLoc NamedTL
+ = TLB.push<TemplateSpecializationTypeLoc>(NamedT);
+ NamedTL.setLAngleLoc(TL.getLAngleLoc());
+ NamedTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
+ NamedTL.setArgLocInfo(I, TL.getArgLocInfo(I));
+
+ // Copy information relevant to the elaborated type.
+ ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
+ NewTL.setKeywordLoc(TL.getKeywordLoc());
+ NewTL.setQualifierRange(TL.getQualifierRange());
+ } else {
+ TypeLoc NewTL(Result, TL.getOpaqueData());
+ TLB.pushFullCopy(NewTL);
+ }
+ return Result;
+}
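+
+// This transform is the TreeTransform counterpart of the ActOnTypenameType
+// change earlier in the patch. The written form stays dependent inside the
+// template and collapses once the qualifier is resolved (illustrative):
+//
+//   template<typename T> struct User {
+//     typedef typename T::template rebind<int> type;
+//     // While T is dependent: a DependentTemplateSpecializationType.
+//     // After substituting a concrete T: rebuilt as an ElaboratedType
+//     // wrapping an ordinary TemplateSpecializationType.
+//   };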
+
+template<typename Derived>
QualType
TreeTransform<Derived>::TransformObjCInterfaceType(TypeLocBuilder &TLB,
ObjCInterfaceTypeLoc TL,
@@ -5167,7 +5219,7 @@ TreeTransform<Derived>::TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
template<typename Derived>
Sema::OwningExprResult
-TreeTransform<Derived>::TransformCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+TreeTransform<Derived>::TransformCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
TemporaryBase Rebase(*this, E->getTypeBeginLoc(), DeclarationName());
QualType T = getDerived().TransformType(E->getType());
@@ -5178,10 +5230,10 @@ TreeTransform<Derived>::TransformCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
T == E->getType())
return SemaRef.Owned(E->Retain());
- return getDerived().RebuildCXXZeroInitValueExpr(E->getTypeBeginLoc(),
- /*FIXME:*/E->getTypeBeginLoc(),
- T,
- E->getRParenLoc());
+ return getDerived().RebuildCXXScalarValueInitExpr(E->getTypeBeginLoc(),
+ /*FIXME:*/E->getTypeBeginLoc(),
+ T,
+ E->getRParenLoc());
}
template<typename Derived>
@@ -5300,7 +5352,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
/*FIXME:*/E->getLocStart(),
move_arg(PlacementArgs),
/*FIXME:*/E->getLocStart(),
- E->isParenTypeId(),
+ E->getTypeIdParens(),
AllocType,
/*FIXME:*/E->getLocStart(),
/*FIXME:*/SourceRange(),
@@ -6165,17 +6217,75 @@ TreeTransform<Derived>::TransformShuffleVectorExpr(ShuffleVectorExpr *E) {
template<typename Derived>
Sema::OwningExprResult
TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
- // FIXME: Implement this!
- assert(false && "Cannot transform block expressions yet");
- return SemaRef.Owned(E->Retain());
+ SourceLocation CaretLoc(E->getExprLoc());
+
+ SemaRef.ActOnBlockStart(CaretLoc, /*Scope=*/0);
+ BlockScopeInfo *CurBlock = SemaRef.getCurBlock();
+ CurBlock->TheDecl->setIsVariadic(E->getBlockDecl()->isVariadic());
+ llvm::SmallVector<ParmVarDecl*, 4> Params;
+ llvm::SmallVector<QualType, 4> ParamTypes;
+
+ // Parameter substitution.
+ const BlockDecl *BD = E->getBlockDecl();
+ for (BlockDecl::param_const_iterator P = BD->param_begin(),
+ EN = BD->param_end(); P != EN; ++P) {
+ ParmVarDecl *OldParm = (*P);
+ ParmVarDecl *NewParm = getDerived().TransformFunctionTypeParam(OldParm);
+    QualType NewType = NewParm->getType();
+    Params.push_back(NewParm);
+    ParamTypes.push_back(NewType);
+ }
+
+ const FunctionType *BExprFunctionType = E->getFunctionType();
+ QualType BExprResultType = BExprFunctionType->getResultType();
+ if (!BExprResultType.isNull()) {
+ if (!BExprResultType->isDependentType())
+ CurBlock->ReturnType = BExprResultType;
+ else if (BExprResultType != SemaRef.Context.DependentTy)
+ CurBlock->ReturnType = getDerived().TransformType(BExprResultType);
+ }
+
+ // Transform the body
+ OwningStmtResult Body = getDerived().TransformStmt(E->getBody());
+ if (Body.isInvalid())
+ return SemaRef.ExprError();
+ // Set the parameters on the block decl.
+ if (!Params.empty())
+ CurBlock->TheDecl->setParams(Params.data(), Params.size());
+
+ QualType FunctionType = getDerived().RebuildFunctionProtoType(
+ CurBlock->ReturnType,
+ ParamTypes.data(),
+ ParamTypes.size(),
+ BD->isVariadic(),
+ 0);
+
+ CurBlock->FunctionType = FunctionType;
+ return SemaRef.ActOnBlockStmtExpr(CaretLoc, move(Body), /*Scope=*/0);
}
template<typename Derived>
Sema::OwningExprResult
TreeTransform<Derived>::TransformBlockDeclRefExpr(BlockDeclRefExpr *E) {
- // FIXME: Implement this!
- assert(false && "Cannot transform block-related expressions yet");
- return SemaRef.Owned(E->Retain());
+ NestedNameSpecifier *Qualifier = 0;
+
+ ValueDecl *ND
+ = cast_or_null<ValueDecl>(getDerived().TransformDecl(E->getLocation(),
+ E->getDecl()));
+ if (!ND)
+ return SemaRef.ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ ND == E->getDecl()) {
+ // Mark it referenced in the new context regardless.
+ // FIXME: this is a bit instantiation-specific.
+ SemaRef.MarkDeclarationReferenced(E->getLocation(), ND);
+
+ return SemaRef.Owned(E->Retain());
+ }
+
+ return getDerived().RebuildDeclRefExpr(Qualifier, SourceLocation(),
+ ND, E->getLocation(), 0);
}
//===----------------------------------------------------------------------===//
@@ -6185,14 +6295,14 @@ TreeTransform<Derived>::TransformBlockDeclRefExpr(BlockDeclRefExpr *E) {
template<typename Derived>
QualType TreeTransform<Derived>::RebuildPointerType(QualType PointeeType,
SourceLocation Star) {
- return SemaRef.BuildPointerType(PointeeType, Qualifiers(), Star,
+ return SemaRef.BuildPointerType(PointeeType, Star,
getDerived().getBaseEntity());
}
template<typename Derived>
QualType TreeTransform<Derived>::RebuildBlockPointerType(QualType PointeeType,
SourceLocation Star) {
- return SemaRef.BuildBlockPointerType(PointeeType, Qualifiers(), Star,
+ return SemaRef.BuildBlockPointerType(PointeeType, Star,
getDerived().getBaseEntity());
}
@@ -6201,7 +6311,7 @@ QualType
TreeTransform<Derived>::RebuildReferenceType(QualType ReferentType,
bool WrittenAsLValue,
SourceLocation Sigil) {
- return SemaRef.BuildReferenceType(ReferentType, WrittenAsLValue, Qualifiers(),
+ return SemaRef.BuildReferenceType(ReferentType, WrittenAsLValue,
Sigil, getDerived().getBaseEntity());
}
@@ -6210,7 +6320,7 @@ QualType
TreeTransform<Derived>::RebuildMemberPointerType(QualType PointeeType,
QualType ClassType,
SourceLocation Sigil) {
- return SemaRef.BuildMemberPointerType(PointeeType, ClassType, Qualifiers(),
+ return SemaRef.BuildMemberPointerType(PointeeType, ClassType,
Sigil, getDerived().getBaseEntity());
}
@@ -6293,11 +6403,10 @@ TreeTransform<Derived>::RebuildDependentSizedArrayType(QualType ElementType,
template<typename Derived>
QualType TreeTransform<Derived>::RebuildVectorType(QualType ElementType,
- unsigned NumElements,
- bool IsAltiVec, bool IsPixel) {
+ unsigned NumElements,
+ VectorType::AltiVecSpecific AltiVecSpec) {
// FIXME: semantic checking!
- return SemaRef.Context.getVectorType(ElementType, NumElements,
- IsAltiVec, IsPixel);
+ return SemaRef.Context.getVectorType(ElementType, NumElements, AltiVecSpec);
}
template<typename Derived>
@@ -6449,13 +6558,15 @@ TreeTransform<Derived>::RebuildTemplateName(NestedNameSpecifier *Qualifier,
SS.setScopeRep(Qualifier);
UnqualifiedId Name;
Name.setIdentifier(&II, /*FIXME:*/getDerived().getBaseLocation());
- return getSema().ActOnDependentTemplateName(
- /*FIXME:*/getDerived().getBaseLocation(),
- SS,
- Name,
- ObjectType.getAsOpaquePtr(),
- /*EnteringContext=*/false)
- .template getAsVal<TemplateName>();
+ Sema::TemplateTy Template;
+ getSema().ActOnDependentTemplateName(/*Scope=*/0,
+ /*FIXME:*/getDerived().getBaseLocation(),
+ SS,
+ Name,
+ ObjectType.getAsOpaquePtr(),
+ /*EnteringContext=*/false,
+ Template);
+ return Template.template getAsVal<TemplateName>();
}
template<typename Derived>
@@ -6470,13 +6581,15 @@ TreeTransform<Derived>::RebuildTemplateName(NestedNameSpecifier *Qualifier,
SourceLocation SymbolLocations[3]; // FIXME: Bogus location information.
Name.setOperatorFunctionId(/*FIXME:*/getDerived().getBaseLocation(),
Operator, SymbolLocations);
- return getSema().ActOnDependentTemplateName(
+ Sema::TemplateTy Template;
+ getSema().ActOnDependentTemplateName(/*Scope=*/0,
/*FIXME:*/getDerived().getBaseLocation(),
- SS,
- Name,
- ObjectType.getAsOpaquePtr(),
- /*EnteringContext=*/false)
- .template getAsVal<TemplateName>();
+ SS,
+ Name,
+ ObjectType.getAsOpaquePtr(),
+ /*EnteringContext=*/false,
+ Template);
+ return Template.template getAsVal<TemplateName>();
}
template<typename Derived>
diff --git a/contrib/llvm/tools/clang/lib/Runtime/Makefile b/contrib/llvm/tools/clang/runtime/Makefile
index 580215a..0e8b359 100644
--- a/contrib/llvm/tools/clang/lib/Runtime/Makefile
+++ b/contrib/llvm/tools/clang/runtime/Makefile
@@ -1,4 +1,4 @@
-##===- clang/lib/Runtime/Makefile --------------------------*- Makefile -*-===##
+##===- clang/runtime/Makefile ------------------------------*- Makefile -*-===##
#
# The LLVM Compiler Infrastructure
#
@@ -13,10 +13,12 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-include $(LEVEL)/Makefile.common
+CLANG_LEVEL := ..
+include $(CLANG_LEVEL)/Makefile
+
+CLANG_VERSION := $(word 3,$(shell grep "CLANG_VERSION " \
+ $(PROJ_OBJ_DIR)/$(CLANG_LEVEL)/include/clang/Basic/Version.inc))
-CLANG_VERSION := $(shell cat $(PROJ_SRC_DIR)/../../VER)
ResourceDir := $(PROJ_OBJ_ROOT)/$(BuildMode)/lib/clang/$(CLANG_VERSION)
PROJ_resources := $(DESTDIR)$(PROJ_prefix)/lib/clang/$(CLANG_VERSION)
@@ -48,6 +50,7 @@ BuildRuntimeLibraries:
$(Verb) $(MAKE) -C $(COMPILERRT_SRC_ROOT) \
ProjSrcRoot=$(COMPILERRT_SRC_ROOT) \
ProjObjRoot=$(PROJ_OBJ_DIR) \
+ CC="$(ToolDir)/clang -no-integrated-as" \
$(RuntimeDirs:%=clang_%)
.PHONY: BuildRuntimeLibraries
CleanRuntimeLibraries:
diff --git a/contrib/llvm/tools/clang/tools/Makefile b/contrib/llvm/tools/clang/tools/Makefile
index 8407dfd..0202cc5 100644
--- a/contrib/llvm/tools/clang/tools/Makefile
+++ b/contrib/llvm/tools/clang/tools/Makefile
@@ -7,13 +7,13 @@
#
##===----------------------------------------------------------------------===##
-LEVEL := ../../..
+CLANG_LEVEL := ..
DIRS := driver libclang c-index-test
-include $(LEVEL)/Makefile.config
+include $(CLANG_LEVEL)/../../Makefile.config
-ifeq ($(OS), $(filter $(OS), Cygwin MingW))
+ifeq ($(OS), $(filter $(OS), Cygwin MingW Minix))
DIRS := $(filter-out libclang c-index-test, $(DIRS))
endif
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/tools/c-index-test/Makefile b/contrib/llvm/tools/clang/tools/c-index-test/Makefile
index 24fed16..d168df5 100644
--- a/contrib/llvm/tools/clang/tools/c-index-test/Makefile
+++ b/contrib/llvm/tools/clang/tools/c-index-test/Makefile
@@ -6,18 +6,15 @@
# License. See LICENSE.TXT for details.
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
TOOLNAME = c-index-test
-CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
# No plugins, optimize startup time.
TOOL_NO_EXPORTS = 1
-include $(LEVEL)/Makefile.config
-
LINK_COMPONENTS := bitreader mc core
USEDLIBS = clang.a clangIndex.a clangFrontend.a clangDriver.a clangSema.a \
clangAnalysis.a clangAST.a clangParse.a clangLex.a clangBasic.a
-include $(LLVM_SRC_ROOT)/Makefile.rules
+include $(CLANG_LEVEL)/Makefile
diff --git a/contrib/llvm/tools/clang/tools/c-index-test/c-index-test.c b/contrib/llvm/tools/clang/tools/c-index-test/c-index-test.c
index 4268cec..4ed24b1 100644
--- a/contrib/llvm/tools/clang/tools/c-index-test/c-index-test.c
+++ b/contrib/llvm/tools/clang/tools/c-index-test/c-index-test.c
@@ -455,16 +455,29 @@ static enum CXChildVisitResult PrintTypeKind(CXCursor cursor, CXCursor p,
if (!clang_isInvalid(clang_getCursorKind(cursor))) {
CXType T = clang_getCursorType(cursor);
- CXType CT = clang_getCanonicalType(T);
CXString S = clang_getTypeKindSpelling(T.kind);
PrintCursor(cursor);
printf(" typekind=%s", clang_getCString(S));
- if (!clang_equalTypes(T, CT)) {
- CXString CS = clang_getTypeKindSpelling(CT.kind);
- printf(" [canonical=%s]", clang_getCString(CS));
- clang_disposeString(CS);
- }
clang_disposeString(S);
+ /* Print the canonical type if it is different. */
+ {
+ CXType CT = clang_getCanonicalType(T);
+ if (!clang_equalTypes(T, CT)) {
+ CXString CS = clang_getTypeKindSpelling(CT.kind);
+ printf(" [canonical=%s]", clang_getCString(CS));
+ clang_disposeString(CS);
+ }
+ }
+ /* Print the return type if it exists. */
+ {
+ CXType RT = clang_getCursorResultType(cursor);
+ if (RT.kind != CXType_Invalid) {
+ CXString RS = clang_getTypeKindSpelling(RT.kind);
+ printf(" [result=%s]", clang_getCString(RS));
+ clang_disposeString(RS);
+ }
+ }
+
printf("\n");
}
return CXChildVisit_Recurse;
@@ -786,7 +799,7 @@ void print_completion_result(CXCompletionResult *completion_result,
clang_getCompletionPriority(completion_result->CompletionString));
}
-int perform_code_completion(int argc, const char **argv) {
+int perform_code_completion(int argc, const char **argv, int timing_only) {
const char *input = argv[1];
char *filename = 0;
unsigned line;
@@ -797,7 +810,11 @@ int perform_code_completion(int argc, const char **argv) {
int num_unsaved_files = 0;
CXCodeCompleteResults *results = 0;
- input += strlen("-code-completion-at=");
+ if (timing_only)
+ input += strlen("-code-completion-timing=");
+ else
+ input += strlen("-code-completion-at=");
+
if ((errorCode = parse_file_line_column(input, &filename, &line, &column,
0, 0)))
return errorCode;
@@ -814,8 +831,9 @@ int perform_code_completion(int argc, const char **argv) {
if (results) {
unsigned i, n = results->NumResults;
- for (i = 0; i != n; ++i)
- print_completion_result(results->Results + i, stdout);
+ if (!timing_only)
+ for (i = 0; i != n; ++i)
+ print_completion_result(results->Results + i, stdout);
n = clang_codeCompleteGetNumDiagnostics(results);
for (i = 0; i != n; ++i) {
CXDiagnostic diag = clang_codeCompleteGetDiagnostic(results, i);
@@ -1191,6 +1209,7 @@ static CXCursorVisitor GetVisitor(const char *s) {
static void print_usage(void) {
fprintf(stderr,
"usage: c-index-test -code-completion-at=<site> <compiler arguments>\n"
+ " c-index-test -code-completion-timing=<site> <compiler arguments>\n"
" c-index-test -cursor-at=<site> <compiler arguments>\n"
" c-index-test -test-file-scan <AST file> <source file> "
"[FileCheck prefix]\n"
@@ -1198,9 +1217,9 @@ static void print_usage(void) {
"[FileCheck prefix]\n"
" c-index-test -test-load-tu-usrs <AST file> <symbol filter> "
"[FileCheck prefix]\n"
- " c-index-test -test-load-source <symbol filter> {<args>}*\n"
- " c-index-test -test-load-source-usrs <symbol filter> {<args>}*\n");
+ " c-index-test -test-load-source <symbol filter> {<args>}*\n");
fprintf(stderr,
+ " c-index-test -test-load-source-usrs <symbol filter> {<args>}*\n"
" c-index-test -test-annotate-tokens=<range> {<args>}*\n"
" c-index-test -test-inclusion-stack-source {<args>}*\n"
" c-index-test -test-inclusion-stack-tu <AST file>\n"
@@ -1222,7 +1241,9 @@ static void print_usage(void) {
int main(int argc, const char **argv) {
clang_enableStackTraces();
if (argc > 2 && strstr(argv[1], "-code-completion-at=") == argv[1])
- return perform_code_completion(argc, argv);
+ return perform_code_completion(argc, argv, 0);
+ if (argc > 2 && strstr(argv[1], "-code-completion-timing=") == argv[1])
+ return perform_code_completion(argc, argv, 1);
if (argc > 2 && strstr(argv[1], "-cursor-at=") == argv[1])
return inspect_cursor_at(argc, argv);
else if (argc >= 4 && strncmp(argv[1], "-test-load-tu", 13) == 0) {
diff --git a/contrib/llvm/tools/clang/tools/driver/CMakeLists.txt b/contrib/llvm/tools/clang/tools/driver/CMakeLists.txt
index 706f050..0eaddba 100644
--- a/contrib/llvm/tools/clang/tools/driver/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/tools/driver/CMakeLists.txt
@@ -16,6 +16,7 @@ set( LLVM_USED_LIBS
set( LLVM_LINK_COMPONENTS
${LLVM_TARGETS_TO_BUILD}
+ asmparser
bitreader
bitwriter
codegen
diff --git a/contrib/llvm/tools/clang/tools/driver/Makefile b/contrib/llvm/tools/clang/tools/driver/Makefile
index f88d229..b049af6 100644
--- a/contrib/llvm/tools/clang/tools/driver/Makefile
+++ b/contrib/llvm/tools/clang/tools/driver/Makefile
@@ -6,7 +6,7 @@
# License. See LICENSE.TXT for details.
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
TOOLNAME = clang
ifndef CLANG_IS_PRODUCTION
@@ -16,22 +16,19 @@ else
TOOLALIAS = clang++
endif
endif
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-# Clang tool has no plugins, optimize startup time.
-TOOL_NO_EXPORTS = 1
# Include this here so we can get the configuration of the targets that have
# been configured for construction. We have to do this early so we can set up
# LINK_COMPONENTS before including Makefile.rules
-include $(LEVEL)/Makefile.config
+include $(CLANG_LEVEL)/../../Makefile.config
-LINK_COMPONENTS := $(TARGETS_TO_BUILD) bitreader bitwriter codegen ipo selectiondag
+LINK_COMPONENTS := $(TARGETS_TO_BUILD) asmparser bitreader bitwriter codegen \
+ ipo selectiondag
USEDLIBS = clangFrontend.a clangDriver.a clangCodeGen.a clangSema.a \
clangChecker.a clangAnalysis.a clangRewrite.a clangAST.a \
clangParse.a clangLex.a clangBasic.a
-include $(LLVM_SRC_ROOT)/Makefile.rules
+include $(CLANG_LEVEL)/Makefile
# Translate make variable to define when building a "production" clang.
ifdef CLANG_IS_PRODUCTION
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
index ac19e93..841e40a 100644
--- a/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
@@ -14,12 +14,13 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/Diagnostic.h"
+#include "clang/Checker/FrontendActions.h"
+#include "clang/CodeGen/CodeGenAction.h"
#include "clang/Driver/Arg.h"
#include "clang/Driver/ArgList.h"
#include "clang/Driver/CC1Options.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/OptTable.h"
-#include "clang/Frontend/CodeGenAction.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"
@@ -27,6 +28,7 @@
#include "clang/Frontend/FrontendPluginRegistry.h"
#include "clang/Frontend/TextDiagnosticBuffer.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Rewrite/FrontendActions.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/Statistic.h"
@@ -83,21 +85,15 @@ static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
case ParseSyntaxOnly: return new SyntaxOnlyAction();
case PluginAction: {
- if (CI.getFrontendOpts().ActionName == "help") {
- llvm::errs() << "clang -cc1 plugins:\n";
- for (FrontendPluginRegistry::iterator it =
- FrontendPluginRegistry::begin(),
- ie = FrontendPluginRegistry::end();
- it != ie; ++it)
- llvm::errs() << " " << it->getName() << " - " << it->getDesc() << "\n";
- return 0;
- }
for (FrontendPluginRegistry::iterator it =
FrontendPluginRegistry::begin(), ie = FrontendPluginRegistry::end();
it != ie; ++it) {
- if (it->getName() == CI.getFrontendOpts().ActionName)
- return it->instantiate();
+ if (it->getName() == CI.getFrontendOpts().ActionName) {
+ PluginASTAction* plugin = it->instantiate();
+ plugin->ParseArgs(CI.getFrontendOpts().PluginArgs);
+ return plugin;
+ }
}
CI.getDiagnostics().Report(diag::err_fe_invalid_plugin_name)
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
index 5f1ee09..3c5ca92 100644
--- a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
+++ b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
@@ -136,7 +136,7 @@ void AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
// Issue errors on unknown arguments.
for (arg_iterator it = Args->filtered_begin(cc1asoptions::OPT_UNKNOWN),
ie = Args->filtered_end(); it != ie; ++it)
- Diags.Report(diag::err_drv_unknown_argument) << it->getAsString(*Args);
+    Diags.Report(diag::err_drv_unknown_argument) << (*it)->getAsString(*Args);
// Construct the invocation.
@@ -154,10 +154,11 @@ void AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
bool First = true;
for (arg_iterator it = Args->filtered_begin(OPT_INPUT),
ie = Args->filtered_end(); it != ie; ++it, First=false) {
+    const Arg *A = *it;
if (First)
- Opts.InputFile = it->getValue(*Args);
+ Opts.InputFile = A->getValue(*Args);
else
- Diags.Report(diag::err_drv_unknown_argument) << it->getAsString(*Args);
+ Diags.Report(diag::err_drv_unknown_argument) << A->getAsString(*Args);
}
}
Opts.LLVMArgs = Args->getAllArgValues(OPT_mllvm);
@@ -274,7 +275,7 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts, Diagnostic &Diags) {
Str.reset(createMachOStreamer(Ctx, *TAB, *Out, CE.get(), Opts.RelaxAll));
}
- AsmParser Parser(SrcMgr, Ctx, *Str.get(), *MAI);
+ AsmParser Parser(*TheTarget, SrcMgr, Ctx, *Str.get(), *MAI);
OwningPtr<TargetAsmParser> TAP(TheTarget->createAsmParser(Parser));
if (!TAP) {
Diags.Report(diag::err_target_unknown_triple) << Opts.Triple;
diff --git a/contrib/llvm/tools/clang/tools/libclang/CIndex.cpp b/contrib/llvm/tools/clang/tools/libclang/CIndex.cpp
index a077589..7f32a1c 100644
--- a/contrib/llvm/tools/clang/tools/libclang/CIndex.cpp
+++ b/contrib/llvm/tools/clang/tools/libclang/CIndex.cpp
@@ -177,12 +177,12 @@ static RangeComparisonResult LocationCompare(SourceManager &SM,
/// does the appropriate translation.
CXSourceRange cxloc::translateSourceRange(const SourceManager &SM,
const LangOptions &LangOpts,
- SourceRange R) {
+ const CharSourceRange &R) {
// We want the last character in this location, so we will adjust the
// location accordingly.
// FIXME: How do we do this with a macro instantiation location?
SourceLocation EndLoc = R.getEnd();
- if (!EndLoc.isInvalid() && EndLoc.isFileID()) {
+ if (R.isTokenRange() && !EndLoc.isInvalid() && EndLoc.isFileID()) {
unsigned Length = Lexer::MeasureTokenLength(EndLoc, SM, LangOpts);
EndLoc = EndLoc.getFileLocWithOffset(Length);
}
@@ -517,10 +517,8 @@ bool CursorVisitor::VisitChildren(CXCursor Cursor) {
}
bool CursorVisitor::VisitBlockDecl(BlockDecl *B) {
- for (BlockDecl::param_iterator I=B->param_begin(), E=B->param_end(); I!=E;++I)
- if (Decl *D = *I)
- if (Visit(D))
- return true;
+ if (Visit(B->getSignatureAsWritten()->getTypeLoc()))
+ return true;
return Visit(MakeCXCursor(B->getBody(), StmtParent, TU));
}
@@ -672,6 +670,9 @@ bool CursorVisitor::VisitObjCProtocolDecl(ObjCProtocolDecl *PID) {
}
bool CursorVisitor::VisitObjCPropertyDecl(ObjCPropertyDecl *PD) {
+ if (Visit(PD->getTypeSourceInfo()->getTypeLoc()))
+ return true;
+
// FIXME: This implements a workaround with @property declarations also being
// installed in the DeclContext for the @interface. Eventually this code
// should be removed.
@@ -1183,6 +1184,15 @@ clang_createTranslationUnitFromSourceFile(CXIndex CIdx,
// in the actual argument list.
if (source_filename)
Args.push_back(source_filename);
+
+ // Since the Clang C library is primarily used by batch tools dealing with
+ // (often very broken) source code, where spell-checking can have a
+ // significant negative impact on performance (particularly when
+ // precompiled headers are involved), we disable it by default.
+ // Note that we place this argument early in the list, so that it can be
+ // overridden by the caller with "-fspell-checking".
+ Args.push_back("-fno-spell-checking");
+
Args.insert(Args.end(), command_line_args,
command_line_args + num_command_line_args);
Args.push_back("-Xclang");
@@ -1246,6 +1256,14 @@ clang_createTranslationUnitFromSourceFile(CXIndex CIdx,
argv.push_back("-o");
char astTmpFile[L_tmpnam];
argv.push_back(tmpnam(astTmpFile));
+
+ // Since the Clang C library is primarily used by batch tools dealing with
+ // (often very broken) source code, where spell-checking can have a
+ // significant negative impact on performance (particularly when
+ // precompiled headers are involved), we disable it by default.
+ // Note that we place this argument early in the list, so that it can be
+ // overridden by the caller with "-fspell-checking".
+ argv.push_back("-fno-spell-checking");
// Remap any unsaved files to temporary files.
std::vector<llvm::sys::Path> TemporaryFiles;
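
As the two comments above note, "-fno-spell-checking" is injected ahead of the caller's own arguments, so a caller re-enables spell checking simply by passing "-fspell-checking" later in the list. A minimal sketch of such a caller, assuming the single-argument clang_createIndex of this snapshot (the file name is hypothetical):

    #include "clang-c/Index.h"

    int main(void) {
      CXIndex Idx = clang_createIndex(/*excludeDeclarationsFromPCH=*/0);
      /* Appears after the injected -fno-spell-checking, so it wins. */
      const char *Args[] = { "-fspell-checking" };
      CXTranslationUnit TU =
          clang_createTranslationUnitFromSourceFile(Idx, "input.c",
                                                    1, Args, 0, 0);
      if (TU)
        clang_disposeTranslationUnit(TU);
      clang_disposeIndex(Idx);
      return 0;
    }
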
@@ -1479,16 +1497,6 @@ CXSourceLocation clang_getRangeEnd(CXSourceRange range) {
return Result;
}
-unsigned clang_isFromMainFile(CXSourceLocation loc) {
- SourceLocation Loc = SourceLocation::getFromRawEncoding(loc.int_data);
- if (!loc.ptr_data[0] || Loc.isInvalid())
- return 0;
-
- const SourceManager &SM =
- *static_cast<const SourceManager*>(loc.ptr_data[0]);
- return SM.isFromMainFile(Loc) ? 1 : 0;
-}
-
} // end: extern "C"
//===----------------------------------------------------------------------===//
@@ -2048,6 +2056,7 @@ CXCursor clang_getCursorDefinition(CXCursor C) {
case Decl::TemplateTemplateParm:
case Decl::ObjCCategoryImpl:
case Decl::ObjCImplementation:
+ case Decl::AccessSpec:
case Decl::LinkageSpec:
case Decl::ObjCPropertyImpl:
case Decl::FileScopeAsm:
diff --git a/contrib/llvm/tools/clang/tools/libclang/CIndexCodeCompletion.cpp b/contrib/llvm/tools/clang/tools/libclang/CIndexCodeCompletion.cpp
index 481a375..277fadf 100644
--- a/contrib/llvm/tools/clang/tools/libclang/CIndexCodeCompletion.cpp
+++ b/contrib/llvm/tools/clang/tools/libclang/CIndexCodeCompletion.cpp
@@ -202,7 +202,7 @@ unsigned clang_getNumCompletionChunks(CXCompletionString completion_string) {
unsigned clang_getCompletionPriority(CXCompletionString completion_string) {
CXStoredCodeCompletionString *CCStr
= (CXStoredCodeCompletionString *)completion_string;
- return CCStr? CCStr->getPriority() : CCP_Unlikely;
+ return CCStr? CCStr->getPriority() : unsigned(CCP_Unlikely);
}
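
The unsigned(CCP_Unlikely) cast above is needed because the two arms of a conditional operator must converge on one type; an unsigned expression in one arm and a bare enumerator in the other trip sign/enum-conversion warnings on strict compilers. A reduced sketch with hypothetical names:

    enum Priority { DemoUnlikely = 80 };  // stand-in for CCP_Unlikely

    unsigned pickPriority(bool have, unsigned p) {
      // "have ? p : DemoUnlikely" mixes unsigned and enum arms and can warn;
      // casting the enumerator makes both arms unsigned.
      return have ? p : unsigned(DemoUnlikely);
    }
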
static bool ReadUnsigned(const char *&Memory, const char *MemoryEnd,
@@ -291,6 +291,9 @@ CXCodeCompleteResults *clang_codeComplete(CXIndex CIdx,
llvm::sys::Path ClangPath = CXXIdx->getClangPath();
argv.push_back(ClangPath.c_str());
+ // Always use Clang C++ support.
+ argv.push_back("-ccc-clang-cxx");
+
// Add the '-fsyntax-only' argument so that we only perform a basic
// syntax check of the code.
argv.push_back("-fsyntax-only");
diff --git a/contrib/llvm/tools/clang/tools/libclang/CIndexer.cpp b/contrib/llvm/tools/clang/tools/libclang/CIndexer.cpp
index d5131ff..cdf6c61 100644
--- a/contrib/llvm/tools/clang/tools/libclang/CIndexer.cpp
+++ b/contrib/llvm/tools/clang/tools/libclang/CIndexer.cpp
@@ -135,6 +135,7 @@ bool clang::RemapFiles(unsigned num_unsaved_files,
OS.close();
if (OS.has_error()) {
SavedFile.eraseFromDisk();
+ OS.clear_error();
return true;
}
diff --git a/contrib/llvm/tools/clang/tools/libclang/CMakeLists.txt b/contrib/llvm/tools/clang/tools/libclang/CMakeLists.txt
index 62c9738..ab4acca 100644
--- a/contrib/llvm/tools/clang/tools/libclang/CMakeLists.txt
+++ b/contrib/llvm/tools/clang/tools/libclang/CMakeLists.txt
@@ -3,7 +3,7 @@ set(SHARED_LIBRARY TRUE)
set(LLVM_NO_RTTI 1)
set(LLVM_USED_LIBS
- clangFrontend
+ clangFrontend
clangDriver
clangSema
clangAnalysis
@@ -29,7 +29,6 @@ add_clang_library(libclang
CXTypes.cpp
../../include/clang-c/Index.h
)
-set_target_properties(libclang PROPERTIES OUTPUT_NAME clang)
if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
# FIXME: Deal with LLVM_SUBMIT_VERSION?
diff --git a/contrib/llvm/tools/clang/tools/libclang/CXCursor.cpp b/contrib/llvm/tools/clang/tools/libclang/CXCursor.cpp
index f7192dd..be3623f 100644
--- a/contrib/llvm/tools/clang/tools/libclang/CXCursor.cpp
+++ b/contrib/llvm/tools/clang/tools/libclang/CXCursor.cpp
@@ -78,9 +78,9 @@ static CXCursorKind GetCursorKind(const Attr *A) {
assert(A && "Invalid arguments!");
switch (A->getKind()) {
default: break;
- case Attr::IBActionKind: return CXCursor_IBActionAttr;
- case Attr::IBOutletKind: return CXCursor_IBOutletAttr;
- case Attr::IBOutletCollectionKind: return CXCursor_IBOutletCollectionAttr;
+ case attr::IBAction: return CXCursor_IBActionAttr;
+ case attr::IBOutlet: return CXCursor_IBOutletAttr;
+ case attr::IBOutletCollection: return CXCursor_IBOutletCollectionAttr;
}
return CXCursor_UnexposedAttr;
@@ -174,7 +174,7 @@ CXCursor cxcursor::MakeCXCursor(Stmt *S, Decl *Parent, ASTUnit *TU) {
case Stmt::CXXThisExprClass:
case Stmt::CXXThrowExprClass:
case Stmt::CXXDefaultArgExprClass:
- case Stmt::CXXZeroInitValueExprClass:
+ case Stmt::CXXScalarValueInitExprClass:
case Stmt::CXXNewExprClass:
case Stmt::CXXDeleteExprClass:
case Stmt::CXXPseudoDestructorExprClass:
diff --git a/contrib/llvm/tools/clang/tools/libclang/CXSourceLocation.h b/contrib/llvm/tools/clang/tools/libclang/CXSourceLocation.h
index 66566c1..7a50205 100644
--- a/contrib/llvm/tools/clang/tools/libclang/CXSourceLocation.h
+++ b/contrib/llvm/tools/clang/tools/libclang/CXSourceLocation.h
@@ -29,6 +29,9 @@ namespace cxloc {
static inline CXSourceLocation
translateSourceLocation(const SourceManager &SM, const LangOptions &LangOpts,
SourceLocation Loc) {
+  if (Loc.isInvalid())
+    return clang_getNullLocation();
+
CXSourceLocation Result = { { (void*) &SM, (void*) &LangOpts, },
Loc.getRawEncoding() };
return Result;
@@ -50,14 +53,14 @@ static inline CXSourceLocation translateSourceLocation(ASTContext &Context,
/// does the appropriate translation.
CXSourceRange translateSourceRange(const SourceManager &SM,
const LangOptions &LangOpts,
- SourceRange R);
+ const CharSourceRange &R);
/// \brief Translate a Clang source range into a CIndex source range.
static inline CXSourceRange translateSourceRange(ASTContext &Context,
SourceRange R) {
return translateSourceRange(Context.getSourceManager(),
Context.getLangOptions(),
- R);
+ CharSourceRange::getTokenRange(R));
}
static inline SourceLocation translateSourceLocation(CXSourceLocation L) {
diff --git a/contrib/llvm/tools/clang/tools/libclang/CXTypes.cpp b/contrib/llvm/tools/clang/tools/libclang/CXTypes.cpp
index 137370a..d5c9f45 100644
--- a/contrib/llvm/tools/clang/tools/libclang/CXTypes.cpp
+++ b/contrib/llvm/tools/clang/tools/libclang/CXTypes.cpp
@@ -77,6 +77,8 @@ static CXTypeKind GetTypeKind(QualType T) {
TKCASE(Typedef);
TKCASE(ObjCInterface);
TKCASE(ObjCObjectPointer);
+ TKCASE(FunctionNoProto);
+ TKCASE(FunctionProto);
default:
return CXType_Unexposed;
}
@@ -116,7 +118,10 @@ CXType clang_getCursorType(CXCursor C) {
return MakeCXType(QualType(ID->getTypeForDecl(), 0), AU);
if (ValueDecl *VD = dyn_cast<ValueDecl>(D))
return MakeCXType(VD->getType(), AU);
-
+ if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D))
+ return MakeCXType(PD->getType(), AU);
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ return MakeCXType(FD->getType(), AU);
return MakeCXType(QualType(), AU);
}
@@ -165,8 +170,15 @@ CXType clang_getPointeeType(CXType CT) {
}
CXCursor clang_getTypeDeclaration(CXType CT) {
+ if (CT.kind == CXType_Invalid)
+ return cxcursor::MakeCXCursorInvalid(CXCursor_NoDeclFound);
+
QualType T = GetQualType(CT);
Type *TP = T.getTypePtr();
+
+ if (!TP)
+ return cxcursor::MakeCXCursorInvalid(CXCursor_NoDeclFound);
+
Decl *D = 0;
switch (TP->getTypeClass()) {
@@ -237,6 +249,8 @@ CXString clang_getTypeKindSpelling(enum CXTypeKind K) {
TKIND(Typedef);
TKIND(ObjCInterface);
TKIND(ObjCObjectPointer);
+ TKIND(FunctionNoProto);
+ TKIND(FunctionProto);
}
#undef TKIND
return cxstring::createCXString(s);
@@ -246,4 +260,27 @@ unsigned clang_equalTypes(CXType A, CXType B) {
return A.data[0] == B.data[0] && A.data[1] == B.data[1];
}
+CXType clang_getResultType(CXType X) {
+ QualType T = GetQualType(X);
+ if (!T.getTypePtr())
+ return MakeCXType(QualType(), GetASTU(X));
+
+ if (const FunctionType *FD = T->getAs<FunctionType>())
+ return MakeCXType(FD->getResultType(), GetASTU(X));
+
+ return MakeCXType(QualType(), GetASTU(X));
+}
+
+CXType clang_getCursorResultType(CXCursor C) {
+ if (clang_isDeclaration(C.kind)) {
+ Decl *D = cxcursor::getCursorDecl(C);
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MakeCXType(MD->getResultType(), cxcursor::getCursorASTUnit(C));
+
+ return clang_getResultType(clang_getCursorType(C));
+ }
+
+ return MakeCXType(QualType(), cxcursor::getCursorASTUnit(C));
+}
+
} // end: extern "C"
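
A minimal sketch of the new entry points in use, assuming a translation unit TU has already been created (e.g. as in the earlier sketch); it prints the result-type kind of every cursor that has one, mirroring what c-index-test's PrintTypeKind now does:

    #include "clang-c/Index.h"
    #include <stdio.h>

    static enum CXChildVisitResult PrintResultKind(CXCursor C, CXCursor Parent,
                                                   CXClientData Data) {
      /* Non-function cursors yield CXType_Invalid, so no filtering needed. */
      CXType RT = clang_getCursorResultType(C);
      if (RT.kind != CXType_Invalid) {
        CXString S = clang_getTypeKindSpelling(RT.kind);
        printf("result=%s\n", clang_getCString(S));
        clang_disposeString(S);
      }
      return CXChildVisit_Recurse;
    }

    /* Usage: clang_visitChildren(clang_getTranslationUnitCursor(TU),
                                  PrintResultKind, 0); */
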
diff --git a/contrib/llvm/tools/clang/tools/libclang/Makefile b/contrib/llvm/tools/clang/tools/libclang/Makefile
index ff0fa33..253ea38 100644
--- a/contrib/llvm/tools/clang/tools/libclang/Makefile
+++ b/contrib/llvm/tools/clang/tools/libclang/Makefile
@@ -7,18 +7,11 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME = clang
EXPORTED_SYMBOL_FILE = $(PROJ_SRC_DIR)/libclang.exports
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-# Include this here so we can get the configuration of the targets
-# that have been configured for construction. We have to do this
-# early so we can set up LINK_COMPONENTS before including Makefile.rules
-include $(LEVEL)/Makefile.config
-
LINK_LIBS_IN_SHARED = 1
SHARED_LIBRARY = 1
@@ -26,7 +19,7 @@ LINK_COMPONENTS := bitreader mc core
USEDLIBS = clangFrontend.a clangDriver.a clangSema.a \
clangAnalysis.a clangAST.a clangParse.a clangLex.a clangBasic.a
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
##===----------------------------------------------------------------------===##
# FIXME: This is copied from the 'lto' makefile. Should we share this?
diff --git a/contrib/llvm/tools/clang/tools/libclang/libclang.darwin.exports b/contrib/llvm/tools/clang/tools/libclang/libclang.darwin.exports
index a9f4f07..f21fec6 100644
--- a/contrib/llvm/tools/clang/tools/libclang/libclang.darwin.exports
+++ b/contrib/llvm/tools/clang/tools/libclang/libclang.darwin.exports
@@ -41,6 +41,7 @@ _clang_getCursorLinkage
_clang_getCursorLocation
_clang_getCursorReferenced
_clang_getCursorSpelling
+_clang_getCursorResultType
_clang_getCursorType
_clang_getCursorUSR
_clang_getDefinitionSpellingAndExtent
@@ -67,6 +68,7 @@ _clang_getPointeeType
_clang_getRange
_clang_getRangeEnd
_clang_getRangeStart
+_clang_getResultType
_clang_getTokenExtent
_clang_getTokenKind
_clang_getTokenLocation
@@ -78,7 +80,6 @@ _clang_getTypeKindSpelling
_clang_isCursorDefinition
_clang_isDeclaration
_clang_isExpression
-_clang_isFromMainFile
_clang_isInvalid
_clang_isPreprocessing
_clang_isReference
diff --git a/contrib/llvm/tools/clang/tools/libclang/libclang.exports b/contrib/llvm/tools/clang/tools/libclang/libclang.exports
index b09e6ac..dcb40d4 100644
--- a/contrib/llvm/tools/clang/tools/libclang/libclang.exports
+++ b/contrib/llvm/tools/clang/tools/libclang/libclang.exports
@@ -41,6 +41,7 @@ clang_getCursorLinkage
clang_getCursorLocation
clang_getCursorReferenced
clang_getCursorSpelling
+clang_getCursorResultType
clang_getCursorType
clang_getCursorUSR
clang_getDefinitionSpellingAndExtent
@@ -67,6 +68,7 @@ clang_getPointeeType
clang_getRange
clang_getRangeEnd
clang_getRangeStart
+clang_getResultType
clang_getTokenExtent
clang_getTokenKind
clang_getTokenLocation
@@ -78,7 +80,6 @@ clang_getTypeKindSpelling
clang_isCursorDefinition
clang_isDeclaration
clang_isExpression
-clang_isFromMainFile
clang_isInvalid
clang_isPreprocessing
clang_isReference
@@ -88,4 +89,3 @@ clang_isUnexposed
clang_setUseExternalASTGeneration
clang_tokenize
clang_visitChildren
-
diff --git a/contrib/llvm/tools/clang/tools/scan-build/ccc-analyzer b/contrib/llvm/tools/clang/tools/scan-build/ccc-analyzer
index 391ea57..c182a68 100755
--- a/contrib/llvm/tools/clang/tools/scan-build/ccc-analyzer
+++ b/contrib/llvm/tools/clang/tools/scan-build/ccc-analyzer
@@ -315,11 +315,13 @@ sub Analyze {
my %CompileOptionMap = (
'-nostdinc' => 0,
'-fblocks' => 0,
+ '-fno-builtin' => 0,
'-fobjc-gc-only' => 0,
'-fobjc-gc' => 0,
'-ffreestanding' => 0,
'-include' => 1,
'-idirafter' => 1,
+ '-imacros' => 1,
'-iprefix' => 1,
'-iquote' => 1,
'-isystem' => 1,
@@ -364,6 +366,7 @@ my %IgnoredOptionMap = (
my %LangMap = (
'c' => 'c',
+ 'cp' => 'c++',
'cpp' => 'c++',
'cc' => 'c++',
'i' => 'c-cpp-output',
diff --git a/contrib/llvm/tools/clang/utils/FuzzTest b/contrib/llvm/tools/clang/utils/FuzzTest
new file mode 100755
index 0000000..2aa5989
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/FuzzTest
@@ -0,0 +1,340 @@
+#!/usr/bin/env python
+
+"""
+This is a generic fuzz testing tool, see --help for more information.
+"""
+
+import os
+import sys
+import random
+import subprocess
+import itertools
+
+class TestGenerator:
+ def __init__(self, inputs, delete, insert, replace,
+ insert_strings, pick_input):
+ self.inputs = [(s, open(s).read()) for s in inputs]
+
+ self.delete = bool(delete)
+ self.insert = bool(insert)
+ self.replace = bool(replace)
+ self.pick_input = bool(pick_input)
+ self.insert_strings = list(insert_strings)
+
+ self.num_positions = sum([len(d) for _,d in self.inputs])
+ self.num_insert_strings = len(insert_strings)
+ self.num_tests = ((delete + (insert + replace)*self.num_insert_strings)
+ * self.num_positions)
+ self.num_tests += 1
+
+ if self.pick_input:
+ self.num_tests *= self.num_positions
+
+ def position_to_source_index(self, position):
+ for i,(s,d) in enumerate(self.inputs):
+ n = len(d)
+ if position < n:
+ return (i,position)
+ position -= n
+ raise ValueError,'Invalid position.'
+
+ def get_test(self, index):
+ assert 0 <= index < self.num_tests
+
+ picked_position = None
+ if self.pick_input:
+ index,picked_position = divmod(index, self.num_positions)
+ picked_position = self.position_to_source_index(picked_position)
+
+ if index == 0:
+ return ('nothing', None, None, picked_position)
+
+ index -= 1
+ index,position = divmod(index, self.num_positions)
+ position = self.position_to_source_index(position)
+ if self.delete:
+ if index == 0:
+ return ('delete', position, None, picked_position)
+ index -= 1
+
+ index,insert_index = divmod(index, self.num_insert_strings)
+ insert_str = self.insert_strings[insert_index]
+ if self.insert:
+ if index == 0:
+ return ('insert', position, insert_str, picked_position)
+ index -= 1
+
+ assert self.replace
+ assert index == 0
+ return ('replace', position, insert_str, picked_position)
+
+class TestApplication:
+ def __init__(self, tg, test):
+ self.tg = tg
+ self.test = test
+
+ def apply(self):
+ if self.test[0] == 'nothing':
+ pass
+ else:
+ i,j = self.test[1]
+ name,data = self.tg.inputs[i]
+ if self.test[0] == 'delete':
+ data = data[:j] + data[j+1:]
+ elif self.test[0] == 'insert':
+ data = data[:j] + self.test[2] + data[j:]
+ elif self.test[0] == 'replace':
+ data = data[:j] + self.test[2] + data[j+1:]
+ else:
+ raise ValueError,'Invalid test %r' % self.test
+ open(name,'wb').write(data)
+
+ def revert(self):
+ if self.test[0] != 'nothing':
+ i,j = self.test[1]
+ name,data = self.tg.inputs[i]
+ open(name,'wb').write(data)
+
+def quote(str):
+ return '"' + str + '"'
+
+def run_one_test(test_application, index, input_files, args):
+ test = test_application.test
+
+ # Interpolate arguments.
+ options = { 'index' : index,
+ 'inputs' : ' '.join(quote(f) for f in input_files) }
+
+ # Add picked input interpolation arguments, if used.
+ if test[3] is not None:
+ pos = test[3][1]
+ options['picked_input'] = input_files[test[3][0]]
+ options['picked_input_pos'] = pos
+ # Compute the line and column.
+ file_data = test_application.tg.inputs[test[3][0]][1]
+ line = column = 1
+ for i in range(pos):
+ c = file_data[i]
+ if c == '\n':
+ line += 1
+ column = 1
+ else:
+ column += 1
+ options['picked_input_line'] = line
+ options['picked_input_col'] = column
+
+ test_args = [a % options for a in args]
+ if opts.verbose:
+ print '%s: note: executing %r' % (sys.argv[0], test_args)
+
+ stdout = None
+ stderr = None
+ if opts.log_dir:
+ stdout_log_path = os.path.join(opts.log_dir, '%s.out' % index)
+ stderr_log_path = os.path.join(opts.log_dir, '%s.err' % index)
+ stdout = open(stdout_log_path, 'wb')
+ stderr = open(stderr_log_path, 'wb')
+ else:
+ sys.stdout.flush()
+ p = subprocess.Popen(test_args, stdout=stdout, stderr=stderr)
+ p.communicate()
+ exit_code = p.wait()
+
+ test_result = (exit_code == opts.expected_exit_code or
+ exit_code in opts.extra_exit_codes)
+
+ if stdout is not None:
+ stdout.close()
+ stderr.close()
+
+ # Remove the logs for passes, unless logging all results.
+ if not opts.log_all and test_result:
+ os.remove(stdout_log_path)
+ os.remove(stderr_log_path)
+
+ if not test_result:
+ print 'FAIL: %d' % index
+ elif not opts.succinct:
+ print 'PASS: %d' % index
+
+def main():
+ global opts
+ from optparse import OptionParser, OptionGroup
+ parser = OptionParser("""%prog [options] ... test command args ...
+
+%prog is a tool for fuzzing inputs and testing them.
+
+The most basic usage is something like:
+
+ $ %prog --file foo.txt ./test.sh
+
+which will run a default list of fuzzing strategies on the input. For each
+fuzzed input, it will overwrite the input files (in place), run the test script,
+then restore the files back to their original contents.
+
+NOTE: You should make sure you have a backup copy of your inputs, in case
+something goes wrong!!!
+
+You can cause the fuzzing to not restore the original files with
+'--no-revert'. Generally this is used with '--test <index>' to run one failing
+test and then leave the fuzzed inputs in place to examine the failure.
+
+For each fuzzed input, %prog will run the test command given on the command
+line. Each argument in the command is subject to string interpolation before
+being executed. The syntax is "%(VARIABLE)FORMAT" where FORMAT is a standard
+printf format, and VARIABLE is one of:
+
+ 'index' - the test index being run
+ 'inputs' - the full list of test inputs
+ 'picked_input' - (with --pick-input) the selected input file
+ 'picked_input_pos' - (with --pick-input) the selected input position
+ 'picked_input_line' - (with --pick-input) the selected input line
+ 'picked_input_col' - (with --pick-input) the selected input column
+
+By default, the script will run forever continually picking new tests to
+run. You can limit the number of tests that are run with '--max-tests <number>',
+and you can run a particular test with '--test <index>'.
+""")
+ parser.add_option("-v", "--verbose", help="Show more output",
+ action='store_true', dest="verbose", default=False)
+ parser.add_option("-s", "--succinct", help="Reduce amount of output",
+ action="store_true", dest="succinct", default=False)
+
+ group = OptionGroup(parser, "Test Execution")
+ group.add_option("", "--expected-exit-code", help="Set expected exit code",
+ type=int, dest="expected_exit_code",
+ default=0)
+ group.add_option("", "--extra-exit-code",
+ help="Set additional expected exit code",
+ type=int, action="append", dest="extra_exit_codes",
+ default=[])
+ group.add_option("", "--log-dir",
+ help="Capture test logs to an output directory",
+ type=str, dest="log_dir",
+ default=None)
+ group.add_option("", "--log-all",
+ help="Log all outputs (not just failures)",
+ action="store_true", dest="log_all", default=False)
+ parser.add_option_group(group)
+
+ group = OptionGroup(parser, "Input Files")
+ group.add_option("", "--file", metavar="PATH",
+ help="Add an input file to fuzz",
+ type=str, action="append", dest="input_files", default=[])
+ group.add_option("", "--filelist", metavar="LIST",
+                     help="Add a list of input files to fuzz (one per line)",
+                     type=str, action="append", dest="filelists", default=[])
+ parser.add_option_group(group)
+
+ group = OptionGroup(parser, "Fuzz Options")
+ group.add_option("", "--replacement-chars", dest="replacement_chars",
+ help="Characters to insert/replace",
+ default="0{}[]<>\;@#$^%& ")
+ group.add_option("", "--replacement-string", dest="replacement_strings",
+ action="append", help="Add a replacement string to use",
+ default=[])
+ group.add_option("", "--replacement-list", dest="replacement_lists",
+ help="Add a list of replacement strings (one per line)",
+ action="append", default=[])
+ group.add_option("", "--no-delete", help="Don't delete characters",
+ action='store_false', dest="enable_delete", default=True)
+ group.add_option("", "--no-insert", help="Don't insert strings",
+ action='store_false', dest="enable_insert", default=True)
+ group.add_option("", "--no-replace", help="Don't replace strings",
+ action='store_false', dest="enable_replace", default=True)
+ group.add_option("", "--no-revert", help="Don't revert changes",
+ action='store_false', dest="revert", default=True)
+ parser.add_option_group(group)
+
+ group = OptionGroup(parser, "Test Selection")
+ group.add_option("", "--test", help="Run a particular test",
+ type=int, dest="test", default=None, metavar="INDEX")
+ group.add_option("", "--max-tests", help="Maximum number of tests",
+ type=int, dest="max_tests", default=10, metavar="COUNT")
+ group.add_option("", "--pick-input",
+ help="Randomly select an input byte as well as fuzzing",
+ action='store_true', dest="pick_input", default=False)
+ parser.add_option_group(group)
+
+ parser.disable_interspersed_args()
+
+ (opts, args) = parser.parse_args()
+
+ if not args:
+ parser.error("Invalid number of arguments")
+
+ # Collect the list of inputs.
+ input_files = list(opts.input_files)
+ for filelist in opts.filelists:
+ f = open(filelist)
+ try:
+ for ln in f:
+ ln = ln.strip()
+ if ln:
+ input_files.append(ln)
+ finally:
+ f.close()
+ input_files.sort()
+
+ if not input_files:
+ parser.error("No input files!")
+
+ print '%s: note: fuzzing %d files.' % (sys.argv[0], len(input_files))
+
+ # Make sure the log directory exists if used.
+ if opts.log_dir:
+ if not os.path.exists(opts.log_dir):
+ try:
+ os.mkdir(opts.log_dir)
+ except OSError:
+ print "%s: error: log directory couldn't be created!" % (
+ sys.argv[0],)
+ raise SystemExit,1
+
+    # Get the list of insert/replacement strings.
+ replacements = list(opts.replacement_chars)
+ replacements.extend(opts.replacement_strings)
+ for replacement_list in opts.replacement_lists:
+ f = open(replacement_list)
+ try:
+ for ln in f:
+ ln = ln[:-1]
+ if ln:
+ replacements.append(ln)
+ finally:
+ f.close()
+
+ # Unique and order the replacement list.
+ replacements = list(set(replacements))
+ replacements.sort()
+
+ # Create the test generator.
+ tg = TestGenerator(input_files, opts.enable_delete, opts.enable_insert,
+ opts.enable_replace, replacements, opts.pick_input)
+
+ print '%s: note: %d input bytes.' % (sys.argv[0], tg.num_positions)
+ print '%s: note: %d total tests.' % (sys.argv[0], tg.num_tests)
+ if opts.test is not None:
+ it = [opts.test]
+ elif opts.max_tests is not None:
+ it = itertools.imap(random.randrange,
+ itertools.repeat(tg.num_tests, opts.max_tests))
+ else:
+ it = itertools.imap(random.randrange, itertools.repeat(tg.num_tests))
+ for test in it:
+ t = tg.get_test(test)
+
+ if opts.verbose:
+ print '%s: note: running test %d: %r' % (sys.argv[0], test, t)
+ ta = TestApplication(tg, t)
+ try:
+ ta.apply()
+ run_one_test(ta, test, input_files, args)
+ finally:
+ if opts.revert:
+ ta.revert()
+
+ sys.stdout.flush()
+
+if __name__ == '__main__':
+ main()
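
get_test decodes its flat index with successive divmods, i.e. as a mixed-radix number whose digits select the mutation position, the insert string, and the mutation kind. A C++ sketch of that decoding, assuming delete, insert, and replace are all enabled and ignoring --pick-input:

    #include <cstdio>

    struct DecodedTest { const char *kind; long position; long stringIndex; };

    // Mirror of TestGenerator.get_test's divmod chain.
    DecodedTest decode(long index, long numPositions, long numStrings) {
      if (index == 0) return { "nothing", -1, -1 };
      --index;
      long position = index % numPositions; index /= numPositions;
      if (index == 0) return { "delete", position, -1 };
      --index;
      long stringIndex = index % numStrings; index /= numStrings;
      if (index == 0) return { "insert", position, stringIndex };
      return { "replace", position, stringIndex };  // index is 0 here
    }

    int main() {
      DecodedTest t = decode(7, /*numPositions=*/3, /*numStrings=*/2);
      std::printf("%s pos=%ld str=%ld\n", t.kind, t.position, t.stringIndex);
    }
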
diff --git a/contrib/llvm/tools/clang/utils/TestUtils/pch-test.pl b/contrib/llvm/tools/clang/utils/TestUtils/pch-test.pl
index e097c5c..e4311e9 100755
--- a/contrib/llvm/tools/clang/utils/TestUtils/pch-test.pl
+++ b/contrib/llvm/tools/clang/utils/TestUtils/pch-test.pl
@@ -17,7 +17,7 @@ sub testfiles($$) {
@files = `ls test/*/*.$suffix`;
foreach $file (@files) {
chomp($file);
- my $code = system("clang- -fsyntax-only -x $language $file > /dev/null 2>&1");
+ my $code = system("clang -fsyntax-only -x $language $file > /dev/null 2>&1");
if ($code == 0) {
print(".");
$code = system("clang -cc1 -emit-pch -x $language -o $file.pch $file > /dev/null 2>&1");
diff --git a/contrib/llvm/tools/edis/EDDisassembler.cpp b/contrib/llvm/tools/edis/EDDisassembler.cpp
index 00b5d8d..85e41e6 100644
--- a/contrib/llvm/tools/edis/EDDisassembler.cpp
+++ b/contrib/llvm/tools/edis/EDDisassembler.cpp
@@ -364,7 +364,7 @@ int EDDisassembler::parseInst(SmallVectorImpl<MCParsedAsmOperand*> &operands,
sourceMgr.AddNewSourceBuffer(buf, SMLoc()); // ownership of buf handed over
MCContext context(*AsmInfo);
OwningPtr<MCStreamer> streamer(createNullStreamer(context));
- AsmParser genericParser(sourceMgr, context, *streamer, *AsmInfo);
+ AsmParser genericParser(*Tgt, sourceMgr, context, *streamer, *AsmInfo);
OwningPtr<TargetAsmParser> TargetParser(Tgt->createAsmParser(genericParser));
AsmToken OpcodeToken = genericParser.Lex();
diff --git a/contrib/llvm/tools/edis/Makefile b/contrib/llvm/tools/edis/Makefile
index 9151f62..0d2e26f 100644
--- a/contrib/llvm/tools/edis/Makefile
+++ b/contrib/llvm/tools/edis/Makefile
@@ -20,10 +20,12 @@ EXPORTED_SYMBOL_FILE = $(PROJ_SRC_DIR)/EnhancedDisassembly.exports
include $(LEVEL)/Makefile.config
ifeq ($(ENABLE_PIC),1)
+ ifneq ($(DISABLE_EDIS),1)
ifneq ($(TARGET_OS), $(filter $(TARGET_OS), Cygwin MingW))
LINK_LIBS_IN_SHARED = 1
SHARED_LIBRARY = 1
endif
+ endif
endif
LINK_COMPONENTS := $(TARGETS_TO_BUILD) x86asmprinter x86disassembler
diff --git a/contrib/llvm/tools/gold/gold-plugin.cpp b/contrib/llvm/tools/gold/gold-plugin.cpp
index 2e5c179..2d0f5bd 100644
--- a/contrib/llvm/tools/gold/gold-plugin.cpp
+++ b/contrib/llvm/tools/gold/gold-plugin.cpp
@@ -41,6 +41,8 @@ namespace {
ld_plugin_add_symbols add_symbols = NULL;
ld_plugin_get_symbols get_symbols = NULL;
ld_plugin_add_input_file add_input_file = NULL;
+ ld_plugin_add_input_library add_input_library = NULL;
+ ld_plugin_set_extra_library_path set_extra_library_path = NULL;
ld_plugin_message message = discard_message;
int api_version = 0;
@@ -53,46 +55,62 @@ namespace {
};
lto_codegen_model output_type = LTO_CODEGEN_PIC_MODEL_STATIC;
+ std::string output_name = "";
std::list<claimed_file> Modules;
std::vector<sys::Path> Cleanup;
}
namespace options {
+ enum generate_bc { BC_NO, BC_ALSO, BC_ONLY };
static bool generate_api_file = false;
+ static generate_bc generate_bc_file = BC_NO;
static std::string bc_path;
- static const char *as_path = NULL;
+ static std::string as_path;
+ static std::vector<std::string> pass_through;
+ static std::string extra_library_path;
// Additional options to pass into the code generator.
- // Note: This array will contain all plugin options which are not claimed
+ // Note: This array will contain all plugin options which are not claimed
// as plugin exclusive to pass to the code generator.
- // For example, "generate-api-file" and "as"options are for the plugin
+  // For example, "generate-api-file" and "as" options are for the plugin
// use only and will not be passed.
static std::vector<std::string> extra;
- static void process_plugin_option(const char* opt)
+ static void process_plugin_option(const char* opt_)
{
- if (opt == NULL)
+ if (opt_ == NULL)
return;
+ llvm::StringRef opt = opt_;
- if (strcmp("generate-api-file", opt) == 0) {
+ if (opt == "generate-api-file") {
generate_api_file = true;
- } else if (strncmp("as=", opt, 3) == 0) {
- if (as_path) {
+ } else if (opt.startswith("as=")) {
+ if (!as_path.empty()) {
(*message)(LDPL_WARNING, "Path to as specified twice. "
- "Discarding %s", opt);
+ "Discarding %s", opt_);
} else {
- as_path = strdup(opt + 3);
+ as_path = opt.substr(strlen("as="));
}
- } else if(llvm::StringRef(opt).startswith("also-emit-llvm=")) {
- const char *path = opt + strlen("also-emit-llvm=");
- if (bc_path != "") {
+ } else if (opt.startswith("extra-library-path=")) {
+    extra_library_path = opt.substr(strlen("extra-library-path="));
+ } else if (opt.startswith("pass-through=")) {
+ llvm::StringRef item = opt.substr(strlen("pass-through="));
+ pass_through.push_back(item.str());
+ } else if (opt == "emit-llvm") {
+ generate_bc_file = BC_ONLY;
+ } else if (opt == "also-emit-llvm") {
+ generate_bc_file = BC_ALSO;
+ } else if (opt.startswith("also-emit-llvm=")) {
+ llvm::StringRef path = opt.substr(strlen("also-emit-llvm="));
+ generate_bc_file = BC_ALSO;
+ if (!bc_path.empty()) {
(*message)(LDPL_WARNING, "Path to the output IL file specified twice. "
- "Discarding %s", opt);
+ "Discarding %s", opt_);
} else {
bc_path = path;
}
} else {
// Save this option to pass to the code generator.
- extra.push_back(std::string(opt));
+ extra.push_back(opt);
}
}
}
@@ -111,8 +129,6 @@ ld_plugin_status onload(ld_plugin_tv *tv) {
// for services.
bool registeredClaimFile = false;
- bool registeredAllSymbolsRead = false;
- bool registeredCleanup = false;
for (; tv->tv_tag != LDPT_NULL; ++tv) {
switch (tv->tv_tag) {
@@ -122,6 +138,9 @@ ld_plugin_status onload(ld_plugin_tv *tv) {
case LDPT_GOLD_VERSION: // major * 100 + minor
gold_version = tv->tv_u.tv_val;
break;
+ case LDPT_OUTPUT_NAME:
+ output_name = tv->tv_u.tv_string;
+ break;
case LDPT_LINKER_OUTPUT:
switch (tv->tv_u.tv_val) {
case LDPO_REL: // .o
@@ -157,8 +176,6 @@ ld_plugin_status onload(ld_plugin_tv *tv) {
if ((*callback)(all_symbols_read_hook) != LDPS_OK)
return LDPS_ERR;
-
- registeredAllSymbolsRead = true;
} break;
case LDPT_REGISTER_CLEANUP_HOOK: {
ld_plugin_register_cleanup callback;
@@ -166,8 +183,6 @@ ld_plugin_status onload(ld_plugin_tv *tv) {
if ((*callback)(cleanup_hook) != LDPS_OK)
return LDPS_ERR;
-
- registeredCleanup = true;
} break;
case LDPT_ADD_SYMBOLS:
add_symbols = tv->tv_u.tv_add_symbols;
@@ -178,6 +193,12 @@ ld_plugin_status onload(ld_plugin_tv *tv) {
case LDPT_ADD_INPUT_FILE:
add_input_file = tv->tv_u.tv_add_input_file;
break;
+ case LDPT_ADD_INPUT_LIBRARY:
+ add_input_library = tv->tv_u.tv_add_input_file;
+ break;
+ case LDPT_SET_EXTRA_LIBRARY_PATH:
+ set_extra_library_path = tv->tv_u.tv_set_extra_library_path;
+ break;
case LDPT_MESSAGE:
message = tv->tv_u.tv_message;
break;
@@ -209,7 +230,7 @@ static ld_plugin_status claim_file_hook(const ld_plugin_input_file *file,
// an .a archive.
if (lseek(file->fd, file->offset, SEEK_SET) == -1) {
(*message)(LDPL_ERROR,
- "Failed to seek to archive member of %s at offset %d: %s\n",
+ "Failed to seek to archive member of %s at offset %d: %s\n",
file->name,
file->offset, sys::StrError(errno).c_str());
return LDPS_ERR;
@@ -217,7 +238,7 @@ static ld_plugin_status claim_file_hook(const ld_plugin_input_file *file,
buf = malloc(file->filesize);
if (!buf) {
(*message)(LDPL_ERROR,
- "Failed to allocate buffer for archive member of size: %d\n",
+ "Failed to allocate buffer for archive member of size: %d\n",
file->filesize);
return LDPS_ERR;
}
@@ -343,35 +364,33 @@ static ld_plugin_status all_symbols_read_hook(void) {
// If we don't preserve any symbols, libLTO will assume that all symbols are
// needed. Keep all symbols unless we're producing a final executable.
- if (output_type == LTO_CODEGEN_PIC_MODEL_STATIC) {
- bool anySymbolsPreserved = false;
- for (std::list<claimed_file>::iterator I = Modules.begin(),
+ bool anySymbolsPreserved = false;
+ for (std::list<claimed_file>::iterator I = Modules.begin(),
E = Modules.end(); I != E; ++I) {
- (*get_symbols)(I->handle, I->syms.size(), &I->syms[0]);
- for (unsigned i = 0, e = I->syms.size(); i != e; i++) {
- if (I->syms[i].resolution == LDPR_PREVAILING_DEF) {
- lto_codegen_add_must_preserve_symbol(cg, I->syms[i].name);
- anySymbolsPreserved = true;
-
- if (options::generate_api_file)
- api_file << I->syms[i].name << "\n";
- }
+ (*get_symbols)(I->handle, I->syms.size(), &I->syms[0]);
+ for (unsigned i = 0, e = I->syms.size(); i != e; i++) {
+ if (I->syms[i].resolution == LDPR_PREVAILING_DEF) {
+ lto_codegen_add_must_preserve_symbol(cg, I->syms[i].name);
+ anySymbolsPreserved = true;
+
+ if (options::generate_api_file)
+ api_file << I->syms[i].name << "\n";
}
}
+ }
- if (options::generate_api_file)
- api_file.close();
+ if (options::generate_api_file)
+ api_file.close();
- if (!anySymbolsPreserved) {
- // This entire file is unnecessary!
- lto_codegen_dispose(cg);
- return LDPS_OK;
- }
+ if (!anySymbolsPreserved) {
+ // All of the IL is unnecessary!
+ lto_codegen_dispose(cg);
+ return LDPS_OK;
}
lto_codegen_set_pic_model(cg, output_type);
lto_codegen_set_debug_model(cg, LTO_DEBUG_MODEL_DWARF);
- if (options::as_path) {
+ if (!options::as_path.empty()) {
sys::Path p = sys::Program::FindProgramByName(options::as_path);
lto_codegen_set_assembler_path(cg, p.c_str());
}
@@ -383,10 +402,20 @@ static ld_plugin_status all_symbols_read_hook(void) {
}
}
- if (options::bc_path != "") {
- bool err = lto_codegen_write_merged_modules(cg, options::bc_path.c_str());
+
+ if (options::generate_bc_file != options::BC_NO) {
+ std::string path;
+ if (options::generate_bc_file == options::BC_ONLY)
+ path = output_name;
+ else if (!options::bc_path.empty())
+ path = options::bc_path;
+ else
+ path = output_name + ".bc";
+ bool err = lto_codegen_write_merged_modules(cg, path.c_str());
if (err)
(*message)(LDPL_FATAL, "Failed to write the output file.");
+ if (options::generate_bc_file == options::BC_ONLY)
+ exit(0);
}
size_t bufsize = 0;
const char *buffer = static_cast<const char *>(lto_codegen_compile(cg,
@@ -399,26 +428,48 @@ static ld_plugin_status all_symbols_read_hook(void) {
(*message)(LDPL_ERROR, "%s", ErrMsg.c_str());
return LDPS_ERR;
}
- raw_fd_ostream *objFile =
- new raw_fd_ostream(uniqueObjPath.c_str(), ErrMsg,
- raw_fd_ostream::F_Binary);
+ raw_fd_ostream objFile(uniqueObjPath.c_str(), ErrMsg,
+ raw_fd_ostream::F_Binary);
if (!ErrMsg.empty()) {
- delete objFile;
(*message)(LDPL_ERROR, "%s", ErrMsg.c_str());
return LDPS_ERR;
}
- objFile->write(buffer, bufsize);
- objFile->close();
+ objFile.write(buffer, bufsize);
+ objFile.close();
lto_codegen_dispose(cg);
- if ((*add_input_file)(const_cast<char*>(uniqueObjPath.c_str())) != LDPS_OK) {
+ if ((*add_input_file)(uniqueObjPath.c_str()) != LDPS_OK) {
(*message)(LDPL_ERROR, "Unable to add .o file to the link.");
(*message)(LDPL_ERROR, "File left behind in: %s", uniqueObjPath.c_str());
return LDPS_ERR;
}
+ if (!options::extra_library_path.empty() &&
+ set_extra_library_path(options::extra_library_path.c_str()) != LDPS_OK) {
+ (*message)(LDPL_ERROR, "Unable to set the extra library path.");
+ return LDPS_ERR;
+ }
+
+ for (std::vector<std::string>::iterator i = options::pass_through.begin(),
+ e = options::pass_through.end();
+ i != e; ++i) {
+ std::string &item = *i;
+ const char *item_p = item.c_str();
+ if (llvm::StringRef(item).startswith("-l")) {
+ if (add_input_library(item_p + 2) != LDPS_OK) {
+ (*message)(LDPL_ERROR, "Unable to add library to the link.");
+ return LDPS_ERR;
+ }
+ } else {
+ if (add_input_file(item_p) != LDPS_OK) {
+ (*message)(LDPL_ERROR, "Unable to add .o file to the link.");
+ return LDPS_ERR;
+ }
+ }
+ }
+
Cleanup.push_back(uniqueObjPath);
return LDPS_OK;
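
The option handling above leans on StringRef's startswith/substr, where each prefix literal appears twice per option (once in startswith, once in strlen), which invites length mismatches. A sketch of the same pattern using std::string_view as a stand-in for llvm::StringRef, with each prefix named exactly once so the lengths cannot drift:

    #include <string>
    #include <string_view>
    #include <vector>

    static std::string extra_library_path;
    static std::vector<std::string> pass_through;

    // consume() strips a prefix in place and reports whether it matched.
    static bool consume(std::string_view &s, std::string_view prefix) {
      if (s.substr(0, prefix.size()) != prefix) return false;
      s.remove_prefix(prefix.size());
      return true;
    }

    static void process_plugin_option(const char *opt_) {
      if (!opt_) return;
      std::string_view opt = opt_;
      if (consume(opt, "extra-library-path="))
        extra_library_path = std::string(opt);
      else if (consume(opt, "pass-through="))
        pass_through.push_back(std::string(opt));
    }
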
diff --git a/contrib/llvm/tools/llc/llc.cpp b/contrib/llvm/tools/llc/llc.cpp
index f3eed56..199a1a9 100644
--- a/contrib/llvm/tools/llc/llc.cpp
+++ b/contrib/llvm/tools/llc/llc.cpp
@@ -124,7 +124,8 @@ static formatted_raw_ostream *GetOutputStream(const char *TargetName,
const char *ProgName) {
if (OutputFilename != "") {
if (OutputFilename == "-")
- return &fouts();
+ return new formatted_raw_ostream(outs(),
+ formatted_raw_ostream::PRESERVE_STREAM);
// Make sure that the Out file gets unlinked from the disk if we get a
// SIGINT
@@ -147,7 +148,8 @@ static formatted_raw_ostream *GetOutputStream(const char *TargetName,
if (InputFilename == "-") {
OutputFilename = "-";
- return &fouts();
+ return new formatted_raw_ostream(outs(),
+ formatted_raw_ostream::PRESERVE_STREAM);
}
OutputFilename = GetFileNameRoot(InputFilename);
@@ -332,7 +334,7 @@ int main(int argc, char **argv) {
DisableVerify)) {
errs() << argv[0] << ": target does not support generation of this"
<< " file type!\n";
- if (Out != &fouts()) delete Out;
+ delete Out;
// And the Out file is empty and useless, so remove it now.
sys::Path(OutputFilename).eraseFromDisk();
return 1;
@@ -340,8 +342,8 @@ int main(int argc, char **argv) {
PM.run(mod);
- // Delete the ostream if it's not a stdout stream
- if (Out != &fouts()) delete Out;
+ // Delete the ostream.
+ delete Out;
return 0;
}
diff --git a/contrib/llvm/tools/llvm-extract/llvm-extract.cpp b/contrib/llvm/tools/llvm-extract/llvm-extract.cpp
index 276dfd6..e6b5b84 100644
--- a/contrib/llvm/tools/llvm-extract/llvm-extract.cpp
+++ b/contrib/llvm/tools/llvm-extract/llvm-extract.cpp
@@ -112,6 +112,7 @@ int main(int argc, char **argv) {
Passes.add(createGVExtractionPass(GVs, DeleteFn, Relink));
if (!DeleteFn)
Passes.add(createGlobalDCEPass()); // Delete unreachable globals
+ Passes.add(createStripDeadDebugInfoPass()); // Remove dead debug info
Passes.add(createDeadTypeEliminationPass()); // Remove dead types...
Passes.add(createStripDeadPrototypesPass()); // Remove dead func decls
diff --git a/contrib/llvm/tools/llvm-link/llvm-link.cpp b/contrib/llvm/tools/llvm-link/llvm-link.cpp
index c60e56a..f7dad3d 100644
--- a/contrib/llvm/tools/llvm-link/llvm-link.cpp
+++ b/contrib/llvm/tools/llvm-link/llvm-link.cpp
@@ -62,20 +62,14 @@ static inline std::auto_ptr<Module> LoadFile(const char *argv0,
}
SMDiagnostic Err;
- if (Filename.exists()) {
- if (Verbose) errs() << "Loading '" << Filename.c_str() << "'\n";
- Module* Result = 0;
-
- const std::string &FNStr = Filename.str();
- Result = ParseIRFile(FNStr, Err, Context);
- if (Result) return std::auto_ptr<Module>(Result); // Load successful!
-
- if (Verbose)
- Err.Print(argv0, errs());
- } else {
- errs() << "Bitcode file: '" << Filename.c_str() << "' does not exist.\n";
- }
+ if (Verbose) errs() << "Loading '" << Filename.c_str() << "'\n";
+ Module* Result = 0;
+
+ const std::string &FNStr = Filename.str();
+ Result = ParseIRFile(FNStr, Err, Context);
+ if (Result) return std::auto_ptr<Module>(Result); // Load successful!
+ Err.Print(argv0, errs());
return std::auto_ptr<Module>();
}
diff --git a/contrib/llvm/tools/llvm-mc/Makefile b/contrib/llvm/tools/llvm-mc/Makefile
index f92e643..a127493 100644
--- a/contrib/llvm/tools/llvm-mc/Makefile
+++ b/contrib/llvm/tools/llvm-mc/Makefile
@@ -12,7 +12,6 @@ TOOLNAME = llvm-mc
# This tool has no plugins; optimize startup time.
TOOL_NO_EXPORTS = 1
-NO_INSTALL = 1
# Include this here so we can get the configuration of the targets
# that have been configured for construction. We have to do this
diff --git a/contrib/llvm/tools/llvm-mc/llvm-mc.cpp b/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
index a114ab0..fc8a1c5 100644
--- a/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
+++ b/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
@@ -312,7 +312,7 @@ static int AssembleInput(const char *ProgName) {
Str.reset(createLoggingStreamer(Str.take(), errs()));
}
- AsmParser Parser(SrcMgr, Ctx, *Str.get(), *MAI);
+ AsmParser Parser(*TheTarget, SrcMgr, Ctx, *Str.get(), *MAI);
OwningPtr<TargetAsmParser> TAP(TheTarget->createAsmParser(Parser));
if (!TAP) {
errs() << ProgName
@@ -323,8 +323,7 @@ static int AssembleInput(const char *ProgName) {
Parser.setTargetParser(*TAP.get());
int Res = Parser.Run(NoInitialTextSection);
- if (Out != &fouts())
- delete Out;
+ delete Out;
// Delete output on errors.
if (Res && OutputFilename != "-")
diff --git a/contrib/llvm/tools/llvm-nm/llvm-nm.cpp b/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
index 2baf532..fd7e7f6 100644
--- a/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
+++ b/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
@@ -89,7 +89,8 @@ static char TypeCharForSymbol(GlobalValue &GV) {
static void DumpSymbolNameForGlobalValue(GlobalValue &GV) {
// Private linkage and available_externally linkage don't exist in symtab.
if (GV.hasPrivateLinkage() || GV.hasLinkerPrivateLinkage() ||
- GV.hasAvailableExternallyLinkage()) return;
+ GV.hasLinkerPrivateWeakLinkage() || GV.hasAvailableExternallyLinkage())
+ return;
const std::string SymbolAddrStr = " "; // Not used yet...
char TypeChar = TypeCharForSymbol(GV);
diff --git a/contrib/llvm/tools/llvmc/plugins/Base/Base.td.in b/contrib/llvm/tools/llvmc/plugins/Base/Base.td.in
index 23f46b7..a042997 100644
--- a/contrib/llvm/tools/llvmc/plugins/Base/Base.td.in
+++ b/contrib/llvm/tools/llvmc/plugins/Base/Base.td.in
@@ -262,12 +262,12 @@ def llc : Tool<
]>;
// Base class for linkers
-class llvm_gcc_based_linker <string cmd_prefix> : Tool<
+class llvm_gcc_based_linker <string cmd_prefix, dag on_empty> : Tool<
[(in_language ["object-code", "static-library"]),
(out_language "executable"),
(output_suffix "out"),
(command cmd_prefix),
- (works_on_empty (case (not_empty "filelist"), true,
+ (works_on_empty (case (and (not_empty "filelist"), on_empty), true,
(default), false)),
(join),
(actions (case
@@ -295,9 +295,13 @@ class llvm_gcc_based_linker <string cmd_prefix> : Tool<
]>;
// Default linker
-def llvm_gcc_linker : llvm_gcc_based_linker<"@LLVMGCCCOMMAND@">;
+def llvm_gcc_linker : llvm_gcc_based_linker<"@LLVMGCCCOMMAND@",
+ (not (or (parameter_equals "linker", "g++"),
+ (parameter_equals "linker", "c++")))>;
// Alternative linker for C++
-def llvm_gcc_cpp_linker : llvm_gcc_based_linker<"@LLVMGXXCOMMAND@">;
+def llvm_gcc_cpp_linker : llvm_gcc_based_linker<"@LLVMGXXCOMMAND@",
+ (or (parameter_equals "linker", "g++"),
+ (parameter_equals "linker", "c++"))>;
// Language map
diff --git a/contrib/llvm/tools/lto/LTOCodeGenerator.cpp b/contrib/llvm/tools/lto/LTOCodeGenerator.cpp
index 59e8405..911fddf 100644
--- a/contrib/llvm/tools/lto/LTOCodeGenerator.cpp
+++ b/contrib/llvm/tools/lto/LTOCodeGenerator.cpp
@@ -152,10 +152,12 @@ bool LTOCodeGenerator::writeMergedModules(const char *path,
// write bitcode to it
WriteBitcodeToFile(_linker.getModule(), Out);
-
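+ // Flush and close before checking has_error() so write failures surface.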
+ Out.close();
+
if (Out.has_error()) {
errMsg = "could not write bitcode file: ";
errMsg += path;
+ Out.clear_error();
return true;
}
@@ -181,16 +183,14 @@ const void* LTOCodeGenerator::compile(size_t* length, std::string& errMsg)
genResult = this->generateAssemblyCode(asmFile, errMsg);
}
if ( genResult ) {
- if ( uniqueAsmPath.exists() )
- uniqueAsmPath.eraseFromDisk();
+ uniqueAsmPath.eraseFromDisk();
return NULL;
}
// make unique temp .o file to put generated object file
sys::PathWithStatus uniqueObjPath("lto-llvm.o");
if ( uniqueObjPath.createTemporaryFileOnDisk(true, &errMsg) ) {
- if ( uniqueAsmPath.exists() )
- uniqueAsmPath.eraseFromDisk();
+ uniqueAsmPath.eraseFromDisk();
return NULL;
}
sys::RemoveFileOnSignal(uniqueObjPath);
diff --git a/contrib/llvm/tools/opt/GraphPrinters.cpp b/contrib/llvm/tools/opt/GraphPrinters.cpp
index 86f9932..e7c6d1e 100644
--- a/contrib/llvm/tools/opt/GraphPrinters.cpp
+++ b/contrib/llvm/tools/opt/GraphPrinters.cpp
@@ -56,7 +56,7 @@ namespace llvm {
if (Node->getFunction())
return ((Value*)Node->getFunction())->getName();
else
- return "Indirect call node";
+ return "external node";
}
};
}
diff --git a/contrib/llvm/tools/opt/PrintSCC.cpp b/contrib/llvm/tools/opt/PrintSCC.cpp
index 66709ff..ea486ca 100644
--- a/contrib/llvm/tools/opt/PrintSCC.cpp
+++ b/contrib/llvm/tools/opt/PrintSCC.cpp
@@ -102,7 +102,7 @@ bool CallGraphSCC::runOnModule(Module &M) {
for (std::vector<CallGraphNode*>::const_iterator I = nextSCC.begin(),
E = nextSCC.end(); I != E; ++I)
outs() << ((*I)->getFunction() ? (*I)->getFunction()->getNameStr()
- : std::string("Indirect CallGraph node")) << ", ";
+ : std::string("external node")) << ", ";
if (nextSCC.size() == 1 && SCCI.hasLoop())
outs() << " (Has self-loop).";
}
diff --git a/contrib/llvm/tools/opt/opt.cpp b/contrib/llvm/tools/opt/opt.cpp
index 51b920f..0878737 100644
--- a/contrib/llvm/tools/opt/opt.cpp
+++ b/contrib/llvm/tools/opt/opt.cpp
@@ -112,7 +112,7 @@ OptLevelO3("O3",
static cl::opt<bool>
UnitAtATime("funit-at-a-time",
cl::desc("Enable IPO. This is same as llvm-gcc's -funit-at-a-time"),
- cl::init(true));
+ cl::init(true));
static cl::opt<bool>
DisableSimplifyLibCalls("disable-simplify-libcalls",
@@ -377,24 +377,34 @@ int main(int argc, char **argv) {
}
// Figure out what stream we are supposed to write to...
- // FIXME: outs() is not binary!
- raw_ostream *Out = &outs(); // Default to printing to stdout...
- if (OutputFilename != "-") {
- if (NoOutput || AnalyzeOnly) {
- errs() << "WARNING: The -o (output filename) option is ignored when\n"
- "the --disable-output or --analyze options are used.\n";
+ raw_ostream *Out = 0;
+ bool DeleteStream = false;
+ if (!NoOutput && !AnalyzeOnly) {
+ if (OutputFilename == "-") {
+ // Print to stdout.
+ Out = &outs();
+ // If we're printing a bitcode file, switch stdout to binary mode.
+ // FIXME: This switches outs() globally, not just for the bitcode output.
+ if (!OutputAssembly)
+ sys::Program::ChangeStdoutToBinary();
} else {
- // Make sure that the Output file gets unlinked from the disk if we get a
- // SIGINT
- sys::RemoveFileOnSignal(sys::Path(OutputFilename));
-
- std::string ErrorInfo;
- Out = new raw_fd_ostream(OutputFilename.c_str(), ErrorInfo,
- raw_fd_ostream::F_Binary);
- if (!ErrorInfo.empty()) {
- errs() << ErrorInfo << '\n';
- delete Out;
- return 1;
+ if (NoOutput || AnalyzeOnly) {
+ errs() << "WARNING: The -o (output filename) option is ignored when\n"
+ "the --disable-output or --analyze options are used.\n";
+ } else {
+ // Make sure that the Output file gets unlinked from the disk if we get
+ // a SIGINT.
+ sys::RemoveFileOnSignal(sys::Path(OutputFilename));
+
+ std::string ErrorInfo;
+ Out = new raw_fd_ostream(OutputFilename.c_str(), ErrorInfo,
+ raw_fd_ostream::F_Binary);
+ if (!ErrorInfo.empty()) {
+ errs() << ErrorInfo << '\n';
+ delete Out;
+ return 1;
+ }
+ DeleteStream = true;
}
}
}
@@ -540,7 +550,7 @@ int main(int argc, char **argv) {
Passes.run(*M.get());
// Delete the raw_fd_ostream.
- if (Out != &outs())
+ if (DeleteStream)
delete Out;
return 0;
}
diff --git a/contrib/llvm/utils/FileUpdate/FileUpdate.cpp b/contrib/llvm/utils/FileUpdate/FileUpdate.cpp
index 26fd75e..00c2091 100644
--- a/contrib/llvm/utils/FileUpdate/FileUpdate.cpp
+++ b/contrib/llvm/utils/FileUpdate/FileUpdate.cpp
@@ -79,6 +79,7 @@ int main(int argc, char **argv) {
if (OutStream.has_error()) {
errs() << argv[0] << ": Could not open output file '"
<< OutputFilename << "': " << ErrorStr << '\n';
+ OutStream.clear_error();
return 1;
}
diff --git a/contrib/llvm/utils/NewNightlyTest.pl b/contrib/llvm/utils/NewNightlyTest.pl
index 4287cc1..1b48168 100755
--- a/contrib/llvm/utils/NewNightlyTest.pl
+++ b/contrib/llvm/utils/NewNightlyTest.pl
@@ -47,8 +47,8 @@ use Socket;
# -noclean Do not run 'make clean' before building.
# -nobuild Do not build llvm. If tests are enabled perform them
# on the llvm build specified in the build directory
-# -release Build an LLVM Release version
-# -release-asserts Build an LLVM ReleaseAsserts version
+# -release Build an LLVM Release+Asserts version
+# -release-asserts Build an LLVM Release version
# -disable-bindings Disable building LLVM bindings.
# -with-clang Checkout Clang source into tools/clang.
# -compileflags Next argument specifies extra options passed to make when
diff --git a/contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp b/contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp
index c879a54..5025691 100644
--- a/contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/ARMDecoderEmitter.cpp
@@ -1575,6 +1575,15 @@ bool ARMDecoderEmitter::ARMDEBackend::populateInstruction(
if (Name == "BXr9" || Name == "BMOVPCRX" || Name == "BMOVPCRXr9")
return false;
+ // Tail calls are other patterns that generate existing instructions.
+ if (Name == "TCRETURNdi" || Name == "TCRETURNdiND" ||
+ Name == "TCRETURNri" || Name == "TCRETURNriND" ||
+ Name == "TAILJMPd" || Name == "TAILJMPdt" ||
+ Name == "TAILJMPdND" || Name == "TAILJMPdNDt" ||
+ Name == "TAILJMPr" || Name == "TAILJMPrND" ||
+ Name == "MOVr_TC")
+ return false;
+
// VLDMQ/VSTMQ can be handled with the more generic VLDMD/VSTMD.
if (Name == "VLDMQ" || Name == "VLDMQ_UPD" ||
Name == "VSTMQ" || Name == "VSTMQ_UPD")
diff --git a/contrib/llvm/utils/TableGen/ARMDecoderEmitter.h b/contrib/llvm/utils/TableGen/ARMDecoderEmitter.h
index 107e085..571a947 100644
--- a/contrib/llvm/utils/TableGen/ARMDecoderEmitter.h
+++ b/contrib/llvm/utils/TableGen/ARMDecoderEmitter.h
@@ -31,7 +31,7 @@ public:
~ARMDecoderEmitter() {
shutdownBackend();
}
-
+
// run - Output the code emitter
void run(raw_ostream &o);
diff --git a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index 4ba3df1..e1aa2bc 100644
--- a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -407,9 +407,9 @@ public:
default:
// This class precedes the RHS if it is a proper subset of the RHS.
if (isSubsetOf(RHS))
- return true;
+ return true;
if (RHS.isSubsetOf(*this))
- return false;
+ return false;
// Otherwise, order by name to ensure we have a total ordering.
return ValueName < RHS.ValueName;
diff --git a/contrib/llvm/utils/TableGen/CMakeLists.txt b/contrib/llvm/utils/TableGen/CMakeLists.txt
index 731cde9..972989b 100644
--- a/contrib/llvm/utils/TableGen/CMakeLists.txt
+++ b/contrib/llvm/utils/TableGen/CMakeLists.txt
@@ -5,6 +5,7 @@ add_executable(tblgen
AsmWriterInst.cpp
CallingConvEmitter.cpp
ClangASTNodesEmitter.cpp
+ ClangAttrEmitter.cpp
ClangDiagnosticsEmitter.cpp
CodeEmitterGen.cpp
CodeGenDAGPatterns.cpp
@@ -22,6 +23,7 @@ add_executable(tblgen
InstrInfoEmitter.cpp
IntrinsicEmitter.cpp
LLVMCConfigurationEmitter.cpp
+ NeonEmitter.cpp
OptParserEmitter.cpp
Record.cpp
RegisterInfoEmitter.cpp
@@ -39,6 +41,6 @@ target_link_libraries(tblgen LLVMSupport LLVMSystem)
if( MINGW )
target_link_libraries(tblgen imagehlp psapi)
endif( MINGW )
-if( LLVM_ENABLE_THREADS AND HAVE_LIBPTHREAD )
+if( LLVM_ENABLE_THREADS AND HAVE_LIBPTHREAD AND NOT BEOS )
target_link_libraries(tblgen pthread)
endif()
diff --git a/contrib/llvm/utils/TableGen/ClangASTNodesEmitter.cpp b/contrib/llvm/utils/TableGen/ClangASTNodesEmitter.cpp
index 5d6423d..187ab46 100644
--- a/contrib/llvm/utils/TableGen/ClangASTNodesEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/ClangASTNodesEmitter.cpp
@@ -12,33 +12,19 @@
//===----------------------------------------------------------------------===//
#include "ClangASTNodesEmitter.h"
-#include "Record.h"
-#include <map>
-#include <cctype>
+#include <set>
using namespace llvm;
//===----------------------------------------------------------------------===//
// Statement Node Tables (.inc file) generation.
//===----------------------------------------------------------------------===//
-// Create a macro-ized version of a name
-static std::string macroName(std::string S) {
- for (unsigned i = 0; i < S.size(); ++i)
- S[i] = std::toupper(S[i]);
-
- return S;
-}
-
-// A map from a node to each of its derived nodes.
-typedef std::multimap<Record*, Record*> ChildMap;
-typedef ChildMap::const_iterator ChildIterator;
-
// Returns the first and last non-abstract subrecords
// Called recursively to ensure that nodes remain contiguous
-static std::pair<Record *, Record *> EmitStmtNode(const ChildMap &Tree,
- raw_ostream &OS,
- Record *Base,
- bool Root = true) {
+std::pair<Record *, Record *> ClangASTNodesEmitter::EmitNode(
+ const ChildMap &Tree,
+ raw_ostream &OS,
+ Record *Base) {
std::string BaseName = macroName(Base->getName());
ChildIterator i = Tree.lower_bound(Base), e = Tree.upper_bound(Base);
@@ -60,15 +46,15 @@ static std::pair<Record *, Record *> EmitStmtNode(const ChildMap &Tree,
OS << "#endif\n";
if (Abstract)
- OS << "ABSTRACT_STMT(" << NodeName << "(" << R->getName() << ", "
- << Base->getName() << "))\n";
+ OS << "ABSTRACT_" << macroName(Root.getName()) << "(" << NodeName << "("
+ << R->getName() << ", " << baseName(*Base) << "))\n";
else
OS << NodeName << "(" << R->getName() << ", "
- << Base->getName() << ")\n";
+ << baseName(*Base) << ")\n";
if (Tree.find(R) != Tree.end()) {
const std::pair<Record *, Record *> &Result
- = EmitStmtNode(Tree, OS, R, false);
+ = EmitNode(Tree, OS, R);
if (!First && Result.first)
First = Result.first;
if (Result.second)
@@ -87,11 +73,10 @@ static std::pair<Record *, Record *> EmitStmtNode(const ChildMap &Tree,
if (First) {
assert (Last && "Got a first node but not a last node for a range!");
- if (Root)
- OS << "LAST_STMT_RANGE(";
+ if (Base == &Root)
+ OS << "LAST_" << macroName(Root.getName()) << "_RANGE(";
else
- OS << "STMT_RANGE(";
-
+ OS << macroName(Root.getName()) << "_RANGE(";
OS << Base->getName() << ", " << First->getName() << ", "
<< Last->getName() << ")\n\n";
}
@@ -99,43 +84,82 @@ static std::pair<Record *, Record *> EmitStmtNode(const ChildMap &Tree,
return std::make_pair(First, Last);
}
-void ClangStmtNodesEmitter::run(raw_ostream &OS) {
+void ClangASTNodesEmitter::run(raw_ostream &OS) {
// Write the preamble
- OS << "#ifndef ABSTRACT_STMT\n";
- OS << "# define ABSTRACT_STMT(Stmt) Stmt\n";
+ OS << "#ifndef ABSTRACT_" << macroName(Root.getName()) << "\n";
+ OS << "# define ABSTRACT_" << macroName(Root.getName()) << "(Type) Type\n";
OS << "#endif\n";
- OS << "#ifndef STMT_RANGE\n";
- OS << "# define STMT_RANGE(Base, First, Last)\n";
+ OS << "#ifndef " << macroName(Root.getName()) << "_RANGE\n";
+ OS << "# define "
+ << macroName(Root.getName()) << "_RANGE(Base, First, Last)\n";
OS << "#endif\n\n";
- OS << "#ifndef LAST_STMT_RANGE\n";
- OS << "# define LAST_STMT_RANGE(Base, First, Last) "
- "STMT_RANGE(Base, First, Last)\n";
+ OS << "#ifndef LAST_" << macroName(Root.getName()) << "_RANGE\n";
+ OS << "# define LAST_"
+ << macroName(Root.getName()) << "_RANGE(Base, First, Last) "
+ << macroName(Root.getName()) << "_RANGE(Base, First, Last)\n";
OS << "#endif\n\n";
// Emit statements
- const std::vector<Record*> Stmts = Records.getAllDerivedDefinitions("Stmt");
+ const std::vector<Record*> Stmts
+ = Records.getAllDerivedDefinitions(Root.getName());
ChildMap Tree;
- // Create a pseudo-record to serve as the Stmt node, which isn't actually
- // output.
- Record Stmt ("Stmt", SMLoc());
-
for (unsigned i = 0, e = Stmts.size(); i != e; ++i) {
Record *R = Stmts[i];
if (R->getValue("Base"))
Tree.insert(std::make_pair(R->getValueAsDef("Base"), R));
else
- Tree.insert(std::make_pair(&Stmt, R));
+ Tree.insert(std::make_pair(&Root, R));
}
- EmitStmtNode(Tree, OS, &Stmt);
+ EmitNode(Tree, OS, &Root);
+
+ OS << "#undef " << macroName(Root.getName()) << "\n";
+ OS << "#undef " << macroName(Root.getName()) << "_RANGE\n";
+ OS << "#undef LAST_" << macroName(Root.getName()) << "_RANGE\n";
+ OS << "#undef ABSTRACT_" << macroName(Root.getName()) << "\n";
+}
+
+void ClangDeclContextEmitter::run(raw_ostream &OS) {
+ // FIXME: Find a .td file format to allow for this to be represented better.
+
+ OS << "#ifndef DECL_CONTEXT\n";
+ OS << "# define DECL_CONTEXT(DECL)\n";
+ OS << "#endif\n";
+
+ OS << "#ifndef DECL_CONTEXT_BASE\n";
+ OS << "# define DECL_CONTEXT_BASE(DECL) DECL_CONTEXT(DECL)\n";
+ OS << "#endif\n";
+
+ typedef std::set<Record*> RecordSet;
+ typedef std::vector<Record*> RecordVector;
+
+ RecordVector DeclContextsVector
+ = Records.getAllDerivedDefinitions("DeclContext");
+ RecordVector Decls = Records.getAllDerivedDefinitions("Decl");
+ RecordSet DeclContexts (DeclContextsVector.begin(), DeclContextsVector.end());
+
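+ // A context that is the base of some Decl gets DECL_CONTEXT_BASE; emit
+ // those first and drop them so the rest get plain DECL_CONTEXT.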
+ for (RecordVector::iterator i = Decls.begin(), e = Decls.end(); i != e; ++i) {
+ Record *R = *i;
+
+ if (R->getValue("Base")) {
+ Record *B = R->getValueAsDef("Base");
+ if (DeclContexts.find(B) != DeclContexts.end()) {
+ OS << "DECL_CONTEXT_BASE(" << B->getName() << ")\n";
+ DeclContexts.erase(B);
+ }
+ }
+ }
+
+ for (RecordSet::iterator i = DeclContexts.begin(), e = DeclContexts.end();
+ i != e; ++i) {
+ OS << "DECL_CONTEXT(" << (*i)->getName() << ")\n";
+ }
- OS << "#undef STMT\n";
- OS << "#undef STMT_RANGE\n";
- OS << "#undef LAST_STMT_RANGE\n";
- OS << "#undef ABSTRACT_STMT\n";
+ OS << "#undef DECL_CONTEXT\n";
+ OS << "#undef DECL_CONTEXT_BASE\n";
}
diff --git a/contrib/llvm/utils/TableGen/ClangASTNodesEmitter.h b/contrib/llvm/utils/TableGen/ClangASTNodesEmitter.h
index c4ce9fa..abf9c9a 100644
--- a/contrib/llvm/utils/TableGen/ClangASTNodesEmitter.h
+++ b/contrib/llvm/utils/TableGen/ClangASTNodesEmitter.h
@@ -15,19 +15,67 @@
#define CLANGAST_EMITTER_H
#include "TableGenBackend.h"
+#include "Record.h"
+#include <string>
+#include <cctype>
+#include <map>
namespace llvm {
-/// ClangStmtNodesEmitter - The top-level class emits .def files containing
+/// ClangASTNodesEmitter - The top-level class emits .inc files containing
/// declarations of Clang statements.
///
-class ClangStmtNodesEmitter : public TableGenBackend {
+class ClangASTNodesEmitter : public TableGenBackend {
+ // A map from a node to each of its derived nodes.
+ typedef std::multimap<Record*, Record*> ChildMap;
+ typedef ChildMap::const_iterator ChildIterator;
+
RecordKeeper &Records;
+ Record Root;
+ const std::string &BaseSuffix;
+
+ // Create a macro-ized version of a name
+ static std::string macroName(std::string S) {
+ for (unsigned i = 0; i < S.size(); ++i)
+ S[i] = std::toupper(S[i]);
+
+ return S;
+ }
+
+ // Return the name to be printed in the base field. Normally this is
+ // the record's name plus the base suffix, but if it is the root node and
+ // the suffix is non-empty, it's just the suffix.
+ std::string baseName(Record &R) {
+ if (&R == &Root && !BaseSuffix.empty())
+ return BaseSuffix;
+
+ return R.getName() + BaseSuffix;
+ }
+
+ std::pair<Record *, Record *> EmitNode (const ChildMap &Tree, raw_ostream& OS,
+ Record *Base);
+public:
+ explicit ClangASTNodesEmitter(RecordKeeper &R, const std::string &N,
+ const std::string &S)
+ : Records(R), Root(N, SMLoc()), BaseSuffix(S)
+ {}
+
+ // run - Output the .inc file contents
+ void run(raw_ostream &OS);
+};
+
+/// ClangDeclContextEmitter - Emits an addendum to a .inc file to enumerate the
+/// clang declaration contexts.
+///
+class ClangDeclContextEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
public:
- explicit ClangStmtNodesEmitter(RecordKeeper &R)
- : Records(R) {}
+ explicit ClangDeclContextEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
- // run - Output the .def file contents
+ // run - Output the .inc file contents
void run(raw_ostream &OS);
};
diff --git a/contrib/llvm/utils/TableGen/ClangAttrEmitter.cpp b/contrib/llvm/utils/TableGen/ClangAttrEmitter.cpp
new file mode 100644
index 0000000..fbdd2a7
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/ClangAttrEmitter.cpp
@@ -0,0 +1,84 @@
+//===- ClangAttrEmitter.cpp - Generate Clang attribute handling =-*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang attribute processing code
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangAttrEmitter.h"
+#include "Record.h"
+#include <algorithm>
+
+using namespace llvm;
+
+void ClangAttrClassEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+ OS << "#ifndef LLVM_CLANG_ATTR_CLASSES_INC\n";
+ OS << "#define LLVM_CLANG_ATTR_CLASSES_INC\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ for (std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
+ i != e; ++i) {
+ Record &R = **i;
+
+ if (R.getValueAsBit("DoNotEmit"))
+ continue;
+
+ OS << "class " << R.getName() << "Attr : public Attr {\n";
+
+ std::vector<Record*> Args = R.getValueAsListOfDefs("Args");
+
+ // FIXME: Handle arguments
+ assert(Args.empty() && "Can't yet handle arguments");
+
+ OS << "\n public:\n";
+ OS << " " << R.getName() << "Attr(";
+
+ // Arguments go here
+
+ OS << ")\n";
+ OS << " : Attr(attr::" << R.getName() << ")";
+
+ // Arguments go here
+
+ OS << " {}\n\n";
+
+ OS << " virtual Attr *clone (ASTContext &C) const;\n";
+ OS << " static bool classof(const Attr *A) { return A->getKind() == "
+ << "attr::" << R.getName() << "; }\n";
+ OS << " static bool classof(const " << R.getName()
+ << "Attr *) { return true; }\n";
+ OS << "};\n\n";
+ }
+
+ OS << "#endif\n";
+}
+
+void ClangAttrListEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ OS << "#ifndef LAST_ATTR\n";
+ OS << "#define LAST_ATTR(NAME) ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
+
+ if (i != e) {
+ // Move the end iterator back to emit the last attribute.
+ for(--e; i != e; ++i)
+ OS << "ATTR(" << (*i)->getName() << ")\n";
+
+ OS << "LAST_ATTR(" << (*i)->getName() << ")\n\n";
+ }
+
+ OS << "#undef LAST_ATTR\n";
+ OS << "#undef ATTR\n";
+}
diff --git a/contrib/llvm/utils/TableGen/ClangAttrEmitter.h b/contrib/llvm/utils/TableGen/ClangAttrEmitter.h
new file mode 100644
index 0000000..5ce1c87
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/ClangAttrEmitter.h
@@ -0,0 +1,49 @@
+//===- ClangAttrEmitter.h - Generate Clang attribute handling =-*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang attribute processing code
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANGATTR_EMITTER_H
+#define CLANGATTR_EMITTER_H
+
+#include "TableGenBackend.h"
+
+namespace llvm {
+
+/// ClangAttrClassEmitter - Emits the class definitions for the attributes
+/// used by clang.
+class ClangAttrClassEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrClassEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrListEmitter - Emits the enumeration list for the attributes
+/// used by clang.
+class ClangAttrListEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrListEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp b/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
index 2a2a4ef..ec702c2a5 100644
--- a/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/contrib/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -24,19 +24,8 @@ void CodeEmitterGen::reverseBits(std::vector<Record*> &Insts) {
for (std::vector<Record*>::iterator I = Insts.begin(), E = Insts.end();
I != E; ++I) {
Record *R = *I;
- if (R->getName() == "PHI" ||
- R->getName() == "INLINEASM" ||
- R->getName() == "DBG_LABEL" ||
- R->getName() == "EH_LABEL" ||
- R->getName() == "GC_LABEL" ||
- R->getName() == "KILL" ||
- R->getName() == "EXTRACT_SUBREG" ||
- R->getName() == "INSERT_SUBREG" ||
- R->getName() == "IMPLICIT_DEF" ||
- R->getName() == "SUBREG_TO_REG" ||
- R->getName() == "COPY_TO_REGCLASS" ||
- R->getName() == "DBG_VALUE" ||
- R->getName() == "REG_SEQUENCE") continue;
+ if (R->getValueAsString("Namespace") == "TargetOpcode")
+ continue;
BitsInit *BI = R->getValueAsBitsInit("Inst");
@@ -103,19 +92,7 @@ void CodeEmitterGen::run(raw_ostream &o) {
const CodeGenInstruction *CGI = *IN;
Record *R = CGI->TheDef;
- if (R->getName() == "PHI" ||
- R->getName() == "INLINEASM" ||
- R->getName() == "DBG_LABEL" ||
- R->getName() == "EH_LABEL" ||
- R->getName() == "GC_LABEL" ||
- R->getName() == "KILL" ||
- R->getName() == "EXTRACT_SUBREG" ||
- R->getName() == "INSERT_SUBREG" ||
- R->getName() == "IMPLICIT_DEF" ||
- R->getName() == "SUBREG_TO_REG" ||
- R->getName() == "COPY_TO_REGCLASS" ||
- R->getName() == "DBG_VALUE" ||
- R->getName() == "REG_SEQUENCE") {
+ if (R->getValueAsString("Namespace") == "TargetOpcode") {
o << " 0U,\n";
continue;
}
@@ -140,22 +117,10 @@ void CodeEmitterGen::run(raw_ostream &o) {
for (std::vector<Record*>::iterator IC = Insts.begin(), EC = Insts.end();
IC != EC; ++IC) {
Record *R = *IC;
+ if (R->getValueAsString("Namespace") == "TargetOpcode")
+ continue;
const std::string &InstName = R->getName();
std::string Case("");
-
- if (InstName == "PHI" ||
- InstName == "INLINEASM" ||
- InstName == "DBG_LABEL"||
- InstName == "EH_LABEL"||
- InstName == "GC_LABEL"||
- InstName == "KILL"||
- InstName == "EXTRACT_SUBREG" ||
- InstName == "INSERT_SUBREG" ||
- InstName == "IMPLICIT_DEF" ||
- InstName == "SUBREG_TO_REG" ||
- InstName == "COPY_TO_REGCLASS" ||
- InstName == "DBG_VALUE" ||
- InstName == "REG_SEQUENCE") continue;
BitsInit *BI = R->getValueAsBitsInit("Inst");
const std::vector<RecordVal> &Vals = R->getValues();
diff --git a/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp b/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
index 99d196c..35b54a5 100644
--- a/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenInstruction.cpp
@@ -107,7 +107,6 @@ CodeGenInstruction::CodeGenInstruction(Record *R, const std::string &AsmStr)
canFoldAsLoad = R->getValueAsBit("canFoldAsLoad");
mayLoad = R->getValueAsBit("mayLoad");
mayStore = R->getValueAsBit("mayStore");
- bool isTwoAddress = R->getValueAsBit("isTwoAddress");
isPredicable = R->getValueAsBit("isPredicable");
isConvertibleToThreeAddress = R->getValueAsBit("isConvertibleToThreeAddress");
isCommutable = R->getValueAsBit("isCommutable");
@@ -212,16 +211,6 @@ CodeGenInstruction::CodeGenInstruction(Record *R, const std::string &AsmStr)
// Parse Constraints.
ParseConstraints(R->getValueAsString("Constraints"), this);
- // For backward compatibility: isTwoAddress means operand 1 is tied to
- // operand 0.
- if (isTwoAddress) {
- if (!OperandList[1].Constraints[0].isNone())
- throw R->getName() + ": cannot use isTwoAddress property: instruction "
- "already has constraint set!";
- OperandList[1].Constraints[0] =
- CodeGenInstruction::ConstraintInfo::getTied(0);
- }
-
// Parse the DisableEncoding field.
std::string DisableEncoding = R->getValueAsString("DisableEncoding");
while (1) {
diff --git a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
index 3797992..d8130fb 100644
--- a/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
+++ b/contrib/llvm/utils/TableGen/CodeGenTarget.cpp
@@ -329,61 +329,42 @@ struct SortInstByName {
/// getInstructionsByEnumValue - Return all of the instructions defined by the
/// target, ordered by their enum value.
void CodeGenTarget::ComputeInstrsByEnum() const {
+ // The ordering here must match the ordering in TargetOpcodes.h.
+ const char *const FixedInstrs[] = {
+ "PHI",
+ "INLINEASM",
+ "DBG_LABEL",
+ "EH_LABEL",
+ "GC_LABEL",
+ "KILL",
+ "EXTRACT_SUBREG",
+ "INSERT_SUBREG",
+ "IMPLICIT_DEF",
+ "SUBREG_TO_REG",
+ "COPY_TO_REGCLASS",
+ "DBG_VALUE",
+ "REG_SEQUENCE",
+ "COPY",
+ 0
+ };
const DenseMap<const Record*, CodeGenInstruction*> &Insts = getInstructions();
- const CodeGenInstruction *PHI = GetInstByName("PHI", Insts);
- const CodeGenInstruction *INLINEASM = GetInstByName("INLINEASM", Insts);
- const CodeGenInstruction *DBG_LABEL = GetInstByName("DBG_LABEL", Insts);
- const CodeGenInstruction *EH_LABEL = GetInstByName("EH_LABEL", Insts);
- const CodeGenInstruction *GC_LABEL = GetInstByName("GC_LABEL", Insts);
- const CodeGenInstruction *KILL = GetInstByName("KILL", Insts);
- const CodeGenInstruction *EXTRACT_SUBREG =
- GetInstByName("EXTRACT_SUBREG", Insts);
- const CodeGenInstruction *INSERT_SUBREG =
- GetInstByName("INSERT_SUBREG", Insts);
- const CodeGenInstruction *IMPLICIT_DEF = GetInstByName("IMPLICIT_DEF", Insts);
- const CodeGenInstruction *SUBREG_TO_REG =
- GetInstByName("SUBREG_TO_REG", Insts);
- const CodeGenInstruction *COPY_TO_REGCLASS =
- GetInstByName("COPY_TO_REGCLASS", Insts);
- const CodeGenInstruction *DBG_VALUE = GetInstByName("DBG_VALUE", Insts);
- const CodeGenInstruction *REG_SEQUENCE = GetInstByName("REG_SEQUENCE", Insts);
-
- // Print out the rest of the instructions now.
- InstrsByEnum.push_back(PHI);
- InstrsByEnum.push_back(INLINEASM);
- InstrsByEnum.push_back(DBG_LABEL);
- InstrsByEnum.push_back(EH_LABEL);
- InstrsByEnum.push_back(GC_LABEL);
- InstrsByEnum.push_back(KILL);
- InstrsByEnum.push_back(EXTRACT_SUBREG);
- InstrsByEnum.push_back(INSERT_SUBREG);
- InstrsByEnum.push_back(IMPLICIT_DEF);
- InstrsByEnum.push_back(SUBREG_TO_REG);
- InstrsByEnum.push_back(COPY_TO_REGCLASS);
- InstrsByEnum.push_back(DBG_VALUE);
- InstrsByEnum.push_back(REG_SEQUENCE);
-
+ for (const char *const *p = FixedInstrs; *p; ++p) {
+ const CodeGenInstruction *Instr = GetInstByName(*p, Insts);
+ assert(Instr && "Missing target independent instruction");
+ assert(Instr->Namespace == "TargetOpcode" && "Bad namespace");
+ InstrsByEnum.push_back(Instr);
+ }
unsigned EndOfPredefines = InstrsByEnum.size();
-
+
for (DenseMap<const Record*, CodeGenInstruction*>::const_iterator
I = Insts.begin(), E = Insts.end(); I != E; ++I) {
const CodeGenInstruction *CGI = I->second;
- if (CGI != PHI &&
- CGI != INLINEASM &&
- CGI != DBG_LABEL &&
- CGI != EH_LABEL &&
- CGI != GC_LABEL &&
- CGI != KILL &&
- CGI != EXTRACT_SUBREG &&
- CGI != INSERT_SUBREG &&
- CGI != IMPLICIT_DEF &&
- CGI != SUBREG_TO_REG &&
- CGI != COPY_TO_REGCLASS &&
- CGI != DBG_VALUE &&
- CGI != REG_SEQUENCE)
+ if (CGI->Namespace != "TargetOpcode")
InstrsByEnum.push_back(CGI);
}
-
+
+ assert(InstrsByEnum.size() == Insts.size() && "Missing predefined instr");
+
// All of the instructions are now in random order based on the map iteration.
// Sort them by name.
std::sort(InstrsByEnum.begin()+EndOfPredefines, InstrsByEnum.end(),
diff --git a/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
index 4473f0d..3750bd8 100644
--- a/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/DAGISelMatcherEmitter.cpp
@@ -635,6 +635,7 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
if (!ComplexPatterns.empty()) {
OS << "bool CheckComplexPattern(SDNode *Root, SDValue N,\n";
OS << " unsigned PatternNo, SmallVectorImpl<SDValue> &Result) {\n";
+ OS << " unsigned NextRes = Result.size();\n";
OS << " switch (PatternNo) {\n";
OS << " default: assert(0 && \"Invalid pattern # in table?\");\n";
for (unsigned i = 0, e = ComplexPatterns.size(); i != e; ++i) {
@@ -645,12 +646,12 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
++NumOps; // Get the chained node too.
OS << " case " << i << ":\n";
- OS << " Result.resize(Result.size()+" << NumOps << ");\n";
+ OS << " Result.resize(NextRes+" << NumOps << ");\n";
OS << " return " << P.getSelectFunc();
OS << "(Root, N";
for (unsigned i = 0; i != NumOps; ++i)
- OS << ", Result[Result.size()-" << (NumOps-i) << ']';
+ OS << ", Result[NextRes+" << i << ']';
OS << ");\n";
}
OS << " }\n";
diff --git a/contrib/llvm/utils/TableGen/EDEmitter.cpp b/contrib/llvm/utils/TableGen/EDEmitter.cpp
index 0d5ee40..c5ee828 100644
--- a/contrib/llvm/utils/TableGen/EDEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/EDEmitter.cpp
@@ -306,6 +306,7 @@ static int X86TypeFromOpName(LiteralConstantEmitter *type,
REG("RFP64");
REG("RFP80");
REG("VR128");
+ REG("VR256");
REG("RST");
REG("SEGMENT_REG");
REG("DEBUG_REG");
@@ -339,6 +340,7 @@ static int X86TypeFromOpName(LiteralConstantEmitter *type,
MEM("opaque80mem");
MEM("i128mem");
MEM("f128mem");
+ MEM("f256mem");
MEM("opaque512mem");
// all R, I, R, I
@@ -347,6 +349,7 @@ static int X86TypeFromOpName(LiteralConstantEmitter *type,
LEA("lea64mem");
// all I
+ PCR("i16imm_pcrel");
PCR("i32imm_pcrel");
PCR("i64i32imm_pcrel");
PCR("brtarget8");
@@ -500,6 +503,8 @@ static void X86ExtractSemantics(
// TODO add support for fixed operands
} else if (name.find("F") != name.npos) {
// ignore (this pushes onto the FP stack)
+ } else if (name.find("A") != name.npos) {
+ // ignore (pushes all GP registers onto the stack)
} else if (name[name.length() - 1] == 'm') {
PUSH("src");
} else if (name.find("i") != name.npos) {
@@ -518,6 +523,8 @@ static void X86ExtractSemantics(
// TODO add support for fixed operands
} else if (name.find("F") != name.npos) {
// ignore (this pops from the FP stack)
+ } else if (name.find("A") != name.npos) {
+ // ignore (pops all GP registers from the stack)
} else if (name[name.length() - 1] == 'm') {
POP("dst");
} else {
@@ -570,6 +577,7 @@ static void X86ExtractSemantics(
static int ARMFlagFromOpName(LiteralConstantEmitter *type,
const std::string &name) {
REG("GPR");
+ REG("tcGPR");
REG("cc_out");
REG("s_cc_out");
REG("tGPR");
@@ -592,10 +600,7 @@ static int ARMFlagFromOpName(LiteralConstantEmitter *type,
IMM("msr_mask");
IMM("neg_zero");
IMM("imm0_31");
- IMM("h8imm");
- IMM("h16imm");
- IMM("h32imm");
- IMM("h64imm");
+ IMM("nModImm");
IMM("imm0_4095");
IMM("jt2block_operand");
IMM("t_imm_s4");
diff --git a/contrib/llvm/utils/TableGen/FastISelEmitter.cpp b/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
index 9ec9e08..08fc139 100644
--- a/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/FastISelEmitter.cpp
@@ -54,15 +54,15 @@ struct OperandsSignature {
bool initialize(TreePatternNode *InstPatNode,
const CodeGenTarget &Target,
MVT::SimpleValueType VT) {
- if (!InstPatNode->isLeaf() &&
- InstPatNode->getOperator()->getName() == "imm") {
- Operands.push_back("i");
- return true;
- }
- if (!InstPatNode->isLeaf() &&
- InstPatNode->getOperator()->getName() == "fpimm") {
- Operands.push_back("f");
- return true;
+ if (!InstPatNode->isLeaf()) {
+ if (InstPatNode->getOperator()->getName() == "imm") {
+ Operands.push_back("i");
+ return true;
+ }
+ if (InstPatNode->getOperator()->getName() == "fpimm") {
+ Operands.push_back("f");
+ return true;
+ }
}
const CodeGenRegisterClass *DstRC = 0;
@@ -432,11 +432,9 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
if ((*Memo.PhysRegs)[i] != "")
- OS << " TII.copyRegToReg(*MBB, MBB->end(), "
- << (*Memo.PhysRegs)[i] << ", Op" << i << ", "
- << "TM.getRegisterInfo()->getPhysicalRegisterRegClass("
- << (*Memo.PhysRegs)[i] << "), "
- << "MRI.getRegClass(Op" << i << "), DL);\n";
+ OS << " BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, "
+ << "TII.get(TargetOpcode::COPY), "
+ << (*Memo.PhysRegs)[i] << ").addReg(Op" << i << ");\n";
}
OS << " return FastEmitInst_";
@@ -524,14 +522,12 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
HasPred = true;
}
- for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
- if ((*Memo.PhysRegs)[i] != "")
- OS << " TII.copyRegToReg(*MBB, MBB->end(), "
- << (*Memo.PhysRegs)[i] << ", Op" << i << ", "
- << "TM.getRegisterInfo()->getPhysicalRegisterRegClass("
- << (*Memo.PhysRegs)[i] << "), "
- << "MRI.getRegClass(Op" << i << "), DL);\n";
- }
+ for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) {
+ if ((*Memo.PhysRegs)[i] != "")
+ OS << " BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, "
+ << "TII.get(TargetOpcode::COPY), "
+ << (*Memo.PhysRegs)[i] << ").addReg(Op" << i << ");\n";
+ }
OS << " return FastEmitInst_";
diff --git a/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp b/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
index 006a2a1..f28af15 100644
--- a/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -92,7 +92,8 @@ InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) {
else if (OpR->isSubClassOf("PointerLikeRegClass"))
Res += utostr(OpR->getValueAsInt("RegClassKind")) + ", ";
else
- Res += "0, ";
+ // -1 means the operand does not have a fixed register class.
+ Res += "-1, ";
// Fill in applicable flags.
Res += "0";
@@ -301,7 +302,7 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
}
OS << ", 0x";
OS.write_hex(Value);
- OS << ", ";
+ OS << "ULL, ";
// Emit the implicit uses and defs lists...
std::vector<Record*> UseList = Inst.TheDef->getValueAsListOfDefs("Uses");
diff --git a/contrib/llvm/utils/TableGen/NeonEmitter.cpp b/contrib/llvm/utils/TableGen/NeonEmitter.cpp
new file mode 100644
index 0000000..3516d31
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/NeonEmitter.cpp
@@ -0,0 +1,1202 @@
+//===- NeonEmitter.cpp - Generate arm_neon.h for use with clang -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting arm_neon.h, which includes
+// a declaration and definition of each function specified by the ARM NEON
+// compiler interface. See ARM document DUI0348B.
+//
+// Each NEON instruction is implemented in terms of 1 or more functions which
+// are suffixed with the element type of the input vectors. Functions may be
+// implemented in terms of generic vector operations such as +, *, -, etc. or
+// by calling a __builtin_-prefixed function which will be handled by clang's
+// CodeGen library.
+//
+// Additional validation code can be generated by this file when runHeader() is
+// called, rather than the normal run() entry point.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NeonEmitter.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include <string>
+
+using namespace llvm;
+
+/// ParseTypes - break down a string such as "fQf" into a vector of StringRefs,
+/// with each StringRef representing a single type declared in the string.
+/// For "fQf" we would end up with 2 StringRefs, "f" and "Qf", representing
+/// 2xfloat and 4xfloat respectively.
+static void ParseTypes(Record *r, std::string &s,
+ SmallVectorImpl<StringRef> &TV) {
+ const char *data = s.data();
+ int len = 0;
+
+ for (unsigned i = 0, e = s.size(); i != e; ++i, ++len) {
+ if (data[len] == 'P' || data[len] == 'Q' || data[len] == 'U')
+ continue;
+
+ switch (data[len]) {
+ case 'c':
+ case 's':
+ case 'i':
+ case 'l':
+ case 'h':
+ case 'f':
+ break;
+ default:
+ throw TGError(r->getLoc(),
+ "Unexpected letter: " + std::string(data + len, 1));
+ break;
+ }
+ TV.push_back(StringRef(data, len + 1));
+ data += len + 1;
+ len = -1;
+ }
+}
+
+/// Widen - Convert a type code into the next wider type. char -> short,
+/// short -> int, etc.
+static char Widen(const char t) {
+ switch (t) {
+ case 'c':
+ return 's';
+ case 's':
+ return 'i';
+ case 'i':
+ return 'l';
+ default: throw "unhandled type in widen!";
+ }
+ return '\0';
+}
+
+/// Narrow - Convert a type code into the next smaller type. short -> char,
+/// float -> half float, etc.
+static char Narrow(const char t) {
+ switch (t) {
+ case 's':
+ return 'c';
+ case 'i':
+ return 's';
+ case 'l':
+ return 'i';
+ case 'f':
+ return 'h';
+ default: throw "unhandled type in narrow!";
+ }
+ return '\0';
+}
+
+/// For a particular StringRef, return the base type code, and whether it has
+/// the quad-vector, polynomial, or unsigned modifiers set.
+static char ClassifyType(StringRef ty, bool &quad, bool &poly, bool &usgn) {
+ unsigned off = 0;
+
+ // remember quad.
+ if (ty[off] == 'Q') {
+ quad = true;
+ ++off;
+ }
+
+ // remember poly.
+ if (ty[off] == 'P') {
+ poly = true;
+ ++off;
+ }
+
+ // remember unsigned.
+ if (ty[off] == 'U') {
+ usgn = true;
+ ++off;
+ }
+
+ // base type to get the type string for.
+ return ty[off];
+}
+
+/// ModType - Transform a type code and its modifiers based on a mod code. The
+/// mod code definitions may be found at the top of arm_neon.td.
+static char ModType(const char mod, char type, bool &quad, bool &poly,
+ bool &usgn, bool &scal, bool &cnst, bool &pntr) {
+ switch (mod) {
+ case 't':
+ if (poly) {
+ poly = false;
+ usgn = true;
+ }
+ break;
+ case 'u':
+ usgn = true;
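+ // FALLTHROUGH into 'x': unsigned types also drop poly and map float to int.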
+ case 'x':
+ poly = false;
+ if (type == 'f')
+ type = 'i';
+ break;
+ case 'f':
+ if (type == 'h')
+ quad = true;
+ type = 'f';
+ usgn = false;
+ break;
+ case 'w':
+ type = Widen(type);
+ quad = true;
+ break;
+ case 'n':
+ type = Widen(type);
+ break;
+ case 'l':
+ type = 'l';
+ scal = true;
+ usgn = true;
+ break;
+ case 's':
+ case 'a':
+ scal = true;
+ break;
+ case 'k':
+ quad = true;
+ break;
+ case 'c':
+ cnst = true;
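+ // FALLTHROUGH: 'c' is the const variant of the 'p' pointer modifier.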
+ case 'p':
+ pntr = true;
+ scal = true;
+ break;
+ case 'h':
+ type = Narrow(type);
+ if (type == 'h')
+ quad = false;
+ break;
+ case 'e':
+ type = Narrow(type);
+ usgn = true;
+ break;
+ default:
+ break;
+ }
+ return type;
+}
+
+/// TypeString - for a modifier and type, generate the name of the typedef for
+/// that type. If generic is true, emit the generic vector type rather than
+/// the public NEON type. QUc -> uint8x16_t / __neon_uint8x16_t.
+static std::string TypeString(const char mod, StringRef typestr,
+ bool generic = false) {
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ bool scal = false;
+ bool cnst = false;
+ bool pntr = false;
+
+ if (mod == 'v')
+ return "void";
+ if (mod == 'i')
+ return "int";
+
+ // base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ // Based on the modifying character, change the type and width if necessary.
+ type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+
+ SmallString<128> s;
+
+ if (generic)
+ s += "__neon_";
+
+ if (usgn)
+ s.push_back('u');
+
+ switch (type) {
+ case 'c':
+ s += poly ? "poly8" : "int8";
+ if (scal)
+ break;
+ s += quad ? "x16" : "x8";
+ break;
+ case 's':
+ s += poly ? "poly16" : "int16";
+ if (scal)
+ break;
+ s += quad ? "x8" : "x4";
+ break;
+ case 'i':
+ s += "int32";
+ if (scal)
+ break;
+ s += quad ? "x4" : "x2";
+ break;
+ case 'l':
+ s += "int64";
+ if (scal)
+ break;
+ s += quad ? "x2" : "x1";
+ break;
+ case 'h':
+ s += "float16";
+ if (scal)
+ break;
+ s += quad ? "x8" : "x4";
+ break;
+ case 'f':
+ s += "float32";
+ if (scal)
+ break;
+ s += quad ? "x4" : "x2";
+ break;
+ default:
+ throw "unhandled type!";
+ break;
+ }
+
+ if (mod == '2')
+ s += "x2";
+ if (mod == '3')
+ s += "x3";
+ if (mod == '4')
+ s += "x4";
+
+ // Append _t, finishing the type string typedef type.
+ s += "_t";
+
+ if (cnst)
+ s += " const";
+
+ if (pntr)
+ s += " *";
+
+ return s.str();
+}
+
+/// BuiltinTypeString - for a modifier and type, generate the clang
+/// BuiltinsARM.def prototype code for the function. See the top of clang's
+/// Builtins.def for a description of the type strings.
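+/// A quad vector of floats, for example, becomes "V4f" outside ClassB.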
+static std::string BuiltinTypeString(const char mod, StringRef typestr,
+ ClassKind ck, bool ret) {
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ bool scal = false;
+ bool cnst = false;
+ bool pntr = false;
+
+ if (mod == 'v')
+ return "v";
+ if (mod == 'i')
+ return "i";
+
+ // base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ // Based on the modifying character, change the type and width if necessary.
+ type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+
+ if (pntr) {
+ usgn = false;
+ poly = false;
+ type = 'v';
+ }
+ if (type == 'h') {
+ type = 's';
+ usgn = true;
+ }
+ usgn = usgn | poly | ((ck == ClassI || ck == ClassW) && scal && type != 'f');
+
+ if (scal) {
+ SmallString<128> s;
+
+ if (usgn)
+ s.push_back('U');
+
+ if (type == 'l')
+ s += "LLi";
+ else
+ s.push_back(type);
+
+ if (cnst)
+ s.push_back('C');
+ if (pntr)
+ s.push_back('*');
+ return s.str();
+ }
+
+ // Since the return value must be one type, return a vector type of the
+ // appropriate width which we will bitcast. An exception is made for
+ // returning structs of 2, 3, or 4 vectors which are returned in a sret-like
+ // fashion, storing them to a pointer arg.
+ if (ret) {
+ if (mod == '2' || mod == '3' || mod == '4')
+ return "vv*";
+ if (mod == 'f' || (ck != ClassB && type == 'f'))
+ return quad ? "V4f" : "V2f";
+ if (ck != ClassB && type == 's')
+ return quad ? "V8s" : "V4s";
+ if (ck != ClassB && type == 'i')
+ return quad ? "V4i" : "V2i";
+ if (ck != ClassB && type == 'l')
+ return quad ? "V2LLi" : "V1LLi";
+
+ return quad ? "V16c" : "V8c";
+ }
+
+ // Non-return array types are passed as individual vectors.
+ if (mod == '2')
+ return quad ? "V16cV16c" : "V8cV8c";
+ if (mod == '3')
+ return quad ? "V16cV16cV16c" : "V8cV8cV8c";
+ if (mod == '4')
+ return quad ? "V16cV16cV16cV16c" : "V8cV8cV8cV8c";
+
+ if (mod == 'f' || (ck != ClassB && type == 'f'))
+ return quad ? "V4f" : "V2f";
+ if (ck != ClassB && type == 's')
+ return quad ? "V8s" : "V4s";
+ if (ck != ClassB && type == 'i')
+ return quad ? "V4i" : "V2i";
+ if (ck != ClassB && type == 'l')
+ return quad ? "V2LLi" : "V1LLi";
+
+ return quad ? "V16c" : "V8c";
+}
+
+/// StructTag - generate the name of the struct tag for a type.
+/// These names are mandated by ARM's ABI.
+static std::string StructTag(StringRef typestr) {
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+
+ // base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ SmallString<128> s;
+ s += "__simd";
+ s += quad ? "128_" : "64_";
+ if (usgn)
+ s.push_back('u');
+
+ switch (type) {
+ case 'c':
+ s += poly ? "poly8" : "int8";
+ break;
+ case 's':
+ s += poly ? "poly16" : "int16";
+ break;
+ case 'i':
+ s += "int32";
+ break;
+ case 'l':
+ s += "int64";
+ break;
+ case 'h':
+ s += "float16";
+ break;
+ case 'f':
+ s += "float32";
+ break;
+ default:
+ throw "unhandled type!";
+ break;
+ }
+
+ // Append _t, finishing the struct tag name.
+ s += "_t";
+
+ return s.str();
+}
+
+/// MangleName - Append a type or width suffix to a base neon function name,
+/// and insert a 'q' in the appropriate location if the operation works on
+/// 128b rather than 64b. E.g. turn "vst2_lane" into "vst2q_lane_f32", etc.
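+/// MangleName("vadd", "Qf", ClassS), for instance, yields "vaddq_f32".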
+static std::string MangleName(const std::string &name, StringRef typestr,
+ ClassKind ck) {
+ if (name == "vcvt_f32_f16")
+ return name;
+
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ std::string s = name;
+
+ switch (type) {
+ case 'c':
+ switch (ck) {
+ case ClassS: s += poly ? "_p8" : usgn ? "_u8" : "_s8"; break;
+ case ClassI: s += "_i8"; break;
+ case ClassW: s += "_8"; break;
+ default: break;
+ }
+ break;
+ case 's':
+ switch (ck) {
+ case ClassS: s += poly ? "_p16" : usgn ? "_u16" : "_s16"; break;
+ case ClassI: s += "_i16"; break;
+ case ClassW: s += "_16"; break;
+ default: break;
+ }
+ break;
+ case 'i':
+ switch (ck) {
+ case ClassS: s += usgn ? "_u32" : "_s32"; break;
+ case ClassI: s += "_i32"; break;
+ case ClassW: s += "_32"; break;
+ default: break;
+ }
+ break;
+ case 'l':
+ switch (ck) {
+ case ClassS: s += usgn ? "_u64" : "_s64"; break;
+ case ClassI: s += "_i64"; break;
+ case ClassW: s += "_64"; break;
+ default: break;
+ }
+ break;
+ case 'h':
+ switch (ck) {
+ case ClassS:
+ case ClassI: s += "_f16"; break;
+ case ClassW: s += "_16"; break;
+ default: break;
+ }
+ break;
+ case 'f':
+ switch (ck) {
+ case ClassS:
+ case ClassI: s += "_f32"; break;
+ case ClassW: s += "_32"; break;
+ default: break;
+ }
+ break;
+ default:
+ throw "unhandled type!";
+ break;
+ }
+ if (ck == ClassB)
+ s += "_v";
+
+ // Insert a 'q' before the first '_' character so that it ends up before
+ // _lane or _n on vector-scalar operations.
+ if (quad) {
+ size_t pos = s.find('_');
+ s = s.insert(pos, "q");
+ }
+ return s;
+}
+
+// Generate the string "(argtype a, argtype b, ...)"
+static std::string GenArgs(const std::string &proto, StringRef typestr) {
+ bool define = proto.find('i') != std::string::npos;
+ char arg = 'a';
+
+ std::string s;
+ s += "(";
+
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ if (!define) {
+ s += TypeString(proto[i], typestr);
+ s.push_back(' ');
+ }
+ s.push_back(arg);
+ if ((i + 1) < e)
+ s += ", ";
+ }
+
+ s += ")";
+ return s;
+}
+
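+// Duplicate - emit an initializer that splats 'a' across an nElts-wide
+// vector, e.g. "(__neon_int32x2_t){ a, a }" for nElts == 2 and typestr "i".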
+static std::string Duplicate(unsigned nElts, StringRef typestr,
+ const std::string &a) {
+ std::string s;
+
+ s = "(__neon_" + TypeString('d', typestr) + "){ ";
+ for (unsigned i = 0; i != nElts; ++i) {
+ s += a;
+ if ((i + 1) < nElts)
+ s += ", ";
+ }
+ s += " }";
+
+ return s;
+}
+
+// Generate the definition for this intrinsic, e.g. "a + b" for OpAdd.
+// If structTypes is true, the NEON types are structs of vector types rather
+// than vector types, and the call becomes "a.val + b.val"
+static std::string GenOpString(OpKind op, const std::string &proto,
+ StringRef typestr, bool structTypes = true) {
+ bool dummy, quad = false;
+ char type = ClassifyType(typestr, quad, dummy, dummy);
+ unsigned nElts = 0;
+ switch (type) {
+ case 'c': nElts = 8; break;
+ case 's': nElts = 4; break;
+ case 'i': nElts = 2; break;
+ case 'l': nElts = 1; break;
+ case 'h': nElts = 4; break;
+ case 'f': nElts = 2; break;
+ }
+
+ std::string ts = TypeString(proto[0], typestr);
+ std::string s = ts + " r; r";
+
+ if (structTypes)
+ s += ".val";
+
+ s += " = ";
+
+ std::string a, b, c;
+ if (proto.size() > 1)
+ a = (structTypes && proto[1] != 'l' && proto[1] != 's') ? "a.val" : "a";
+ b = structTypes ? "b.val" : "b";
+ c = structTypes ? "c.val" : "c";
+
+ switch(op) {
+ case OpAdd:
+ s += a + " + " + b;
+ break;
+ case OpSub:
+ s += a + " - " + b;
+ break;
+ case OpMulN:
+ b = Duplicate(nElts << (int)quad, typestr, "b");
+ case OpMul:
+ s += a + " * " + b;
+ break;
+ case OpMlaN:
+ c = Duplicate(nElts << (int)quad, typestr, "c");
+ case OpMla:
+ s += a + " + ( " + b + " * " + c + " )";
+ break;
+ case OpMlsN:
+ c = Duplicate(nElts << (int)quad, typestr, "c");
+ case OpMls:
+ s += a + " - ( " + b + " * " + c + " )";
+ break;
+ case OpEq:
+ s += "(__neon_" + ts + ")(" + a + " == " + b + ")";
+ break;
+ case OpGe:
+ s += "(__neon_" + ts + ")(" + a + " >= " + b + ")";
+ break;
+ case OpLe:
+ s += "(__neon_" + ts + ")(" + a + " <= " + b + ")";
+ break;
+ case OpGt:
+ s += "(__neon_" + ts + ")(" + a + " > " + b + ")";
+ break;
+ case OpLt:
+ s += "(__neon_" + ts + ")(" + a + " < " + b + ")";
+ break;
+ case OpNeg:
+ s += " -" + a;
+ break;
+ case OpNot:
+ s += " ~" + a;
+ break;
+ case OpAnd:
+ s += a + " & " + b;
+ break;
+ case OpOr:
+ s += a + " | " + b;
+ break;
+ case OpXor:
+ s += a + " ^ " + b;
+ break;
+ case OpAndNot:
+ s += a + " & ~" + b;
+ break;
+ case OpOrNot:
+ s += a + " | ~" + b;
+ break;
+ case OpCast:
+ s += "(__neon_" + ts + ")" + a;
+ break;
+ case OpConcat:
+ s += "__builtin_shufflevector((__neon_int64x1_t)" + a;
+ s += ", (__neon_int64x1_t)" + b + ", 0, 1)";
+ break;
+ case OpHi:
+ s += "(__neon_int64x1_t)(((__neon_int64x2_t)" + a + ")[1])";
+ break;
+ case OpLo:
+ s += "(__neon_int64x1_t)(((__neon_int64x2_t)" + a + ")[0])";
+ break;
+ case OpDup:
+ s += Duplicate(nElts << (int)quad, typestr, a);
+ break;
+ case OpSelect:
+ // ((0 & 1) | (~0 & 2))
+ ts = TypeString(proto[1], typestr);
+ s += "( " + a + " & (__neon_" + ts + ")" + b + ") | ";
+ s += "(~" + a + " & (__neon_" + ts + ")" + c + ")";
+ break;
+ case OpRev16:
+ s += "__builtin_shufflevector(" + a + ", " + a;
+ for (unsigned i = 2; i <= nElts << (int)quad; i += 2)
+ for (unsigned j = 0; j != 2; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ")";
+ break;
+ case OpRev32:
+ nElts >>= 1;
+ s += "__builtin_shufflevector(" + a + ", " + a;
+ for (unsigned i = nElts; i <= nElts << (1 + (int)quad); i += nElts)
+ for (unsigned j = 0; j != nElts; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ")";
+ break;
+ case OpRev64:
+ s += "__builtin_shufflevector(" + a + ", " + a;
+ for (unsigned i = nElts; i <= nElts << (int)quad; i += nElts)
+ for (unsigned j = 0; j != nElts; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ")";
+ break;
+ default:
+ throw "unknown OpKind!";
+ break;
+ }
+ s += "; return r;";
+ return s;
+}
+
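+// GetNeonEnum - encode the key type of the intrinsic (its return type, or
+// its first argument for void/'f' returns) as a small integer: bits 0-2
+// hold the element kind, bit 3 the unsigned flag, and bit 4 the quad flag.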
+static unsigned GetNeonEnum(const std::string &proto, StringRef typestr) {
+ unsigned mod = proto[0];
+ unsigned ret = 0;
+
+ if (mod == 'v' || mod == 'f')
+ mod = proto[1];
+
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ bool scal = false;
+ bool cnst = false;
+ bool pntr = false;
+
+ // base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ // Based on the modifying character, change the type and width if necessary.
+ type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+
+ if (usgn)
+ ret |= 0x08;
+ if (quad)
+ ret |= 0x10;
+
+ switch (type) {
+ case 'c':
+ ret |= poly ? 5 : 0;
+ break;
+ case 's':
+ ret |= poly ? 6 : 1;
+ break;
+ case 'i':
+ ret |= 2;
+ break;
+ case 'l':
+ ret |= 3;
+ break;
+ case 'h':
+ ret |= 7;
+ break;
+ case 'f':
+ ret |= 4;
+ break;
+ default:
+ throw "unhandled type!";
+ break;
+ }
+ return ret;
+}
+
+// Generate the definition for this intrinsic, e.g. __builtin_neon_cls(a)
+// If structTypes is true, the NEON types are structs of vector types rather
+// than vector types, and the call becomes __builtin_neon_cls(a.val)
+static std::string GenBuiltin(const std::string &name, const std::string &proto,
+ StringRef typestr, ClassKind ck,
+ bool structTypes = true) {
+ bool dummy, quad = false;
+ char type = ClassifyType(typestr, quad, dummy, dummy);
+ unsigned nElts = 0;
+ switch (type) {
+ case 'c': nElts = 8; break;
+ case 's': nElts = 4; break;
+ case 'i': nElts = 2; break;
+ case 'l': nElts = 1; break;
+ case 'h': nElts = 4; break;
+ case 'f': nElts = 2; break;
+ }
+ if (quad) nElts <<= 1;
+
+ char arg = 'a';
+ std::string s;
+
+ // If this builtin returns a struct of 2, 3, or 4 vectors, pass it as an
+ // implicit sret-like argument.
+ bool sret = (proto[0] == '2' || proto[0] == '3' || proto[0] == '4');
+
+ // If this builtin takes an immediate argument, we need to #define it rather
+ // than use a standard declaration, so that SemaChecking can range check
+ // the immediate passed by the user.
+ bool define = proto.find('i') != std::string::npos;
+
+ // If all types are the same size, bitcasting the args will take care
+ // of arg checking. The actual signedness etc. will be taken care of with
+ // special enums.
+ if (proto.find('s') == std::string::npos)
+ ck = ClassB;
+
+ if (proto[0] != 'v') {
+ std::string ts = TypeString(proto[0], typestr);
+
+ if (define) {
+ if (sret)
+ s += "({ " + ts + " r; ";
+ else if (proto[0] != 's')
+ s += "(" + ts + "){(__neon_" + ts + ")";
+ } else if (sret) {
+ s += ts + " r; ";
+ } else {
+ s += ts + " r; r";
+ if (structTypes && proto[0] != 's' && proto[0] != 'i' && proto[0] != 'l')
+ s += ".val";
+
+ s += " = ";
+ }
+ }
+
+ bool splat = proto.find('a') != std::string::npos;
+
+ s += "__builtin_neon_";
+ if (splat) {
+ std::string vname(name, 0, name.size()-2);
+ s += MangleName(vname, typestr, ck);
+ } else {
+ s += MangleName(name, typestr, ck);
+ }
+ s += "(";
+
+ // Pass the address of the return variable as the first argument to sret-like
+ // builtins.
+ if (sret)
+ s += "&r, ";
+
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ std::string args = std::string(&arg, 1);
+ if (define)
+ args = "(" + args + ")";
+
+ // Handle multiple-vector values specially, emitting each subvector as an
+ // argument to the __builtin.
+ if (structTypes && (proto[i] == '2' || proto[i] == '3' || proto[i] == '4')){
+ for (unsigned vi = 0, ve = proto[i] - '0'; vi != ve; ++vi) {
+ s += args + ".val[" + utostr(vi) + "].val";
+ if ((vi + 1) < ve)
+ s += ", ";
+ }
+ if ((i + 1) < e)
+ s += ", ";
+
+ continue;
+ }
+
+ if (splat && (i + 1) == e)
+ s += Duplicate(nElts, typestr, args);
+ else
+ s += args;
+
+ if (structTypes && proto[i] != 's' && proto[i] != 'i' && proto[i] != 'l' &&
+ proto[i] != 'p' && proto[i] != 'c' && proto[i] != 'a') {
+ s += ".val";
+ }
+ if ((i + 1) < e)
+ s += ", ";
+ }
+
+ // Extra constant integer to hold type class enum for this function, e.g. s8
+ if (ck == ClassB)
+ s += ", " + utostr(GetNeonEnum(proto, typestr));
+
+ if (define)
+ s += ")";
+ else
+ s += ");";
+
+ if (proto[0] != 'v') {
+ if (define) {
+ if (sret)
+ s += "; r; })";
+ else if (proto[0] != 's')
+ s += "}";
+ } else {
+ s += " return r;";
+ }
+ }
+ return s;
+}
+
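To see the shape of what GenBuiltin produces, consider a hypothetical struct-of-2-vectors intrinsic: a '2' in the prototype takes the sret path, so the generated body declares the result, passes its address first, unpacks the .val vector members, and (for ClassB) appends the trailing type-code constant. Every name and type below is a placeholder, not taken from arm_neon.td:

    #include <cstdio>
    #include <string>

    // Assemble, piece by piece, the body GenBuiltin would emit for a
    // hypothetical sret-style intrinsic.
    int main() {
      std::string s = "int8x8x2_t r; ";
      s += "__builtin_neon_vtrn_v(";
      s += "&r, ";         // sret: address of the result goes first
      s += "a.val, b.val"; // structTypes: pass the .val vector members
      s += ", 0);";        // ClassB: trailing type-code constant
      s += " return r;";
      printf("%s\n", s.c_str());
      return 0;
    }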
+static std::string GenBuiltinDef(const std::string &name,
+ const std::string &proto,
+ StringRef typestr, ClassKind ck) {
+ std::string s("BUILTIN(__builtin_neon_");
+
+ // If all types are the same size, bitcasting the args will take care
+ // of arg checking. The actual signedness etc. will be taken care of with
+ // special enums.
+ if (proto.find('s') == std::string::npos)
+ ck = ClassB;
+
+ s += MangleName(name, typestr, ck);
+ s += ", \"";
+
+ for (unsigned i = 0, e = proto.size(); i != e; ++i)
+ s += BuiltinTypeString(proto[i], typestr, ck, i == 0);
+
+ // Extra constant integer to hold type class enum for this function, e.g. s8
+ if (ck == ClassB)
+ s += "i";
+
+ s += "\", \"n\")";
+ return s;
+}
+
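Each call to GenBuiltinDef yields one BUILTIN() line for BuiltinsARM.def, and a ClassB entry gains a trailing "i" in its type string for the extra type-code argument. The shape of one such line, with a made-up mangled name and type letters:

    BUILTIN(__builtin_neon_vtrn_v, "vv*V8ScV8Sci", "n")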
+/// run - Read the records in arm_neon.td and output arm_neon.h. arm_neon.h
+/// consists of type definitions and function declarations.
+void NeonEmitter::run(raw_ostream &OS) {
+ EmitSourceFileHeader("ARM NEON Header", OS);
+
+ // FIXME: emit license into file?
+
+ OS << "#ifndef __ARM_NEON_H\n";
+ OS << "#define __ARM_NEON_H\n\n";
+
+ OS << "#ifndef __ARM_NEON__\n";
+ OS << "#error \"NEON support not enabled\"\n";
+ OS << "#endif\n\n";
+
+ OS << "#include <stdint.h>\n\n";
+
+ // Emit NEON-specific scalar typedefs.
+ OS << "typedef float float32_t;\n";
+ OS << "typedef uint8_t poly8_t;\n";
+ OS << "typedef uint16_t poly16_t;\n";
+ OS << "typedef uint16_t float16_t;\n";
+
+ // Emit Neon vector typedefs.
+ std::string TypedefTypes("cQcsQsiQilQlUcQUcUsQUsUiQUiUlQUlhQhfQfPcQPcPsQPs");
+ SmallVector<StringRef, 24> TDTypeVec;
+ ParseTypes(0, TypedefTypes, TDTypeVec);
+
+ // Emit vector typedefs.
+ for (unsigned v = 1; v != 5; ++v) {
+ for (unsigned i = 0, e = TDTypeVec.size(); i != e; ++i) {
+ bool dummy, quad = false;
+ (void) ClassifyType(TDTypeVec[i], quad, dummy, dummy);
+ OS << "typedef __attribute__(( __vector_size__(";
+
+ OS << utostr(8*v*(quad ? 2 : 1)) << ") )) ";
+ if (!quad)
+ OS << " ";
+
+ OS << TypeString('s', TDTypeVec[i]);
+ OS << " __neon_";
+
+ char t = (v == 1) ? 'd' : '0' + v;
+ OS << TypeString(t, TDTypeVec[i]) << ";\n";
+ }
+ }
+ OS << "\n";
+
+ // Emit struct typedefs.
+ for (unsigned vi = 1; vi != 5; ++vi) {
+ for (unsigned i = 0, e = TDTypeVec.size(); i != e; ++i) {
+ std::string ts = TypeString('d', TDTypeVec[i], vi == 1);
+ std::string vs = TypeString((vi > 1) ? '0' + vi : 'd', TDTypeVec[i]);
+ std::string tag = (vi > 1) ? vs : StructTag(TDTypeVec[i]);
+ OS << "typedef struct " << tag << " {\n";
+ OS << " " << ts << " val";
+ if (vi > 1)
+ OS << "[" << utostr(vi) << "]";
+ OS << ";\n} " << vs << ";\n\n";
+ }
+ }
+
+ OS << "#define __ai static __attribute__((__always_inline__))\n\n";
+
+ std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
+
+ // Emit a definition for each intrinsic record and each of its types.
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ std::string name = LowercaseString(R->getName());
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+
+ bool define = Proto.find('i') != std::string::npos;
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ assert(!Proto.empty() && "unexpected empty prototype");
+
+ // static always inline + return type
+ if (define)
+ OS << "#define";
+ else
+ OS << "__ai " << TypeString(Proto[0], TypeVec[ti]);
+
+ // Function name with type suffix
+ OS << " " << MangleName(name, TypeVec[ti], ClassS);
+
+ // Function arguments
+ OS << GenArgs(Proto, TypeVec[ti]);
+
+ // Definition.
+ if (define)
+ OS << " ";
+ else
+ OS << " { ";
+
+ if (k != OpNone) {
+ OS << GenOpString(k, Proto, TypeVec[ti]);
+ } else {
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ ClassKind ck = ClassMap[R->getSuperClasses()[1]];
+
+ if (ck == ClassNone)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+ OS << GenBuiltin(name, Proto, TypeVec[ti], ck);
+ }
+ if (!define)
+ OS << " }";
+ OS << "\n";
+ }
+ OS << "\n";
+ }
+ OS << "#undef __ai\n\n";
+ OS << "#endif /* __ARM_NEON_H */\n";
+}
+
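Concretely, the vector-typedef loop in run() emits one line per (size, element type) pair; for the 8-bit integer element the first pass would produce something close to the following (spelling inferred from the format strings above, so treat it as illustrative):

    typedef __attribute__(( __vector_size__(8) ))  int8_t __neon_int8x8_t;
    typedef __attribute__(( __vector_size__(16) )) int8_t __neon_int8x16_t;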
+static unsigned RangeFromType(StringRef typestr) {
+ // Determine the base type encoded in the type string.
+ bool quad = false, dummy = false;
+ char type = ClassifyType(typestr, quad, dummy, dummy);
+
+ switch (type) {
+ case 'c':
+ return (8 << (int)quad) - 1;
+ case 'h':
+ case 's':
+ return (4 << (int)quad) - 1;
+ case 'f':
+ case 'i':
+ return (2 << (int)quad) - 1;
+ case 'l':
+ return (1 << (int)quad) - 1;
+ default:
+ throw "unhandled type!";
+ break;
+ }
+}
+
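RangeFromType returns the largest valid lane number for a type: the element count of the 64-bit vector, doubled for quad types, minus one. The same arithmetic as a sketch, assuming the usual NEON element widths:

    #include <cstdio>

    // Max lane index = (elements in a 64-bit vector << quad) - 1,
    // mirroring the switch in RangeFromType.
    static unsigned maxLane(unsigned eltBits, bool quad) {
      return ((64 / eltBits) << (quad ? 1 : 0)) - 1;
    }

    int main() {
      printf("%u\n", maxLane(8, false));  // 'c': 7
      printf("%u\n", maxLane(16, true));  // 's' on a quad register: 7
      printf("%u\n", maxLane(32, false)); // 'i' or 'f': 1
      return 0;
    }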
+/// runHeader - Emit a file with sections defining:
+/// 1. the NEON section of BuiltinsARM.def.
+/// 2. the SemaChecking code for the type overload checking.
+/// 3. the SemaChecking code for validation of intrinsic immediate arguments.
+void NeonEmitter::runHeader(raw_ostream &OS) {
+ std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
+
+ StringMap<OpKind> EmittedMap;
+
+ // Generate BuiltinsARM.def for NEON
+ OS << "#ifdef GET_NEON_BUILTINS\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string Proto = R->getValueAsString("Prototype");
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ std::string Types = R->getValueAsString("Types");
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ std::string name = LowercaseString(R->getName());
+ ClassKind ck = ClassMap[R->getSuperClasses()[1]];
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ // Generate the BuiltinsARM.def declaration for this builtin, ensuring
+ // that each unique BUILTIN() macro appears only once in the output
+ // stream.
+ std::string bd = GenBuiltinDef(name, Proto, TypeVec[ti], ck);
+ if (EmittedMap.count(bd))
+ continue;
+
+ EmittedMap[bd] = OpNone;
+ OS << bd << "\n";
+ }
+ }
+ OS << "#endif\n\n";
+
+ // Generate the overloaded type checking code for SemaChecking.cpp
+ OS << "#ifdef GET_NEON_OVERLOAD_CHECK\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+ std::string name = LowercaseString(R->getName());
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ // Functions that have a scalar argument cannot be overloaded, so there is
+ // no need to check them when emitting the type checking code.
+ if (Proto.find('s') != std::string::npos)
+ continue;
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ int si = -1, qi = -1;
+ unsigned mask = 0, qmask = 0;
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ // Generate the switch case(s) for this builtin for the type validation.
+ bool quad = false, poly = false, usgn = false;
+ (void) ClassifyType(TypeVec[ti], quad, poly, usgn);
+
+ if (quad) {
+ qi = ti;
+ qmask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
+ } else {
+ si = ti;
+ mask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
+ }
+ }
+ if (mask)
+ OS << "case ARM::BI__builtin_neon_"
+ << MangleName(name, TypeVec[si], ClassB)
+ << ": mask = " << "0x" << utohexstr(mask) << "; break;\n";
+ if (qmask)
+ OS << "case ARM::BI__builtin_neon_"
+ << MangleName(name, TypeVec[qi], ClassB)
+ << ": mask = " << "0x" << utohexstr(qmask) << "; break;\n";
+ }
+ OS << "#endif\n\n";
+
+ // Generate the intrinsic range checking code for shift/lane immediates.
+ OS << "#ifdef GET_NEON_IMMEDIATE_CHECK\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string name = LowercaseString(R->getName());
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ // Functions which do not have an immediate do not need to have range
+ // checking code emitted.
+ if (Proto.find('i') == std::string::npos)
+ continue;
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ ClassKind ck = ClassMap[R->getSuperClasses()[1]];
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ std::string namestr, shiftstr, rangestr;
+
+ // Builtins which are overloaded by type will need to have their upper
+ // bound computed at Sema time based on the type constant.
+ if (Proto.find('s') == std::string::npos) {
+ ck = ClassB;
+ if (R->getValueAsBit("isShift")) {
+ shiftstr = ", true";
+
+ // Right shifts have an 'r' in the name, left shifts do not.
+ if (name.find('r') != std::string::npos)
+ rangestr = "l = 1; ";
+ }
+ rangestr += "u = RFT(TV" + shiftstr + ")";
+ } else {
+ rangestr = "u = " + utostr(RangeFromType(TypeVec[ti]));
+ }
+ // Make sure cases appear only once by uniquing them in a string map.
+ namestr = MangleName(name, TypeVec[ti], ck);
+ if (EmittedMap.count(namestr))
+ continue;
+ EmittedMap[namestr] = OpNone;
+
+ // Calculate the index of the immediate that should be range checked.
+ unsigned immidx = 0;
+
+ // Builtins that return a struct of multiple vectors have an extra
+ // leading arg for the struct return.
+ if (Proto[0] == '2' || Proto[0] == '3' || Proto[0] == '4')
+ ++immidx;
+
+ // Add one to the index for each argument until we reach the immediate
+ // to be checked. Structs of vectors are passed as multiple arguments.
+ for (unsigned ii = 1, ie = Proto.size(); ii != ie; ++ii) {
+ switch (Proto[ii]) {
+ default: immidx += 1; break;
+ case '2': immidx += 2; break;
+ case '3': immidx += 3; break;
+ case '4': immidx += 4; break;
+ case 'i': ie = ii + 1; break;
+ }
+ }
+ OS << "case ARM::BI__builtin_neon_" << MangleName(name, TypeVec[ti], ck)
+ << ": i = " << immidx << "; " << rangestr << "; break;\n";
+ }
+ }
+ OS << "#endif\n\n";
+}
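The third section of runHeader boils down to one switch case per immediate-taking builtin, handing Sema the argument index to check ('i') and its inclusive upper bound ('u'). The shape of one emitted case, with a hypothetical builtin name and bounds:

    case ARM::BI__builtin_neon_vget_lane_i32: i = 1; u = 1; break;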
diff --git a/contrib/llvm/utils/TableGen/NeonEmitter.h b/contrib/llvm/utils/TableGen/NeonEmitter.h
new file mode 100644
index 0000000..6c6760d
--- /dev/null
+++ b/contrib/llvm/utils/TableGen/NeonEmitter.h
@@ -0,0 +1,122 @@
+//===- NeonEmitter.h - Generate arm_neon.h for use with clang ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting arm_neon.h, which includes
+// a declaration and definition of each function specified by the ARM NEON
+// compiler interface. See ARM document DUI0348B.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NEON_EMITTER_H
+#define NEON_EMITTER_H
+
+#include "Record.h"
+#include "TableGenBackend.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+
+enum OpKind {
+ OpNone,
+ OpAdd,
+ OpSub,
+ OpMul,
+ OpMla,
+ OpMls,
+ OpMulN,
+ OpMlaN,
+ OpMlsN,
+ OpEq,
+ OpGe,
+ OpLe,
+ OpGt,
+ OpLt,
+ OpNeg,
+ OpNot,
+ OpAnd,
+ OpOr,
+ OpXor,
+ OpAndNot,
+ OpOrNot,
+ OpCast,
+ OpConcat,
+ OpDup,
+ OpHi,
+ OpLo,
+ OpSelect,
+ OpRev16,
+ OpRev32,
+ OpRev64
+};
+
+enum ClassKind {
+ ClassNone,
+ ClassI,
+ ClassS,
+ ClassW,
+ ClassB
+};
+
+namespace llvm {
+
+ class NeonEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+ StringMap<OpKind> OpMap;
+ DenseMap<Record*, ClassKind> ClassMap;
+
+ public:
+ NeonEmitter(RecordKeeper &R) : Records(R) {
+ OpMap["OP_NONE"] = OpNone;
+ OpMap["OP_ADD"] = OpAdd;
+ OpMap["OP_SUB"] = OpSub;
+ OpMap["OP_MUL"] = OpMul;
+ OpMap["OP_MLA"] = OpMla;
+ OpMap["OP_MLS"] = OpMls;
+ OpMap["OP_MUL_N"] = OpMulN;
+ OpMap["OP_MLA_N"] = OpMlaN;
+ OpMap["OP_MLS_N"] = OpMlsN;
+ OpMap["OP_EQ"] = OpEq;
+ OpMap["OP_GE"] = OpGe;
+ OpMap["OP_LE"] = OpLe;
+ OpMap["OP_GT"] = OpGt;
+ OpMap["OP_LT"] = OpLt;
+ OpMap["OP_NEG"] = OpNeg;
+ OpMap["OP_NOT"] = OpNot;
+ OpMap["OP_AND"] = OpAnd;
+ OpMap["OP_OR"] = OpOr;
+ OpMap["OP_XOR"] = OpXor;
+ OpMap["OP_ANDN"] = OpAndNot;
+ OpMap["OP_ORN"] = OpOrNot;
+ OpMap["OP_CAST"] = OpCast;
+ OpMap["OP_CONC"] = OpConcat;
+ OpMap["OP_HI"] = OpHi;
+ OpMap["OP_LO"] = OpLo;
+ OpMap["OP_DUP"] = OpDup;
+ OpMap["OP_SEL"] = OpSelect;
+ OpMap["OP_REV16"] = OpRev16;
+ OpMap["OP_REV32"] = OpRev32;
+ OpMap["OP_REV64"] = OpRev64;
+
+ Record *SI = R.getClass("SInst");
+ Record *II = R.getClass("IInst");
+ Record *WI = R.getClass("WInst");
+ ClassMap[SI] = ClassS;
+ ClassMap[II] = ClassI;
+ ClassMap[WI] = ClassW;
+ }
+
+ // run - Emit arm_neon.h.inc
+ void run(raw_ostream &o);
+
+ // runHeader - Emit all the __builtin prototypes used in arm_neon.h
+ void runHeader(raw_ostream &o);
+ };
+
+} // End llvm namespace
+
+#endif
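Wired into the tblgen driver (see the TableGen.cpp hunk further down), the backend is invoked like any other TableGenBackend. A minimal usage sketch, assuming a RecordKeeper already populated by parsing arm_neon.td:

    #include "NeonEmitter.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    // Emit both outputs for one parsed arm_neon.td; 'Records' comes from the
    // driver's parse step, and the streams are already open for writing.
    static void emitNeon(RecordKeeper &Records, raw_ostream &HeaderOS,
                         raw_ostream &SemaOS) {
      NeonEmitter(Records).run(HeaderOS);      // arm_neon.h
      NeonEmitter(Records).runHeader(SemaOS);  // BuiltinsARM.def + Sema checks
    }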
diff --git a/contrib/llvm/utils/TableGen/Record.cpp b/contrib/llvm/utils/TableGen/Record.cpp
index 4f9f604..d2cf379 100644
--- a/contrib/llvm/utils/TableGen/Record.cpp
+++ b/contrib/llvm/utils/TableGen/Record.cpp
@@ -270,7 +270,15 @@ Init *RecordRecTy::convertValue(TypedInit *TI) {
}
bool RecordRecTy::baseClassOf(const RecordRecTy *RHS) const {
- return Rec == RHS->getRecord() || RHS->getRecord()->isSubClassOf(Rec);
+ if (Rec == RHS->getRecord() || RHS->getRecord()->isSubClassOf(Rec))
+ return true;
+
+ const std::vector<Record*> &SC = Rec->getSuperClasses();
+ for (unsigned i = 0, e = SC.size(); i != e; ++i)
+ if (RHS->getRecord()->isSubClassOf(SC[i]))
+ return true;
+
+ return false;
}
@@ -721,9 +729,20 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
break;
}
case EQ: {
- // Make sure we've resolved
+ // Try to fold the eq comparison for 'bit' and 'int' operands; otherwise
+ // fall back to comparing string objects.
+ IntInit* L =
+ dynamic_cast<IntInit*>(LHS->convertInitializerTo(new IntRecTy()));
+ IntInit* R =
+ dynamic_cast<IntInit*>(RHS->convertInitializerTo(new IntRecTy()));
+
+ if (L && R)
+ return new IntInit(L->getValue() == R->getValue());
+
StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
StringInit *RHSs = dynamic_cast<StringInit*>(RHS);
+
+ // Make sure both operands have resolved to strings.
if (LHSs && RHSs)
return new IntInit(LHSs->getValue() == RHSs->getValue());
@@ -971,6 +990,8 @@ Init *TernOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) {
case IF: {
IntInit *LHSi = dynamic_cast<IntInit*>(LHS);
+ if (Init *I = LHS->convertInitializerTo(new IntRecTy()))
+ LHSi = dynamic_cast<IntInit*>(I);
if (LHSi) {
if (LHSi->getValue()) {
return MHS;
@@ -990,6 +1011,8 @@ Init *TernOpInit::resolveReferences(Record &R, const RecordVal *RV) {
if (Opc == IF && lhs != LHS) {
IntInit *Value = dynamic_cast<IntInit*>(lhs);
+ if (Init *I = lhs->convertInitializerTo(new IntRecTy()))
+ Value = dynamic_cast<IntInit*>(I);
if (Value != 0) {
// Short-circuit
if (Value->getValue()) {
@@ -1239,7 +1262,7 @@ Init *DagInit::resolveReferences(Record &R, const RecordVal *RV) {
Init *Op = Val->resolveReferences(R, RV);
if (Args != NewArgs || Op != Val)
- return new DagInit(Op, "", NewArgs, ArgNames);
+ return new DagInit(Op, ValName, NewArgs, ArgNames);
return this;
}
diff --git a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index c99bbd9..a3ca0bc 100644
--- a/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -82,6 +82,7 @@ void RegisterInfoEmitter::runHeader(raw_ostream &OS) {
<< " { return false; }\n"
<< " unsigned getSubReg(unsigned RegNo, unsigned Index) const;\n"
<< " unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const;\n"
+ << " unsigned composeSubRegIndices(unsigned, unsigned) const;\n"
<< "};\n\n";
const std::vector<CodeGenRegisterClass> &RegisterClasses =
@@ -95,7 +96,7 @@ void RegisterInfoEmitter::runHeader(raw_ostream &OS) {
for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i) {
if (i) OS << ",\n";
OS << " " << RegisterClasses[i].getName() << "RegClassID";
- OS << " = " << (i+1);
+ OS << " = " << i;
}
OS << "\n };\n\n";
@@ -171,14 +172,28 @@ static void addSubSuperReg(Record *R, Record *S,
addSubSuperReg(R, *I, SubRegs, SuperRegs, Aliases);
}
-// Map SubRegIndex -> Register
-typedef std::map<Record*, Record*, LessRecord> SubRegMap;
-// Map Register -> SubRegMap
-typedef std::map<Record*, SubRegMap> AllSubRegMap;
+struct RegisterMaps {
+ // Map SubRegIndex -> Register
+ typedef std::map<Record*, Record*, LessRecord> SubRegMap;
+ // Map Register -> SubRegMap
+ typedef std::map<Record*, SubRegMap> SubRegMaps;
+
+ SubRegMaps SubReg;
+ SubRegMap &inferSubRegIndices(Record *Reg);
+
+ // Composite SubRegIndex instances.
+ // Map (SubRegIndex,SubRegIndex) -> SubRegIndex
+ typedef DenseMap<std::pair<Record*,Record*>,Record*> CompositeMap;
+ CompositeMap Composite;
+
+ // Compute SubRegIndex compositions after inferSubRegIndices has run on all
+ // registers.
+ void computeComposites();
+};
// Calculate all subregindices for Reg. Loopy subregs cause infinite recursion.
-static SubRegMap &inferSubRegIndices(Record *Reg, AllSubRegMap &ASRM) {
- SubRegMap &SRM = ASRM[Reg];
+RegisterMaps::SubRegMap &RegisterMaps::inferSubRegIndices(Record *Reg) {
+ SubRegMap &SRM = SubReg[Reg];
if (!SRM.empty())
return SRM;
std::vector<Record*> SubRegs = Reg->getValueAsListOfDefs("SubRegs");
@@ -191,7 +206,7 @@ static SubRegMap &inferSubRegIndices(Record *Reg, AllSubRegMap &ASRM) {
if (!SRM.insert(std::make_pair(Indices[i], SubRegs[i])).second)
throw "SubRegIndex " + Indices[i]->getName()
+ " appears twice in Register " + Reg->getName();
- inferSubRegIndices(SubRegs[i], ASRM);
+ inferSubRegIndices(SubRegs[i]);
}
// Keep track of inherited subregs and how they can be reached.
@@ -202,7 +217,7 @@ static SubRegMap &inferSubRegIndices(Record *Reg, AllSubRegMap &ASRM) {
// Clone inherited subregs. Here the order is important - earlier subregs take
// precedence.
for (unsigned i = 0, e = SubRegs.size(); i != e; ++i) {
- SubRegMap &M = ASRM[SubRegs[i]];
+ SubRegMap &M = SubReg[SubRegs[i]];
for (SubRegMap::iterator si = M.begin(), se = M.end(); si != se; ++si)
if (!SRM.insert(*si).second)
Orphans[si->second] = std::make_pair(Indices[i], si->first);
@@ -226,8 +241,8 @@ static SubRegMap &inferSubRegIndices(Record *Reg, AllSubRegMap &ASRM) {
DefInit *IdxInit = dynamic_cast<DefInit*>(*di);
if (!IdxInit || !IdxInit->getDef()->isSubClassOf("SubRegIndex"))
throw "Invalid SubClassIndex in " + Pat->getAsString();
- SubRegMap::const_iterator ni = ASRM[R2].find(IdxInit->getDef());
- if (ni == ASRM[R2].end())
+ SubRegMap::const_iterator ni = SubReg[R2].find(IdxInit->getDef());
+ if (ni == SubReg[R2].end())
throw "Composite " + Pat->getAsString() + " refers to bad index in "
+ R2->getName();
R2 = ni->second;
@@ -255,6 +270,62 @@ static SubRegMap &inferSubRegIndices(Record *Reg, AllSubRegMap &ASRM) {
return SRM;
}
+void RegisterMaps::computeComposites() {
+ for (SubRegMaps::const_iterator sri = SubReg.begin(), sre = SubReg.end();
+ sri != sre; ++sri) {
+ Record *Reg1 = sri->first;
+ const SubRegMap &SRM1 = sri->second;
+ for (SubRegMap::const_iterator i1 = SRM1.begin(), e1 = SRM1.end();
+ i1 != e1; ++i1) {
+ Record *Idx1 = i1->first;
+ Record *Reg2 = i1->second;
+ // Ignore identity compositions.
+ if (Reg1 == Reg2)
+ continue;
+ // If Reg2 has no subregs, Idx1 doesn't compose.
+ if (!SubReg.count(Reg2))
+ continue;
+ const SubRegMap &SRM2 = SubReg[Reg2];
+ // Try composing Idx1 with another SubRegIndex.
+ for (SubRegMap::const_iterator i2 = SRM2.begin(), e2 = SRM2.end();
+ i2 != e2; ++i2) {
+ std::pair<Record*,Record*> IdxPair(Idx1, i2->first);
+ Record *Reg3 = i2->second;
+ // OK, Reg1:IdxPair == Reg3. Find the index with Reg1:Idx == Reg3.
+ for (SubRegMap::const_iterator i1d = SRM1.begin(), e1d = SRM1.end();
+ i1d != e1d; ++i1d) {
+ // Ignore identity compositions.
+ if (Reg2 == Reg3)
+ continue;
+ if (i1d->second == Reg3) {
+ std::pair<CompositeMap::iterator,bool> Ins =
+ Composite.insert(std::make_pair(IdxPair, i1d->first));
+ // Conflicting composition?
+ if (!Ins.second && Ins.first->second != i1d->first) {
+ errs() << "Error: SubRegIndex " << getQualifiedName(Idx1)
+ << " and " << getQualifiedName(IdxPair.second)
+ << " compose ambiguously as "
+ << getQualifiedName(Ins.first->second) << " or "
+ << getQualifiedName(i1d->first) << "\n";
+ abort();
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // We don't care about the difference between (Idx1, Idx2) -> Idx2 and invalid
+ // compositions, so remove any mappings of that form.
+ for (CompositeMap::iterator i = Composite.begin(), e = Composite.end();
+ i != e;) {
+ CompositeMap::iterator j = i;
+ ++i;
+ if (j->first.second == j->second)
+ Composite.erase(j);
+ }
+}
+
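computeComposites feeds the composeSubRegIndices method that run() emits below: a nested switch returning the composed index where one exists and falling back to IdxB otherwise. The shape of the generated code for a hypothetical target (target name and index names invented):

    // unsigned XYZGenRegisterInfo::composeSubRegIndices(unsigned IdxA,
    //                                                   unsigned IdxB) const {
    //   switch (IdxA) {
    //   default: return IdxB;
    //   case XYZ::dsub_1:
    //     switch (IdxB) {
    //     default: return IdxB;
    //     case XYZ::ssub_0: return XYZ::ssub_2;  // dsub_1:ssub_0 == ssub_2
    //     case XYZ::ssub_1: return XYZ::ssub_3;
    //     }
    //   }
    // }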
class RegisterSorter {
private:
std::map<Record*, std::set<Record*>, LessRecord> &RegisterSubRegs;
@@ -836,7 +907,7 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
std::string ClassName = Target.getName() + "GenRegisterInfo";
// Calculate the mapping of subregister+index pairs to physical registers.
- AllSubRegMap AllSRM;
+ RegisterMaps RegMaps;
// Emit the subregister + index mapping function based on the information
// calculated above.
@@ -845,14 +916,14 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
<< " switch (RegNo) {\n"
<< " default:\n return 0;\n";
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
- SubRegMap &SRM = inferSubRegIndices(Regs[i].TheDef, AllSRM);
+ RegisterMaps::SubRegMap &SRM = RegMaps.inferSubRegIndices(Regs[i].TheDef);
if (SRM.empty())
continue;
OS << " case " << getQualifiedName(Regs[i].TheDef) << ":\n";
OS << " switch (Index) {\n";
OS << " default: return 0;\n";
- for (SubRegMap::const_iterator ii = SRM.begin(), ie = SRM.end(); ii != ie;
- ++ii)
+ for (RegisterMaps::SubRegMap::const_iterator ii = SRM.begin(),
+ ie = SRM.end(); ii != ie; ++ii)
OS << " case " << getQualifiedName(ii->first)
<< ": return " << getQualifiedName(ii->second) << ";\n";
OS << " };\n" << " break;\n";
@@ -866,12 +937,12 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
<< " switch (RegNo) {\n"
<< " default:\n return 0;\n";
for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
- SubRegMap &SRM = AllSRM[Regs[i].TheDef];
+ RegisterMaps::SubRegMap &SRM = RegMaps.SubReg[Regs[i].TheDef];
if (SRM.empty())
continue;
OS << " case " << getQualifiedName(Regs[i].TheDef) << ":\n";
- for (SubRegMap::const_iterator ii = SRM.begin(), ie = SRM.end(); ii != ie;
- ++ii)
+ for (RegisterMaps::SubRegMap::const_iterator ii = SRM.begin(),
+ ie = SRM.end(); ii != ie; ++ii)
OS << " if (SubRegNo == " << getQualifiedName(ii->second)
<< ") return " << getQualifiedName(ii->first) << ";\n";
OS << " return 0;\n";
@@ -879,7 +950,32 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
OS << " };\n";
OS << " return 0;\n";
OS << "}\n\n";
-
+
+ // Emit composeSubRegIndices
+ RegMaps.computeComposites();
+ OS << "unsigned " << ClassName
+ << "::composeSubRegIndices(unsigned IdxA, unsigned IdxB) const {\n"
+ << " switch (IdxA) {\n"
+ << " default:\n return IdxB;\n";
+ for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) {
+ bool Open = false;
+ for (unsigned j = 0; j != e; ++j) {
+ if (Record *Comp = RegMaps.Composite.lookup(
+ std::make_pair(SubRegIndices[i], SubRegIndices[j]))) {
+ if (!Open) {
+ OS << " case " << getQualifiedName(SubRegIndices[i])
+ << ": switch(IdxB) {\n default: return IdxB;\n";
+ Open = true;
+ }
+ OS << " case " << getQualifiedName(SubRegIndices[j])
+ << ": return " << getQualifiedName(Comp) << ";\n";
+ }
+ }
+ if (Open)
+ OS << " }\n";
+ }
+ OS << " }\n}\n\n";
+
// Emit the constructor of the class...
OS << ClassName << "::" << ClassName
<< "(int CallFrameSetupOpcode, int CallFrameDestroyOpcode)\n"
diff --git a/contrib/llvm/utils/TableGen/TGParser.cpp b/contrib/llvm/utils/TableGen/TGParser.cpp
index 8c158e0..f81aabe 100644
--- a/contrib/llvm/utils/TableGen/TGParser.cpp
+++ b/contrib/llvm/utils/TableGen/TGParser.cpp
@@ -1635,13 +1635,12 @@ bool TGParser::ParseObjectBody(Record *CurRec) {
return ParseBody(CurRec);
}
-
/// ParseDef - Parse and return a top level or multiclass def, return the record
/// corresponding to it. This returns null on error.
///
/// DefInst ::= DEF ObjectName ObjectBody
///
-llvm::Record *TGParser::ParseDef(MultiClass *CurMultiClass) {
+bool TGParser::ParseDef(MultiClass *CurMultiClass) {
SMLoc DefLoc = Lex.getLoc();
assert(Lex.getCode() == tgtok::Def && "Unknown tok");
Lex.Lex(); // Eat the 'def' token.
@@ -1655,7 +1654,7 @@ llvm::Record *TGParser::ParseDef(MultiClass *CurMultiClass) {
// Ensure redefinition doesn't happen.
if (Records.getDef(CurRec->getName())) {
Error(DefLoc, "def '" + CurRec->getName() + "' already defined");
- return 0;
+ return true;
}
Records.addDef(CurRec);
} else {
@@ -1664,20 +1663,33 @@ llvm::Record *TGParser::ParseDef(MultiClass *CurMultiClass) {
if (CurMultiClass->DefPrototypes[i]->getName() == CurRec->getName()) {
Error(DefLoc, "def '" + CurRec->getName() +
"' already defined in this multiclass!");
- return 0;
+ return true;
}
CurMultiClass->DefPrototypes.push_back(CurRec);
}
if (ParseObjectBody(CurRec))
- return 0;
+ return true;
if (CurMultiClass == 0) // Def's in multiclasses aren't really defs.
CurRec->resolveReferences();
// If ObjectBody has template arguments, it's an error.
assert(CurRec->getTemplateArgs().empty() && "How'd this get template args?");
- return CurRec;
+
+ if (CurMultiClass) {
+ // Copy the template arguments for the multiclass into the def.
+ const std::vector<std::string> &TArgs =
+ CurMultiClass->Rec.getTemplateArgs();
+
+ for (unsigned i = 0, e = TArgs.size(); i != e; ++i) {
+ const RecordVal *RV = CurMultiClass->Rec.getValue(TArgs[i]);
+ assert(RV && "Template arg doesn't exist?");
+ CurRec->addValue(*RV);
+ }
+ }
+
+ return false;
}
@@ -1758,12 +1770,12 @@ std::vector<LetRecord> TGParser::ParseLetList() {
}
/// ParseTopLevelLet - Parse a 'let' at top level. This can be a couple of
-/// different related productions.
+/// different related productions. This works inside multiclasses too.
///
/// Object ::= LET LetList IN '{' ObjectList '}'
/// Object ::= LET LetList IN Object
///
-bool TGParser::ParseTopLevelLet() {
+bool TGParser::ParseTopLevelLet(MultiClass *CurMultiClass) {
assert(Lex.getCode() == tgtok::Let && "Unexpected token");
Lex.Lex();
@@ -1779,7 +1791,7 @@ bool TGParser::ParseTopLevelLet() {
// If this is a scalar let, just handle it now
if (Lex.getCode() != tgtok::l_brace) {
// LET LetList IN Object
- if (ParseObject())
+ if (ParseObject(CurMultiClass))
return true;
} else { // Object ::= LETCommand '{' ObjectList '}'
SMLoc BraceLoc = Lex.getLoc();
@@ -1787,7 +1799,7 @@ bool TGParser::ParseTopLevelLet() {
Lex.Lex(); // eat the '{'.
// Parse the object list.
- if (ParseObjectList())
+ if (ParseObjectList(CurMultiClass))
return true;
if (Lex.getCode() != tgtok::r_brace) {
@@ -1802,29 +1814,6 @@ bool TGParser::ParseTopLevelLet() {
return false;
}
-/// ParseMultiClassDef - Parse a def in a multiclass context.
-///
-/// MultiClassDef ::= DefInst
-///
-bool TGParser::ParseMultiClassDef(MultiClass *CurMC) {
- if (Lex.getCode() != tgtok::Def)
- return TokError("expected 'def' in multiclass body");
-
- Record *D = ParseDef(CurMC);
- if (D == 0) return true;
-
- // Copy the template arguments for the multiclass into the def.
- const std::vector<std::string> &TArgs = CurMC->Rec.getTemplateArgs();
-
- for (unsigned i = 0, e = TArgs.size(); i != e; ++i) {
- const RecordVal *RV = CurMC->Rec.getValue(TArgs[i]);
- assert(RV && "Template arg doesn't exist?");
- D->addValue(*RV);
- }
-
- return false;
-}
-
/// ParseMultiClass - Parse a multiclass definition.
///
/// MultiClassInst ::= MULTICLASS ID TemplateArgList?
@@ -1885,10 +1874,18 @@ bool TGParser::ParseMultiClass() {
if (Lex.Lex() == tgtok::r_brace) // eat the '{'.
return TokError("multiclass must contain at least one def");
- while (Lex.getCode() != tgtok::r_brace)
- if (ParseMultiClassDef(CurMultiClass))
- return true;
-
+ while (Lex.getCode() != tgtok::r_brace) {
+ switch (Lex.getCode()) {
+ default:
+ return TokError("expected 'let', 'def' or 'defm' in multiclass body");
+ case tgtok::Let:
+ case tgtok::Def:
+ case tgtok::Defm:
+ if (ParseObject(CurMultiClass))
+ return true;
+ break;
+ }
+ }
Lex.Lex(); // eat the '}'.
}
@@ -1900,7 +1897,7 @@ bool TGParser::ParseMultiClass() {
///
/// DefMInst ::= DEFM ID ':' DefmSubClassRef ';'
///
-bool TGParser::ParseDefm() {
+bool TGParser::ParseDefm(MultiClass *CurMultiClass) {
assert(Lex.getCode() == tgtok::Defm && "Unexpected token!");
if (Lex.Lex() != tgtok::Id) // eat the defm.
return TokError("expected identifier after defm");
@@ -1910,6 +1907,12 @@ bool TGParser::ParseDefm() {
if (Lex.Lex() != tgtok::colon)
return TokError("expected ':' after defm identifier");
+ // Keep track of the newly generated record definitions.
+ std::vector<Record*> NewRecDefs;
+
+ // Does this record also inherit from a regular (non-multiclass) class?
+ bool InheritFromClass = false;
+
// eat the colon.
Lex.Lex();
@@ -1991,17 +1994,87 @@ bool TGParser::ParseDefm() {
return Error(DefmPrefixLoc, "def '" + CurRec->getName() +
"' already defined, instantiating defm with subdef '" +
DefProto->getName() + "'");
- Records.addDef(CurRec);
- CurRec->resolveReferences();
+
+ // Don't create a top level definition for defm inside multiclasses;
+ // instead, only update the prototypes and bind the template args
+ // to the newly created definition.
+ if (CurMultiClass) {
+ for (unsigned i = 0, e = CurMultiClass->DefPrototypes.size();
+ i != e; ++i) {
+ if (CurMultiClass->DefPrototypes[i]->getName() == CurRec->getName()) {
+ Error(DefmPrefixLoc, "defm '" + CurRec->getName() +
+ "' already defined in this multiclass!");
+ return true;
+ }
+ }
+ CurMultiClass->DefPrototypes.push_back(CurRec);
+
+ // Copy the template arguments for the multiclass into the new def.
+ const std::vector<std::string> &TA =
+ CurMultiClass->Rec.getTemplateArgs();
+
+ for (unsigned i = 0, e = TA.size(); i != e; ++i) {
+ const RecordVal *RV = CurMultiClass->Rec.getValue(TA[i]);
+ assert(RV && "Template arg doesn't exist?");
+ CurRec->addValue(*RV);
+ }
+ } else {
+ Records.addDef(CurRec);
+ }
+
+ NewRecDefs.push_back(CurRec);
}
if (Lex.getCode() != tgtok::comma) break;
Lex.Lex(); // eat ','.
SubClassLoc = Lex.getLoc();
+
+ // A defm can inherit from regular classes (non-multiclass) as
+ // long as they come at the end of the inheritance list.
+ InheritFromClass = (Records.getClass(Lex.getCurStrVal()) != 0);
+
+ if (InheritFromClass)
+ break;
+
Ref = ParseSubClassReference(0, true);
}
+ if (InheritFromClass) {
+ // Process all the classes to inherit as if they were part of a
+ // regular 'def' and inherit all record values.
+ SubClassReference SubClass = ParseSubClassReference(0, false);
+ while (1) {
+ // Check for error.
+ if (SubClass.Rec == 0) return true;
+
+ // Get the expanded definition prototypes and teach them about
+ // the record values of the class being inherited from.
+ for (unsigned i = 0, e = NewRecDefs.size(); i != e; ++i) {
+ Record *CurRec = NewRecDefs[i];
+
+ // Add it.
+ if (AddSubClass(CurRec, SubClass))
+ return true;
+
+ // Process any variables on the let stack.
+ for (unsigned i = 0, e = LetStack.size(); i != e; ++i)
+ for (unsigned j = 0, e = LetStack[i].size(); j != e; ++j)
+ if (SetValue(CurRec, LetStack[i][j].Loc, LetStack[i][j].Name,
+ LetStack[i][j].Bits, LetStack[i][j].Value))
+ return true;
+ }
+
+ if (Lex.getCode() != tgtok::comma) break;
+ Lex.Lex(); // eat ','.
+ SubClass = ParseSubClassReference(0, false);
+ }
+ }
+
+ if (!CurMultiClass)
+ for (unsigned i = 0, e = NewRecDefs.size(); i != e; ++i)
+ NewRecDefs[i]->resolveReferences();
+
if (Lex.getCode() != tgtok::semi)
return TokError("expected ';' at end of defm");
Lex.Lex();
@@ -2016,12 +2089,12 @@ bool TGParser::ParseDefm() {
/// Object ::= DefMInst
/// Object ::= LETCommand '{' ObjectList '}'
/// Object ::= LETCommand Object
-bool TGParser::ParseObject() {
+bool TGParser::ParseObject(MultiClass *MC) {
switch (Lex.getCode()) {
default: assert(0 && "This is not an object");
- case tgtok::Let: return ParseTopLevelLet();
- case tgtok::Def: return ParseDef(0) == 0;
- case tgtok::Defm: return ParseDefm();
+ case tgtok::Let: return ParseTopLevelLet(MC);
+ case tgtok::Def: return ParseDef(MC);
+ case tgtok::Defm: return ParseDefm(MC);
case tgtok::Class: return ParseClass();
case tgtok::MultiClass: return ParseMultiClass();
}
@@ -2029,9 +2102,9 @@ bool TGParser::ParseObject() {
/// ParseObjectList
/// ObjectList :== Object*
-bool TGParser::ParseObjectList() {
+bool TGParser::ParseObjectList(MultiClass *MC) {
while (isObjectStart(Lex.getCode())) {
- if (ParseObject())
+ if (ParseObject(MC))
return true;
}
return false;
diff --git a/contrib/llvm/utils/TableGen/TGParser.h b/contrib/llvm/utils/TableGen/TGParser.h
index 9f4b634..0aee931 100644
--- a/contrib/llvm/utils/TableGen/TGParser.h
+++ b/contrib/llvm/utils/TableGen/TGParser.h
@@ -69,16 +69,15 @@ private: // Semantic analysis methods.
SubMultiClassReference &SubMultiClass);
private: // Parser methods.
- bool ParseObjectList();
- bool ParseObject();
+ bool ParseObjectList(MultiClass *MC = 0);
+ bool ParseObject(MultiClass *MC);
bool ParseClass();
bool ParseMultiClass();
- bool ParseMultiClassDef(MultiClass *CurMC);
- bool ParseDefm();
- bool ParseTopLevelLet();
+ bool ParseDefm(MultiClass *CurMultiClass);
+ bool ParseDef(MultiClass *CurMultiClass);
+ bool ParseTopLevelLet(MultiClass *CurMultiClass);
std::vector<LetRecord> ParseLetList();
- Record *ParseDef(MultiClass *CurMultiClass);
bool ParseObjectBody(Record *CurRec);
bool ParseBody(Record *CurRec);
bool ParseBodyItem(Record *CurRec);
diff --git a/contrib/llvm/utils/TableGen/TableGen.cpp b/contrib/llvm/utils/TableGen/TableGen.cpp
index 17435f6..7a4f74f 100644
--- a/contrib/llvm/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm/utils/TableGen/TableGen.cpp
@@ -19,6 +19,7 @@
#include "AsmWriterEmitter.h"
#include "CallingConvEmitter.h"
#include "ClangASTNodesEmitter.h"
+#include "ClangAttrEmitter.h"
#include "ClangDiagnosticsEmitter.h"
#include "CodeEmitterGen.h"
#include "DAGISelEmitter.h"
@@ -29,6 +30,7 @@
#include "InstrInfoEmitter.h"
#include "IntrinsicEmitter.h"
#include "LLVMCConfigurationEmitter.h"
+#include "NeonEmitter.h"
#include "OptParserEmitter.h"
#include "Record.h"
#include "RegisterInfoEmitter.h"
@@ -52,8 +54,11 @@ enum ActionType {
GenARMDecoder,
GenDisassembler,
GenCallingConv,
+ GenClangAttrClasses,
+ GenClangAttrList,
GenClangDiagsDefs,
GenClangDiagGroups,
+ GenClangDeclNodes,
GenClangStmtNodes,
GenDAGISel,
GenFastISel,
@@ -63,6 +68,8 @@ enum ActionType {
GenTgtIntrinsic,
GenLLVMCConf,
GenEDHeader, GenEDInfo,
+ GenArmNeon,
+ GenArmNeonSema,
PrintEnums
};
@@ -107,10 +114,16 @@ namespace {
"Generate intrinsic information"),
clEnumValN(GenTgtIntrinsic, "gen-tgt-intrinsic",
"Generate target intrinsic information"),
+ clEnumValN(GenClangAttrClasses, "gen-clang-attr-classes",
+ "Generate clang attribute clases"),
+ clEnumValN(GenClangAttrList, "gen-clang-attr-list",
+ "Generate a clang attribute list"),
clEnumValN(GenClangDiagsDefs, "gen-clang-diags-defs",
"Generate Clang diagnostics definitions"),
clEnumValN(GenClangDiagGroups, "gen-clang-diag-groups",
"Generate Clang diagnostic groups"),
+ clEnumValN(GenClangDeclNodes, "gen-clang-decl-nodes",
+ "Generate Clang AST statement nodes"),
clEnumValN(GenClangStmtNodes, "gen-clang-stmt-nodes",
"Generate Clang AST statement nodes"),
clEnumValN(GenLLVMCConf, "gen-llvmc",
@@ -119,6 +132,10 @@ namespace {
"Generate enhanced disassembly info header"),
clEnumValN(GenEDInfo, "gen-enhanced-disassembly-info",
"Generate enhanced disassembly info"),
+ clEnumValN(GenArmNeon, "gen-arm-neon",
+ "Generate arm_neon.h for clang"),
+ clEnumValN(GenArmNeonSema, "gen-arm-neon-sema",
+ "Generate ARM NEON sema support for clang"),
clEnumValN(PrintEnums, "print-enums",
"Print enum values for a class"),
clEnumValEnd));
@@ -191,105 +208,117 @@ int main(int argc, char **argv) {
if (ParseFile(InputFilename, IncludeDirs, SrcMgr))
return 1;
- raw_ostream *Out = &outs();
- if (OutputFilename != "-") {
- std::string Error;
- Out = new raw_fd_ostream(OutputFilename.c_str(), Error);
-
- if (!Error.empty()) {
- errs() << argv[0] << ": error opening " << OutputFilename
- << ":" << Error << "\n";
- return 1;
- }
-
- // Make sure the file gets removed if *gasp* tablegen crashes...
- sys::RemoveFileOnSignal(sys::Path(OutputFilename));
+ std::string Error;
+ raw_fd_ostream Out(OutputFilename.c_str(), Error);
+ if (!Error.empty()) {
+ errs() << argv[0] << ": error opening " << OutputFilename
+ << ":" << Error << "\n";
+ return 1;
}
+ // Make sure the file gets removed if *gasp* tablegen crashes...
+ sys::RemoveFileOnSignal(sys::Path(OutputFilename));
+
try {
switch (Action) {
case PrintRecords:
- *Out << Records; // No argument, dump all contents
+ Out << Records; // No argument, dump all contents
break;
case GenEmitter:
- CodeEmitterGen(Records).run(*Out);
+ CodeEmitterGen(Records).run(Out);
break;
case GenRegisterEnums:
- RegisterInfoEmitter(Records).runEnums(*Out);
+ RegisterInfoEmitter(Records).runEnums(Out);
break;
case GenRegister:
- RegisterInfoEmitter(Records).run(*Out);
+ RegisterInfoEmitter(Records).run(Out);
break;
case GenRegisterHeader:
- RegisterInfoEmitter(Records).runHeader(*Out);
+ RegisterInfoEmitter(Records).runHeader(Out);
break;
case GenInstrEnums:
- InstrEnumEmitter(Records).run(*Out);
+ InstrEnumEmitter(Records).run(Out);
break;
case GenInstrs:
- InstrInfoEmitter(Records).run(*Out);
+ InstrInfoEmitter(Records).run(Out);
break;
case GenCallingConv:
- CallingConvEmitter(Records).run(*Out);
+ CallingConvEmitter(Records).run(Out);
break;
case GenAsmWriter:
- AsmWriterEmitter(Records).run(*Out);
+ AsmWriterEmitter(Records).run(Out);
break;
case GenARMDecoder:
- ARMDecoderEmitter(Records).run(*Out);
+ ARMDecoderEmitter(Records).run(Out);
break;
case GenAsmMatcher:
- AsmMatcherEmitter(Records).run(*Out);
+ AsmMatcherEmitter(Records).run(Out);
+ break;
+ case GenClangAttrClasses:
+ ClangAttrClassEmitter(Records).run(Out);
+ break;
+ case GenClangAttrList:
+ ClangAttrListEmitter(Records).run(Out);
break;
case GenClangDiagsDefs:
- ClangDiagsDefsEmitter(Records, ClangComponent).run(*Out);
+ ClangDiagsDefsEmitter(Records, ClangComponent).run(Out);
break;
case GenClangDiagGroups:
- ClangDiagGroupsEmitter(Records).run(*Out);
+ ClangDiagGroupsEmitter(Records).run(Out);
+ break;
+ case GenClangDeclNodes:
+ ClangASTNodesEmitter(Records, "Decl", "Decl").run(Out);
+ ClangDeclContextEmitter(Records).run(Out);
break;
case GenClangStmtNodes:
- ClangStmtNodesEmitter(Records).run(*Out);
+ ClangASTNodesEmitter(Records, "Stmt", "").run(Out);
break;
case GenDisassembler:
- DisassemblerEmitter(Records).run(*Out);
+ DisassemblerEmitter(Records).run(Out);
break;
case GenOptParserDefs:
- OptParserEmitter(Records, true).run(*Out);
+ OptParserEmitter(Records, true).run(Out);
break;
case GenOptParserImpl:
- OptParserEmitter(Records, false).run(*Out);
+ OptParserEmitter(Records, false).run(Out);
break;
case GenDAGISel:
- DAGISelEmitter(Records).run(*Out);
+ DAGISelEmitter(Records).run(Out);
break;
case GenFastISel:
- FastISelEmitter(Records).run(*Out);
+ FastISelEmitter(Records).run(Out);
break;
case GenSubtarget:
- SubtargetEmitter(Records).run(*Out);
+ SubtargetEmitter(Records).run(Out);
break;
case GenIntrinsic:
- IntrinsicEmitter(Records).run(*Out);
+ IntrinsicEmitter(Records).run(Out);
break;
case GenTgtIntrinsic:
- IntrinsicEmitter(Records, true).run(*Out);
+ IntrinsicEmitter(Records, true).run(Out);
break;
case GenLLVMCConf:
- LLVMCConfigurationEmitter(Records).run(*Out);
+ LLVMCConfigurationEmitter(Records).run(Out);
break;
case GenEDHeader:
- EDEmitter(Records).runHeader(*Out);
+ EDEmitter(Records).runHeader(Out);
break;
case GenEDInfo:
- EDEmitter(Records).run(*Out);
+ EDEmitter(Records).run(Out);
+ break;
+ case GenArmNeon:
+ NeonEmitter(Records).run(Out);
+ break;
+ case GenArmNeonSema:
+ NeonEmitter(Records).runHeader(Out);
break;
case PrintEnums:
{
std::vector<Record*> Recs = Records.getAllDerivedDefinitions(Class);
for (unsigned i = 0, e = Recs.size(); i != e; ++i)
- *Out << Recs[i]->getName() << ", ";
- *Out << "\n";
+ Out << Recs[i]->getName() << ", ";
+ Out << "\n";
break;
}
default:
@@ -297,8 +326,6 @@ int main(int argc, char **argv) {
return 1;
}
- if (Out != &outs())
- delete Out; // Close the file
return 0;
} catch (const TGError &Error) {
@@ -313,9 +340,7 @@ int main(int argc, char **argv) {
errs() << argv[0] << ": Unknown unexpected exception occurred.\n";
}
- if (Out != &outs()) {
- delete Out; // Close the file
+ if (OutputFilename != "-")
std::remove(OutputFilename.c_str()); // Remove the file, it's broken
- }
return 1;
}
diff --git a/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp b/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
index b7085ae..4dba85b 100644
--- a/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/contrib/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -33,7 +33,7 @@ using namespace llvm;
MAP(C9, 38) \
MAP(E8, 39) \
MAP(F0, 40) \
- MAP(F8, 41) \
+ MAP(F8, 41) \
MAP(F9, 42)
// A clone of X86 since we can't depend on something that is generated.
@@ -212,6 +212,7 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
HasOpSizePrefix = Rec->getValueAsBit("hasOpSizePrefix");
HasREX_WPrefix = Rec->getValueAsBit("hasREX_WPrefix");
+ HasVEX_4VPrefix = Rec->getValueAsBit("hasVEX_4VPrefix");
HasLockPrefix = Rec->getValueAsBit("hasLockPrefix");
IsCodeGenOnly = Rec->getValueAsBit("isCodeGenOnly");
@@ -532,7 +533,13 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
"Unexpected number of operands for MRMSrcRegFrm");
HANDLE_OPERAND(roRegister)
HANDLE_OPERAND(rmRegister)
- HANDLE_OPTIONAL(immediate)
+
+ if (HasVEX_4VPrefix)
+ // FIXME: In AVX, the register below becomes the one encoded
+ // in ModRMVEX and the one above the one in the VEX.VVVV field
+ HANDLE_OPTIONAL(rmRegister)
+ else
+ HANDLE_OPTIONAL(immediate)
break;
case X86Local::MRMSrcMem:
// Operand 1 is a register operand in the Reg/Opcode field.
@@ -541,6 +548,12 @@ void RecognizableInstr::emitInstructionSpecifier(DisassemblerTables &tables) {
assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
"Unexpected number of operands for MRMSrcMemFrm");
HANDLE_OPERAND(roRegister)
+
+ if (HasVEX_4VPrefix)
+ // FIXME: In AVX, the register below becomes the one encoded
+ // in ModRMVEX and the one above the one in the VEX.VVVV field
+ HANDLE_OPTIONAL(rmRegister)
+
HANDLE_OPERAND(memory)
HANDLE_OPTIONAL(immediate)
break;
@@ -823,6 +836,7 @@ OperandType RecognizableInstr::typeFromString(const std::string &s,
TYPE("RST", TYPE_ST)
TYPE("i128mem", TYPE_M128)
TYPE("i64i32imm_pcrel", TYPE_REL64)
+ TYPE("i16imm_pcrel", TYPE_REL16)
TYPE("i32imm_pcrel", TYPE_REL32)
TYPE("SSECC", TYPE_IMM3)
TYPE("brtarget", TYPE_RELv)
@@ -942,6 +956,7 @@ OperandEncoding RecognizableInstr::relocationEncodingFromString
ENCODING("i64i8imm", ENCODING_IB)
ENCODING("i8imm", ENCODING_IB)
ENCODING("i64i32imm_pcrel", ENCODING_ID)
+ ENCODING("i16imm_pcrel", ENCODING_IW)
ENCODING("i32imm_pcrel", ENCODING_ID)
ENCODING("brtarget", ENCODING_Iv)
ENCODING("brtarget8", ENCODING_IB)
diff --git a/contrib/llvm/utils/TableGen/X86RecognizableInstr.h b/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
index 84374b0..db4d96d 100644
--- a/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
+++ b/contrib/llvm/utils/TableGen/X86RecognizableInstr.h
@@ -52,6 +52,8 @@ private:
bool HasOpSizePrefix;
/// The hasREX_WPrefix field from the record
bool HasREX_WPrefix;
+ /// The hasVEX_4VPrefix field from the record
+ bool HasVEX_4VPrefix;
/// The hasLockPrefix field from the record
bool HasLockPrefix;
/// The isCodeGenOnly field from the record
diff --git a/contrib/llvm/utils/buildit/GNUmakefile b/contrib/llvm/utils/buildit/GNUmakefile
index 0f3b7eb..d17585f 100644
--- a/contrib/llvm/utils/buildit/GNUmakefile
+++ b/contrib/llvm/utils/buildit/GNUmakefile
@@ -32,7 +32,7 @@ DSTROOT = $(OBJROOT)/../dst
#######################################################################
-PREFIX = /usr/local
+PREFIX = /Developer/usr/local
# Unless assertions are forced on in the GMAKE command line, disable them.
ifndef ENABLE_ASSERTIONS
@@ -70,7 +70,7 @@ install: $(OBJROOT) $(SYMROOT) $(DSTROOT)
$(RC_ProjectSourceVersion) $(RC_ProjectSourceSubversion)
EmbeddedHosted:
- $(MAKE) ARM_HOSTED_BUILD=yes install
+ $(MAKE) ARM_HOSTED_BUILD=yes PREFIX=/usr install
# installhdrs does nothing, because the headers aren't useful until
# the compiler is installed.
diff --git a/contrib/llvm/utils/buildit/build_llvm b/contrib/llvm/utils/buildit/build_llvm
index a9a11d9..37ef16e 100755
--- a/contrib/llvm/utils/buildit/build_llvm
+++ b/contrib/llvm/utils/buildit/build_llvm
@@ -63,16 +63,6 @@ DIR=`pwd`
DARWIN_VERS=`uname -r | sed 's/\..*//'`
echo DARWIN_VERS = $DARWIN_VERS
-DEVELOPER_DIR="${DEVELOPER_DIR-Developer}"
-if [ "$ARM_HOSTED_BUILD" = yes ]; then
- DT_HOME="$DEST_DIR/usr"
- HOST_SDKROOT=$SDKROOT
-else
- DT_HOME="$DEST_DIR/$DEVELOPER_DIR/usr"
-fi
-
-DEST_ROOT="/$DEVELOPER_DIR$DEST_ROOT"
-
################################################################################
# Run the build.
@@ -132,16 +122,14 @@ elif [ $DARWIN_VERS -gt 9 ]; then
fi
if [ "$ARM_HOSTED_BUILD" = yes ]; then
- configure_prefix=$DT_HOME
configure_opts="--enable-targets=arm --host=arm-apple-darwin10 \
--target=arm-apple-darwin10 --build=i686-apple-darwin10"
else
- configure_prefix=$DT_HOME/local
configure_opts="--enable-targets=arm,x86,powerpc,cbe"
fi
if [ \! -f Makefile.config ]; then
- $SRC_DIR/configure --prefix=$configure_prefix $configure_opts \
+ $SRC_DIR/configure --prefix=$DEST_DIR$DEST_ROOT $configure_opts \
--enable-assertions=$LLVM_ASSERTIONS \
--enable-optimized=$LLVM_OPTIMIZED \
--disable-bindings \
@@ -195,9 +183,10 @@ if [ "x$MAJ_VER" != "x4" -o "x$MIN_VER" != "x0" ]; then
fi
make $JOBS_FLAG $OPTIMIZE_OPTS UNIVERSAL=1 UNIVERSAL_ARCH="$HOSTS" \
- UNIVERSAL_SDK_PATH=$HOST_SDKROOT \
+ UNIVERSAL_SDK_PATH=$SDKROOT \
NO_RUNTIME_LIBS=1 \
DISABLE_EDIS=1 \
+ DEBUG_SYMBOLS=1 \
LLVM_SUBMIT_VERSION=$LLVM_SUBMIT_VERSION \
LLVM_SUBMIT_SUBVERSION=$LLVM_SUBMIT_SUBVERSION \
CXXFLAGS="-DLLVM_VERSION_INFO='\" Apple Build #$LLVM_VERSION\"'" \
@@ -223,6 +212,7 @@ cd $DIR/obj-llvm || exit 1
make $LOCAL_MAKEFLAGS $OPTIMIZE_OPTS UNIVERSAL=1 UNIVERSAL_ARCH="$HOSTS" \
NO_RUNTIME_LIBS=1 \
DISABLE_EDIS=1 \
+ DEBUG_SYMBOLS=1 \
LLVM_SUBMIT_VERSION=$LLVM_SUBMIT_VERSION \
LLVM_SUBMIT_SUBVERSION=$LLVM_SUBMIT_SUBVERSION \
OPTIMIZE_OPTION='-O3' VERBOSE=1 install
@@ -243,14 +233,17 @@ echo "#define LLVM_MINOR_VERSION ${RC_ProjectSourceSubversion}" >> $DEST_DIR$DES
if [ "x$LLVM_DEBUG" != "x1" ]; then
# Strip local symbols from llvm libraries.
- strip -S $DEST_DIR$DEST_ROOT/lib/*.[oa]
+ #
+ # Use '-l' to strip i386 modules. N.B. that flag doesn't work with kext or
+ # PPC objects!
+ strip -Sl $DEST_DIR$DEST_ROOT/lib/*.[oa]
for f in `ls $DEST_DIR$DEST_ROOT/lib/*.so`; do
- strip -Sx $f
+ strip -Sxl $f
done
fi
# Copy over the tblgen utility.
-cp `find $DIR -name tblgen` $DT_HOME/local/bin
+cp `find $DIR -name tblgen` $DEST_DIR$DEST_ROOT/bin
# Remove .dir files
cd $DEST_DIR$DEST_ROOT
@@ -269,16 +262,8 @@ else
-exec lipo -extract ppc7400 -extract i386 -extract x86_64 {} -output {} \;
fi
-cd $DEST_DIR$DEST_ROOT
-if [ "$INSTALL_LIBLTO" == yes ]; then
- mkdir -p $DT_HOME/lib
- mv lib/libLTO.dylib $DT_HOME/lib/libLTO.dylib
- strip -S $DT_HOME/lib/libLTO.dylib
-fi
-rm -f lib/libLTO.a lib/libLTO.la
-
# The Hello dylib is an example of how to build a pass. No need to install it.
-rm lib/libLLVMHello.dylib
+rm $DEST_DIR$DEST_ROOT/lib/LLVMHello.dylib
# Compress manpages
MDIR=$DEST_DIR$DEST_ROOT/share/man/man1
@@ -324,25 +309,53 @@ find obj-* -name \*.\[chy\] -o -name \*.cpp -print \
| cpio -pdml $SYM_DIR/src || exit 1
################################################################################
+# Install and strip libLTO.dylib
+
+cd $DEST_DIR$DEST_ROOT
+if [ "$INSTALL_LIBLTO" = "yes" ]; then
+ DT_HOME="$DEST_DIR/Developer/usr"
+ mkdir -p $DT_HOME/lib
+ mv lib/libLTO.dylib $DT_HOME/lib/libLTO.dylib
+
+ # Use '-l' to strip i386 modules. N.B. that flag doesn't work with kext or
+ # PPC objects!
+ strip -arch all -Sl $DT_HOME/lib/libLTO.dylib
+else
+ rm -f lib/libLTO.dylib
+fi
+rm -f lib/libLTO.a lib/libLTO.la
+
+################################################################################
# Remove debugging information from DEST_DIR.
+cd $DIR || exit 1
+
find $DEST_DIR -name \*.a -print | xargs ranlib || exit 1
find $DEST_DIR -name \*.dSYM -print | xargs rm -r || exit 1
+
+# Strip debugging information from files
+#
+# Use '-l' to strip i386 modules. N.B. that flag doesn't work with kext or
+# PPC objects!
+find $DEST_DIR -perm -0111 -type f \
+ ! \( -name '*.la' -o -name gccas -o -name gccld -o -name llvm-config \) \
+ -print | xargs -n 1 -P ${SYSCTL} strip -arch all -Sl
+
chgrp -h -R wheel $DEST_DIR
chgrp -R wheel $DEST_DIR
################################################################################
-# Remove tar ball from docs directory
+# Remove the docs directory
-find $DEST_DIR -name html.tar.gz -exec rm {} \;
+rm -rf $DEST_DIR$DEST_ROOT/docs
################################################################################
# symlinks so that B&I can find things
-if [ "$INSTALL_LIBLTO" == yes ]; then
+if [ "$INSTALL_LIBLTO" = "yes" ]; then
mkdir -p $DEST_DIR/usr/lib/
cd $DEST_DIR/usr/lib && \
- ln -s ../../$DEVELOPER_DIR/usr/lib/libLTO.dylib ./libLTO.dylib
+ ln -s ../../Developer/usr/lib/libLTO.dylib ./libLTO.dylib
fi
################################################################################
diff --git a/contrib/llvm/utils/count/count.c b/contrib/llvm/utils/count/count.c
index a37e1e0..ae96791 100644
--- a/contrib/llvm/utils/count/count.c
+++ b/contrib/llvm/utils/count/count.c
@@ -26,13 +26,15 @@ int main(int argc, char **argv) {
}
NumLines = 0;
- while ((NumRead = fread(Buffer, 1, sizeof(Buffer), stdin))) {
+ do {
unsigned i;
+ NumRead = fread(Buffer, 1, sizeof(Buffer), stdin);
+
for (i = 0; i != NumRead; ++i)
if (Buffer[i] == '\n')
++NumLines;
- }
+ } while (NumRead == sizeof(Buffer));
if (!feof(stdin)) {
fprintf(stderr, "%s: error reading stdin\n", argv[0]);
diff --git a/contrib/llvm/utils/lit/lit/TestRunner.py b/contrib/llvm/utils/lit/lit/TestRunner.py
index d10e4b0..cdf1c93 100644
--- a/contrib/llvm/utils/lit/lit/TestRunner.py
+++ b/contrib/llvm/utils/lit/lit/TestRunner.py
@@ -13,11 +13,13 @@ class InternalShellError(Exception):
self.command = command
self.message = message
+kIsWindows = platform.system() == 'Windows'
+
# Don't use close_fds on Windows.
-kUseCloseFDs = platform.system() != 'Windows'
+kUseCloseFDs = not kIsWindows
# Use temporary files to replace /dev/null on Windows.
-kAvoidDevNull = platform.system() == 'Windows'
+kAvoidDevNull = kIsWindows
def executeCommand(command, cwd=None, env=None):
p = subprocess.Popen(command, cwd=cwd,
@@ -64,6 +66,7 @@ def executeShCmd(cmd, cfg, cwd, results):
input = subprocess.PIPE
stderrTempFiles = []
opened_files = []
+ named_temp_files = []
# To avoid deadlock, we use a single stderr stream for piped
# output. This is null until we have seen some output using
# stderr.
@@ -146,6 +149,15 @@ def executeShCmd(cmd, cfg, cwd, results):
if not args[0]:
raise InternalShellError(j, '%r: command not found' % j.args[0])
+ # Replace uses of /dev/null with temporary files.
+ if kAvoidDevNull:
+ for i,arg in enumerate(args):
+ if arg == "/dev/null":
+ f = tempfile.NamedTemporaryFile(delete=False)
+ f.close()
+ named_temp_files.append(f.name)
+ args[i] = f.name
+
procs.append(subprocess.Popen(args, cwd=cwd,
stdin = stdin,
stdout = stdout,
@@ -207,6 +219,13 @@ def executeShCmd(cmd, cfg, cwd, results):
for f in opened_files:
f.close()
+ # Remove any named temporary files we created.
+ for f in named_temp_files:
+ try:
+ os.remove(f)
+ except OSError:
+ pass
+
if cmd.negate:
exitCode = not exitCode
@@ -364,7 +383,7 @@ def isExpectedFail(xfails, xtargets, target_triple):
return True
-def parseIntegratedTestScript(test):
+def parseIntegratedTestScript(test, normalize_slashes=False):
"""parseIntegratedTestScript - Scan an LLVM/Clang style integrated test
script and extract the lines to 'RUN' as well as 'XFAIL' and 'XTARGET'
information. The RUN lines also will have variable substitution performed.
@@ -375,18 +394,25 @@ def parseIntegratedTestScript(test):
#
# FIXME: This should not be here?
sourcepath = test.getSourcePath()
+ sourcedir = os.path.dirname(sourcepath)
execpath = test.getExecPath()
execdir,execbase = os.path.split(execpath)
tmpBase = os.path.join(execdir, 'Output', execbase)
if test.index is not None:
tmpBase += '_%d' % test.index
+ # Normalize slashes, if requested.
+ if normalize_slashes:
+ sourcepath = sourcepath.replace('\\', '/')
+ sourcedir = sourcedir.replace('\\', '/')
+ tmpBase = tmpBase.replace('\\', '/')
+
# We use #_MARKER_# to hide %% while we do the other substitutions.
substitutions = [('%%', '#_MARKER_#')]
substitutions.extend(test.config.substitutions)
substitutions.extend([('%s', sourcepath),
- ('%S', os.path.dirname(sourcepath)),
- ('%p', os.path.dirname(sourcepath)),
+ ('%S', sourcedir),
+ ('%p', sourcedir),
('%t', tmpBase + '.tmp'),
# FIXME: Remove this once we kill DejaGNU.
('%abs_tmp', tmpBase + '.tmp'),
@@ -462,7 +488,9 @@ def executeTclTest(test, litConfig):
if test.config.unsupported:
return (Test.UNSUPPORTED, 'Test is unsupported')
- res = parseIntegratedTestScript(test)
+ # Parse the test script, normalizing slashes in substitutions on Windows
+    # (since otherwise Tcl-style lexing will treat them as escapes).
+ res = parseIntegratedTestScript(test, normalize_slashes=kIsWindows)
if len(res) == 2:
return res
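
A note on the normalize_slashes flag introduced above: for the Tcl-lexed tests, a backslash begins an escape sequence, so Windows path separators must be rewritten to forward slashes before substitution into RUN lines. The patch does this in Python; purely as an illustration, the same transform in C++ (NormalizeSlashes is a made-up name):

    #include <algorithm>
    #include <string>

    std::string NormalizeSlashes(std::string path) {
      // Forward slashes are accepted by both Tcl and the Windows API.
      std::replace(path.begin(), path.end(), '\\', '/');
      return path;
    }
    // NormalizeSlashes("C:\\tests\\foo.ll") == "C:/tests/foo.ll"
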
diff --git a/contrib/llvm/utils/unittest/UnitTestMain/Makefile b/contrib/llvm/utils/unittest/UnitTestMain/Makefile
index 5c10049..cec654f 100644
--- a/contrib/llvm/utils/unittest/UnitTestMain/Makefile
+++ b/contrib/llvm/utils/unittest/UnitTestMain/Makefile
@@ -20,7 +20,11 @@ CPP.Flags += $(NO_MISSING_FIELD_INITIALIZERS) $(NO_VARIADIC_MACROS)
CPP.Flags += -DGTEST_HAS_RTTI=0
# libstdc++'s TR1 <tuple> header depends on RTTI and uses C++0x features not
# supported by Clang, so force googletest to use its own tuple implementation.
-# When we import googletest >=1.4.0, we can drop this line.
-CPP.Flags += -DGTEST_HAS_TR1_TUPLE=0
+CPP.Flags += -DGTEST_USE_OWN_TR1_TUPLE
+
+# Disable pthreads if LLVM was configured without them.
+ifneq ($(HAVE_PTHREAD), 1)
+ CPP.Flags += -DGTEST_HAS_PTHREAD=0
+endif
include $(LEVEL)/Makefile.common
diff --git a/contrib/llvm/utils/unittest/googletest/Makefile b/contrib/llvm/utils/unittest/googletest/Makefile
index 1ec979d..21b29ff 100644
--- a/contrib/llvm/utils/unittest/googletest/Makefile
+++ b/contrib/llvm/utils/unittest/googletest/Makefile
@@ -23,9 +23,12 @@ CPP.Flags += $(NO_MISSING_FIELD_INITIALIZERS) $(NO_VARIADIC_MACROS)
CPP.Flags += -DGTEST_HAS_RTTI=0
# libstdc++'s TR1 <tuple> header depends on RTTI and uses C++0x features not
# supported by Clang, so force googletest to use its own tuple implementation.
-# When we import googletest >=1.4.0, we can drop this line.
-CPP.Flags += -DGTEST_HAS_TR1_TUPLE=0
+CPP.Flags += -DGTEST_USE_OWN_TR1_TUPLE
+# Disable pthreads if LLVM was configured without them.
+ifneq ($(HAVE_PTHREAD), 1)
+ CPP.Flags += -DGTEST_HAS_PTHREAD=0
+endif
ifeq ($(HOST_OS),MingW)
CPP.Flags += -DGTEST_OS_WINDOWS=1
diff --git a/contrib/llvm/utils/unittest/googletest/README.LLVM b/contrib/llvm/utils/unittest/googletest/README.LLVM
index e907a5e..d6e6f98 100644
--- a/contrib/llvm/utils/unittest/googletest/README.LLVM
+++ b/contrib/llvm/utils/unittest/googletest/README.LLVM
@@ -1,14 +1,14 @@
LLVM notes
----------
-This directory contains Google Test 1.2.1, with all elements removed except for
+This directory contains Google Test 1.5.0, with all elements removed except for
the actual source code, to minimize the addition to the LLVM distribution.
Cleaned up as follows:
# Remove all the unnecessary files and directories
-$ rm -f aclocal* configure* Makefile* CHANGES CONTRIBUTORS README
-$ rm -rf build-aux m4 make msvc samples scons scripts test xcode
+$ rm -f aclocal* CMakeLists.txt configure* Makefile* CHANGES CONTRIBUTORS README
+$ rm -rf build-aux codegear fused-src m4 make msvc samples scripts test xcode
$ rm -f `find . -name \*\.pump`
# Move all the source files to the current directory
@@ -21,11 +21,11 @@ $ mv *.h include/gtest/internal/
# Update paths to the included files
$ perl -pi -e 's|^#include "src/|#include "gtest/internal/|' *.cc
-$ rm -f gtest-all.cc gtest_main.cc
-
$ mv COPYING LICENSE.TXT
Modified as follows:
* To GTestStreamToHelper in include/gtest/internal/gtest-internal.h,
added the ability to stream with raw_os_ostream.
+* To refresh Haiku support in include/gtest/internal/gtest-port.h,
+ see http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20100621/102898.html
diff --git a/contrib/llvm/utils/unittest/googletest/gtest-death-test.cc b/contrib/llvm/utils/unittest/googletest/gtest-death-test.cc
index 617e301..e4199de 100644
--- a/contrib/llvm/utils/unittest/googletest/gtest-death-test.cc
+++ b/contrib/llvm/utils/unittest/googletest/gtest-death-test.cc
@@ -27,17 +27,31 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Author: wan@google.com (Zhanyong Wan)
+// Author: wan@google.com (Zhanyong Wan), vladl@google.com (Vlad Losev)
//
// This file implements death tests.
#include <gtest/gtest-death-test.h>
#include <gtest/internal/gtest-port.h>
-#ifdef GTEST_HAS_DEATH_TEST
+#if GTEST_HAS_DEATH_TEST
+
+#if GTEST_OS_MAC
+#include <crt_externs.h>
+#endif // GTEST_OS_MAC
+
#include <errno.h>
+#include <fcntl.h>
#include <limits.h>
#include <stdarg.h>
+
+#if GTEST_OS_WINDOWS
+#include <windows.h>
+#else
+#include <sys/mman.h>
+#include <sys/wait.h>
+#endif // GTEST_OS_WINDOWS
+
#endif // GTEST_HAS_DEATH_TEST
#include <gtest/gtest-message.h>
@@ -48,9 +62,9 @@
// included, or there will be a compiler error. This trick is to
// prevent a user from accidentally including gtest-internal-inl.h in
// his code.
-#define GTEST_IMPLEMENTATION
+#define GTEST_IMPLEMENTATION_ 1
#include "gtest/internal/gtest-internal-inl.h"
-#undef GTEST_IMPLEMENTATION
+#undef GTEST_IMPLEMENTATION_
namespace testing {
@@ -68,6 +82,18 @@ GTEST_DEFINE_string_(
"\"fast\" (child process runs the death test immediately "
"after forking).");
+GTEST_DEFINE_bool_(
+ death_test_use_fork,
+ internal::BoolFromGTestEnv("death_test_use_fork", false),
+ "Instructs to use fork()/_exit() instead of clone() in death tests. "
+ "Ignored and always uses fork() on POSIX systems where clone() is not "
+ "implemented. Useful when running under valgrind or similar tools if "
+ "those do not support clone(). Valgrind 3.3.1 will just fail if "
+ "it sees an unsupported combination of clone() flags. "
+ "It is not recommended to use this flag w/o valgrind though it will "
+ "work in 99% of the cases. Once valgrind is fixed, this flag will "
+ "most likely be removed.");
+
namespace internal {
GTEST_DEFINE_string_(
internal_run_death_test, "",
@@ -79,7 +105,7 @@ GTEST_DEFINE_string_(
"death test. FOR INTERNAL USE ONLY.");
} // namespace internal
-#ifdef GTEST_HAS_DEATH_TEST
+#if GTEST_HAS_DEATH_TEST
// ExitedWithCode constructor.
ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) {
@@ -87,9 +113,14 @@ ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) {
// ExitedWithCode function-call operator.
bool ExitedWithCode::operator()(int exit_status) const {
+#if GTEST_OS_WINDOWS
+ return exit_status == exit_code_;
+#else
return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_;
+#endif // GTEST_OS_WINDOWS
}
+#if !GTEST_OS_WINDOWS
// KilledBySignal constructor.
KilledBySignal::KilledBySignal(int signum) : signum_(signum) {
}
@@ -98,6 +129,7 @@ KilledBySignal::KilledBySignal(int signum) : signum_(signum) {
bool KilledBySignal::operator()(int exit_status) const {
return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_;
}
+#endif // !GTEST_OS_WINDOWS
namespace internal {
@@ -107,6 +139,9 @@ namespace internal {
// specified by wait(2).
static String ExitSummary(int exit_code) {
Message m;
+#if GTEST_OS_WINDOWS
+ m << "Exited with exit status " << exit_code;
+#else
if (WIFEXITED(exit_code)) {
m << "Exited with exit status " << WEXITSTATUS(exit_code);
} else if (WIFSIGNALED(exit_code)) {
@@ -117,6 +152,7 @@ static String ExitSummary(int exit_code) {
m << " (core dumped)";
}
#endif
+#endif // GTEST_OS_WINDOWS
return m.GetString();
}
@@ -126,6 +162,7 @@ bool ExitedUnsuccessfully(int exit_status) {
return !ExitedWithCode(0)(exit_status);
}
+#if !GTEST_OS_WINDOWS
// Generates a textual failure message when a death test finds more than
// one thread running, or cannot determine the number of threads, prior
// to executing the given statement. It is the responsibility of the
@@ -133,17 +170,14 @@ bool ExitedUnsuccessfully(int exit_status) {
static String DeathTestThreadWarning(size_t thread_count) {
Message msg;
msg << "Death tests use fork(), which is unsafe particularly"
- << " in a threaded context. For this test, " << GTEST_NAME << " ";
+ << " in a threaded context. For this test, " << GTEST_NAME_ << " ";
if (thread_count == 0)
msg << "couldn't detect the number of threads.";
else
msg << "detected " << thread_count << " threads.";
return msg.GetString();
}
-
-// Static string containing a description of the outcome of the
-// last death test.
-static String last_death_test_message;
+#endif // !GTEST_OS_WINDOWS
// Flag characters for reporting a death test that did not die.
static const char kDeathTestLived = 'L';
@@ -159,29 +193,25 @@ static const char kDeathTestInternalError = 'I';
enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED };
// Routine for aborting the program which is safe to call from an
-// exec-style death test child process, in which case the the error
+// exec-style death test child process, in which case the error
// message is propagated back to the parent process. Otherwise, the
// message is simply printed to stderr. In either case, the program
// then exits with status 1.
-void DeathTestAbort(const char* format, ...) {
- // This function may be called from a threadsafe-style death test
- // child process, which operates on a very small stack. Use the
- // heap for any additional non-miniscule memory requirements.
+void DeathTestAbort(const String& message) {
+ // On a POSIX system, this function may be called from a threadsafe-style
+ // death test child process, which operates on a very small stack. Use
+ // the heap for any additional non-minuscule memory requirements.
const InternalRunDeathTestFlag* const flag =
GetUnitTestImpl()->internal_run_death_test_flag();
- va_list args;
- va_start(args, format);
-
if (flag != NULL) {
- FILE* parent = fdopen(flag->status_fd, "w");
+ FILE* parent = posix::FDOpen(flag->write_fd(), "w");
fputc(kDeathTestInternalError, parent);
- vfprintf(parent, format, args);
- fclose(parent);
- va_end(args);
+ fprintf(parent, "%s", message.c_str());
+ fflush(parent);
_exit(1);
} else {
- vfprintf(stderr, format, args);
- va_end(args);
+ fprintf(stderr, "%s", message.c_str());
+ fflush(stderr);
abort();
}
}
@@ -190,11 +220,12 @@ void DeathTestAbort(const char* format, ...) {
// fails.
#define GTEST_DEATH_TEST_CHECK_(expression) \
do { \
- if (!(expression)) { \
- DeathTestAbort("CHECK failed: File %s, line %d: %s", \
- __FILE__, __LINE__, #expression); \
+ if (!::testing::internal::IsTrue(expression)) { \
+ DeathTestAbort(::testing::internal::String::Format( \
+ "CHECK failed: File %s, line %d: %s", \
+ __FILE__, __LINE__, #expression)); \
} \
- } while (0)
+ } while (::testing::internal::AlwaysFalse())
// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for
// evaluating any system call that fulfills two conditions: it must return
@@ -205,15 +236,46 @@ void DeathTestAbort(const char* format, ...) {
// something other than EINTR, DeathTestAbort is called.
#define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \
do { \
- int retval; \
+ int gtest_retval; \
do { \
- retval = (expression); \
- } while (retval == -1 && errno == EINTR); \
- if (retval == -1) { \
- DeathTestAbort("CHECK failed: File %s, line %d: %s != -1", \
- __FILE__, __LINE__, #expression); \
+ gtest_retval = (expression); \
+ } while (gtest_retval == -1 && errno == EINTR); \
+ if (gtest_retval == -1) { \
+ DeathTestAbort(::testing::internal::String::Format( \
+ "CHECK failed: File %s, line %d: %s != -1", \
+ __FILE__, __LINE__, #expression)); \
} \
- } while (0)
+ } while (::testing::internal::AlwaysFalse())
+
+// Returns the message describing the last system error in errno.
+String GetLastErrnoDescription() {
+ return String(errno == 0 ? "" : posix::StrError(errno));
+}
+
+// This is called from a death test parent process to read a failure
+// message from the death test child process and log it with the FATAL
+// severity. On Windows, the message is read from a pipe handle. On other
+// platforms, it is read from a file descriptor.
+static void FailFromInternalError(int fd) {
+ Message error;
+ char buffer[256];
+ int num_read;
+
+ do {
+ while ((num_read = posix::Read(fd, buffer, 255)) > 0) {
+ buffer[num_read] = '\0';
+ error << buffer;
+ }
+ } while (num_read == -1 && errno == EINTR);
+
+ if (num_read == 0) {
+ GTEST_LOG_(FATAL) << error.GetString();
+ } else {
+ const int last_error = errno;
+ GTEST_LOG_(FATAL) << "Error while reading death test internal: "
+ << GetLastErrnoDescription() << " [" << last_error << "]";
+ }
+}
// Death test constructor. Increments the running death test count
// for the current test.
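
Two idioms in the GTEST_DEATH_TEST_CHECK_ macros above are worth spelling out: wrapping the body in do { ... } while (...) makes a multi-statement macro behave as a single statement (so it composes safely with an unbraced if/else), and calling AlwaysFalse() instead of writing the literal 0 sidesteps MSVC's "conditional expression is constant" warning (C4127). A reduced sketch with stand-in names:

    namespace sketch {
    inline bool AlwaysFalse() { return false; }  // defeats constant-condition warnings
    }

    #define SKETCH_CHECK_(expression)          \
      do {                                     \
        if (!(expression)) {                   \
          /* report the failure and abort */   \
        }                                      \
      } while (::sketch::AlwaysFalse())

    // Expands to exactly one statement, so this parses as intended:
    //   if (armed) SKETCH_CHECK_(fd != -1); else CleanUp();
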
@@ -234,143 +296,146 @@ bool DeathTest::Create(const char* statement, const RE* regex,
}
const char* DeathTest::LastMessage() {
- return last_death_test_message.c_str();
+ return last_death_test_message_.c_str();
}
-// ForkingDeathTest provides implementations for most of the abstract
-// methods of the DeathTest interface. Only the AssumeRole method is
-// left undefined.
-class ForkingDeathTest : public DeathTest {
- public:
- ForkingDeathTest(const char* statement, const RE* regex);
+void DeathTest::set_last_death_test_message(const String& message) {
+ last_death_test_message_ = message;
+}
- // All of these virtual functions are inherited from DeathTest.
- virtual int Wait();
- virtual bool Passed(bool status_ok);
- virtual void Abort(AbortReason reason);
+String DeathTest::last_death_test_message_;
+// Provides cross platform implementation for some death functionality.
+class DeathTestImpl : public DeathTest {
protected:
- void set_forked(bool forked) { forked_ = forked; }
- void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
+ DeathTestImpl(const char* a_statement, const RE* a_regex)
+ : statement_(a_statement),
+ regex_(a_regex),
+ spawned_(false),
+ status_(-1),
+ outcome_(IN_PROGRESS),
+ read_fd_(-1),
+ write_fd_(-1) {}
+
+ // read_fd_ is expected to be closed and cleared by a derived class.
+ ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); }
+
+ void Abort(AbortReason reason);
+ virtual bool Passed(bool status_ok);
+
+ const char* statement() const { return statement_; }
+ const RE* regex() const { return regex_; }
+ bool spawned() const { return spawned_; }
+ void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
+ int status() const { return status_; }
+ void set_status(int a_status) { status_ = a_status; }
+ DeathTestOutcome outcome() const { return outcome_; }
+ void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; }
+ int read_fd() const { return read_fd_; }
void set_read_fd(int fd) { read_fd_ = fd; }
+ int write_fd() const { return write_fd_; }
void set_write_fd(int fd) { write_fd_ = fd; }
+ // Called in the parent process only. Reads the result code of the death
+ // test child process via a pipe, interprets it to set the outcome_
+ // member, and closes read_fd_. Outputs diagnostics and terminates in
+ // case of unexpected codes.
+ void ReadAndInterpretStatusByte();
+
private:
- // The textual content of the code this object is testing.
+ // The textual content of the code this object is testing. This class
+ // doesn't own this string and should not attempt to delete it.
const char* const statement_;
- // The regular expression which test output must match.
+ // The regular expression which test output must match. DeathTestImpl
+ // doesn't own this object and should not attempt to delete it.
const RE* const regex_;
- // True if the death test successfully forked.
- bool forked_;
- // PID of child process during death test; 0 in the child process itself.
- pid_t child_pid_;
- // File descriptors for communicating the death test's status byte.
- int read_fd_; // Always -1 in the child process.
- int write_fd_; // Always -1 in the parent process.
+ // True if the death test child process has been successfully spawned.
+ bool spawned_;
// The exit status of the child process.
int status_;
// How the death test concluded.
DeathTestOutcome outcome_;
+ // Descriptor to the read end of the pipe to the child process. It is
+ // always -1 in the child process. The child keeps its write end of the
+ // pipe in write_fd_.
+ int read_fd_;
+ // Descriptor to the child's write end of the pipe to the parent process.
+ // It is always -1 in the parent process. The parent keeps its end of the
+ // pipe in read_fd_.
+ int write_fd_;
};
-// Constructs a ForkingDeathTest.
-ForkingDeathTest::ForkingDeathTest(const char* statement, const RE* regex)
- : DeathTest(),
- statement_(statement),
- regex_(regex),
- forked_(false),
- child_pid_(-1),
- read_fd_(-1),
- write_fd_(-1),
- status_(-1),
- outcome_(IN_PROGRESS) {
-}
-
-// Reads an internal failure message from a file descriptor, then calls
-// LOG(FATAL) with that message. Called from a death test parent process
-// to read a failure message from the death test child process.
-static void FailFromInternalError(int fd) {
- Message error;
- char buffer[256];
- ssize_t num_read;
-
- do {
- while ((num_read = read(fd, buffer, 255)) > 0) {
- buffer[num_read] = '\0';
- error << buffer;
- }
- } while (num_read == -1 && errno == EINTR);
-
- // TODO(smcafee): Maybe just FAIL the test instead?
- if (num_read == 0) {
- GTEST_LOG_(FATAL, error);
- } else {
- GTEST_LOG_(FATAL,
- Message() << "Error while reading death test internal: "
- << strerror(errno) << " [" << errno << "]");
- }
-}
-
-// Waits for the child in a death test to exit, returning its exit
-// status, or 0 if no child process exists. As a side effect, sets the
-// outcome data member.
-int ForkingDeathTest::Wait() {
- if (!forked_)
- return 0;
+// Called in the parent process only. Reads the result code of the death
+// test child process via a pipe, interprets it to set the outcome_
+// member, and closes read_fd_. Outputs diagnostics and terminates in
+// case of unexpected codes.
+void DeathTestImpl::ReadAndInterpretStatusByte() {
+ char flag;
+ int bytes_read;
// The read() here blocks until data is available (signifying the
// failure of the death test) or until the pipe is closed (signifying
// its success), so it's okay to call this in the parent before
// the child process has exited.
- char flag;
- ssize_t bytes_read;
-
do {
- bytes_read = read(read_fd_, &flag, 1);
+ bytes_read = posix::Read(read_fd(), &flag, 1);
} while (bytes_read == -1 && errno == EINTR);
if (bytes_read == 0) {
- outcome_ = DIED;
+ set_outcome(DIED);
} else if (bytes_read == 1) {
switch (flag) {
case kDeathTestReturned:
- outcome_ = RETURNED;
+ set_outcome(RETURNED);
break;
case kDeathTestLived:
- outcome_ = LIVED;
+ set_outcome(LIVED);
break;
case kDeathTestInternalError:
- FailFromInternalError(read_fd_); // Does not return.
+ FailFromInternalError(read_fd()); // Does not return.
break;
default:
- GTEST_LOG_(FATAL,
- Message() << "Death test child process reported unexpected "
- << "status byte (" << static_cast<unsigned int>(flag)
- << ")");
+ GTEST_LOG_(FATAL) << "Death test child process reported "
+ << "unexpected status byte ("
+ << static_cast<unsigned int>(flag) << ")";
}
} else {
- GTEST_LOG_(FATAL,
- Message() << "Read from death test child process failed: "
- << strerror(errno));
+ GTEST_LOG_(FATAL) << "Read from death test child process failed: "
+ << GetLastErrnoDescription();
}
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd()));
+ set_read_fd(-1);
+}
- GTEST_DEATH_TEST_CHECK_SYSCALL_(close(read_fd_));
- GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_, 0));
- return status_;
+// Signals that the death test code which should have exited, didn't.
+// Should be called only in a death test child process.
+// Writes a status byte to the child's status file descriptor, then
+// calls _exit(1).
+void DeathTestImpl::Abort(AbortReason reason) {
+ // The parent process considers the death test to be a failure if
+ // it finds any data in our pipe. So, here we write a single flag byte
+ // to the pipe, then exit.
+ const char status_ch =
+ reason == TEST_DID_NOT_DIE ? kDeathTestLived : kDeathTestReturned;
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1));
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(write_fd()));
+ _exit(1); // Exits w/o any normal exit hooks (we were supposed to crash)
}
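
The child-to-parent protocol implemented by ReadAndInterpretStatusByte() and Abort() above is deliberately minimal: the child writes one flag byte only when something went wrong ('L' if the statement lived, 'R' if it returned, 'I' for an internal error), while a test that dies as expected simply closes the pipe, so the parent's blocking read returns 0. A reduced sketch of the parent side, assuming POSIX read(2) (names are illustrative, not the gtest API):

    #include <cerrno>
    #include <unistd.h>

    enum Outcome { DIED, LIVED, RETURNED, INTERNAL_ERROR };

    Outcome ReadStatusByte(int read_fd) {
      char flag;
      ssize_t n;
      do {                        // retry reads interrupted by signals
        n = read(read_fd, &flag, 1);
      } while (n == -1 && errno == EINTR);

      if (n == 0)                 // EOF: the child died without writing a byte
        return DIED;
      switch (flag) {             // one byte encodes the failure mode
        case 'L': return LIVED;
        case 'R': return RETURNED;
        default:  return INTERNAL_ERROR;
      }
    }
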
// Assesses the success or failure of a death test, using both private
// members which have previously been set, and one argument:
//
// Private data members:
-// outcome: an enumeration describing how the death test
+// outcome: An enumeration describing how the death test
// concluded: DIED, LIVED, or RETURNED. The death test fails
-// in the latter two cases
-// status: the exit status of the child process, in the format
-// specified by wait(2)
-// regex: a regular expression object to be applied to
+// in the latter two cases.
+// status: The exit status of the child process. On *nix, it is in the
+// in the format specified by wait(2). On Windows, this is the
+// value supplied to the ExitProcess() API or a numeric code
+// of the exception that terminated the program.
+// regex: A regular expression object to be applied to
// the test's captured standard error output; the death test
-// fails if it does not match
+// fails if it does not match.
//
// Argument:
// status_ok: true if exit_status is acceptable in the context of
@@ -378,22 +443,18 @@ int ForkingDeathTest::Wait() {
//
// Returns true iff all of the above conditions are met. Otherwise, the
// first failing condition, in the order given above, is the one that is
-// reported. Also sets the static variable last_death_test_message.
-bool ForkingDeathTest::Passed(bool status_ok) {
- if (!forked_)
+// reported. Also sets the last death test message string.
+bool DeathTestImpl::Passed(bool status_ok) {
+ if (!spawned())
return false;
-#if GTEST_HAS_GLOBAL_STRING
- const ::string error_message = GetCapturedStderr();
-#else
- const ::std::string error_message = GetCapturedStderr();
-#endif // GTEST_HAS_GLOBAL_STRING
+ const String error_message = GetCapturedStderr();
bool success = false;
Message buffer;
- buffer << "Death test: " << statement_ << "\n";
- switch (outcome_) {
+ buffer << "Death test: " << statement() << "\n";
+ switch (outcome()) {
case LIVED:
buffer << " Result: failed to die.\n"
<< " Error msg: " << error_message;
@@ -404,49 +465,271 @@ bool ForkingDeathTest::Passed(bool status_ok) {
break;
case DIED:
if (status_ok) {
- if (RE::PartialMatch(error_message, *regex_)) {
+ const bool matched = RE::PartialMatch(error_message.c_str(), *regex());
+ if (matched) {
success = true;
} else {
buffer << " Result: died but not with expected error.\n"
- << " Expected: " << regex_->pattern() << "\n"
+ << " Expected: " << regex()->pattern() << "\n"
<< "Actual msg: " << error_message;
}
} else {
buffer << " Result: died but not with expected exit code:\n"
- << " " << ExitSummary(status_) << "\n";
+ << " " << ExitSummary(status()) << "\n";
}
break;
case IN_PROGRESS:
default:
- GTEST_LOG_(FATAL,
- "DeathTest::Passed somehow called before conclusion of test");
+ GTEST_LOG_(FATAL)
+ << "DeathTest::Passed somehow called before conclusion of test";
}
- last_death_test_message = buffer.GetString();
+ DeathTest::set_last_death_test_message(buffer.GetString());
return success;
}
-// Signals that the death test code which should have exited, didn't.
-// Should be called only in a death test child process.
-// Writes a status byte to the child's status file desriptor, then
-// calls _exit(1).
-void ForkingDeathTest::Abort(AbortReason reason) {
- // The parent process considers the death test to be a failure if
- // it finds any data in our pipe. So, here we write a single flag byte
- // to the pipe, then exit.
- const char flag =
- reason == TEST_DID_NOT_DIE ? kDeathTestLived : kDeathTestReturned;
- GTEST_DEATH_TEST_CHECK_SYSCALL_(write(write_fd_, &flag, 1));
- GTEST_DEATH_TEST_CHECK_SYSCALL_(close(write_fd_));
- _exit(1); // Exits w/o any normal exit hooks (we were supposed to crash)
+#if GTEST_OS_WINDOWS
+// WindowsDeathTest implements death tests on Windows. Due to the
+// specifics of starting new processes on Windows, death tests there are
+// always threadsafe, and Google Test considers the
+// --gtest_death_test_style=fast setting to be equivalent to
+// --gtest_death_test_style=threadsafe there.
+//
+// A few implementation notes: Like the Linux version, the Windows
+// implementation uses pipes for child-to-parent communication. But due to
+// the specifics of pipes on Windows, some extra steps are required:
+//
+// 1. The parent creates a communication pipe and stores handles to both
+// ends of it.
+// 2. The parent starts the child and provides it with the information
+// necessary to acquire the handle to the write end of the pipe.
+// 3. The child acquires the write end of the pipe and signals the parent
+// using a Windows event.
+// 4. Now the parent can release the write end of the pipe on its side. If
+// this is done before step 3, the object's reference count goes down to
+// 0 and it is destroyed, preventing the child from acquiring it. The
+// parent now has to release it, or read operations on the read end of
+// the pipe will not return when the child terminates.
+// 5. The parent reads the child's output (outcome code and any possible
+//    error messages) from the pipe and the child's stderr, and then
+//    determines whether to fail the test.
+//
+// Note: to distinguish Win32 API calls from the local method and function
+// calls, the former are explicitly resolved in the global namespace.
+//
+class WindowsDeathTest : public DeathTestImpl {
+ public:
+ WindowsDeathTest(const char* statement,
+ const RE* regex,
+ const char* file,
+ int line)
+ : DeathTestImpl(statement, regex), file_(file), line_(line) {}
+
+ // All of these virtual functions are inherited from DeathTest.
+ virtual int Wait();
+ virtual TestRole AssumeRole();
+
+ private:
+ // The name of the file in which the death test is located.
+ const char* const file_;
+ // The line number on which the death test is located.
+ const int line_;
+ // Handle to the write end of the pipe to the child process.
+ AutoHandle write_handle_;
+ // Child process handle.
+ AutoHandle child_handle_;
+ // Event the child process uses to signal the parent that it has
+ // acquired the handle to the write end of the pipe. After seeing this
+ // event the parent can release its own handles to make sure its
+ // ReadFile() calls return when the child terminates.
+ AutoHandle event_handle_;
+};
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists. As a side effect, sets the
+// outcome data member.
+int WindowsDeathTest::Wait() {
+ if (!spawned())
+ return 0;
+
+ // Wait until the child either signals that it has acquired the write end
+ // of the pipe or it dies.
+ const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() };
+ switch (::WaitForMultipleObjects(2,
+ wait_handles,
+ FALSE, // Waits for any of the handles.
+ INFINITE)) {
+ case WAIT_OBJECT_0:
+ case WAIT_OBJECT_0 + 1:
+ break;
+ default:
+ GTEST_DEATH_TEST_CHECK_(false); // Should not get here.
+ }
+
+ // The child has acquired the write end of the pipe or exited.
+ // We release the handle on our side and continue.
+ write_handle_.Reset();
+ event_handle_.Reset();
+
+ ReadAndInterpretStatusByte();
+
+  // Waits for the child process to exit if it hasn't already. This
+ // returns immediately if the child has already exited, regardless of
+ // whether previous calls to WaitForMultipleObjects synchronized on this
+ // handle or not.
+ GTEST_DEATH_TEST_CHECK_(
+ WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(),
+ INFINITE));
+ DWORD status;
+ GTEST_DEATH_TEST_CHECK_(::GetExitCodeProcess(child_handle_.Get(), &status)
+ != FALSE);
+ child_handle_.Reset();
+ set_status(static_cast<int>(status));
+ return this->status();
+}
+
+// The AssumeRole process for a Windows death test. It creates a child
+// process with the same executable as the current process to run the
+// death test. The child process is given the --gtest_filter and
+// --gtest_internal_run_death_test flags such that it knows to run the
+// current death test only.
+DeathTest::TestRole WindowsDeathTest::AssumeRole() {
+ const UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const TestInfo* const info = impl->current_test_info();
+ const int death_test_index = info->result()->death_test_count();
+
+ if (flag != NULL) {
+ // ParseInternalRunDeathTestFlag() has performed all the necessary
+ // processing.
+ set_write_fd(flag->write_fd());
+ return EXECUTE_TEST;
+ }
+
+ // WindowsDeathTest uses an anonymous pipe to communicate results of
+ // a death test.
+ SECURITY_ATTRIBUTES handles_are_inheritable = {
+ sizeof(SECURITY_ATTRIBUTES), NULL, TRUE };
+ HANDLE read_handle, write_handle;
+ GTEST_DEATH_TEST_CHECK_(
+ ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable,
+ 0) // Default buffer size.
+ != FALSE);
+ set_read_fd(::_open_osfhandle(reinterpret_cast<intptr_t>(read_handle),
+ O_RDONLY));
+ write_handle_.Reset(write_handle);
+ event_handle_.Reset(::CreateEvent(
+ &handles_are_inheritable,
+      TRUE,    // The event is manual-reset: it stays signaled until reset.
+      FALSE,   // The initial state is non-signaled.
+      NULL));  // The event is unnamed.
+ GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL);
+ const String filter_flag = String::Format("--%s%s=%s.%s",
+ GTEST_FLAG_PREFIX_, kFilterFlag,
+ info->test_case_name(),
+ info->name());
+ const String internal_flag = String::Format(
+ "--%s%s=%s|%d|%d|%u|%Iu|%Iu",
+ GTEST_FLAG_PREFIX_,
+ kInternalRunDeathTestFlag,
+ file_, line_,
+ death_test_index,
+ static_cast<unsigned int>(::GetCurrentProcessId()),
+      // size_t has the same width as pointers on both 32-bit and 64-bit
+ // Windows platforms.
+ // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx.
+ reinterpret_cast<size_t>(write_handle),
+ reinterpret_cast<size_t>(event_handle_.Get()));
+
+ char executable_path[_MAX_PATH + 1]; // NOLINT
+ GTEST_DEATH_TEST_CHECK_(
+ _MAX_PATH + 1 != ::GetModuleFileNameA(NULL,
+ executable_path,
+ _MAX_PATH));
+
+ String command_line = String::Format("%s %s \"%s\"",
+ ::GetCommandLineA(),
+ filter_flag.c_str(),
+ internal_flag.c_str());
+
+ DeathTest::set_last_death_test_message("");
+
+ CaptureStderr();
+ // Flush the log buffers since the log streams are shared with the child.
+ FlushInfoLog();
+
+ // The child process will share the standard handles with the parent.
+ STARTUPINFOA startup_info;
+ memset(&startup_info, 0, sizeof(STARTUPINFO));
+ startup_info.dwFlags = STARTF_USESTDHANDLES;
+ startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE);
+ startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
+ startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
+
+ PROCESS_INFORMATION process_info;
+ GTEST_DEATH_TEST_CHECK_(::CreateProcessA(
+ executable_path,
+ const_cast<char*>(command_line.c_str()),
+      NULL,   // Returned process handle is not inheritable.
+      NULL,   // Returned thread handle is not inheritable.
+ TRUE, // Child inherits all inheritable handles (for write_handle_).
+ 0x0, // Default creation flags.
+ NULL, // Inherit the parent's environment.
+ UnitTest::GetInstance()->original_working_dir(),
+ &startup_info,
+ &process_info) != FALSE);
+ child_handle_.Reset(process_info.hProcess);
+ ::CloseHandle(process_info.hThread);
+ set_spawned(true);
+ return OVERSEE_TEST;
+}
+#else // We are not on Windows.
+
+// ForkingDeathTest provides implementations for most of the abstract
+// methods of the DeathTest interface. Only the AssumeRole method is
+// left undefined.
+class ForkingDeathTest : public DeathTestImpl {
+ public:
+ ForkingDeathTest(const char* statement, const RE* regex);
+
+ // All of these virtual functions are inherited from DeathTest.
+ virtual int Wait();
+
+ protected:
+ void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
+
+ private:
+ // PID of child process during death test; 0 in the child process itself.
+ pid_t child_pid_;
+};
+
+// Constructs a ForkingDeathTest.
+ForkingDeathTest::ForkingDeathTest(const char* a_statement, const RE* a_regex)
+ : DeathTestImpl(a_statement, a_regex),
+ child_pid_(-1) {}
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists. As a side effect, sets the
+// outcome data member.
+int ForkingDeathTest::Wait() {
+ if (!spawned())
+ return 0;
+
+ ReadAndInterpretStatusByte();
+
+ int status_value;
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0));
+ set_status(status_value);
+ return status_value;
}
// A concrete death test class that forks, then immediately runs the test
// in the child process.
class NoExecDeathTest : public ForkingDeathTest {
public:
- NoExecDeathTest(const char* statement, const RE* regex) :
- ForkingDeathTest(statement, regex) { }
+ NoExecDeathTest(const char* a_statement, const RE* a_regex) :
+ ForkingDeathTest(a_statement, a_regex) { }
virtual TestRole AssumeRole();
};
@@ -455,13 +738,13 @@ class NoExecDeathTest : public ForkingDeathTest {
DeathTest::TestRole NoExecDeathTest::AssumeRole() {
const size_t thread_count = GetThreadCount();
if (thread_count != 1) {
- GTEST_LOG_(WARNING, DeathTestThreadWarning(thread_count));
+ GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count);
}
int pipe_fd[2];
GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
- last_death_test_message = "";
+ DeathTest::set_last_death_test_message("");
CaptureStderr();
// When we fork the process below, the log file buffers are copied, but the
// file descriptors are shared. We flush all log files here so that closing
@@ -482,11 +765,14 @@ DeathTest::TestRole NoExecDeathTest::AssumeRole() {
// concurrent writes to the log files. We capture stderr in the parent
// process and append the child process' output to a log.
LogToStderr();
+    // Event forwarding to the listeners of the event listener API must be
+    // shut down in death test subprocesses.
+ GetUnitTestImpl()->listeners()->SuppressEventForwarding();
return EXECUTE_TEST;
} else {
GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
set_read_fd(pipe_fd[0]);
- set_forked(true);
+ set_spawned(true);
return OVERSEE_TEST;
}
}
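
The fork()/pipe() role split used by NoExecDeathTest::AssumeRole() above follows the usual pattern: create the pipe first, fork, and have each side close the end it does not use, so that EOF on the read end reliably signals the child's death. A reduced sketch (error handling elided, names made up):

    #include <unistd.h>

    enum Role { OVERSEE_CHILD, EXECUTE_STATEMENT };

    // Returns the caller's role and the single pipe end it should keep.
    Role SplitRoles(int* kept_fd) {
      int fds[2];
      if (pipe(fds) != 0)
        return OVERSEE_CHILD;          // error path elided in this sketch
      if (fork() == 0) {               // child: runs the statement, then dies
        close(fds[0]);                 // child keeps only the write end
        *kept_fd = fds[1];
        return EXECUTE_STATEMENT;
      }
      close(fds[1]);                   // parent keeps only the read end
      *kept_fd = fds[0];
      return OVERSEE_CHILD;
    }
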
@@ -496,9 +782,9 @@ DeathTest::TestRole NoExecDeathTest::AssumeRole() {
// only this specific death test to be run.
class ExecDeathTest : public ForkingDeathTest {
public:
- ExecDeathTest(const char* statement, const RE* regex,
+ ExecDeathTest(const char* a_statement, const RE* a_regex,
const char* file, int line) :
- ForkingDeathTest(statement, regex), file_(file), line_(line) { }
+ ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { }
virtual TestRole AssumeRole();
private:
// The name of the file in which the death test is located.
@@ -513,15 +799,15 @@ class Arguments {
Arguments() {
args_.push_back(NULL);
}
+
~Arguments() {
- for (std::vector<char*>::iterator i = args_.begin();
- i + 1 != args_.end();
+ for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
++i) {
free(*i);
}
}
void AddArgument(const char* argument) {
- args_.insert(args_.end() - 1, strdup(argument));
+ args_.insert(args_.end() - 1, posix::StrDup(argument));
}
template <typename Str>
@@ -529,7 +815,7 @@ class Arguments {
for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
i != arguments.end();
++i) {
- args_.insert(args_.end() - 1, strdup(i->c_str()));
+ args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));
}
}
char* const* Argv() {
@@ -546,6 +832,20 @@ struct ExecDeathTestArgs {
int close_fd; // File descriptor to close; the read end of a pipe
};
+#if GTEST_OS_MAC
+inline char** GetEnviron() {
+ // When Google Test is built as a framework on MacOS X, the environ variable
+ // is unavailable. Apple's documentation (man environ) recommends using
+ // _NSGetEnviron() instead.
+ return *_NSGetEnviron();
+}
+#else
+// Some POSIX platforms expect you to declare environ. extern "C" makes
+// it reside in the global namespace.
+extern "C" char** environ;
+inline char** GetEnviron() { return environ; }
+#endif // GTEST_OS_MAC
+
// The main function for a threadsafe-style death test child process.
// This function is called in a clone()-ed process and thus must avoid
// any potentially unsafe operations like malloc or libc functions.
@@ -560,8 +860,9 @@ static int ExecDeathTestChildMain(void* child_arg) {
UnitTest::GetInstance()->original_working_dir();
// We can safely call chdir() as it's a direct system call.
if (chdir(original_dir) != 0) {
- DeathTestAbort("chdir(\"%s\") failed: %s",
- original_dir, strerror(errno));
+ DeathTestAbort(String::Format("chdir(\"%s\") failed: %s",
+ original_dir,
+ GetLastErrnoDescription().c_str()));
return EXIT_FAILURE;
}
@@ -570,9 +871,11 @@ static int ExecDeathTestChildMain(void* child_arg) {
// unsafe. Since execve() doesn't search the PATH, the user must
// invoke the test program via a valid path that contains at least
// one path separator.
- execve(args->argv[0], args->argv, environ);
- DeathTestAbort("execve(%s, ...) in %s failed: %s",
- args->argv[0], original_dir, strerror(errno));
+ execve(args->argv[0], args->argv, GetEnviron());
+ DeathTestAbort(String::Format("execve(%s, ...) in %s failed: %s",
+ args->argv[0],
+ original_dir,
+ GetLastErrnoDescription().c_str()));
return EXIT_FAILURE;
}
@@ -581,12 +884,12 @@ static int ExecDeathTestChildMain(void* child_arg) {
// This could be accomplished more elegantly by a single recursive
// function, but we want to guard against the unlikely possibility of
// a smart compiler optimizing the recursion away.
-static bool StackLowerThanAddress(const void* ptr) {
+bool StackLowerThanAddress(const void* ptr) {
int dummy;
return &dummy < ptr;
}
-static bool StackGrowsDown() {
+bool StackGrowsDown() {
int dummy;
return StackLowerThanAddress(&dummy);
}
@@ -595,18 +898,36 @@ static bool StackGrowsDown() {
// that uses clone(2). It dies with an error message if anything goes
// wrong.
static pid_t ExecDeathTestFork(char* const* argv, int close_fd) {
- static const bool stack_grows_down = StackGrowsDown();
- const size_t stack_size = getpagesize();
- void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED);
- void* const stack_top =
- static_cast<char*>(stack) + (stack_grows_down ? stack_size : 0);
ExecDeathTestArgs args = { argv, close_fd };
- const pid_t child_pid = clone(&ExecDeathTestChildMain, stack_top,
- SIGCHLD, &args);
+ pid_t child_pid = -1;
+
+#if GTEST_HAS_CLONE
+ const bool use_fork = GTEST_FLAG(death_test_use_fork);
+
+ if (!use_fork) {
+ static const bool stack_grows_down = StackGrowsDown();
+ const size_t stack_size = getpagesize();
+    // MAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead.
+ void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE, -1, 0);
+ GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED);
+ void* const stack_top =
+ static_cast<char*>(stack) + (stack_grows_down ? stack_size : 0);
+
+ child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args);
+
+ GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1);
+ }
+#else
+ const bool use_fork = true;
+#endif // GTEST_HAS_CLONE
+
+ if (use_fork && (child_pid = fork()) == 0) {
+ ExecDeathTestChildMain(&args);
+ _exit(0);
+ }
+
GTEST_DEATH_TEST_CHECK_(child_pid != -1);
- GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1);
return child_pid;
}
@@ -622,7 +943,7 @@ DeathTest::TestRole ExecDeathTest::AssumeRole() {
const int death_test_index = info->result()->death_test_count();
if (flag != NULL) {
- set_write_fd(flag->status_fd);
+ set_write_fd(flag->write_fd());
return EXECUTE_TEST;
}
@@ -634,19 +955,18 @@ DeathTest::TestRole ExecDeathTest::AssumeRole() {
const String filter_flag =
String::Format("--%s%s=%s.%s",
- GTEST_FLAG_PREFIX, kFilterFlag,
+ GTEST_FLAG_PREFIX_, kFilterFlag,
info->test_case_name(), info->name());
const String internal_flag =
- String::Format("--%s%s=%s:%d:%d:%d",
- GTEST_FLAG_PREFIX, kInternalRunDeathTestFlag, file_, line_,
- death_test_index, pipe_fd[1]);
+ String::Format("--%s%s=%s|%d|%d|%d",
+ GTEST_FLAG_PREFIX_, kInternalRunDeathTestFlag,
+ file_, line_, death_test_index, pipe_fd[1]);
Arguments args;
args.AddArguments(GetArgvs());
- args.AddArgument("--logtostderr");
args.AddArgument(filter_flag.c_str());
args.AddArgument(internal_flag.c_str());
- last_death_test_message = "";
+ DeathTest::set_last_death_test_message("");
CaptureStderr();
// See the comment in NoExecDeathTest::AssumeRole for why the next line
@@ -657,10 +977,12 @@ DeathTest::TestRole ExecDeathTest::AssumeRole() {
GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
set_child_pid(child_pid);
set_read_fd(pipe_fd[0]);
- set_forked(true);
+ set_spawned(true);
return OVERSEE_TEST;
}
+#endif // !GTEST_OS_WINDOWS
+
// Creates a concrete DeathTest-derived class that depends on the
// --gtest_death_test_style flag, and sets the pointer pointed to
// by the "test" argument to its address. If the test should be
@@ -676,28 +998,36 @@ bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex,
->increment_death_test_count();
if (flag != NULL) {
- if (death_test_index > flag->index) {
- last_death_test_message = String::Format(
+ if (death_test_index > flag->index()) {
+ DeathTest::set_last_death_test_message(String::Format(
"Death test count (%d) somehow exceeded expected maximum (%d)",
- death_test_index, flag->index);
+ death_test_index, flag->index()));
return false;
}
- if (!(flag->file == file && flag->line == line &&
- flag->index == death_test_index)) {
+ if (!(flag->file() == file && flag->line() == line &&
+ flag->index() == death_test_index)) {
*test = NULL;
return true;
}
}
+#if GTEST_OS_WINDOWS
+ if (GTEST_FLAG(death_test_style) == "threadsafe" ||
+ GTEST_FLAG(death_test_style) == "fast") {
+ *test = new WindowsDeathTest(statement, regex, file, line);
+ }
+#else
if (GTEST_FLAG(death_test_style) == "threadsafe") {
*test = new ExecDeathTest(statement, regex, file, line);
} else if (GTEST_FLAG(death_test_style) == "fast") {
*test = new NoExecDeathTest(statement, regex);
- } else {
- last_death_test_message = String::Format(
+ }
+#endif // GTEST_OS_WINDOWS
+ else { // NOLINT - this is more readable than unbalanced brackets inside #if.
+ DeathTest::set_last_death_test_message(String::Format(
"Unknown death test style \"%s\" encountered",
- GTEST_FLAG(death_test_style).c_str());
+ GTEST_FLAG(death_test_style).c_str()));
return false;
}
@@ -711,7 +1041,7 @@ static void SplitString(const ::std::string& str, char delimiter,
::std::vector< ::std::string>* dest) {
::std::vector< ::std::string> parsed;
::std::string::size_type pos = 0;
- while (true) {
+ while (::testing::internal::AlwaysTrue()) {
const ::std::string::size_type colon = str.find(delimiter, pos);
if (colon == ::std::string::npos) {
parsed.push_back(str.substr(pos));
@@ -724,25 +1054,71 @@ static void SplitString(const ::std::string& str, char delimiter,
dest->swap(parsed);
}
-// Attempts to parse a string into a positive integer. Returns true
-// if that is possible. GTEST_HAS_DEATH_TEST implies that we have
-// ::std::string, so we can use it here.
-static bool ParsePositiveInt(const ::std::string& str, int* number) {
- // Fail fast if the given string does not begin with a digit;
- // this bypasses strtol's "optional leading whitespace and plus
- // or minus sign" semantics, which are undesirable here.
- if (str.empty() || !isdigit(str[0])) {
- return false;
+#if GTEST_OS_WINDOWS
+// Recreates the pipe and event handles from the provided parameters,
+// signals the event, and returns a file descriptor wrapped around the pipe
+// handle. This function is called in the child process only.
+int GetStatusFileDescriptor(unsigned int parent_process_id,
+ size_t write_handle_as_size_t,
+ size_t event_handle_as_size_t) {
+ AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE,
+ FALSE, // Non-inheritable.
+ parent_process_id));
+ if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) {
+ DeathTestAbort(String::Format("Unable to open parent process %u",
+ parent_process_id));
}
- char* endptr;
- const long parsed = strtol(str.c_str(), &endptr, 10); // NOLINT
- if (*endptr == '\0' && parsed <= INT_MAX) {
- *number = static_cast<int>(parsed);
- return true;
- } else {
- return false;
+
+ // TODO(vladl@google.com): Replace the following check with a
+ // compile-time assertion when available.
+ GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t));
+
+ const HANDLE write_handle =
+ reinterpret_cast<HANDLE>(write_handle_as_size_t);
+ HANDLE dup_write_handle;
+
+  // The newly initialized handle is accessible only in the parent
+ // process. To obtain one accessible within the child, we need to use
+ // DuplicateHandle.
+ if (!::DuplicateHandle(parent_process_handle.Get(), write_handle,
+ ::GetCurrentProcess(), &dup_write_handle,
+ 0x0, // Requested privileges ignored since
+ // DUPLICATE_SAME_ACCESS is used.
+ FALSE, // Request non-inheritable handler.
+ DUPLICATE_SAME_ACCESS)) {
+ DeathTestAbort(String::Format(
+ "Unable to duplicate the pipe handle %Iu from the parent process %u",
+ write_handle_as_size_t, parent_process_id));
}
+
+ const HANDLE event_handle = reinterpret_cast<HANDLE>(event_handle_as_size_t);
+ HANDLE dup_event_handle;
+
+ if (!::DuplicateHandle(parent_process_handle.Get(), event_handle,
+ ::GetCurrentProcess(), &dup_event_handle,
+ 0x0,
+ FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ DeathTestAbort(String::Format(
+ "Unable to duplicate the event handle %Iu from the parent process %u",
+ event_handle_as_size_t, parent_process_id));
+ }
+
+ const int write_fd =
+ ::_open_osfhandle(reinterpret_cast<intptr_t>(dup_write_handle), O_APPEND);
+ if (write_fd == -1) {
+ DeathTestAbort(String::Format(
+ "Unable to convert pipe handle %Iu to a file descriptor",
+ write_handle_as_size_t));
+ }
+
+ // Signals the parent that the write end of the pipe has been acquired
+ // so the parent can release its own write end.
+ ::SetEvent(dup_event_handle);
+
+ return write_fd;
}
+#endif // GTEST_OS_WINDOWS
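
The handle plumbing above round-trips a Windows HANDLE through text: the parent formats the handle value as a size_t (the %Iu conversions in WindowsDeathTest::AssumeRole), and the child parses it back, asks DuplicateHandle() for an equivalent handle valid in its own process, and wraps the result in a CRT file descriptor. A reduced, Windows-only sketch (error handling omitted; FdFromInheritedHandle is a made-up name):

    #ifdef _WIN32
    #include <windows.h>
    #include <io.h>      // _open_osfhandle
    #include <fcntl.h>   // O_APPEND

    int FdFromInheritedHandle(HANDLE parent_process, size_t handle_as_size_t) {
      HANDLE theirs = reinterpret_cast<HANDLE>(handle_as_size_t);
      HANDLE mine = NULL;
      // Mint a handle usable in this process from the parent's handle value.
      ::DuplicateHandle(parent_process, theirs, ::GetCurrentProcess(), &mine,
                        0, FALSE, DUPLICATE_SAME_ACCESS);
      // Wrap the Win32 handle in a C runtime file descriptor.
      return ::_open_osfhandle(reinterpret_cast<intptr_t>(mine), O_APPEND);
    }
    #endif
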
// Returns a newly created InternalRunDeathTestFlag object with fields
// initialized from the GTEST_FLAG(internal_run_death_test) flag if
@@ -750,22 +1126,43 @@ static bool ParsePositiveInt(const ::std::string& str, int* number) {
InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {
if (GTEST_FLAG(internal_run_death_test) == "") return NULL;
- InternalRunDeathTestFlag* const internal_run_death_test_flag =
- new InternalRunDeathTestFlag;
// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
// can use it here.
+ int line = -1;
+ int index = -1;
::std::vector< ::std::string> fields;
- SplitString(GTEST_FLAG(internal_run_death_test).c_str(), ':', &fields);
+ SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields);
+ int write_fd = -1;
+
+#if GTEST_OS_WINDOWS
+ unsigned int parent_process_id = 0;
+ size_t write_handle_as_size_t = 0;
+ size_t event_handle_as_size_t = 0;
+
+ if (fields.size() != 6
+ || !ParseNaturalNumber(fields[1], &line)
+ || !ParseNaturalNumber(fields[2], &index)
+ || !ParseNaturalNumber(fields[3], &parent_process_id)
+ || !ParseNaturalNumber(fields[4], &write_handle_as_size_t)
+ || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) {
+ DeathTestAbort(String::Format(
+ "Bad --gtest_internal_run_death_test flag: %s",
+ GTEST_FLAG(internal_run_death_test).c_str()));
+ }
+ write_fd = GetStatusFileDescriptor(parent_process_id,
+ write_handle_as_size_t,
+ event_handle_as_size_t);
+#else
if (fields.size() != 4
- || !ParsePositiveInt(fields[1], &internal_run_death_test_flag->line)
- || !ParsePositiveInt(fields[2], &internal_run_death_test_flag->index)
- || !ParsePositiveInt(fields[3],
- &internal_run_death_test_flag->status_fd)) {
- DeathTestAbort("Bad --gtest_internal_run_death_test flag: %s",
- GTEST_FLAG(internal_run_death_test).c_str());
+ || !ParseNaturalNumber(fields[1], &line)
+ || !ParseNaturalNumber(fields[2], &index)
+ || !ParseNaturalNumber(fields[3], &write_fd)) {
+ DeathTestAbort(String::Format(
+ "Bad --gtest_internal_run_death_test flag: %s",
+ GTEST_FLAG(internal_run_death_test).c_str()));
}
- internal_run_death_test_flag->file = fields[0].c_str();
- return internal_run_death_test_flag;
+#endif // GTEST_OS_WINDOWS
+ return new InternalRunDeathTestFlag(fields[0], line, index, write_fd);
}
} // namespace internal
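
To summarize the wire format this function parses: the --gtest_internal_run_death_test flag now carries '|'-separated fields, four on POSIX (file|line|index|write_fd) and six on Windows (file|line|index|parent_pid|write_handle|event_handle). Switching the delimiter from ':' to '|' presumably keeps Windows drive letters such as C:\ from being split apart. A minimal stand-in for the SplitString helper, with made-up example values:

    #include <sstream>
    #include <string>
    #include <vector>

    std::vector<std::string> Split(const std::string& s, char delim) {
      std::vector<std::string> out;
      std::istringstream in(s);
      for (std::string field; std::getline(in, field, delim); )
        out.push_back(field);        // note: drops a trailing empty field
      return out;
    }
    // Split("foo_test.cc|42|1|5", '|') -> {"foo_test.cc", "42", "1", "5"}
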
diff --git a/contrib/llvm/utils/unittest/googletest/gtest-filepath.cc b/contrib/llvm/utils/unittest/googletest/gtest-filepath.cc
index 493ba0b..c1ef918 100644
--- a/contrib/llvm/utils/unittest/googletest/gtest-filepath.cc
+++ b/contrib/llvm/utils/unittest/googletest/gtest-filepath.cc
@@ -34,23 +34,20 @@
#include <stdlib.h>
-#ifdef _WIN32_WCE
+#if GTEST_OS_WINDOWS_MOBILE
#include <windows.h>
-#elif defined(GTEST_OS_WINDOWS)
+#elif GTEST_OS_WINDOWS
#include <direct.h>
#include <io.h>
-#include <sys/stat.h>
-#elif defined(GTEST_OS_SYMBIAN)
+#elif GTEST_OS_SYMBIAN
// Symbian OpenC has PATH_MAX in sys/syslimits.h
#include <sys/syslimits.h>
-#include <unistd.h>
#else
#include <limits.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#endif // _WIN32_WCE or _WIN32
+#include <climits> // Some Linux distributions define PATH_MAX here.
+#endif // GTEST_OS_WINDOWS_MOBILE
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
#define GTEST_PATH_MAX_ _MAX_PATH
#elif defined(PATH_MAX)
#define GTEST_PATH_MAX_ PATH_MAX
@@ -65,10 +62,16 @@
namespace testing {
namespace internal {
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
+// On Windows, '\\' is the standard path separator, but many tools and the
+// Windows API also accept '/' as an alternate path separator. Unless otherwise
+// noted, a file path can contain either kind of path separators, or a mixture
+// of them.
const char kPathSeparator = '\\';
+const char kAlternatePathSeparator = '/';
const char kPathSeparatorString[] = "\\";
-#ifdef _WIN32_WCE
+const char kAlternatePathSeparatorString[] = "/";
+#if GTEST_OS_WINDOWS_MOBILE
// Windows CE doesn't have a current directory. You should not use
// the current directory in tests on Windows CE, but this at least
// provides a reasonable fallback.
@@ -77,26 +80,35 @@ const char kCurrentDirectoryString[] = "\\";
const DWORD kInvalidFileAttributes = 0xffffffff;
#else
const char kCurrentDirectoryString[] = ".\\";
-#endif // _WIN32_WCE
+#endif // GTEST_OS_WINDOWS_MOBILE
#else
const char kPathSeparator = '/';
const char kPathSeparatorString[] = "/";
const char kCurrentDirectoryString[] = "./";
#endif // GTEST_OS_WINDOWS
+// Returns whether the given character is a valid path separator.
+static bool IsPathSeparator(char c) {
+#if GTEST_HAS_ALT_PATH_SEP_
+ return (c == kPathSeparator) || (c == kAlternatePathSeparator);
+#else
+ return c == kPathSeparator;
+#endif
+}
+
// Returns the current working directory, or "" if unsuccessful.
FilePath FilePath::GetCurrentDir() {
-#ifdef _WIN32_WCE
-// Windows CE doesn't have a current directory, so we just return
-// something reasonable.
+#if GTEST_OS_WINDOWS_MOBILE
+ // Windows CE doesn't have a current directory, so we just return
+ // something reasonable.
return FilePath(kCurrentDirectoryString);
-#elif defined(GTEST_OS_WINDOWS)
- char cwd[GTEST_PATH_MAX_ + 1] = {};
+#elif GTEST_OS_WINDOWS
+ char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
#else
- char cwd[GTEST_PATH_MAX_ + 1] = {};
+ char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
return FilePath(getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
-#endif
+#endif // GTEST_OS_WINDOWS_MOBILE
}
// Returns a copy of the FilePath with the case-insensitive extension removed.
@@ -106,11 +118,27 @@ FilePath FilePath::GetCurrentDir() {
FilePath FilePath::RemoveExtension(const char* extension) const {
String dot_extension(String::Format(".%s", extension));
if (pathname_.EndsWithCaseInsensitive(dot_extension.c_str())) {
- return FilePath(String(pathname_.c_str(), pathname_.GetLength() - 4));
+ return FilePath(String(pathname_.c_str(), pathname_.length() - 4));
}
return *this;
}
+// Returns a pointer to the last occurrence of a valid path separator in
+// the FilePath. On Windows, for example, both '/' and '\' are valid path
+// separators. Returns NULL if no path separator was found.
+const char* FilePath::FindLastPathSeparator() const {
+ const char* const last_sep = strrchr(c_str(), kPathSeparator);
+#if GTEST_HAS_ALT_PATH_SEP_
+ const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator);
+ // Comparing two pointers of which only one is NULL is undefined.
+ if (last_alt_sep != NULL &&
+ (last_sep == NULL || last_alt_sep > last_sep)) {
+ return last_alt_sep;
+ }
+#endif
+ return last_sep;
+}
+
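
FindLastPathSeparator() exists because the obvious "take the larger of two strrchr() results" runs into the problem the comment notes: relationally comparing a null pointer against a non-null one is not meaningful. A reduced sketch of the same null-safe logic, assuming the Windows dual-separator case:

    #include <cstring>

    const char* FindLastSep(const char* path) {
      const char* last = std::strrchr(path, '\\');
      const char* alt  = std::strrchr(path, '/');
      // Only compare when both separators were actually found.
      if (alt != NULL && (last == NULL || alt > last))
        return alt;
      return last;
    }
    // FindLastSep("a\\b/c.txt") points at the '/', so the base name is "c.txt".
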
// Returns a copy of the FilePath with the directory part removed.
// Example: FilePath("path/to/file").RemoveDirectoryName() returns
// FilePath("file"). If there is no directory part ("just_a_file"), it returns
@@ -118,7 +146,7 @@ FilePath FilePath::RemoveExtension(const char* extension) const {
// returns an empty FilePath ("").
// On Windows platform, '\' is the path separator, otherwise it is '/'.
FilePath FilePath::RemoveDirectoryName() const {
- const char* const last_sep = strrchr(c_str(), kPathSeparator);
+ const char* const last_sep = FindLastPathSeparator();
return last_sep ? FilePath(String(last_sep + 1)) : *this;
}
@@ -129,9 +157,14 @@ FilePath FilePath::RemoveDirectoryName() const {
// not have a file, like "just/a/dir/", it returns the FilePath unmodified.
// On Windows platform, '\' is the path separator, otherwise it is '/'.
FilePath FilePath::RemoveFileName() const {
- const char* const last_sep = strrchr(c_str(), kPathSeparator);
- return FilePath(last_sep ? String(c_str(), last_sep + 1 - c_str())
- : String(kCurrentDirectoryString));
+ const char* const last_sep = FindLastPathSeparator();
+ String dir;
+ if (last_sep) {
+ dir = String(c_str(), last_sep + 1 - c_str());
+ } else {
+ dir = kCurrentDirectoryString;
+ }
+ return FilePath(dir);
}
// Helper functions for naming files in a directory for xml output.
@@ -144,44 +177,54 @@ FilePath FilePath::MakeFileName(const FilePath& directory,
const FilePath& base_name,
int number,
const char* extension) {
- FilePath dir(directory.RemoveTrailingPathSeparator());
+ String file;
if (number == 0) {
- return FilePath(String::Format("%s%c%s.%s", dir.c_str(), kPathSeparator,
- base_name.c_str(), extension));
+ file = String::Format("%s.%s", base_name.c_str(), extension);
+ } else {
+ file = String::Format("%s_%d.%s", base_name.c_str(), number, extension);
}
- return FilePath(String::Format("%s%c%s_%d.%s", dir.c_str(), kPathSeparator,
- base_name.c_str(), number, extension));
+ return ConcatPaths(directory, FilePath(file));
+}
+
+// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml".
+// On Windows, uses \ as the separator rather than /.
+FilePath FilePath::ConcatPaths(const FilePath& directory,
+ const FilePath& relative_path) {
+ if (directory.IsEmpty())
+ return relative_path;
+ const FilePath dir(directory.RemoveTrailingPathSeparator());
+ return FilePath(String::Format("%s%c%s", dir.c_str(), kPathSeparator,
+ relative_path.c_str()));
}
// Returns true if pathname describes something findable in the file-system,
// either a file, directory, or whatever.
bool FilePath::FileOrDirectoryExists() const {
-#ifdef GTEST_OS_WINDOWS
-#ifdef _WIN32_WCE
+#if GTEST_OS_WINDOWS_MOBILE
LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str());
const DWORD attributes = GetFileAttributes(unicode);
delete [] unicode;
return attributes != kInvalidFileAttributes;
#else
- struct _stat file_stat = {};
- return _stat(pathname_.c_str(), &file_stat) == 0;
-#endif // _WIN32_WCE
-#else
- struct stat file_stat;
- return stat(pathname_.c_str(), &file_stat) == 0;
-#endif // GTEST_OS_WINDOWS
+ posix::StatStruct file_stat;
+ return posix::Stat(pathname_.c_str(), &file_stat) == 0;
+#endif // GTEST_OS_WINDOWS_MOBILE
}
// Returns true if pathname describes a directory in the file-system
// that exists.
bool FilePath::DirectoryExists() const {
bool result = false;
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
// Don't strip off trailing separator if path is a root directory on
// Windows (like "C:\\").
const FilePath& path(IsRootDirectory() ? *this :
RemoveTrailingPathSeparator());
-#ifdef _WIN32_WCE
+#else
+ const FilePath& path(*this);
+#endif
+
+#if GTEST_OS_WINDOWS_MOBILE
LPCWSTR unicode = String::AnsiToUtf16(path.c_str());
const DWORD attributes = GetFileAttributes(unicode);
delete [] unicode;
@@ -190,30 +233,38 @@ bool FilePath::DirectoryExists() const {
result = true;
}
#else
- struct _stat file_stat = {};
- result = _stat(path.c_str(), &file_stat) == 0 &&
- (_S_IFDIR & file_stat.st_mode) != 0;
-#endif // _WIN32_WCE
-#else
- struct stat file_stat;
- result = stat(pathname_.c_str(), &file_stat) == 0 &&
- S_ISDIR(file_stat.st_mode);
-#endif // GTEST_OS_WINDOWS
+ posix::StatStruct file_stat;
+ result = posix::Stat(path.c_str(), &file_stat) == 0 &&
+ posix::IsDir(file_stat);
+#endif // GTEST_OS_WINDOWS_MOBILE
+
return result;
}
// Returns true if pathname describes a root directory. (Windows has one
// root directory per disk drive.)
bool FilePath::IsRootDirectory() const {
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
+ // TODO(wan@google.com): on Windows a network share like
+ // \\server\share can be a root directory, although it cannot be the
+ // current directory. Handle this properly.
+ return pathname_.length() == 3 && IsAbsolutePath();
+#else
+ return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]);
+#endif
+}
+
+// Returns true if pathname describes an absolute path.
+bool FilePath::IsAbsolutePath() const {
const char* const name = pathname_.c_str();
- return pathname_.GetLength() == 3 &&
+#if GTEST_OS_WINDOWS
+ return pathname_.length() >= 3 &&
((name[0] >= 'a' && name[0] <= 'z') ||
(name[0] >= 'A' && name[0] <= 'Z')) &&
name[1] == ':' &&
- name[2] == kPathSeparator;
+ IsPathSeparator(name[2]);
#else
- return pathname_ == kPathSeparatorString;
+ return IsPathSeparator(name[0]);
#endif
}
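
IsAbsolutePath is new in this revision: on Windows it requires a drive letter, a colon, and either kind of separator (so both "c:\foo" and "c:/foo" qualify), while on POSIX a single leading separator suffices. A hedged standalone rendering of the same check:

    // Sketch of the predicate; the patch's version is a FilePath member.
    bool IsAbsolutePathSketch(const char* name) {
    #ifdef _WIN32
      // name[2] is safe to read once name[0] and name[1] are non-NUL.
      return ((name[0] >= 'a' && name[0] <= 'z') ||
              (name[0] >= 'A' && name[0] <= 'Z')) &&
             name[1] == ':' && (name[2] == '\\' || name[2] == '/');
    #else
      return name[0] == '/';
    #endif
    }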
@@ -240,7 +291,8 @@ FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
// it is intended to represent a directory. Returns false otherwise.
// This does NOT check that a directory (or file) actually exists.
bool FilePath::IsDirectory() const {
- return pathname_.EndsWith(kPathSeparatorString);
+ return !pathname_.empty() &&
+ IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]);
}
// Create directories so that path exists. Returns true if successful or if
@@ -251,7 +303,7 @@ bool FilePath::CreateDirectoriesRecursively() const {
return false;
}
- if (pathname_.GetLength() == 0 || this->DirectoryExists()) {
+ if (pathname_.length() == 0 || this->DirectoryExists()) {
return true;
}
@@ -264,18 +316,17 @@ bool FilePath::CreateDirectoriesRecursively() const {
// directory for any reason, including if the parent directory does not
// exist. Not named "CreateDirectory" because that's a macro on Windows.
bool FilePath::CreateFolder() const {
-#ifdef GTEST_OS_WINDOWS
-#ifdef _WIN32_WCE
+#if GTEST_OS_WINDOWS_MOBILE
FilePath removed_sep(this->RemoveTrailingPathSeparator());
LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str());
int result = CreateDirectory(unicode, NULL) ? 0 : -1;
delete [] unicode;
-#else
+#elif GTEST_OS_WINDOWS
int result = _mkdir(pathname_.c_str());
-#endif // !WIN32_WCE
#else
int result = mkdir(pathname_.c_str(), 0777);
-#endif // _WIN32
+#endif // GTEST_OS_WINDOWS_MOBILE
+
if (result == -1) {
return this->DirectoryExists(); // An error is OK if the directory exists.
}
@@ -286,31 +337,39 @@ bool FilePath::CreateFolder() const {
// name, otherwise return the name string unmodified.
// On Windows platform, uses \ as the separator, other platforms use /.
FilePath FilePath::RemoveTrailingPathSeparator() const {
- return pathname_.EndsWith(kPathSeparatorString)
- ? FilePath(String(pathname_.c_str(), pathname_.GetLength() - 1))
+ return IsDirectory()
+ ? FilePath(String(pathname_.c_str(), pathname_.length() - 1))
: *this;
}
-// Normalize removes any redundant separators that might be in the pathname.
+// Removes any redundant separators that might be in the pathname.
// For example, "bar///foo" becomes "bar/foo". Does not eliminate other
// redundancies that might be in a pathname involving "." or "..".
+// TODO(wan@google.com): handle Windows network shares (e.g. \\server\share).
void FilePath::Normalize() {
if (pathname_.c_str() == NULL) {
pathname_ = "";
return;
}
const char* src = pathname_.c_str();
- char* const dest = new char[pathname_.GetLength() + 1];
+ char* const dest = new char[pathname_.length() + 1];
char* dest_ptr = dest;
- memset(dest_ptr, 0, pathname_.GetLength() + 1);
+ memset(dest_ptr, 0, pathname_.length() + 1);
while (*src != '\0') {
- *dest_ptr++ = *src;
- if (*src != kPathSeparator)
+ *dest_ptr = *src;
+ if (!IsPathSeparator(*src)) {
src++;
- else
- while (*src == kPathSeparator)
+ } else {
+#if GTEST_HAS_ALT_PATH_SEP_
+ if (*dest_ptr == kAlternatePathSeparator) {
+ *dest_ptr = kPathSeparator;
+ }
+#endif
+ while (IsPathSeparator(*src))
src++;
+ }
+ dest_ptr++;
}
*dest_ptr = '\0';
pathname_ = dest;
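
The rewritten Normalize loop keeps one character per run of separators and, where an alternate separator exists, canonicalizes that survivor to kPathSeparator. A sketch of the same collapse over std::string (it handles both separator kinds unconditionally, matching the Windows configuration):

    #include <string>

    // Sketch: collapse separator runs to a single canonical separator.
    std::string NormalizeSketch(const std::string& in, char sep = '/') {
      std::string out;
      for (std::string::size_type i = 0; i < in.size();) {
        if (in[i] == '/' || in[i] == '\\') {
          out += sep;                                  // keep one, canonicalized
          while (i < in.size() && (in[i] == '/' || in[i] == '\\'))
            ++i;                                       // drop the rest of the run
        } else {
          out += in[i++];
        }
      }
      return out;  // NormalizeSketch("bar///foo") == "bar/foo"
    }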
diff --git a/contrib/llvm/utils/unittest/googletest/gtest-port.cc b/contrib/llvm/utils/unittest/googletest/gtest-port.cc
index 9878cae..5609599 100644
--- a/contrib/llvm/utils/unittest/googletest/gtest-port.cc
+++ b/contrib/llvm/utils/unittest/googletest/gtest-port.cc
@@ -35,29 +35,90 @@
#include <stdlib.h>
#include <stdio.h>
-#ifdef GTEST_HAS_DEATH_TEST
-#include <regex.h>
-#endif // GTEST_HAS_DEATH_TEST
-
-#ifdef _WIN32_WCE
+#if GTEST_OS_WINDOWS_MOBILE
#include <windows.h> // For TerminateProcess()
-#endif // _WIN32_WCE
+#elif GTEST_OS_WINDOWS
+#include <io.h>
+#include <sys/stat.h>
+#else
+#include <unistd.h>
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+#if GTEST_OS_MAC
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#include <mach/vm_map.h>
+#endif // GTEST_OS_MAC
#include <gtest/gtest-spi.h>
#include <gtest/gtest-message.h>
#include <gtest/internal/gtest-string.h>
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#include "gtest/internal/gtest-internal-inl.h"
+#undef GTEST_IMPLEMENTATION_
namespace testing {
namespace internal {
-#ifdef GTEST_HAS_DEATH_TEST
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC and C++Builder do not provide a definition of STDERR_FILENO.
+const int kStdOutFileno = 1;
+const int kStdErrFileno = 2;
+#else
+const int kStdOutFileno = STDOUT_FILENO;
+const int kStdErrFileno = STDERR_FILENO;
+#endif // _MSC_VER
+
+#if GTEST_OS_MAC
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+ const task_t task = mach_task_self();
+ mach_msg_type_number_t thread_count;
+ thread_act_array_t thread_list;
+ const kern_return_t status = task_threads(task, &thread_list, &thread_count);
+ if (status == KERN_SUCCESS) {
+ // task_threads allocates resources in thread_list and we need to free them
+ // to avoid leaks.
+ vm_deallocate(task,
+ reinterpret_cast<vm_address_t>(thread_list),
+ sizeof(thread_t) * thread_count);
+ return static_cast<size_t>(thread_count);
+ } else {
+ return 0;
+ }
+}
+
+#else
+
+size_t GetThreadCount() {
+ // There's no portable way to detect the number of threads, so we just
+ // return 0 to indicate that we cannot detect it.
+ return 0;
+}
+
+#endif // GTEST_OS_MAC
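
GetThreadCount returns 0 as an explicit "cannot detect" sentinel everywhere except Mac OS, where the Mach task_threads() call both reports the count and allocates a thread list that must be handed back via vm_deallocate. A hedged usage sketch, assuming testing::internal::GetThreadCount is in scope:

    #include <cstddef>
    #include <cstdio>

    void ReportThreads() {
      const size_t n = GetThreadCount();  // 0 means "unknown", not "no threads"
      if (n > 0)
        std::printf("detected %u thread(s)\n", static_cast<unsigned>(n));
      else
        std::printf("thread count not detectable on this platform\n");
    }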
+
+#if GTEST_USES_POSIX_RE
// Implements RE. Currently only needed for death tests.
RE::~RE() {
- regfree(&partial_regex_);
- regfree(&full_regex_);
+ if (is_valid_) {
+ // regfree'ing an invalid regex might crash because the content
+ // of the regex is undefined. Since the regexes are essentially
+ // the same, one cannot be valid (or invalid) without the other
+ // being so too.
+ regfree(&partial_regex_);
+ regfree(&full_regex_);
+ }
free(const_cast<char*>(pattern_));
}
@@ -80,7 +141,7 @@ bool RE::PartialMatch(const char* str, const RE& re) {
// Initializes an RE from its string representation.
void RE::Init(const char* regex) {
- pattern_ = strdup(regex);
+ pattern_ = posix::StrDup(regex);
// Reserves enough bytes to hold the regular expression used for a
// full match.
@@ -93,7 +154,14 @@ void RE::Init(const char* regex) {
// previous expression returns false. Otherwise partial_regex_ may
// not be properly initialized and may cause trouble when it's
// freed.
- is_valid_ = (regcomp(&partial_regex_, regex, REG_EXTENDED) == 0) && is_valid_;
+ //
+ // Some implementations of POSIX regex (e.g. on at least some
+ // versions of Cygwin) don't accept the empty string as a valid
+ // regex. We change it to an equivalent form "()" to be safe.
+ if (is_valid_) {
+ const char* const partial_regex = (*regex == '\0') ? "()" : regex;
+ is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0;
+ }
EXPECT_TRUE(is_valid_)
<< "Regular expression \"" << regex
<< "\" is not a valid POSIX Extended regular expression.";
@@ -101,77 +169,362 @@ void RE::Init(const char* regex) {
delete[] full_pattern;
}
-#endif // GTEST_HAS_DEATH_TEST
+#elif GTEST_USES_SIMPLE_RE
+
+// Returns true iff ch appears anywhere in str (excluding the
+// terminating '\0' character).
+bool IsInSet(char ch, const char* str) {
+ return ch != '\0' && strchr(str, ch) != NULL;
+}
+
+// Returns true iff ch belongs to the given classification. Unlike
+// similar functions in <ctype.h>, these aren't affected by the
+// current locale.
+bool IsDigit(char ch) { return '0' <= ch && ch <= '9'; }
+bool IsPunct(char ch) {
+ return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~");
+}
+bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
+bool IsWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
+bool IsWordChar(char ch) {
+ return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') ||
+ ('0' <= ch && ch <= '9') || ch == '_';
+}
+
+// Returns true iff "\\c" is a supported escape sequence.
+bool IsValidEscape(char c) {
+ return (IsPunct(c) || IsInSet(c, "dDfnrsStvwW"));
+}
+
+// Returns true iff the given atom (specified by escaped and pattern)
+// matches ch. The result is undefined if the atom is invalid.
+bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
+ if (escaped) { // "\\p" where p is pattern_char.
+ switch (pattern_char) {
+ case 'd': return IsDigit(ch);
+ case 'D': return !IsDigit(ch);
+ case 'f': return ch == '\f';
+ case 'n': return ch == '\n';
+ case 'r': return ch == '\r';
+ case 's': return IsWhiteSpace(ch);
+ case 'S': return !IsWhiteSpace(ch);
+ case 't': return ch == '\t';
+ case 'v': return ch == '\v';
+ case 'w': return IsWordChar(ch);
+ case 'W': return !IsWordChar(ch);
+ }
+ return IsPunct(pattern_char) && pattern_char == ch;
+ }
+
+ return (pattern_char == '.' && ch != '\n') || pattern_char == ch;
+}
+
+// Helper function used by ValidateRegex() to format error messages.
+String FormatRegexSyntaxError(const char* regex, int index) {
+ return (Message() << "Syntax error at index " << index
+ << " in simple regular expression \"" << regex << "\": ").GetString();
+}
+
+// Generates non-fatal failures and returns false if regex is invalid;
+// otherwise returns true.
+bool ValidateRegex(const char* regex) {
+ if (regex == NULL) {
+ // TODO(wan@google.com): fix the source file location in the
+ // assertion failures to match where the regex is used in user
+ // code.
+ ADD_FAILURE() << "NULL is not a valid simple regular expression.";
+ return false;
+ }
+
+ bool is_valid = true;
+
+ // True iff ?, *, or + can follow the previous atom.
+ bool prev_repeatable = false;
+ for (int i = 0; regex[i]; i++) {
+ if (regex[i] == '\\') { // An escape sequence
+ i++;
+ if (regex[i] == '\0') {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+ << "'\\' cannot appear at the end.";
+ return false;
+ }
+
+ if (!IsValidEscape(regex[i])) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+ << "invalid escape sequence \"\\" << regex[i] << "\".";
+ is_valid = false;
+ }
+ prev_repeatable = true;
+ } else { // Not an escape sequence.
+ const char ch = regex[i];
+
+ if (ch == '^' && i > 0) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'^' can only appear at the beginning.";
+ is_valid = false;
+ } else if (ch == '$' && regex[i + 1] != '\0') {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'$' can only appear at the end.";
+ is_valid = false;
+ } else if (IsInSet(ch, "()[]{}|")) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'" << ch << "' is unsupported.";
+ is_valid = false;
+ } else if (IsRepeat(ch) && !prev_repeatable) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'" << ch << "' can only follow a repeatable token.";
+ is_valid = false;
+ }
+
+ prev_repeatable = !IsInSet(ch, "^$?*+");
+ }
+ }
+
+ return is_valid;
+}
+
+// Matches a repeated regex atom followed by a valid simple regular
+// expression. The regex atom is defined as c if escaped is false,
+// or \c otherwise. repeat is the repetition meta character (?, *,
+// or +). The behavior is undefined if str contains too many
+// characters to be indexable by size_t, in which case the test will
+// probably time out anyway. We are fine with this limitation as
+// std::string has it too.
+bool MatchRepetitionAndRegexAtHead(
+ bool escaped, char c, char repeat, const char* regex,
+ const char* str) {
+ const size_t min_count = (repeat == '+') ? 1 : 0;
+ const size_t max_count = (repeat == '?') ? 1 :
+ static_cast<size_t>(-1) - 1;
+ // We cannot call numeric_limits::max() as it conflicts with the
+ // max() macro on Windows.
+
+ for (size_t i = 0; i <= max_count; ++i) {
+ // We know that the atom matches each of the first i characters in str.
+ if (i >= min_count && MatchRegexAtHead(regex, str + i)) {
+ // We have enough matches at the head, and the tail matches too.
+ // Since we only care about *whether* the pattern matches str
+ // (as opposed to *how* it matches), there is no need to find a
+ // greedy match.
+ return true;
+ }
+ if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i]))
+ return false;
+ }
+ return false;
+}
+
+// Returns true iff regex matches a prefix of str. regex must be a
+// valid simple regular expression and not start with "^", or the
+// result is undefined.
+bool MatchRegexAtHead(const char* regex, const char* str) {
+ if (*regex == '\0') // An empty regex matches a prefix of anything.
+ return true;
+
+ // "$" only matches the end of a string. Note that regex being
+ // valid guarantees that there's nothing after "$" in it.
+ if (*regex == '$')
+ return *str == '\0';
+
+ // Is the first thing in regex an escape sequence?
+ const bool escaped = *regex == '\\';
+ if (escaped)
+ ++regex;
+ if (IsRepeat(regex[1])) {
+ // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so
+ // here's an indirect recursion. It terminates as the regex gets
+ // shorter in each recursion.
+ return MatchRepetitionAndRegexAtHead(
+ escaped, regex[0], regex[1], regex + 2, str);
+ } else {
+ // regex isn't empty, isn't "$", and doesn't start with a
+ // repetition. We match the first atom of regex with the first
+ // character of str and recurse.
+ return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) &&
+ MatchRegexAtHead(regex + 1, str + 1);
+ }
+}
+
+// Returns true iff regex matches any substring of str. regex must be
+// a valid simple regular expression, or the result is undefined.
+//
+// The algorithm is recursive, but the recursion depth doesn't exceed
+// the regex length, so we won't need to worry about running out of
+// stack space normally. In rare cases the time complexity can be
+// exponential with respect to the regex length + the string length,
+// but usually it's much faster (often close to linear).
+bool MatchRegexAnywhere(const char* regex, const char* str) {
+ if (regex == NULL || str == NULL)
+ return false;
+
+ if (*regex == '^')
+ return MatchRegexAtHead(regex + 1, str);
+
+ // A successful match can be anywhere in str.
+ do {
+ if (MatchRegexAtHead(regex, str))
+ return true;
+ } while (*str++ != '\0');
+ return false;
+}
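
Taken together, ValidateRegex and the Match* helpers implement a deliberately tiny dialect: single-character atoms plus the \d/\w/\s-style classes, ?/*/+ repetition, and ^/$ anchors, with grouping and alternation rejected up front. Assuming the internal helpers above are in scope, the matcher behaves like this (a hedged sketch, not a gtest test):

    #include <cassert>

    void SimpleReDemo() {
      assert( MatchRegexAnywhere("^ab", "abc"));   // '^' pins the match to index 0
      assert( MatchRegexAnywhere("b+c$", "abc"));  // repetition plus end anchor
      assert(!MatchRegexAnywhere("^b", "abc"));    // anchored, so 'b' at index 1 fails
      assert( MatchRegexAnywhere("\\d", "a1b"));   // escape classes, unanchored
    }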
+
+// Implements the RE class.
+
+RE::~RE() {
+ free(const_cast<char*>(pattern_));
+ free(const_cast<char*>(full_pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+ return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str);
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+ return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str);
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+ pattern_ = full_pattern_ = NULL;
+ if (regex != NULL) {
+ pattern_ = posix::StrDup(regex);
+ }
+
+ is_valid_ = ValidateRegex(regex);
+ if (!is_valid_) {
+ // No need to calculate the full pattern when the regex is invalid.
+ return;
+ }
+
+ const size_t len = strlen(regex);
+ // Reserves enough bytes to hold the regular expression used for a
+ // full match: we need space to prepend a '^', append a '$', and
+ // terminate the string with '\0'.
+ char* buffer = static_cast<char*>(malloc(len + 3));
+ full_pattern_ = buffer;
+
+ if (*regex != '^')
+ *buffer++ = '^'; // Makes sure full_pattern_ starts with '^'.
-// Logs a message at the given severity level.
-void GTestLog(GTestLogSeverity severity, const char* file,
- int line, const char* msg) {
+ // We don't use snprintf or strncpy, as they trigger a warning when
+ // compiled with VC++ 8.0.
+ memcpy(buffer, regex, len);
+ buffer += len;
+
+ if (len == 0 || regex[len - 1] != '$')
+ *buffer++ = '$'; // Makes sure full_pattern_ ends with '$'.
+
+ *buffer = '\0';
+}
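
In the simple-RE build, Init keeps the user's pattern for PartialMatch and synthesizes an anchored copy for FullMatch, adding '^' and '$' only when not already present (hence the len + 3 allocation: one byte each for '^', '$', and the terminator). Assuming RE's usual converting constructor, the observable effect is:

    #include <cassert>

    void FullVsPartialDemo() {
      const RE re("a\\d+");                    // full pattern becomes "^a\\d+$"
      assert( RE::FullMatch("a123", re));      // the whole string must match
      assert(!RE::FullMatch("xa123", re));
      assert( RE::PartialMatch("xa123", re));  // any substring may match
    }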
+
+#endif // GTEST_USES_POSIX_RE
+
+
+GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line)
+ : severity_(severity) {
const char* const marker =
severity == GTEST_INFO ? "[ INFO ]" :
severity == GTEST_WARNING ? "[WARNING]" :
severity == GTEST_ERROR ? "[ ERROR ]" : "[ FATAL ]";
- fprintf(stderr, "\n%s %s:%d: %s\n", marker, file, line, msg);
- if (severity == GTEST_FATAL) {
- abort();
- }
+ GetStream() << ::std::endl << marker << " "
+ << FormatFileLocation(file, line).c_str() << ": ";
}
-#ifdef GTEST_HAS_DEATH_TEST
+// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+GTestLog::~GTestLog() {
+ GetStream() << ::std::endl;
+ if (severity_ == GTEST_FATAL) {
+ fflush(stderr);
+ posix::Abort();
+ }
+}
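
GTestLog changes from a plain logging function into an RAII streamer: the constructor writes the severity marker and file location, the caller streams the message, and the destructor appends the newline and aborts on GTEST_FATAL. Assuming the usual GTEST_LOG_ macro wraps it (as the CaptureStream code below uses it), usage looks like:

    void WarnDemo(int index) {
      GTEST_LOG_(WARNING) << "shard index " << index << " is out of range";
      // On GTEST_FATAL the GTestLog destructor flushes and calls posix::Abort().
    }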
+// Disable Microsoft deprecation warnings for POSIX functions called from
+// this class (creat, dup, dup2, and close)
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable: 4996)
+#endif // _MSC_VER
-// Defines the stderr capturer.
+#if GTEST_HAS_STREAM_REDIRECTION_
-class CapturedStderr {
+// Object that captures an output stream (stdout/stderr).
+class CapturedStream {
public:
- // The ctor redirects stderr to a temporary file.
- CapturedStderr() {
- uncaptured_fd_ = dup(STDERR_FILENO);
-
+ // The ctor redirects the stream to a temporary file.
+ CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
+#if GTEST_OS_WINDOWS
+ char temp_dir_path[MAX_PATH + 1] = { '\0' }; // NOLINT
+ char temp_file_path[MAX_PATH + 1] = { '\0' }; // NOLINT
+
+ ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path);
+ const UINT success = ::GetTempFileNameA(temp_dir_path,
+ "gtest_redir",
+ 0, // Generate unique file name.
+ temp_file_path);
+ GTEST_CHECK_(success != 0)
+ << "Unable to create a temporary file in " << temp_dir_path;
+ const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE);
+ GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file "
+ << temp_file_path;
+ filename_ = temp_file_path;
+#else
// There's no guarantee that a test has write access to the
// current directory, so we create the temporary file in the /tmp
// directory instead.
- char name_template[] = "/tmp/captured_stderr.XXXXXX";
+ char name_template[] = "/tmp/captured_stream.XXXXXX";
const int captured_fd = mkstemp(name_template);
filename_ = name_template;
+#endif // GTEST_OS_WINDOWS
fflush(NULL);
- dup2(captured_fd, STDERR_FILENO);
+ dup2(captured_fd, fd_);
close(captured_fd);
}
- ~CapturedStderr() {
+ ~CapturedStream() {
remove(filename_.c_str());
}
- // Stops redirecting stderr.
- void StopCapture() {
- // Restores the original stream.
- fflush(NULL);
- dup2(uncaptured_fd_, STDERR_FILENO);
- close(uncaptured_fd_);
- uncaptured_fd_ = -1;
+ String GetCapturedString() {
+ if (uncaptured_fd_ != -1) {
+ // Restores the original stream.
+ fflush(NULL);
+ dup2(uncaptured_fd_, fd_);
+ close(uncaptured_fd_);
+ uncaptured_fd_ = -1;
+ }
+
+ FILE* const file = posix::FOpen(filename_.c_str(), "r");
+ const String content = ReadEntireFile(file);
+ posix::FClose(file);
+ return content;
}
- // Returns the name of the temporary file holding the stderr output.
- // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
- // can use it here.
- ::std::string filename() const { return filename_; }
-
private:
+ // Reads the entire content of a file as a String.
+ static String ReadEntireFile(FILE* file);
+
+ // Returns the size (in bytes) of a file.
+ static size_t GetFileSize(FILE* file);
+
+ const int fd_; // A stream to capture.
int uncaptured_fd_;
+ // Name of the temporary file holding the captured output.
::std::string filename_;
-};
-static CapturedStderr* g_captured_stderr = NULL;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream);
+};
// Returns the size (in bytes) of a file.
-static size_t GetFileSize(FILE * file) {
+size_t CapturedStream::GetFileSize(FILE* file) {
fseek(file, 0, SEEK_END);
return static_cast<size_t>(ftell(file));
}
// Reads the entire content of a file as a string.
-// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can
-// use it here.
-static ::std::string ReadEntireFile(FILE * file) {
+String CapturedStream::ReadEntireFile(FILE* file) {
const size_t file_size = GetFileSize(file);
char* const buffer = new char[file_size];
@@ -187,35 +540,58 @@ static ::std::string ReadEntireFile(FILE * file) {
bytes_read += bytes_last_read;
} while (bytes_last_read > 0 && bytes_read < file_size);
- const ::std::string content(buffer, buffer+bytes_read);
+ const String content(buffer, bytes_read);
delete[] buffer;
return content;
}
-// Starts capturing stderr.
-void CaptureStderr() {
- if (g_captured_stderr != NULL) {
- GTEST_LOG_(FATAL, "Only one stderr capturer can exist at one time.");
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif // _MSC_VER
+
+static CapturedStream* g_captured_stderr = NULL;
+static CapturedStream* g_captured_stdout = NULL;
+
+// Starts capturing an output stream (stdout/stderr).
+void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) {
+ if (*stream != NULL) {
+ GTEST_LOG_(FATAL) << "Only one " << stream_name
+ << " capturer can exist at a time.";
}
- g_captured_stderr = new CapturedStderr;
+ *stream = new CapturedStream(fd);
}
-// Stops capturing stderr and returns the captured string.
-// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can
-// use it here.
-::std::string GetCapturedStderr() {
- g_captured_stderr->StopCapture();
- FILE* const file = fopen(g_captured_stderr->filename().c_str(), "r");
- const ::std::string content = ReadEntireFile(file);
- fclose(file);
+// Stops capturing the output stream and returns the captured string.
+String GetCapturedStream(CapturedStream** captured_stream) {
+ const String content = (*captured_stream)->GetCapturedString();
- delete g_captured_stderr;
- g_captured_stderr = NULL;
+ delete *captured_stream;
+ *captured_stream = NULL;
return content;
}
+// Starts capturing stdout.
+void CaptureStdout() {
+ CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout);
+}
+
+// Starts capturing stderr.
+void CaptureStderr() {
+ CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr);
+}
+
+// Stops capturing stdout and returns the captured string.
+String GetCapturedStdout() { return GetCapturedStream(&g_captured_stdout); }
+
+// Stops capturing stderr and returns the captured string.
+String GetCapturedStderr() { return GetCapturedStream(&g_captured_stderr); }
+
+#endif // GTEST_HAS_STREAM_REDIRECTION_
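
The capture machinery is now symmetric across stdout and stderr and buffers through a temporary file rather than memory, so it also catches output written through the raw descriptor. A hedged round-trip, assuming the internal functions above are in scope:

    #include <cstdio>

    void CaptureDemo() {
      CaptureStdout();                         // redirects fd 1 to a temp file
      std::printf("hello\n");
      const String out = GetCapturedStdout();  // restores fd 1, returns "hello\n"
      (void)out;
    }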
+
+#if GTEST_HAS_DEATH_TEST
+
// A copy of all command line arguments. Set by InitGoogleTest().
::std::vector<String> g_argvs;
@@ -224,38 +600,30 @@ const ::std::vector<String>& GetArgvs() { return g_argvs; }
#endif // GTEST_HAS_DEATH_TEST
-#ifdef _WIN32_WCE
-void abort() {
+#if GTEST_OS_WINDOWS_MOBILE
+namespace posix {
+void Abort() {
DebugBreak();
TerminateProcess(GetCurrentProcess(), 1);
}
-#endif // _WIN32_WCE
+} // namespace posix
+#endif // GTEST_OS_WINDOWS_MOBILE
// Returns the name of the environment variable corresponding to the
// given flag. For example, FlagToEnvVar("foo") will return
// "GTEST_FOO" in the open-source version.
static String FlagToEnvVar(const char* flag) {
- const String full_flag = (Message() << GTEST_FLAG_PREFIX << flag).GetString();
+ const String full_flag =
+ (Message() << GTEST_FLAG_PREFIX_ << flag).GetString();
Message env_var;
- for (int i = 0; i != full_flag.GetLength(); i++) {
+ for (size_t i = 0; i != full_flag.length(); i++) {
env_var << static_cast<char>(toupper(full_flag.c_str()[i]));
}
return env_var.GetString();
}
-// Reads and returns the Boolean environment variable corresponding to
-// the given flag; if it's not set, returns default_value.
-//
-// The value is considered true iff it's not "0".
-bool BoolFromGTestEnv(const char* flag, bool default_value) {
- const String env_var = FlagToEnvVar(flag);
- const char* const string_value = GetEnv(env_var.c_str());
- return string_value == NULL ?
- default_value : strcmp(string_value, "0") != 0;
-}
-
// Parses 'str' for a 32-bit signed integer. If successful, writes
// the result to *value and returns true; otherwise leaves *value
// unchanged and returns false.
@@ -297,12 +665,23 @@ bool ParseInt32(const Message& src_text, const char* str, Int32* value) {
return true;
}
+// Reads and returns the Boolean environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+//
+// The value is considered true iff it's not "0".
+bool BoolFromGTestEnv(const char* flag, bool default_value) {
+ const String env_var = FlagToEnvVar(flag);
+ const char* const string_value = posix::GetEnv(env_var.c_str());
+ return string_value == NULL ?
+ default_value : strcmp(string_value, "0") != 0;
+}
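
Moving BoolFromGTestEnv below ParseInt32 is an ordering change only; the semantics stay put: FlagToEnvVar upper-cases the prefixed flag name (with GTEST_FLAG_PREFIX_ of "gtest_", "shuffle" maps to "GTEST_SHUFFLE"), and any value other than the literal "0" reads as true. Hedged, on a POSIX host:

    #include <cassert>
    #include <cstdlib>

    void BoolEnvDemo() {
      setenv("GTEST_SHUFFLE", "yes", 1);        // any non-"0" string is true
      assert(BoolFromGTestEnv("shuffle", false));
      setenv("GTEST_SHUFFLE", "0", 1);          // only "0" is false
      assert(!BoolFromGTestEnv("shuffle", true));
      unsetenv("GTEST_SHUFFLE");                // unset falls back to the default
      assert(BoolFromGTestEnv("shuffle", true));
    }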
+
// Reads and returns a 32-bit integer stored in the environment
// variable corresponding to the given flag; if it isn't set or
// doesn't represent a valid 32-bit integer, returns default_value.
Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
const String env_var = FlagToEnvVar(flag);
- const char* const string_value = GetEnv(env_var.c_str());
+ const char* const string_value = posix::GetEnv(env_var.c_str());
if (string_value == NULL) {
// The environment variable is not set.
return default_value;
@@ -324,7 +703,7 @@ Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
// the given flag; if it's not set, returns default_value.
const char* StringFromGTestEnv(const char* flag, const char* default_value) {
const String env_var = FlagToEnvVar(flag);
- const char* const value = GetEnv(env_var.c_str());
+ const char* const value = posix::GetEnv(env_var.c_str());
return value == NULL ? default_value : value;
}
diff --git a/contrib/llvm/utils/unittest/googletest/gtest-test-part.cc b/contrib/llvm/utils/unittest/googletest/gtest-test-part.cc
index 2e80f21..8249afe 100644
--- a/contrib/llvm/utils/unittest/googletest/gtest-test-part.cc
+++ b/contrib/llvm/utils/unittest/googletest/gtest-test-part.cc
@@ -38,12 +38,14 @@
// included, or there will be a compiler error. This trick is to
// prevent a user from accidentally including gtest-internal-inl.h in
// his code.
-#define GTEST_IMPLEMENTATION
+#define GTEST_IMPLEMENTATION_ 1
#include "gtest/internal/gtest-internal-inl.h"
-#undef GTEST_IMPLEMENTATION
+#undef GTEST_IMPLEMENTATION_
namespace testing {
+using internal::GetUnitTestImpl;
+
// Gets the summary of the failure message by omitting the stack trace
// in it.
internal::String TestPartResult::ExtractSummary(const char* message) {
@@ -54,61 +56,45 @@ internal::String TestPartResult::ExtractSummary(const char* message) {
// Prints a TestPartResult object.
std::ostream& operator<<(std::ostream& os, const TestPartResult& result) {
- return os << result.file_name() << ":"
- << result.line_number() << ": "
- << (result.type() == TPRT_SUCCESS ? "Success" :
- result.type() == TPRT_FATAL_FAILURE ? "Fatal failure" :
- "Non-fatal failure") << ":\n"
- << result.message() << std::endl;
-}
-
-// Constructs an empty TestPartResultArray.
-TestPartResultArray::TestPartResultArray()
- : list_(new internal::List<TestPartResult>) {
-}
-
-// Destructs a TestPartResultArray.
-TestPartResultArray::~TestPartResultArray() {
- delete list_;
+ return os
+ << result.file_name() << ":" << result.line_number() << ": "
+ << (result.type() == TestPartResult::kSuccess ? "Success" :
+ result.type() == TestPartResult::kFatalFailure ? "Fatal failure" :
+ "Non-fatal failure") << ":\n"
+ << result.message() << std::endl;
}
// Appends a TestPartResult to the array.
void TestPartResultArray::Append(const TestPartResult& result) {
- list_->PushBack(result);
+ array_.push_back(result);
}
// Returns the TestPartResult at the given index (0-based).
const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const {
if (index < 0 || index >= size()) {
printf("\nInvalid index (%d) into TestPartResultArray.\n", index);
- internal::abort();
- }
-
- const internal::ListNode<TestPartResult>* p = list_->Head();
- for (int i = 0; i < index; i++) {
- p = p->next();
+ internal::posix::Abort();
}
- return p->element();
+ return array_[index];
}
// Returns the number of TestPartResult objects in the array.
int TestPartResultArray::size() const {
- return list_->size();
+ return static_cast<int>(array_.size());
}
namespace internal {
HasNewFatalFailureHelper::HasNewFatalFailureHelper()
: has_new_fatal_failure_(false),
- original_reporter_(UnitTest::GetInstance()->impl()->
+ original_reporter_(GetUnitTestImpl()->
GetTestPartResultReporterForCurrentThread()) {
- UnitTest::GetInstance()->impl()->SetTestPartResultReporterForCurrentThread(
- this);
+ GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this);
}
HasNewFatalFailureHelper::~HasNewFatalFailureHelper() {
- UnitTest::GetInstance()->impl()->SetTestPartResultReporterForCurrentThread(
+ GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(
original_reporter_);
}
diff --git a/contrib/llvm/utils/unittest/googletest/gtest-typed-test.cc b/contrib/llvm/utils/unittest/googletest/gtest-typed-test.cc
index d42a159..3cc4b5d 100644
--- a/contrib/llvm/utils/unittest/googletest/gtest-typed-test.cc
+++ b/contrib/llvm/utils/unittest/googletest/gtest-typed-test.cc
@@ -35,7 +35,15 @@
namespace testing {
namespace internal {
-#ifdef GTEST_HAS_TYPED_TEST_P
+#if GTEST_HAS_TYPED_TEST_P
+
+// Skips to the first non-space char in str. Returns an empty string if str
+// contains only whitespace characters.
+static const char* SkipSpaces(const char* str) {
+ while (isspace(*str))
+ str++;
+ return str;
+}
// Verifies that registered_tests match the test names in
// defined_test_names_; returns registered_tests if successful, or
@@ -45,6 +53,10 @@ const char* TypedTestCasePState::VerifyRegisteredTestNames(
typedef ::std::set<const char*>::const_iterator DefinedTestIter;
registered_ = true;
+ // Skip initial whitespace in registered_tests since some
+ // preprocessors prefix stringized literals with whitespace.
+ registered_tests = SkipSpaces(registered_tests);
+
Message errors;
::std::set<String> tests;
for (const char* names = registered_tests; names != NULL;
@@ -85,7 +97,8 @@ const char* TypedTestCasePState::VerifyRegisteredTestNames(
if (errors_str != "") {
fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
errors_str.c_str());
- abort();
+ fflush(stderr);
+ posix::Abort();
}
return registered_tests;
diff --git a/contrib/llvm/utils/unittest/googletest/gtest.cc b/contrib/llvm/utils/unittest/googletest/gtest.cc
index b5a654f..aa2d5bb 100644
--- a/contrib/llvm/utils/unittest/googletest/gtest.cc
+++ b/contrib/llvm/utils/unittest/googletest/gtest.cc
@@ -39,15 +39,19 @@
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
-#include <string.h>
#include <wchar.h>
#include <wctype.h>
-#ifdef GTEST_OS_LINUX
+#include <algorithm>
+#include <ostream>
+#include <sstream>
+#include <vector>
+
+#if GTEST_OS_LINUX
// TODO(kenton@google.com): Use autoconf to detect availability of
// gettimeofday().
-#define GTEST_HAS_GETTIMEOFDAY
+#define GTEST_HAS_GETTIMEOFDAY_ 1
#include <fcntl.h>
#include <limits.h>
@@ -60,38 +64,38 @@
#include <string>
#include <vector>
-#elif defined(GTEST_OS_SYMBIAN)
-#define GTEST_HAS_GETTIMEOFDAY
+#elif GTEST_OS_SYMBIAN
+#define GTEST_HAS_GETTIMEOFDAY_ 1
#include <sys/time.h> // NOLINT
-#elif defined(GTEST_OS_ZOS)
-#define GTEST_HAS_GETTIMEOFDAY
+#elif GTEST_OS_ZOS
+#define GTEST_HAS_GETTIMEOFDAY_ 1
#include <sys/time.h> // NOLINT
// On z/OS we additionally need strings.h for strcasecmp.
-#include <strings.h>
+#include <strings.h> // NOLINT
-#elif defined(_WIN32_WCE) // We are on Windows CE.
+#elif GTEST_OS_WINDOWS_MOBILE // We are on Windows CE.
#include <windows.h> // NOLINT
-#elif defined(GTEST_OS_WINDOWS) // We are on Windows proper.
+#elif GTEST_OS_WINDOWS // We are on Windows proper.
#include <io.h> // NOLINT
#include <sys/timeb.h> // NOLINT
#include <sys/types.h> // NOLINT
#include <sys/stat.h> // NOLINT
-#if defined(__MINGW__) || defined(__MINGW32__)
+#if GTEST_OS_WINDOWS_MINGW
// MinGW has gettimeofday() but not _ftime64().
// TODO(kenton@google.com): Use autoconf to detect availability of
// gettimeofday().
// TODO(kenton@google.com): There are other ways to get the time on
// Windows, like GetTickCount() or GetSystemTimeAsFileTime(). MinGW
// supports these. Consider using them instead.
-#define GTEST_HAS_GETTIMEOFDAY
+#define GTEST_HAS_GETTIMEOFDAY_ 1
#include <sys/time.h> // NOLINT
-#endif
+#endif // GTEST_OS_WINDOWS_MINGW
// cpplint thinks that the header is already included, so we want to
// silence it.
@@ -102,13 +106,17 @@
// Assume other platforms have gettimeofday().
// TODO(kenton@google.com): Use autoconf to detect availability of
// gettimeofday().
-#define GTEST_HAS_GETTIMEOFDAY
+#define GTEST_HAS_GETTIMEOFDAY_ 1
// cpplint thinks that the header is already included, so we want to
// silence it.
#include <sys/time.h> // NOLINT
#include <unistd.h> // NOLINT
+#endif // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+#include <stdexcept>
#endif
// Indicates that this translation unit is part of Google Test's
@@ -116,18 +124,21 @@
// included, or there will be a compiler error. This trick is to
// prevent a user from accidentally including gtest-internal-inl.h in
// his code.
-#define GTEST_IMPLEMENTATION
+#define GTEST_IMPLEMENTATION_ 1
#include "gtest/internal/gtest-internal-inl.h"
-#undef GTEST_IMPLEMENTATION
+#undef GTEST_IMPLEMENTATION_
-#ifdef GTEST_OS_WINDOWS
-#define fileno _fileno
-#define isatty _isatty
+#if GTEST_OS_WINDOWS
#define vsnprintf _vsnprintf
#endif // GTEST_OS_WINDOWS
namespace testing {
+using internal::CountIf;
+using internal::ForEach;
+using internal::GetElementOr;
+using internal::Shuffle;
+
// Constants.
// A test whose test case name or test name matches this filter is
@@ -145,15 +156,31 @@ static const char kUniversalFilter[] = "*";
// The default output file for XML output.
static const char kDefaultOutputFile[] = "test_detail.xml";
+// The environment variable name for the test shard index.
+static const char kTestShardIndex[] = "GTEST_SHARD_INDEX";
+// The environment variable name for the total number of test shards.
+static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
+// The environment variable name for the test shard status file.
+static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE";
+
namespace internal {
// The text used in failure messages to indicate the start of the
// stack trace.
const char kStackTraceMarker[] = "\nStack trace:\n";
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+bool g_help_flag = false;
+
} // namespace internal
GTEST_DEFINE_bool_(
+ also_run_disabled_tests,
+ internal::BoolFromGTestEnv("also_run_disabled_tests", false),
+ "Run disabled tests too, in addition to the tests normally being run.");
+
+GTEST_DEFINE_bool_(
break_on_failure,
internal::BoolFromGTestEnv("break_on_failure", false),
"True iff a failed assertion should be a debugger break-point.");
@@ -161,7 +188,7 @@ GTEST_DEFINE_bool_(
GTEST_DEFINE_bool_(
catch_exceptions,
internal::BoolFromGTestEnv("catch_exceptions", false),
- "True iff " GTEST_NAME
+ "True iff " GTEST_NAME_
" should catch exceptions and treat them as test failures.");
GTEST_DEFINE_string_(
@@ -170,7 +197,7 @@ GTEST_DEFINE_string_(
"Whether to use colors in the output. Valid values: yes, no, "
"and auto. 'auto' means to use colors if the output is "
"being sent to a terminal and the TERM environment variable "
- "is set to xterm or xterm-color.");
+ "is set to xterm, xterm-color, xterm-256color, linux or cygwin.");
GTEST_DEFINE_string_(
filter,
@@ -198,29 +225,67 @@ GTEST_DEFINE_string_(
GTEST_DEFINE_bool_(
print_time,
- internal::BoolFromGTestEnv("print_time", false),
- "True iff " GTEST_NAME
+ internal::BoolFromGTestEnv("print_time", true),
+ "True iff " GTEST_NAME_
" should display elapsed time in text output.");
GTEST_DEFINE_int32_(
+ random_seed,
+ internal::Int32FromGTestEnv("random_seed", 0),
+ "Random number seed to use when shuffling test orders. Must be in range "
+ "[1, 99999], or 0 to use a seed based on the current time.");
+
+GTEST_DEFINE_int32_(
repeat,
internal::Int32FromGTestEnv("repeat", 1),
"How many times to repeat each test. Specify a negative number "
"for repeating forever. Useful for shaking out flaky tests.");
+GTEST_DEFINE_bool_(
+ show_internal_stack_frames, false,
+ "True iff " GTEST_NAME_ " should include internal stack frames when "
+ "printing test failure stack traces.");
+
+GTEST_DEFINE_bool_(
+ shuffle,
+ internal::BoolFromGTestEnv("shuffle", false),
+ "True iff " GTEST_NAME_
+ " should randomize tests' order on every run.");
+
GTEST_DEFINE_int32_(
stack_trace_depth,
- internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth),
+ internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth),
"The maximum number of stack frames to print when an "
"assertion fails. The valid range is 0 through 100, inclusive.");
GTEST_DEFINE_bool_(
- show_internal_stack_frames, false,
- "True iff " GTEST_NAME " should include internal stack frames when "
- "printing test failure stack traces.");
+ throw_on_failure,
+ internal::BoolFromGTestEnv("throw_on_failure", false),
+ "When this flag is specified, a failed assertion will throw an exception "
+ "if exceptions are enabled or exit the program with a non-zero code "
+ "otherwise.");
namespace internal {
+// Generates a random number from [0, range), using a Linear
+// Congruential Generator (LCG). Crashes if 'range' is 0 or greater
+// than kMaxRange.
+UInt32 Random::Generate(UInt32 range) {
+ // These constants are the same as are used in glibc's rand(3).
+ state_ = (1103515245U*state_ + 12345U) % kMaxRange;
+
+ GTEST_CHECK_(range > 0)
+ << "Cannot generate a number in the range [0, 0).";
+ GTEST_CHECK_(range <= kMaxRange)
+ << "Generation of a number in [0, " << range << ") was requested, "
+ << "but this can only generate numbers in [0, " << kMaxRange << ").";
+
+ // Converting via modulus introduces a bit of downward bias, but
+ // it's simple, and a linear congruential generator isn't too good
+ // to begin with.
+ return state_ % range;
+}
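
Random::Generate advances a linear congruential generator with glibc's rand(3) constants, reduced modulo kMaxRange (1u << 31 in gtest's header), then maps into [0, range) by a second modulus; the comment is candid that this adds a small downward bias. A standalone sketch of the same recurrence:

    #include <cstdint>

    // Sketch of the generator; kMaxRange mirrors gtest's 1u << 31.
    struct LcgSketch {
      std::uint32_t state;
      std::uint32_t Generate(std::uint32_t range) {  // requires 0 < range <= 2^31
        state = (1103515245u * state + 12345u) % (1u << 31);
        return state % range;                        // slight downward modulo bias
      }
    };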
+
// GTestIsInitialized() returns true iff the user has initialized
// Google Test. Useful for catching the user mistake of not initializing
// Google Test before calling RUN_ALL_TESTS().
@@ -232,16 +297,14 @@ namespace internal {
int g_init_gtest_count = 0;
static bool GTestIsInitialized() { return g_init_gtest_count != 0; }
-// Iterates over a list of TestCases, keeping a running sum of the
+// Iterates over a vector of TestCases, keeping a running sum of the
// results of calling a given int-returning method on each.
// Returns the sum.
-static int SumOverTestCaseList(const internal::List<TestCase*>& case_list,
+static int SumOverTestCaseList(const std::vector<TestCase*>& case_list,
int (TestCase::*method)() const) {
int sum = 0;
- for (const internal::ListNode<TestCase*>* node = case_list.Head();
- node != NULL;
- node = node->next()) {
- sum += (node->element()->*method)();
+ for (size_t i = 0; i < case_list.size(); i++) {
+ sum += (case_list[i]->*method)();
}
return sum;
}
@@ -263,16 +326,22 @@ static bool ShouldRunTestCase(const TestCase* test_case) {
}
// AssertHelper constructor.
-AssertHelper::AssertHelper(TestPartResultType type, const char* file,
- int line, const char* message)
- : type_(type), file_(file), line_(line), message_(message) {
+AssertHelper::AssertHelper(TestPartResult::Type type,
+ const char* file,
+ int line,
+ const char* message)
+ : data_(new AssertHelperData(type, file, line, message)) {
+}
+
+AssertHelper::~AssertHelper() {
+ delete data_;
}
// Message assignment, for assertion streaming support.
void AssertHelper::operator=(const Message& message) const {
UnitTest::GetInstance()->
- AddTestPartResult(type_, file_, line_,
- AppendUserMessage(message_, message),
+ AddTestPartResult(data_->type, data_->file, data_->line,
+ AppendUserMessage(data_->message, message),
UnitTest::GetInstance()->impl()
->CurrentOsStackTraceExceptTop(1)
// Skips the stack frame for this function itself.
@@ -280,7 +349,7 @@ void AssertHelper::operator=(const Message& message) const {
}
// Mutex for linked pointers.
-Mutex g_linked_ptr_mutex(Mutex::NO_CONSTRUCTOR_NEEDED_FOR_STATIC_MUTEX);
+GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex);
// Application pathname gotten in InitGoogleTest.
String g_executable_path;
@@ -290,11 +359,11 @@ String g_executable_path;
FilePath GetCurrentExecutableName() {
FilePath result;
-#if defined(_WIN32_WCE) || defined(GTEST_OS_WINDOWS)
+#if GTEST_OS_WINDOWS
result.Set(FilePath(g_executable_path).RemoveExtension("exe"));
#else
result.Set(FilePath(g_executable_path));
-#endif // _WIN32_WCE || GTEST_OS_WINDOWS
+#endif // GTEST_OS_WINDOWS
return result.RemoveDirectoryName();
}
@@ -314,16 +383,28 @@ String UnitTestOptions::GetOutputFormat() {
// Returns the name of the requested output file, or the default if none
// was explicitly specified.
-String UnitTestOptions::GetOutputFile() {
+String UnitTestOptions::GetAbsolutePathToOutputFile() {
const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
if (gtest_output_flag == NULL)
return String("");
const char* const colon = strchr(gtest_output_flag, ':');
if (colon == NULL)
- return String(kDefaultOutputFile);
+ return String(internal::FilePath::ConcatPaths(
+ internal::FilePath(
+ UnitTest::GetInstance()->original_working_dir()),
+ internal::FilePath(kDefaultOutputFile)).ToString());
internal::FilePath output_name(colon + 1);
+ if (!output_name.IsAbsolutePath())
+ // TODO(wan@google.com): on Windows \some\path is not an absolute
+ // path (as its meaning depends on the current drive), yet the
+ // following logic for turning it into an absolute path is wrong.
+ // Fix it.
+ output_name = internal::FilePath::ConcatPaths(
+ internal::FilePath(UnitTest::GetInstance()->original_working_dir()),
+ internal::FilePath(colon + 1));
+
if (!output_name.IsDirectory())
return output_name.ToString();
@@ -357,7 +438,7 @@ bool UnitTestOptions::PatternMatchesString(const char *pattern,
bool UnitTestOptions::MatchesFilter(const String& name, const char* filter) {
const char *cur_pattern = filter;
- while (true) {
+ for (;;) {
if (PatternMatchesString(cur_pattern, name.c_str())) {
return true;
}
@@ -395,7 +476,7 @@ bool UnitTestOptions::FilterMatchesTest(const String &test_case_name,
positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter
negative = String("");
} else {
- positive.Set(p, dash - p); // Everything up to the dash
+ positive = String(p, dash - p); // Everything up to the dash
negative = String(dash+1); // Everything after the dash
if (positive.empty()) {
// Treat '-test1' as the same as '*-test1'
@@ -409,7 +490,7 @@ bool UnitTestOptions::FilterMatchesTest(const String &test_case_name,
!MatchesFilter(full_name, negative.c_str()));
}
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
// This function is useful as an __except condition.
@@ -426,46 +507,6 @@ int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) {
} // namespace internal
-// The interface for printing the result of a UnitTest
-class UnitTestEventListenerInterface {
- public:
- // The d'tor is pure virtual as this is an abstract class.
- virtual ~UnitTestEventListenerInterface() = 0;
-
- // Called before the unit test starts.
- virtual void OnUnitTestStart(const UnitTest*) {}
-
- // Called after the unit test ends.
- virtual void OnUnitTestEnd(const UnitTest*) {}
-
- // Called before the test case starts.
- virtual void OnTestCaseStart(const TestCase*) {}
-
- // Called after the test case ends.
- virtual void OnTestCaseEnd(const TestCase*) {}
-
- // Called before the global set-up starts.
- virtual void OnGlobalSetUpStart(const UnitTest*) {}
-
- // Called after the global set-up ends.
- virtual void OnGlobalSetUpEnd(const UnitTest*) {}
-
- // Called before the global tear-down starts.
- virtual void OnGlobalTearDownStart(const UnitTest*) {}
-
- // Called after the global tear-down ends.
- virtual void OnGlobalTearDownEnd(const UnitTest*) {}
-
- // Called before the test starts.
- virtual void OnTestStart(const TestInfo*) {}
-
- // Called after the test ends.
- virtual void OnTestEnd(const TestInfo*) {}
-
- // Called after an assertion.
- virtual void OnNewTestPartResult(const TestPartResult*) {}
-};
-
// The c'tor sets this object as the test part result reporter used by
// Google Test. The 'result' parameter specifies where to report the
// results. Intercepts only failures from the current thread.
@@ -487,7 +528,7 @@ ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
}
void ScopedFakeTestPartResultReporter::Init() {
- internal::UnitTestImpl* const impl = UnitTest::GetInstance()->impl();
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
old_reporter_ = impl->GetGlobalTestPartResultReporter();
impl->SetGlobalTestPartResultReporter(this);
@@ -500,7 +541,7 @@ void ScopedFakeTestPartResultReporter::Init() {
// The d'tor restores the test part result reporter used by Google Test
// before.
ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() {
- internal::UnitTestImpl* const impl = UnitTest::GetInstance()->impl();
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
impl->SetGlobalTestPartResultReporter(old_reporter_);
} else {
@@ -541,11 +582,11 @@ AssertionResult HasOneFailure(const char* /* results_expr */,
const char* /* type_expr */,
const char* /* substr_expr */,
const TestPartResultArray& results,
- TestPartResultType type,
+ TestPartResult::Type type,
const char* substr) {
- const String expected(
- type == TPRT_FATAL_FAILURE ? "1 fatal failure" :
- "1 non-fatal failure");
+ const String expected(type == TestPartResult::kFatalFailure ?
+ "1 fatal failure" :
+ "1 non-fatal failure");
Message msg;
if (results.size() != 1) {
msg << "Expected: " << expected << "\n"
@@ -580,7 +621,7 @@ AssertionResult HasOneFailure(const char* /* results_expr */,
// substring the failure message should contain.
SingleFailureChecker::SingleFailureChecker(
const TestPartResultArray* results,
- TestPartResultType type,
+ TestPartResult::Type type,
const char* substr)
: results_(results),
type_(type),
@@ -600,7 +641,7 @@ DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter(
void DefaultGlobalTestPartResultReporter::ReportTestPartResult(
const TestPartResult& result) {
unit_test_->current_test_result()->AddTestPartResult(result);
- unit_test_->result_printer()->OnNewTestPartResult(&result);
+ unit_test_->listeners()->repeater()->OnTestPartResult(result);
}
DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter(
@@ -639,23 +680,23 @@ void UnitTestImpl::SetTestPartResultReporterForCurrentThread(
// Gets the number of successful test cases.
int UnitTestImpl::successful_test_case_count() const {
- return test_cases_.CountIf(TestCasePassed);
+ return CountIf(test_cases_, TestCasePassed);
}
// Gets the number of failed test cases.
int UnitTestImpl::failed_test_case_count() const {
- return test_cases_.CountIf(TestCaseFailed);
+ return CountIf(test_cases_, TestCaseFailed);
}
// Gets the number of all test cases.
int UnitTestImpl::total_test_case_count() const {
- return test_cases_.size();
+ return static_cast<int>(test_cases_.size());
}
// Gets the number of all test cases that contain at least one test
// that should run.
int UnitTestImpl::test_case_to_run_count() const {
- return test_cases_.CountIf(ShouldRunTestCase);
+ return CountIf(test_cases_, ShouldRunTestCase);
}
// Gets the number of successful tests.
@@ -698,11 +739,13 @@ String UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) {
return String("");
}
-static TimeInMillis GetTimeInMillis() {
-#ifdef _WIN32_WCE // We are on Windows CE
- // Difference between 1970-01-01 and 1601-01-01 in miliseconds.
+// Returns the current time in milliseconds.
+TimeInMillis GetTimeInMillis() {
+#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__)
+ // Difference between 1970-01-01 and 1601-01-01 in milliseconds.
// http://analogous.blogspot.com/2005/04/epoch.html
- const TimeInMillis kJavaEpochToWinFileTimeDelta = 11644473600000UL;
+ const TimeInMillis kJavaEpochToWinFileTimeDelta =
+ static_cast<TimeInMillis>(116444736UL) * 100000UL;
const DWORD kTenthMicrosInMilliSecond = 10000;
SYSTEMTIME now_systime;
@@ -719,7 +762,7 @@ static TimeInMillis GetTimeInMillis() {
return now_int64.QuadPart;
}
return 0;
-#elif defined(GTEST_OS_WINDOWS) && !defined(GTEST_HAS_GETTIMEOFDAY)
+#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_
__timeb64 now;
#ifdef _MSC_VER
// MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996
@@ -734,7 +777,7 @@ static TimeInMillis GetTimeInMillis() {
_ftime64(&now);
#endif // _MSC_VER
return static_cast<TimeInMillis>(now.time) * 1000 + now.millitm;
-#elif defined(GTEST_HAS_GETTIMEOFDAY)
+#elif GTEST_HAS_GETTIMEOFDAY_
struct timeval now;
gettimeofday(&now, NULL);
return static_cast<TimeInMillis>(now.tv_sec) * 1000 + now.tv_usec / 1000;
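
The rewritten epoch constant is the same value as before: 11644473600000 ms separate 1601-01-01 (the Win32 FILETIME epoch) from 1970-01-01 (the Unix epoch), but that literal overflows a 32-bit unsigned long, so the patch widens first and then multiplies. A quick check of the arithmetic (C++11 static_assert):

    static_assert(116444736LL * 100000LL == 11644473600000LL,
                  "FILETIME->Unix epoch delta across 369 years, in milliseconds");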
@@ -767,16 +810,7 @@ static char* CloneString(const char* str, size_t length) {
return NULL;
} else {
char* const clone = new char[length + 1];
- // MSVC 8 deprecates strncpy(), so we want to suppress warning
- // 4996 (deprecated function) there.
-#ifdef GTEST_OS_WINDOWS // We are on Windows.
-#pragma warning(push) // Saves the current warning state.
-#pragma warning(disable:4996) // Temporarily disables warning 4996.
- strncpy(clone, str, length);
-#pragma warning(pop) // Restores the warning state.
-#else // We are on Linux or Mac OS.
- strncpy(clone, str, length);
-#endif // GTEST_OS_WINDOWS
+ posix::StrNCpy(clone, str, length);
clone[length] = '\0';
return clone;
}
@@ -790,7 +824,7 @@ const char * String::CloneCString(const char* c_str) {
NULL : CloneString(c_str, strlen(c_str));
}
-#ifdef _WIN32_WCE
+#if GTEST_OS_WINDOWS_MOBILE
// Creates a UTF-16 wide string from the given ANSI string, allocating
// memory using new. The caller is responsible for deleting the return
// value using delete[]. Returns the wide string, or NULL if the
@@ -824,7 +858,7 @@ const char* String::Utf16ToAnsi(LPCWSTR utf16_str) {
return ansi;
}
-#endif // _WIN32_WCE
+#endif // GTEST_OS_WINDOWS_MOBILE
// Compares two C strings. Returns true iff they have the same content.
//
@@ -843,17 +877,17 @@ bool String::CStringEquals(const char * lhs, const char * rhs) {
// Converts an array of wide chars to a narrow string using the UTF-8
// encoding, and streams the result to the given Message object.
-static void StreamWideCharsToMessage(const wchar_t* wstr, size_t len,
+static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length,
Message* msg) {
// TODO(wan): consider allowing a testing::String object to
// contain '\0'. This will make it behave more like std::string,
// and will allow ToUtf8String() to return the correct encoding
// for '\0' s.t. we can get rid of the conditional here (and in
// several other places).
- for (size_t i = 0; i != len; ) { // NOLINT
+ for (size_t i = 0; i != length; ) { // NOLINT
if (wstr[i] != L'\0') {
- *msg << WideStringToUtf8(wstr + i, static_cast<int>(len - i));
- while (i != len && wstr[i] != L'\0')
+ *msg << WideStringToUtf8(wstr + i, static_cast<int>(length - i));
+ while (i != length && wstr[i] != L'\0')
i++;
} else {
*msg << '\0';
@@ -926,21 +960,37 @@ String FormatForFailureMessage(wchar_t wchar) {
} // namespace internal
-// AssertionResult constructor.
-AssertionResult::AssertionResult(const internal::String& failure_message)
- : failure_message_(failure_message) {
+// AssertionResult constructors.
+// Used in EXPECT_TRUE/FALSE(assertion_result).
+AssertionResult::AssertionResult(const AssertionResult& other)
+ : success_(other.success_),
+ message_(other.message_.get() != NULL ?
+ new internal::String(*other.message_) :
+ static_cast<internal::String*>(NULL)) {
}
+// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+AssertionResult AssertionResult::operator!() const {
+ AssertionResult negation(!success_);
+ if (message_.get() != NULL)
+ negation << *message_;
+ return negation;
+}
// Makes a successful assertion result.
AssertionResult AssertionSuccess() {
- return AssertionResult();
+ return AssertionResult(true);
}
+// Makes a failed assertion result.
+AssertionResult AssertionFailure() {
+ return AssertionResult(false);
+}
// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << message.
AssertionResult AssertionFailure(const Message& message) {
- return AssertionResult(message.GetString());
+ return AssertionFailure() << message;
}
namespace internal {
@@ -982,6 +1032,20 @@ AssertionResult EqFailure(const char* expected_expression,
return AssertionFailure(msg);
}
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+String GetBoolAssertionFailureMessage(const AssertionResult& assertion_result,
+ const char* expression_text,
+ const char* actual_predicate_value,
+ const char* expected_predicate_value) {
+ const char* actual_message = assertion_result.message();
+ Message msg;
+ msg << "Value of: " << expression_text
+ << "\n Actual: " << actual_predicate_value;
+ if (actual_message[0] != '\0')
+ msg << " (" << actual_message << ")";
+ msg << "\nExpected: " << expected_predicate_value;
+ return msg.GetString();
+}
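
GetBoolAssertionFailureMessage folds the AssertionResult's own message, when present, into the "Actual" line. For a hypothetical EXPECT_TRUE(IsEven(3)) whose result carries the message "3 is odd", the format strings above produce:

    Value of: IsEven(3)
      Actual: false (3 is odd)
    Expected: true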
// Helper function for implementing ASSERT_NEAR.
AssertionResult DoubleNearPredFormat(const char* expr1,
@@ -1260,7 +1324,6 @@ AssertionResult IsNotSubstring(
return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
}
-#if GTEST_HAS_STD_STRING
AssertionResult IsSubstring(
const char* needle_expr, const char* haystack_expr,
const ::std::string& needle, const ::std::string& haystack) {
@@ -1272,7 +1335,6 @@ AssertionResult IsNotSubstring(
const ::std::string& needle, const ::std::string& haystack) {
return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
}
-#endif // GTEST_HAS_STD_STRING
#if GTEST_HAS_STD_WSTRING
AssertionResult IsSubstring(
@@ -1290,7 +1352,7 @@ AssertionResult IsNotSubstring(
namespace internal {
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
namespace {
@@ -1298,7 +1360,7 @@ namespace {
AssertionResult HRESULTFailureHelper(const char* expr,
const char* expected,
long hr) { // NOLINT
-#ifdef _WIN32_WCE
+#if GTEST_OS_WINDOWS_MOBILE
// Windows CE doesn't support FormatMessage.
const char error_text[] = "";
#else
@@ -1322,7 +1384,7 @@ AssertionResult HRESULTFailureHelper(const char* expr,
--message_length) {
error_text[message_length - 1] = '\0';
}
-#endif // _WIN32_WCE
+#endif // GTEST_OS_WINDOWS_MOBILE
const String error_hex(String::Format("0x%08X ", hr));
Message msg;
@@ -1416,17 +1478,8 @@ char* CodePointToUtf8(UInt32 code_point, char* str) {
// the terminating nul character). We are asking for a 32-character
// buffer just in case. This is also enough for strncpy to
// null-terminate the destination string.
- // MSVC 8 deprecates strncpy(), so we want to suppress warning
- // 4996 (deprecated function) there.
-#ifdef GTEST_OS_WINDOWS // We are on Windows.
-#pragma warning(push) // Saves the current warning state.
-#pragma warning(disable:4996) // Temporarily disables warning 4996.
-#endif
- strncpy(str, String::Format("(Invalid Unicode 0x%X)", code_point).c_str(),
- 32);
-#ifdef GTEST_OS_WINDOWS // We are on Windows.
-#pragma warning(pop) // Restores the warning state.
-#endif
+ posix::StrNCpy(
+ str, String::Format("(Invalid Unicode 0x%X)", code_point).c_str(), 32);
str[31] = '\0'; // Makes sure no change in the format to strncpy leaves
// the result unterminated.
}
@@ -1441,23 +1494,19 @@ char* CodePointToUtf8(UInt32 code_point, char* str) {
// and thus should be combined into a single Unicode code point
// using CreateCodePointFromUtf16SurrogatePair.
inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
- if (sizeof(wchar_t) == 2)
- return (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
- else
- return false;
+ return sizeof(wchar_t) == 2 &&
+ (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
}
// Creates a Unicode code point from UTF16 surrogate pair.
inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,
wchar_t second) {
- if (sizeof(wchar_t) == 2) {
- const UInt32 mask = (1 << 10) - 1;
- return (((first & mask) << 10) | (second & mask)) + 0x10000;
- } else {
- // This should not be called, but we provide a sensible default
- // in case it is.
- return static_cast<UInt32>(first);
- }
+ const UInt32 mask = (1 << 10) - 1;
+ return (sizeof(wchar_t) == 2) ?
+ (((first & mask) << 10) | (second & mask)) + 0x10000 :
+ // This function should not be called when the condition is
+ // false, but we provide a sensible default in case it is.
+ static_cast<UInt32>(first);
}
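// Worked example, assuming sizeof(wchar_t) == 2 (as on Windows): the pair
// (0xD834, 0xDD1E) encodes U+1D11E, MUSICAL SYMBOL G CLEF, so
//   IsUtf16SurrogatePair(0xD834, 0xDD1E)                    // true
//   CreateCodePointFromUtf16SurrogatePair(0xD834, 0xDD1E)   // 0x1D11E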
// Converts a wide string to a narrow string in UTF-8 encoding.
@@ -1568,15 +1617,11 @@ AssertionResult CmpHelperSTRNE(const char* s1_expression,
// NULL C string is considered different to any non-NULL C string,
// including the empty string.
bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) {
- if ( lhs == NULL ) return rhs == NULL;
-
- if ( rhs == NULL ) return false;
-
-#ifdef GTEST_OS_WINDOWS
- return _stricmp(lhs, rhs) == 0;
-#else // GTEST_OS_WINDOWS
- return strcasecmp(lhs, rhs) == 0;
-#endif // GTEST_OS_WINDOWS
+ if (lhs == NULL)
+ return rhs == NULL;
+ if (rhs == NULL)
+ return false;
+ return posix::StrCaseCmp(lhs, rhs) == 0;
}
// Compares two wide C strings, ignoring case. Returns true iff they
@@ -1597,9 +1642,9 @@ bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
if ( rhs == NULL ) return false;
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
return _wcsicmp(lhs, rhs) == 0;
-#elif defined(GTEST_OS_LINUX)
+#elif GTEST_OS_LINUX
return wcscasecmp(lhs, rhs) == 0;
#else
// Mac OS X and Cygwin don't define wcscasecmp. Other unknown OSes
@@ -1610,27 +1655,33 @@ bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
right = towlower(*rhs++);
} while (left && left == right);
return left == right;
-#endif // OS selector
-}
-
-// Constructs a String by copying a given number of chars from a
-// buffer. E.g. String("hello", 3) will create the string "hel".
-String::String(const char * buffer, size_t len) {
- char * const temp = new char[ len + 1 ];
- memcpy(temp, buffer, len);
- temp[ len ] = '\0';
- c_str_ = temp;
+#endif // OS selector
}
// Compares this with another String.
// Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0
// if this is greater than rhs.
int String::Compare(const String & rhs) const {
- if ( c_str_ == NULL ) {
- return rhs.c_str_ == NULL ? 0 : -1; // NULL < anything except NULL
+ const char* const lhs_c_str = c_str();
+ const char* const rhs_c_str = rhs.c_str();
+
+ if (lhs_c_str == NULL) {
+ return rhs_c_str == NULL ? 0 : -1; // NULL < anything except NULL
+ } else if (rhs_c_str == NULL) {
+ return 1;
}
- return rhs.c_str_ == NULL ? 1 : strcmp(c_str_, rhs.c_str_);
+ const size_t shorter_str_len =
+ length() <= rhs.length() ? length() : rhs.length();
+ for (size_t i = 0; i != shorter_str_len; i++) {
+ if (lhs_c_str[i] < rhs_c_str[i]) {
+ return -1;
+ } else if (lhs_c_str[i] > rhs_c_str[i]) {
+ return 1;
+ }
+ }
+ return (length() < rhs.length()) ? -1 :
+ (length() > rhs.length()) ? 1 : 0;
}
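// Usage sketch (values hypothetical): the loop compares byte-wise over the
// common prefix and falls back to length, so String("app").Compare("apple")
// is negative, String("b").Compare("apple") is positive, and a NULL string
// compares less than any non-NULL string, including "".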
// Returns true iff this String ends with the given suffix. *Any*
@@ -1638,12 +1689,12 @@ int String::Compare(const String & rhs) const {
bool String::EndsWith(const char* suffix) const {
if (suffix == NULL || CStringEquals(suffix, "")) return true;
- if (c_str_ == NULL) return false;
+ if (c_str() == NULL) return false;
- const size_t this_len = strlen(c_str_);
+ const size_t this_len = strlen(c_str());
const size_t suffix_len = strlen(suffix);
return (this_len >= suffix_len) &&
- CStringEquals(c_str_ + this_len - suffix_len, suffix);
+ CStringEquals(c_str() + this_len - suffix_len, suffix);
}
// Returns true iff this String ends with the given suffix, ignoring case.
@@ -1651,37 +1702,12 @@ bool String::EndsWith(const char* suffix) const {
bool String::EndsWithCaseInsensitive(const char* suffix) const {
if (suffix == NULL || CStringEquals(suffix, "")) return true;
- if (c_str_ == NULL) return false;
+ if (c_str() == NULL) return false;
- const size_t this_len = strlen(c_str_);
+ const size_t this_len = strlen(c_str());
const size_t suffix_len = strlen(suffix);
return (this_len >= suffix_len) &&
- CaseInsensitiveCStringEquals(c_str_ + this_len - suffix_len, suffix);
-}
-
-// Sets the 0-terminated C string this String object represents. The
-// old string in this object is deleted, and this object will own a
-// clone of the input string. This function copies only up to length
-// bytes (plus a terminating null byte), or until the first null byte,
-// whichever comes first.
-//
-// This function works even when the c_str parameter has the same
-// value as that of the c_str_ field.
-void String::Set(const char * c_str, size_t length) {
- // Makes sure this works when c_str == c_str_
- const char* const temp = CloneString(c_str, length);
- delete[] c_str_;
- c_str_ = temp;
-}
-
-// Assigns a C string to this object. Self-assignment works.
-const String& String::operator=(const char* c_str) {
- // Makes sure this works when c_str == c_str_
- if (c_str != c_str_) {
- delete[] c_str_;
- c_str_ = CloneCString(c_str);
- }
- return *this;
+ CaseInsensitiveCStringEquals(c_str() + this_len - suffix_len, suffix);
}
// Formats a list of arguments to a String, using the same format
@@ -1691,41 +1717,46 @@ const String& String::operator=(const char* c_str) {
// available.
//
// The result is limited to 4096 characters (including the trailing 0).
-// If 4096 characters are not enough to format the input,
-// "<buffer exceeded>" is returned.
+// If 4096 characters are not enough to format the input, or if
+// there's an error, "<formatting error or buffer exceeded>" is
+// returned.
String String::Format(const char * format, ...) {
va_list args;
va_start(args, format);
char buffer[4096];
+ const int kBufferSize = sizeof(buffer)/sizeof(buffer[0]);
+
// MSVC 8 deprecates vsnprintf(), so we want to suppress warning
// 4996 (deprecated function) there.
-#ifdef GTEST_OS_WINDOWS // We are on Windows.
+#ifdef _MSC_VER // We are using MSVC.
#pragma warning(push) // Saves the current warning state.
#pragma warning(disable:4996) // Temporarily disables warning 4996.
- const int size =
- vsnprintf(buffer, sizeof(buffer)/sizeof(buffer[0]) - 1, format, args);
+ const int size = vsnprintf(buffer, kBufferSize, format, args);
#pragma warning(pop) // Restores the warning state.
-#else // We are on Linux or Mac OS.
- const int size =
- vsnprintf(buffer, sizeof(buffer)/sizeof(buffer[0]) - 1, format, args);
-#endif // GTEST_OS_WINDOWS
+#else // We are not using MSVC.
+ const int size = vsnprintf(buffer, kBufferSize, format, args);
+#endif // _MSC_VER
va_end(args);
- return String(size >= 0 ? buffer : "<buffer exceeded>");
+ // vsnprintf()'s behavior is not portable. When the buffer is not
+ // big enough, it returns a negative value in MSVC, and returns the
+ // needed buffer size on Linux. When there is an output error, it
+ // always returns a negative value. For simplicity, we lump the two
+ // error cases together.
+ if (size < 0 || size >= kBufferSize) {
+ return String("<formatting error or buffer exceeded>");
+ } else {
+ return String(buffer, size);
+ }
}
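// A minimal sketch of the convention handled above, assuming a C99-conformant
// vsnprintf: a return value >= the buffer size signals truncation, while a
// negative value signals an output error.
//   char buf[8];
//   const int n = snprintf(buf, sizeof(buf), "%s", "hello, world");
//   // n == 12 >= sizeof(buf), so buf holds the truncated "hello, ".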
// Converts the buffer in a StrStream to a String, converting NUL
// bytes to "\\0" along the way.
String StrStreamToString(StrStream* ss) {
-#if GTEST_HAS_STD_STRING
const ::std::string& str = ss->str();
const char* const start = str.c_str();
const char* const end = start + str.length();
-#else
- const char* const start = ss->str();
- const char* const end = start + ss->pcount();
-#endif // GTEST_HAS_STD_STRING
// We need to use a helper StrStream to do this transformation
// because String doesn't support push_back().
@@ -1738,14 +1769,7 @@ String StrStreamToString(StrStream* ss) {
}
}
-#if GTEST_HAS_STD_STRING
return String(helper.str().c_str());
-#else
- const String str(helper.str(), helper.pcount());
- helper.freeze(false);
- ss->freeze(false);
- return str;
-#endif // GTEST_HAS_STD_STRING
}
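// E.g. streaming the three bytes 'a', '\0', 'b' through a StrStream yields
// the four-character String "a\\0b": the embedded NUL byte becomes the two
// characters '\\' and '0', so it survives later C-string handling.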
// Appends the user-supplied message to the Google-Test-generated message.
@@ -1763,6 +1787,8 @@ String AppendUserMessage(const String& gtest_msg,
return msg.GetString();
}
+} // namespace internal
+
// class TestResult
// Creates an empty TestResult.
@@ -1775,9 +1801,32 @@ TestResult::TestResult()
TestResult::~TestResult() {
}
+// Returns the i-th test part result among all the results. i can
+// range from 0 to total_part_count() - 1. If i is not in that range,
+// aborts the program.
+const TestPartResult& TestResult::GetTestPartResult(int i) const {
+ if (i < 0 || i >= total_part_count())
+ internal::posix::Abort();
+ return test_part_results_.at(i);
+}
+
+// Returns the i-th test property. i can range from 0 to
+// test_property_count() - 1. If i is not in that range, aborts the
+// program.
+const TestProperty& TestResult::GetTestProperty(int i) const {
+ if (i < 0 || i >= test_property_count())
+ internal::posix::Abort();
+ return test_properties_.at(i);
+}
+
+// Clears the test part results.
+void TestResult::ClearTestPartResults() {
+ test_part_results_.clear();
+}
+
// Adds a test part result to the list.
void TestResult::AddTestPartResult(const TestPartResult& test_part_result) {
- test_part_results_.PushBack(test_part_result);
+ test_part_results_.push_back(test_part_result);
}
// Adds a test property to the list. If a property with the same key as the
@@ -1787,27 +1836,27 @@ void TestResult::RecordProperty(const TestProperty& test_property) {
if (!ValidateTestProperty(test_property)) {
return;
}
- MutexLock lock(&test_properites_mutex_);
- ListNode<TestProperty>* const node_with_matching_key =
- test_properties_.FindIf(TestPropertyKeyIs(test_property.key()));
- if (node_with_matching_key == NULL) {
- test_properties_.PushBack(test_property);
+ internal::MutexLock lock(&test_properites_mutex_);
+ const std::vector<TestProperty>::iterator property_with_matching_key =
+ std::find_if(test_properties_.begin(), test_properties_.end(),
+ internal::TestPropertyKeyIs(test_property.key()));
+ if (property_with_matching_key == test_properties_.end()) {
+ test_properties_.push_back(test_property);
return;
}
- TestProperty& property_with_matching_key = node_with_matching_key->element();
- property_with_matching_key.SetValue(test_property.value());
+ property_with_matching_key->SetValue(test_property.value());
}
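// Upsert sketch (key and values hypothetical): recording ("threads", "4")
// and then ("threads", "8") leaves one property whose value is "8":
//   result.RecordProperty(TestProperty("threads", "4"));
//   result.RecordProperty(TestProperty("threads", "8"));  // overwrites "4"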
// Adds a failure if the key is a reserved attribute of Google Test
// testcase tags. Returns true if the property is valid.
bool TestResult::ValidateTestProperty(const TestProperty& test_property) {
- String key(test_property.key());
+ internal::String key(test_property.key());
if (key == "name" || key == "status" || key == "time" || key == "classname") {
ADD_FAILURE()
<< "Reserved key used in RecordProperty(): "
<< key
<< " ('name', 'status', 'time', and 'classname' are reserved by "
- << GTEST_NAME << ")";
+ << GTEST_NAME_ << ")";
return false;
}
return true;
@@ -1815,49 +1864,51 @@ bool TestResult::ValidateTestProperty(const TestProperty& test_property) {
// Clears the object.
void TestResult::Clear() {
- test_part_results_.Clear();
- test_properties_.Clear();
+ test_part_results_.clear();
+ test_properties_.clear();
death_test_count_ = 0;
elapsed_time_ = 0;
}
-// Returns true iff the test part passed.
-static bool TestPartPassed(const TestPartResult & result) {
- return result.passed();
-}
-
-// Gets the number of successful test parts.
-int TestResult::successful_part_count() const {
- return test_part_results_.CountIf(TestPartPassed);
-}
-
-// Returns true iff the test part failed.
-static bool TestPartFailed(const TestPartResult & result) {
- return result.failed();
-}
-
-// Gets the number of failed test parts.
-int TestResult::failed_part_count() const {
- return test_part_results_.CountIf(TestPartFailed);
+// Returns true iff the test failed.
+bool TestResult::Failed() const {
+ for (int i = 0; i < total_part_count(); ++i) {
+ if (GetTestPartResult(i).failed())
+ return true;
+ }
+ return false;
}
// Returns true iff the test part fatally failed.
-static bool TestPartFatallyFailed(const TestPartResult & result) {
+static bool TestPartFatallyFailed(const TestPartResult& result) {
return result.fatally_failed();
}
// Returns true iff the test fatally failed.
bool TestResult::HasFatalFailure() const {
- return test_part_results_.CountIf(TestPartFatallyFailed) > 0;
+ return CountIf(test_part_results_, TestPartFatallyFailed) > 0;
+}
+
+// Returns true iff the test part non-fatally failed.
+static bool TestPartNonfatallyFailed(const TestPartResult& result) {
+ return result.nonfatally_failed();
+}
+
+// Returns true iff the test has a non-fatal failure.
+bool TestResult::HasNonfatalFailure() const {
+ return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0;
}
// Gets the number of all test parts. This is the sum of the number
// of successful test parts and the number of failed test parts.
int TestResult::total_part_count() const {
- return test_part_results_.size();
+ return static_cast<int>(test_part_results_.size());
}
-} // namespace internal
+// Returns the number of the test properties.
+int TestResult::test_property_count() const {
+ return static_cast<int>(test_properties_.size());
+}
// class Test
@@ -1897,7 +1948,23 @@ void Test::RecordProperty(const char* key, int value) {
RecordProperty(key, value_message.GetString().c_str());
}
-#ifdef GTEST_OS_WINDOWS
+namespace internal {
+
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+ const String& message) {
+ // This function is a friend of UnitTest and as such has access to
+ // AddTestPartResult.
+ UnitTest::GetInstance()->AddTestPartResult(
+ result_type,
+ NULL, // No info about the source file where the exception occurred.
+ -1, // We have no info on which line caused the exception.
+ message,
+ String()); // No stack trace, either.
+}
+
+} // namespace internal
+
+#if GTEST_OS_WINDOWS
// We are on Windows.
// Adds an "exception thrown" fatal failure to the current test.
@@ -1907,15 +1974,8 @@ static void AddExceptionThrownFailure(DWORD exception_code,
message << "Exception thrown with code 0x" << std::setbase(16) <<
exception_code << std::setbase(10) << " in " << location << ".";
- UnitTest* const unit_test = UnitTest::GetInstance();
- unit_test->AddTestPartResult(
- TPRT_FATAL_FAILURE,
- static_cast<const char *>(NULL),
- // We have no info about the source file where the exception
- // occurred.
- -1, // We have no info on which line caused the exception.
- message.GetString(),
- internal::String(""));
+ internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure,
+ message.GetString());
}
#endif // GTEST_OS_WINDOWS
@@ -1931,7 +1991,7 @@ bool Test::HasSameFixtureClass() {
// Info about the first test in the current test case.
const internal::TestInfoImpl* const first_test_info =
- test_case->test_info_list().Head()->element()->impl();
+ test_case->test_info_list()[0]->impl();
const internal::TypeId first_fixture_id = first_test_info->fixture_class_id();
const char* const first_test_name = first_test_info->name();
@@ -1993,8 +2053,8 @@ void Test::Run() {
if (!HasSameFixtureClass()) return;
internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
-#if defined(GTEST_OS_WINDOWS) && !defined(__MINGW32__)
- // We are on Windows.
+#if GTEST_HAS_SEH
+ // Catch SEH-style exceptions.
impl->os_stack_trace_getter()->UponLeavingGTest();
__try {
SetUp();
@@ -2025,7 +2085,7 @@ void Test::Run() {
AddExceptionThrownFailure(GetExceptionCode(), "TearDown()");
}
-#else // We are on Linux, Mac or MingW - exceptions are disabled.
+#else // We are on a compiler or platform that doesn't support SEH.
impl->os_stack_trace_getter()->UponLeavingGTest();
SetUp();
@@ -2040,7 +2100,7 @@ void Test::Run() {
// failed.
impl->os_stack_trace_getter()->UponLeavingGTest();
TearDown();
-#endif // GTEST_OS_WINDOWS
+#endif // GTEST_HAS_SEH
}
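// A simplified sketch of the SEH pattern compiled in above (the real
// exception filter is elided from this hunk):
//   __try {
//     SetUp();
//   } __except (EXCEPTION_EXECUTE_HANDLER) {  // simplified filter
//     AddExceptionThrownFailure(GetExceptionCode(), "SetUp()");
//   }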
@@ -2049,18 +2109,24 @@ bool Test::HasFatalFailure() {
return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure();
}
+// Returns true iff the current test has a non-fatal failure.
+bool Test::HasNonfatalFailure() {
+ return internal::GetUnitTestImpl()->current_test_result()->
+ HasNonfatalFailure();
+}
+
// class TestInfo
// Constructs a TestInfo object. It assumes ownership of the test factory
// object via impl_.
-TestInfo::TestInfo(const char* test_case_name,
- const char* name,
- const char* test_case_comment,
- const char* comment,
+TestInfo::TestInfo(const char* a_test_case_name,
+ const char* a_name,
+ const char* a_test_case_comment,
+ const char* a_comment,
internal::TypeId fixture_class_id,
internal::TestFactoryBase* factory) {
- impl_ = new internal::TestInfoImpl(this, test_case_name, name,
- test_case_comment, comment,
+ impl_ = new internal::TestInfoImpl(this, a_test_case_name, a_name,
+ a_test_case_comment, a_comment,
fixture_class_id, factory);
}
@@ -2102,7 +2168,7 @@ TestInfo* MakeAndRegisterTestInfo(
return test_info;
}
-#ifdef GTEST_HAS_PARAM_TEST
+#if GTEST_HAS_PARAM_TEST
void ReportInvalidTestCaseType(const char* test_case_name,
const char* file, int line) {
Message errors;
@@ -2146,8 +2212,11 @@ const char* TestInfo::comment() const {
// Returns true if this test should run.
bool TestInfo::should_run() const { return impl_->should_run(); }
+// Returns true if this test matches the user-specified filter.
+bool TestInfo::matches_filter() const { return impl_->matches_filter(); }
+
// Returns the result of the test.
-const internal::TestResult* TestInfo::result() const { return impl_->result(); }
+const TestResult* TestInfo::result() const { return impl_->result(); }
// Increments the number of death tests encountered in this test so
// far.
@@ -2184,24 +2253,13 @@ class TestNameIs {
} // namespace
-// Finds and returns a TestInfo with the given name. If one doesn't
-// exist, returns NULL.
-TestInfo * TestCase::GetTestInfo(const char* test_name) {
- // Can we find a TestInfo with the given name?
- internal::ListNode<TestInfo *> * const node = test_info_list_->FindIf(
- TestNameIs(test_name));
-
- // Returns the TestInfo found.
- return node ? node->element() : NULL;
-}
-
namespace internal {
// This method expands all parameterized tests registered with macros TEST_P
// and INSTANTIATE_TEST_CASE_P into regular tests and registers those.
// This will be done just once during the program runtime.
void UnitTestImpl::RegisterParameterizedTests() {
-#ifdef GTEST_HAS_PARAM_TEST
+#if GTEST_HAS_PARAM_TEST
if (!parameterized_tests_registered_) {
parameterized_test_registry_.RegisterTests();
parameterized_tests_registered_ = true;
@@ -2218,17 +2276,16 @@ void TestInfoImpl::Run() {
UnitTestImpl* const impl = internal::GetUnitTestImpl();
impl->set_current_test_info(parent_);
- // Notifies the unit test event listener that a test is about to
- // start.
- UnitTestEventListenerInterface* const result_printer =
- impl->result_printer();
- result_printer->OnTestStart(parent_);
+ TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+ // Notifies the unit test event listeners that a test is about to start.
+ repeater->OnTestStart(*parent_);
const TimeInMillis start = GetTimeInMillis();
impl->os_stack_trace_getter()->UponLeavingGTest();
-#if defined(GTEST_OS_WINDOWS) && !defined(__MINGW32__)
- // We are on Windows.
+#if GTEST_HAS_SEH
+ // Catch SEH-style exceptions.
Test* test = NULL;
__try {
@@ -2240,7 +2297,7 @@ void TestInfoImpl::Run() {
"the test fixture's constructor");
return;
}
-#else // We are on Linux, Mac OS or MingW - exceptions are disabled.
+#else // We are on a compiler or platform that doesn't support SEH.
// TODO(wan): If test->Run() throws, test won't be deleted. This is
// not a problem now as we don't use exceptions. If we were to
@@ -2249,7 +2306,7 @@ void TestInfoImpl::Run() {
// Creates the test object.
Test* test = factory_->CreateTest();
-#endif // GTEST_OS_WINDOWS
+#endif // GTEST_HAS_SEH
// Runs the test only if the constructor of the test fixture didn't
// generate a fatal failure.
@@ -2265,7 +2322,7 @@ void TestInfoImpl::Run() {
result_.set_elapsed_time(GetTimeInMillis() - start);
// Notifies the unit test event listener that a test has just finished.
- result_printer->OnTestEnd(parent_);
+ repeater->OnTestEnd(*parent_);
// Tells UnitTest to stop associating assertion results to this
// test.
@@ -2278,26 +2335,26 @@ void TestInfoImpl::Run() {
// Gets the number of successful tests in this test case.
int TestCase::successful_test_count() const {
- return test_info_list_->CountIf(TestPassed);
+ return CountIf(test_info_list_, TestPassed);
}
// Gets the number of failed tests in this test case.
int TestCase::failed_test_count() const {
- return test_info_list_->CountIf(TestFailed);
+ return CountIf(test_info_list_, TestFailed);
}
int TestCase::disabled_test_count() const {
- return test_info_list_->CountIf(TestDisabled);
+ return CountIf(test_info_list_, TestDisabled);
}
// Gets the number of tests in this test case that should run.
int TestCase::test_to_run_count() const {
- return test_info_list_->CountIf(ShouldRunTest);
+ return CountIf(test_info_list_, ShouldRunTest);
}
// Gets the number of all tests.
int TestCase::total_test_count() const {
- return test_info_list_->size();
+ return static_cast<int>(test_info_list_.size());
}
// Creates a TestCase with the given name.
@@ -2307,32 +2364,42 @@ int TestCase::total_test_count() const {
// name: name of the test case
// set_up_tc: pointer to the function that sets up the test case
// tear_down_tc: pointer to the function that tears down the test case
-TestCase::TestCase(const char* name, const char* comment,
+TestCase::TestCase(const char* a_name, const char* a_comment,
Test::SetUpTestCaseFunc set_up_tc,
Test::TearDownTestCaseFunc tear_down_tc)
- : name_(name),
- comment_(comment),
+ : name_(a_name),
+ comment_(a_comment),
set_up_tc_(set_up_tc),
tear_down_tc_(tear_down_tc),
should_run_(false),
elapsed_time_(0) {
- test_info_list_ = new internal::List<TestInfo *>;
}
// Destructor of TestCase.
TestCase::~TestCase() {
// Deletes every Test in the collection.
- test_info_list_->ForEach(internal::Delete<TestInfo>);
+ ForEach(test_info_list_, internal::Delete<TestInfo>);
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+const TestInfo* TestCase::GetTestInfo(int i) const {
+ const int index = GetElementOr(test_indices_, i, -1);
+ return index < 0 ? NULL : test_info_list_[index];
+}
- // Then deletes the Test collection.
- delete test_info_list_;
- test_info_list_ = NULL;
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+TestInfo* TestCase::GetMutableTestInfo(int i) {
+ const int index = GetElementOr(test_indices_, i, -1);
+ return index < 0 ? NULL : test_info_list_[index];
}
// Adds a test to this test case. Will delete the test upon
// destruction of the TestCase object.
void TestCase::AddTestInfo(TestInfo * test_info) {
- test_info_list_->PushBack(test_info);
+ test_info_list_.push_back(test_info);
+ test_indices_.push_back(static_cast<int>(test_indices_.size()));
}
// Runs every test in this TestCase.
@@ -2342,38 +2409,62 @@ void TestCase::Run() {
internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
impl->set_current_test_case(this);
- UnitTestEventListenerInterface * const result_printer =
- impl->result_printer();
+ TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
- result_printer->OnTestCaseStart(this);
+ repeater->OnTestCaseStart(*this);
impl->os_stack_trace_getter()->UponLeavingGTest();
set_up_tc_();
const internal::TimeInMillis start = internal::GetTimeInMillis();
- test_info_list_->ForEach(internal::TestInfoImpl::RunTest);
+ for (int i = 0; i < total_test_count(); i++) {
+ GetMutableTestInfo(i)->impl()->Run();
+ }
elapsed_time_ = internal::GetTimeInMillis() - start;
impl->os_stack_trace_getter()->UponLeavingGTest();
tear_down_tc_();
- result_printer->OnTestCaseEnd(this);
+ repeater->OnTestCaseEnd(*this);
impl->set_current_test_case(NULL);
}
// Clears the results of all tests in this test case.
void TestCase::ClearResult() {
- test_info_list_->ForEach(internal::TestInfoImpl::ClearTestResult);
+ ForEach(test_info_list_, internal::TestInfoImpl::ClearTestResult);
+}
+
+// Returns true iff test passed.
+bool TestCase::TestPassed(const TestInfo * test_info) {
+ const internal::TestInfoImpl* const impl = test_info->impl();
+ return impl->should_run() && impl->result()->Passed();
}
+// Returns true iff test failed.
+bool TestCase::TestFailed(const TestInfo * test_info) {
+ const internal::TestInfoImpl* const impl = test_info->impl();
+ return impl->should_run() && impl->result()->Failed();
+}
-// class UnitTestEventListenerInterface
+// Returns true iff test is disabled.
+bool TestCase::TestDisabled(const TestInfo * test_info) {
+ return test_info->impl()->is_disabled();
+}
-// The virtual d'tor.
-UnitTestEventListenerInterface::~UnitTestEventListenerInterface() {
+// Returns true if the given test should run.
+bool TestCase::ShouldRunTest(const TestInfo *test_info) {
+ return test_info->impl()->should_run();
}
-// A result printer that never prints anything. Used in the child process
-// of an exec-style death test to avoid needless output clutter.
-class NullUnitTestResultPrinter : public UnitTestEventListenerInterface {};
+// Shuffles the tests in this test case.
+void TestCase::ShuffleTests(internal::Random* random) {
+ Shuffle(random, &test_indices_);
+}
+
+// Restores the test order to before the first shuffle.
+void TestCase::UnshuffleTests() {
+ for (size_t i = 0; i < test_indices_.size(); i++) {
+ test_indices_[i] = static_cast<int>(i);
+ }
+}
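+// Only the index table is permuted; test_info_list_ keeps registration
+// order. After a shuffle, GetTestInfo(0) may return the test registered
+// third, and UnshuffleTests() restores the identity mapping 0, 1, 2, ...
+// so the original order is observed again.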
// Formats a countable noun. Depending on its quantity, either the
// singular form or the plural form is used. e.g.
@@ -2397,17 +2488,17 @@ static internal::String FormatTestCaseCount(int test_case_count) {
return FormatCountableNoun(test_case_count, "test case", "test cases");
}
-// Converts a TestPartResultType enum to human-friendly string
-// representation. Both TPRT_NONFATAL_FAILURE and TPRT_FATAL_FAILURE
-// are translated to "Failure", as the user usually doesn't care about
-// the difference between the two when viewing the test result.
-static const char * TestPartResultTypeToString(TestPartResultType type) {
+// Converts a TestPartResult::Type enum to human-friendly string
+// representation. Both kNonFatalFailure and kFatalFailure are translated
+// to "Failure", as the user usually doesn't care about the difference
+// between the two when viewing the test result.
+static const char * TestPartResultTypeToString(TestPartResult::Type type) {
switch (type) {
- case TPRT_SUCCESS:
+ case TestPartResult::kSuccess:
return "Success";
- case TPRT_NONFATAL_FAILURE:
- case TPRT_FATAL_FAILURE:
+ case TestPartResult::kNonFatalFailure:
+ case TestPartResult::kFatalFailure:
#ifdef _MSC_VER
return "error: ";
#else
@@ -2418,15 +2509,33 @@ static const char * TestPartResultTypeToString(TestPartResultType type) {
return "Unknown result type";
}
+// Prints a TestPartResult to a String.
+static internal::String PrintTestPartResultToString(
+ const TestPartResult& test_part_result) {
+ return (Message()
+ << internal::FormatFileLocation(test_part_result.file_name(),
+ test_part_result.line_number())
+ << " " << TestPartResultTypeToString(test_part_result.type())
+ << test_part_result.message()).GetString();
+}
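+// The resulting line follows the familiar compiler-diagnostic shape, e.g.
+// (file name and line number hypothetical):
+//   foo_test.cc:42: Failure
+// followed by the assertion's own message.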
+
// Prints a TestPartResult.
-static void PrintTestPartResult(
- const TestPartResult & test_part_result) {
- printf("%s %s%s\n",
- internal::FormatFileLocation(test_part_result.file_name(),
- test_part_result.line_number()).c_str(),
- TestPartResultTypeToString(test_part_result.type()),
- test_part_result.message());
+static void PrintTestPartResult(const TestPartResult& test_part_result) {
+ const internal::String& result =
+ PrintTestPartResultToString(test_part_result);
+ printf("%s\n", result.c_str());
fflush(stdout);
+ // If the test program runs in Visual Studio or a debugger, the
+ // following statements add the test part result message to the Output
+ // window such that the user can double-click on it to jump to the
+ // corresponding source code location; otherwise they do nothing.
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+ // We don't call OutputDebugString*() on Windows Mobile, as printing
+ // to stdout is done by OutputDebugString() there already - we don't
+ // want the same message printed twice.
+ ::OutputDebugStringA(result.c_str());
+ ::OutputDebugStringA("\n");
+#endif
}
// class PrettyUnitTestResultPrinter
@@ -2434,12 +2543,13 @@ static void PrintTestPartResult(
namespace internal {
enum GTestColor {
+ COLOR_DEFAULT,
COLOR_RED,
COLOR_GREEN,
COLOR_YELLOW
};
-#if defined(GTEST_OS_WINDOWS) && !defined(_WIN32_WCE)
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
// Returns the character attribute for the given color.
WORD GetColorAttribute(GTestColor color) {
@@ -2447,39 +2557,42 @@ WORD GetColorAttribute(GTestColor color) {
case COLOR_RED: return FOREGROUND_RED;
case COLOR_GREEN: return FOREGROUND_GREEN;
case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN;
+ default: return 0;
}
- return 0;
}
#else
-// Returns the ANSI color code for the given color.
+// Returns the ANSI color code for the given color. COLOR_DEFAULT is
+// an invalid input.
const char* GetAnsiColorCode(GTestColor color) {
switch (color) {
case COLOR_RED: return "1";
case COLOR_GREEN: return "2";
case COLOR_YELLOW: return "3";
+ default: return NULL;
};
- return NULL;
}
-#endif // GTEST_OS_WINDOWS && !_WIN32_WCE
+#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
// Returns true iff Google Test should use colors in the output.
bool ShouldUseColor(bool stdout_is_tty) {
const char* const gtest_color = GTEST_FLAG(color).c_str();
if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) {
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
// On Windows the TERM variable is usually not set, but the
// console there does support colors.
return stdout_is_tty;
#else
// On non-Windows platforms, we rely on the TERM variable.
- const char* const term = GetEnv("TERM");
+ const char* const term = posix::GetEnv("TERM");
const bool term_supports_color =
String::CStringEquals(term, "xterm") ||
String::CStringEquals(term, "xterm-color") ||
+ String::CStringEquals(term, "xterm-256color") ||
+ String::CStringEquals(term, "linux") ||
String::CStringEquals(term, "cygwin");
return stdout_is_tty && term_supports_color;
#endif // GTEST_OS_WINDOWS
@@ -2502,11 +2615,13 @@ void ColoredPrintf(GTestColor color, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
-#if defined(_WIN32_WCE) || defined(GTEST_OS_SYMBIAN) || defined(GTEST_OS_ZOS)
- static const bool use_color = false;
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS
+ const bool use_color = false;
#else
- static const bool use_color = ShouldUseColor(isatty(fileno(stdout)) != 0);
-#endif // !_WIN32_WCE
+ static const bool in_color_mode =
+ ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0);
+ const bool use_color = in_color_mode && (color != COLOR_DEFAULT);
+#endif // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS
// The '!= 0' comparison is necessary to satisfy MSVC 7.1.
if (!use_color) {
@@ -2515,7 +2630,7 @@ void ColoredPrintf(GTestColor color, const char* fmt, ...) {
return;
}
-#if defined(GTEST_OS_WINDOWS) && !defined(_WIN32_WCE)
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
// Gets the current text color.
@@ -2523,220 +2638,229 @@ void ColoredPrintf(GTestColor color, const char* fmt, ...) {
GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
const WORD old_color_attrs = buffer_info.wAttributes;
+ // We need to flush the stream buffers into the console before each
+ // SetConsoleTextAttribute call lest it affect the text that is already
+ // printed but has not yet reached the console.
+ fflush(stdout);
SetConsoleTextAttribute(stdout_handle,
GetColorAttribute(color) | FOREGROUND_INTENSITY);
vprintf(fmt, args);
+ fflush(stdout);
// Restores the text color.
SetConsoleTextAttribute(stdout_handle, old_color_attrs);
#else
printf("\033[0;3%sm", GetAnsiColorCode(color));
vprintf(fmt, args);
printf("\033[m"); // Resets the terminal to default.
-#endif // GTEST_OS_WINDOWS && !_WIN32_WCE
+#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
va_end(args);
}
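// Usage sketch:
//   ColoredPrintf(COLOR_YELLOW, "Note: %s\n", "some reminder");
// With --gtest_color=auto, the text is colored only when stdout is a TTY
// (and, outside Windows, TERM is one of the recognized values above);
// COLOR_DEFAULT always prints uncolored.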
-} // namespace internal
-
-using internal::ColoredPrintf;
-using internal::COLOR_RED;
-using internal::COLOR_GREEN;
-using internal::COLOR_YELLOW;
-
-// This class implements the UnitTestEventListenerInterface interface.
+// This class implements the TestEventListener interface.
//
// Class PrettyUnitTestResultPrinter is copyable.
-class PrettyUnitTestResultPrinter : public UnitTestEventListenerInterface {
+class PrettyUnitTestResultPrinter : public TestEventListener {
public:
PrettyUnitTestResultPrinter() {}
static void PrintTestName(const char * test_case, const char * test) {
printf("%s.%s", test_case, test);
}
- // The following methods override what's in the
- // UnitTestEventListenerInterface class.
- virtual void OnUnitTestStart(const UnitTest * unit_test);
- virtual void OnGlobalSetUpStart(const UnitTest*);
- virtual void OnTestCaseStart(const TestCase * test_case);
- virtual void OnTestCaseEnd(const TestCase * test_case);
- virtual void OnTestStart(const TestInfo * test_info);
- virtual void OnNewTestPartResult(const TestPartResult * result);
- virtual void OnTestEnd(const TestInfo * test_info);
- virtual void OnGlobalTearDownStart(const UnitTest*);
- virtual void OnUnitTestEnd(const UnitTest * unit_test);
+ // The following methods override what's in the TestEventListener class.
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestCaseStart(const TestCase& test_case);
+ virtual void OnTestStart(const TestInfo& test_info);
+ virtual void OnTestPartResult(const TestPartResult& result);
+ virtual void OnTestEnd(const TestInfo& test_info);
+ virtual void OnTestCaseEnd(const TestCase& test_case);
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+ virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
private:
+ static void PrintFailedTests(const UnitTest& unit_test);
+
internal::String test_case_name_;
};
-// Called before the unit test starts.
-void PrettyUnitTestResultPrinter::OnUnitTestStart(
- const UnitTest * unit_test) {
- const char * const filter = GTEST_FLAG(filter).c_str();
+// Fired before each iteration of tests starts.
+void PrettyUnitTestResultPrinter::OnTestIterationStart(
+ const UnitTest& unit_test, int iteration) {
+ if (GTEST_FLAG(repeat) != 1)
+ printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1);
+
+ const char* const filter = GTEST_FLAG(filter).c_str();
// Prints the filter if it's not *. This reminds the user that some
// tests may be skipped.
if (!internal::String::CStringEquals(filter, kUniversalFilter)) {
ColoredPrintf(COLOR_YELLOW,
- "Note: %s filter = %s\n", GTEST_NAME, filter);
+ "Note: %s filter = %s\n", GTEST_NAME_, filter);
+ }
+
+ if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) {
+ ColoredPrintf(COLOR_YELLOW,
+ "Note: This is test shard %s of %s.\n",
+ internal::posix::GetEnv(kTestShardIndex),
+ internal::posix::GetEnv(kTestTotalShards));
+ }
+
+ if (GTEST_FLAG(shuffle)) {
+ ColoredPrintf(COLOR_YELLOW,
+ "Note: Randomizing tests' orders with a seed of %d .\n",
+ unit_test.random_seed());
}
- const internal::UnitTestImpl* const impl = unit_test->impl();
ColoredPrintf(COLOR_GREEN, "[==========] ");
printf("Running %s from %s.\n",
- FormatTestCount(impl->test_to_run_count()).c_str(),
- FormatTestCaseCount(impl->test_case_to_run_count()).c_str());
+ FormatTestCount(unit_test.test_to_run_count()).c_str(),
+ FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
fflush(stdout);
}
-void PrettyUnitTestResultPrinter::OnGlobalSetUpStart(const UnitTest*) {
+void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart(
+ const UnitTest& /*unit_test*/) {
ColoredPrintf(COLOR_GREEN, "[----------] ");
printf("Global test environment set-up.\n");
fflush(stdout);
}
-void PrettyUnitTestResultPrinter::OnTestCaseStart(
- const TestCase * test_case) {
- test_case_name_ = test_case->name();
+void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) {
+ test_case_name_ = test_case.name();
const internal::String counts =
- FormatCountableNoun(test_case->test_to_run_count(), "test", "tests");
+ FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
ColoredPrintf(COLOR_GREEN, "[----------] ");
printf("%s from %s", counts.c_str(), test_case_name_.c_str());
- if (test_case->comment()[0] == '\0') {
+ if (test_case.comment()[0] == '\0') {
printf("\n");
} else {
- printf(", where %s\n", test_case->comment());
+ printf(", where %s\n", test_case.comment());
}
fflush(stdout);
}
-void PrettyUnitTestResultPrinter::OnTestCaseEnd(
- const TestCase * test_case) {
- if (!GTEST_FLAG(print_time)) return;
-
- test_case_name_ = test_case->name();
- const internal::String counts =
- FormatCountableNoun(test_case->test_to_run_count(), "test", "tests");
- ColoredPrintf(COLOR_GREEN, "[----------] ");
- printf("%s from %s (%s ms total)\n\n",
- counts.c_str(), test_case_name_.c_str(),
- internal::StreamableToString(test_case->elapsed_time()).c_str());
- fflush(stdout);
-}
-
-void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo * test_info) {
+void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) {
ColoredPrintf(COLOR_GREEN, "[ RUN ] ");
- PrintTestName(test_case_name_.c_str(), test_info->name());
- if (test_info->comment()[0] == '\0') {
+ PrintTestName(test_case_name_.c_str(), test_info.name());
+ if (test_info.comment()[0] == '\0') {
printf("\n");
} else {
- printf(", where %s\n", test_info->comment());
+ printf(", where %s\n", test_info.comment());
}
fflush(stdout);
}
-void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo * test_info) {
- if (test_info->result()->Passed()) {
+// Called after an assertion failure.
+void PrettyUnitTestResultPrinter::OnTestPartResult(
+ const TestPartResult& result) {
+ // If the test part succeeded, we don't need to do anything.
+ if (result.type() == TestPartResult::kSuccess)
+ return;
+
+ // Print failure message from the assertion (e.g. expected this and got that).
+ PrintTestPartResult(result);
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) {
+ if (test_info.result()->Passed()) {
ColoredPrintf(COLOR_GREEN, "[ OK ] ");
} else {
ColoredPrintf(COLOR_RED, "[ FAILED ] ");
}
- PrintTestName(test_case_name_.c_str(), test_info->name());
+ PrintTestName(test_case_name_.c_str(), test_info.name());
if (GTEST_FLAG(print_time)) {
printf(" (%s ms)\n", internal::StreamableToString(
- test_info->result()->elapsed_time()).c_str());
+ test_info.result()->elapsed_time()).c_str());
} else {
printf("\n");
}
fflush(stdout);
}
-// Called after an assertion failure.
-void PrettyUnitTestResultPrinter::OnNewTestPartResult(
- const TestPartResult * result) {
- // If the test part succeeded, we don't need to do anything.
- if (result->type() == TPRT_SUCCESS)
- return;
+void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) {
+ if (!GTEST_FLAG(print_time)) return;
- // Print failure message from the assertion (e.g. expected this and got that).
- PrintTestPartResult(*result);
+ test_case_name_ = test_case.name();
+ const internal::String counts =
+ FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("%s from %s (%s ms total)\n\n",
+ counts.c_str(), test_case_name_.c_str(),
+ internal::StreamableToString(test_case.elapsed_time()).c_str());
fflush(stdout);
}
-void PrettyUnitTestResultPrinter::OnGlobalTearDownStart(const UnitTest*) {
+void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart(
+ const UnitTest& /*unit_test*/) {
ColoredPrintf(COLOR_GREEN, "[----------] ");
printf("Global test environment tear-down\n");
fflush(stdout);
}
-namespace internal {
-
// Internal helper for printing the list of failed tests.
-static void PrintFailedTestsPretty(const UnitTestImpl* impl) {
- const int failed_test_count = impl->failed_test_count();
+void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) {
+ const int failed_test_count = unit_test.failed_test_count();
if (failed_test_count == 0) {
return;
}
- for (const internal::ListNode<TestCase*>* node = impl->test_cases()->Head();
- node != NULL; node = node->next()) {
- const TestCase* const tc = node->element();
- if (!tc->should_run() || (tc->failed_test_count() == 0)) {
+ for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
+ const TestCase& test_case = *unit_test.GetTestCase(i);
+ if (!test_case.should_run() || (test_case.failed_test_count() == 0)) {
continue;
}
- for (const internal::ListNode<TestInfo*>* tinode =
- tc->test_info_list().Head();
- tinode != NULL; tinode = tinode->next()) {
- const TestInfo* const ti = tinode->element();
- if (!tc->ShouldRunTest(ti) || tc->TestPassed(ti)) {
+ for (int j = 0; j < test_case.total_test_count(); ++j) {
+ const TestInfo& test_info = *test_case.GetTestInfo(j);
+ if (!test_info.should_run() || test_info.result()->Passed()) {
continue;
}
ColoredPrintf(COLOR_RED, "[ FAILED ] ");
- printf("%s.%s", ti->test_case_name(), ti->name());
- if (ti->test_case_comment()[0] != '\0' ||
- ti->comment()[0] != '\0') {
- printf(", where %s", ti->test_case_comment());
- if (ti->test_case_comment()[0] != '\0' &&
- ti->comment()[0] != '\0') {
+ printf("%s.%s", test_case.name(), test_info.name());
+ if (test_case.comment()[0] != '\0' ||
+ test_info.comment()[0] != '\0') {
+ printf(", where %s", test_case.comment());
+ if (test_case.comment()[0] != '\0' &&
+ test_info.comment()[0] != '\0') {
printf(" and ");
}
}
- printf("%s\n", ti->comment());
+ printf("%s\n", test_info.comment());
}
}
}
-} // namespace internal
-
-void PrettyUnitTestResultPrinter::OnUnitTestEnd(
- const UnitTest * unit_test) {
- const internal::UnitTestImpl* const impl = unit_test->impl();
-
+void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+ int /*iteration*/) {
ColoredPrintf(COLOR_GREEN, "[==========] ");
printf("%s from %s ran.",
- FormatTestCount(impl->test_to_run_count()).c_str(),
- FormatTestCaseCount(impl->test_case_to_run_count()).c_str());
+ FormatTestCount(unit_test.test_to_run_count()).c_str(),
+ FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
if (GTEST_FLAG(print_time)) {
printf(" (%s ms total)",
- internal::StreamableToString(impl->elapsed_time()).c_str());
+ internal::StreamableToString(unit_test.elapsed_time()).c_str());
}
printf("\n");
ColoredPrintf(COLOR_GREEN, "[ PASSED ] ");
- printf("%s.\n", FormatTestCount(impl->successful_test_count()).c_str());
+ printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str());
- int num_failures = impl->failed_test_count();
- if (!impl->Passed()) {
- const int failed_test_count = impl->failed_test_count();
+ int num_failures = unit_test.failed_test_count();
+ if (!unit_test.Passed()) {
+ const int failed_test_count = unit_test.failed_test_count();
ColoredPrintf(COLOR_RED, "[ FAILED ] ");
printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str());
- internal::PrintFailedTestsPretty(impl);
+ PrintFailedTests(unit_test);
printf("\n%2d FAILED %s\n", num_failures,
num_failures == 1 ? "TEST" : "TESTS");
}
- int num_disabled = impl->disabled_test_count();
- if (num_disabled) {
+ int num_disabled = unit_test.disabled_test_count();
+ if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) {
if (!num_failures) {
printf("\n"); // Add a spacer if no FAILURE banner is displayed.
}
@@ -2751,81 +2875,127 @@ void PrettyUnitTestResultPrinter::OnUnitTestEnd(
// End PrettyUnitTestResultPrinter
-// class UnitTestEventsRepeater
+// class TestEventRepeater
//
// This class forwards events to other event listeners.
-class UnitTestEventsRepeater : public UnitTestEventListenerInterface {
+class TestEventRepeater : public TestEventListener {
public:
- typedef internal::List<UnitTestEventListenerInterface *> Listeners;
- typedef internal::ListNode<UnitTestEventListenerInterface *> ListenersNode;
- UnitTestEventsRepeater() {}
- virtual ~UnitTestEventsRepeater();
- void AddListener(UnitTestEventListenerInterface *listener);
-
- virtual void OnUnitTestStart(const UnitTest* unit_test);
- virtual void OnUnitTestEnd(const UnitTest* unit_test);
- virtual void OnGlobalSetUpStart(const UnitTest* unit_test);
- virtual void OnGlobalSetUpEnd(const UnitTest* unit_test);
- virtual void OnGlobalTearDownStart(const UnitTest* unit_test);
- virtual void OnGlobalTearDownEnd(const UnitTest* unit_test);
- virtual void OnTestCaseStart(const TestCase* test_case);
- virtual void OnTestCaseEnd(const TestCase* test_case);
- virtual void OnTestStart(const TestInfo* test_info);
- virtual void OnTestEnd(const TestInfo* test_info);
- virtual void OnNewTestPartResult(const TestPartResult* result);
+ TestEventRepeater() : forwarding_enabled_(true) {}
+ virtual ~TestEventRepeater();
+ void Append(TestEventListener *listener);
+ TestEventListener* Release(TestEventListener* listener);
+
+ // Controls whether events will be forwarded to listeners_. Set to false
+ // in death test child processes.
+ bool forwarding_enabled() const { return forwarding_enabled_; }
+ void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; }
+
+ virtual void OnTestProgramStart(const UnitTest& unit_test);
+ virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test);
+ virtual void OnTestCaseStart(const TestCase& test_case);
+ virtual void OnTestStart(const TestInfo& test_info);
+ virtual void OnTestPartResult(const TestPartResult& result);
+ virtual void OnTestEnd(const TestInfo& test_info);
+ virtual void OnTestCaseEnd(const TestCase& test_case);
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test);
+ virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+ virtual void OnTestProgramEnd(const UnitTest& unit_test);
private:
- Listeners listeners_;
+ // Controls whether events will be forwarded to listeners_. Set to false
+ // in death test child processes.
+ bool forwarding_enabled_;
+ // The list of listeners that receive events.
+ std::vector<TestEventListener*> listeners_;
- GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestEventsRepeater);
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater);
};
-UnitTestEventsRepeater::~UnitTestEventsRepeater() {
- for (ListenersNode* listener = listeners_.Head();
- listener != NULL;
- listener = listener->next()) {
- delete listener->element();
- }
+TestEventRepeater::~TestEventRepeater() {
+ ForEach(listeners_, Delete<TestEventListener>);
}
-void UnitTestEventsRepeater::AddListener(
- UnitTestEventListenerInterface *listener) {
- listeners_.PushBack(listener);
+void TestEventRepeater::Append(TestEventListener *listener) {
+ listeners_.push_back(listener);
}
-// Since the methods are identical, use a macro to reduce boilerplate.
-// This defines a member that repeats the call to all listeners.
+// TODO(vladl@google.com): Factor the search functionality into Vector::Find.
+TestEventListener* TestEventRepeater::Release(TestEventListener *listener) {
+ for (size_t i = 0; i < listeners_.size(); ++i) {
+ if (listeners_[i] == listener) {
+ listeners_.erase(listeners_.begin() + i);
+ return listener;
+ }
+ }
+
+ return NULL;
+}
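+// Usage sketch: Release() hands ownership back to the caller, so a listener
+// can be removed without being deleted by the repeater's destructor:
+//   TestEventListener* removed = repeater->Release(listener);
+//   delete removed;  // now the caller's responsibility (may be NULL)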
+
+// Since most methods are very similar, use macros to reduce boilerplate.
+// This defines a member that forwards the call to all listeners.
#define GTEST_REPEATER_METHOD_(Name, Type) \
-void UnitTestEventsRepeater::Name(const Type* parameter) { \
- for (ListenersNode* listener = listeners_.Head(); \
- listener != NULL; \
- listener = listener->next()) { \
- listener->element()->Name(parameter); \
+void TestEventRepeater::Name(const Type& parameter) { \
+ if (forwarding_enabled_) { \
+ for (size_t i = 0; i < listeners_.size(); i++) { \
+ listeners_[i]->Name(parameter); \
+ } \
+ } \
+}
+// This defines a member that forwards the call to all listeners in reverse
+// order.
+#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+ if (forwarding_enabled_) { \
+ for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) { \
+ listeners_[i]->Name(parameter); \
+ } \
} \
}
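// For instance, GTEST_REPEATER_METHOD_(OnTestStart, TestInfo) expands to
// roughly:
//   void TestEventRepeater::OnTestStart(const TestInfo& parameter) {
//     if (forwarding_enabled_) {
//       for (size_t i = 0; i < listeners_.size(); i++)
//         listeners_[i]->OnTestStart(parameter);
//     }
//   }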
-GTEST_REPEATER_METHOD_(OnUnitTestStart, UnitTest)
-GTEST_REPEATER_METHOD_(OnUnitTestEnd, UnitTest)
-GTEST_REPEATER_METHOD_(OnGlobalSetUpStart, UnitTest)
-GTEST_REPEATER_METHOD_(OnGlobalSetUpEnd, UnitTest)
-GTEST_REPEATER_METHOD_(OnGlobalTearDownStart, UnitTest)
-GTEST_REPEATER_METHOD_(OnGlobalTearDownEnd, UnitTest)
+GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest)
GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase)
-GTEST_REPEATER_METHOD_(OnTestCaseEnd, TestCase)
GTEST_REPEATER_METHOD_(OnTestStart, TestInfo)
-GTEST_REPEATER_METHOD_(OnTestEnd, TestInfo)
-GTEST_REPEATER_METHOD_(OnNewTestPartResult, TestPartResult)
+GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult)
+GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestCase)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest)
#undef GTEST_REPEATER_METHOD_
+#undef GTEST_REVERSE_REPEATER_METHOD_
-// End PrettyUnitTestResultPrinter
+void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test,
+ int iteration) {
+ if (forwarding_enabled_) {
+ for (size_t i = 0; i < listeners_.size(); i++) {
+ listeners_[i]->OnTestIterationStart(unit_test, iteration);
+ }
+ }
+}
+
+void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test,
+ int iteration) {
+ if (forwarding_enabled_) {
+ for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) {
+ listeners_[i]->OnTestIterationEnd(unit_test, iteration);
+ }
+ }
+}
+
+// End TestEventRepeater
// This class generates an XML output file.
-class XmlUnitTestResultPrinter : public UnitTestEventListenerInterface {
+class XmlUnitTestResultPrinter : public EmptyTestEventListener {
public:
explicit XmlUnitTestResultPrinter(const char* output_file);
- virtual void OnUnitTestEnd(const UnitTest* unit_test);
+ virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
private:
// Is c a whitespace character that is normalized to a space character
@@ -2843,39 +3013,41 @@ class XmlUnitTestResultPrinter : public UnitTestEventListenerInterface {
// is_attribute is true, the text is meant to appear as an attribute
// value, and normalizable whitespace is preserved by replacing it
// with character references.
- static internal::String EscapeXml(const char* str,
- bool is_attribute);
+ static String EscapeXml(const char* str, bool is_attribute);
+
+ // Returns the given string with all characters invalid in XML removed.
+ static String RemoveInvalidXmlCharacters(const char* str);
// Convenience wrapper around EscapeXml when str is an attribute value.
- static internal::String EscapeXmlAttribute(const char* str) {
+ static String EscapeXmlAttribute(const char* str) {
return EscapeXml(str, true);
}
// Convenience wrapper around EscapeXml when str is not an attribute value.
- static internal::String EscapeXmlText(const char* str) {
- return EscapeXml(str, false);
- }
+ static String EscapeXmlText(const char* str) { return EscapeXml(str, false); }
+
+ // Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+ static void OutputXmlCDataSection(::std::ostream* stream, const char* data);
- // Prints an XML representation of a TestInfo object.
- static void PrintXmlTestInfo(FILE* out,
- const char* test_case_name,
- const TestInfo* test_info);
+ // Streams an XML representation of a TestInfo object.
+ static void OutputXmlTestInfo(::std::ostream* stream,
+ const char* test_case_name,
+ const TestInfo& test_info);
// Prints an XML representation of a TestCase object
- static void PrintXmlTestCase(FILE* out, const TestCase* test_case);
+ static void PrintXmlTestCase(FILE* out, const TestCase& test_case);
// Prints an XML summary of unit_test to output stream out.
- static void PrintXmlUnitTest(FILE* out, const UnitTest* unit_test);
+ static void PrintXmlUnitTest(FILE* out, const UnitTest& unit_test);
// Produces a string representing the test properties in a result as space
// delimited XML attributes based on the property key="value" pairs.
// When the String is not empty, it includes a space at the beginning,
// to delimit this attribute from prior attributes.
- static internal::String TestPropertiesAsXmlAttributes(
- const internal::TestResult* result);
+ static String TestPropertiesAsXmlAttributes(const TestResult& result);
// The output file.
- const internal::String output_file_;
+ const String output_file_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter);
};
@@ -2891,23 +3063,14 @@ XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file)
}
// Called after the unit test ends.
-void XmlUnitTestResultPrinter::OnUnitTestEnd(const UnitTest* unit_test) {
+void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+ int /*iteration*/) {
FILE* xmlout = NULL;
- internal::FilePath output_file(output_file_);
- internal::FilePath output_dir(output_file.RemoveFileName());
+ FilePath output_file(output_file_);
+ FilePath output_dir(output_file.RemoveFileName());
if (output_dir.CreateDirectoriesRecursively()) {
- // MSVC 8 deprecates fopen(), so we want to suppress warning 4996
- // (deprecated function) there.
-#ifdef GTEST_OS_WINDOWS
- // We are on Windows.
-#pragma warning(push) // Saves the current warning state.
-#pragma warning(disable:4996) // Temporarily disables warning 4996.
- xmlout = fopen(output_file_.c_str(), "w");
-#pragma warning(pop) // Restores the warning state.
-#else // We are on Linux or Mac OS.
- xmlout = fopen(output_file_.c_str(), "w");
-#endif // GTEST_OS_WINDOWS
+ xmlout = posix::FOpen(output_file_.c_str(), "w");
}
if (xmlout == NULL) {
// TODO(wan): report the reason of the failure.
@@ -2942,8 +3105,7 @@ void XmlUnitTestResultPrinter::OnUnitTestEnd(const UnitTest* unit_test) {
// most invalid characters can be retained using character references.
// TODO(wan): It might be nice to have a minimally invasive, human-readable
// escaping scheme for invalid characters, rather than dropping them.
-internal::String XmlUnitTestResultPrinter::EscapeXml(const char* str,
- bool is_attribute) {
+String XmlUnitTestResultPrinter::EscapeXml(const char* str, bool is_attribute) {
Message m;
if (str != NULL) {
@@ -2973,7 +3135,7 @@ internal::String XmlUnitTestResultPrinter::EscapeXml(const char* str,
default:
if (IsValidXmlCharacter(*src)) {
if (is_attribute && IsNormalizableWhitespace(*src))
- m << internal::String::Format("&#x%02X;", unsigned(*src));
+ m << String::Format("&#x%02X;", unsigned(*src));
else
m << *src;
}
@@ -2985,13 +3147,28 @@ internal::String XmlUnitTestResultPrinter::EscapeXml(const char* str,
return m.GetString();
}
+// Returns the given string with all characters invalid in XML removed.
+// Currently invalid characters are dropped from the string. An
+// alternative is to replace them with certain characters such as . or ?.
+String XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(const char* str) {
+ char* const output = new char[strlen(str) + 1];
+ char* appender = output;
+ for (char ch = *str; ch != '\0'; ch = *++str)
+ if (IsValidXmlCharacter(ch))
+ *appender++ = ch;
+ *appender = '\0';
+
+ String ret_value(output);
+ delete[] output;
+ return ret_value;
+}
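IsValidXmlCharacter, referenced above, only needs to encode the XML 1.0 rule that control characters other than tab, newline, and carriage return are illegal. A minimal sketch of such a predicate (IsValidXmlByte is a hypothetical name; the definition actually used by this file may differ in details):

    // Sketch only: XML 1.0 forbids control characters other than
    // tab (0x09), LF (0x0A), and CR (0x0D); bytes from 0x20 up are
    // allowed. Not the predicate defined in this file.
    inline bool IsValidXmlByte(char ch) {
      const unsigned char c = static_cast<unsigned char>(ch);
      return c == 0x09 || c == 0x0A || c == 0x0D || c >= 0x20;
    }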
// The following routines generate an XML representation of a UnitTest
// object.
//
// This is how Google Test concepts map to the DTD:
//
-// <testsuite name="AllTests"> <-- corresponds to a UnitTest object
+// <testsuites name="AllTests"> <-- corresponds to a UnitTest object
// <testsuite name="testcase-name"> <-- corresponds to a TestCase object
// <testcase name="test-name"> <-- corresponds to a TestInfo object
// <failure message="...">...</failure>
@@ -3000,118 +3177,122 @@ internal::String XmlUnitTestResultPrinter::EscapeXml(const char* str,
// <-- individual assertion failures
// </testcase>
// </testsuite>
-// </testsuite>
-
-namespace internal {
-
-// Formats the given time in milliseconds as seconds. The returned
-// C-string is owned by this function and cannot be released by the
-// caller. Calling the function again invalidates the previous
-// result.
-const char* FormatTimeInMillisAsSeconds(TimeInMillis ms) {
- static String str;
- str = (Message() << (ms/1000.0)).GetString();
- return str.c_str();
+// </testsuites>
+
+// Formats the given time in milliseconds as seconds.
+std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
+ ::std::stringstream ss;
+ ss << ms/1000.0;
+ return ss.str();
+}
+
+// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
+ const char* data) {
+ const char* segment = data;
+ *stream << "<![CDATA[";
+ for (;;) {
+ const char* const next_segment = strstr(segment, "]]>");
+ if (next_segment != NULL) {
+ stream->write(
+ segment, static_cast<std::streamsize>(next_segment - segment));
+ *stream << "]]>]]&gt;<![CDATA[";
+ segment = next_segment + strlen("]]>");
+ } else {
+ *stream << segment;
+ break;
+ }
+ }
+ *stream << "]]>";
}
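Since a literal "]]>" would terminate a CDATA section early, the loop above closes the section, emits the escaped text "]]&gt;", and reopens a new section. A self-contained sketch of the same splicing trick (EmitCData is a hypothetical helper, not this file's API):

    #include <cstring>
    #include <iostream>

    // Writes data as one or more CDATA sections so that an embedded
    // "]]>" cannot terminate the section early.
    void EmitCData(std::ostream& out, const char* data) {
      out << "<![CDATA[";
      for (;;) {
        const char* const stop = std::strstr(data, "]]>");
        if (stop == NULL) break;
        out.write(data, static_cast<std::streamsize>(stop - data));
        out << "]]>]]&gt;<![CDATA[";  // Close, escape, reopen.
        data = stop + 3;              // Skip past the "]]>".
      }
      out << data << "]]>";
    }

    int main() {
      EmitCData(std::cout, "a]]>b");
      std::cout << "\n";  // Prints: <![CDATA[a]]>]]&gt;<![CDATA[b]]>
      return 0;
    }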
-} // namespace internal
-
// Prints an XML representation of a TestInfo object.
// TODO(wan): There is also value in printing properties with the plain printer.
-void XmlUnitTestResultPrinter::PrintXmlTestInfo(FILE* out,
- const char* test_case_name,
- const TestInfo* test_info) {
- const internal::TestResult * const result = test_info->result();
- const internal::List<TestPartResult> &results = result->test_part_results();
- fprintf(out,
- " <testcase name=\"%s\" status=\"%s\" time=\"%s\" "
- "classname=\"%s\"%s",
- EscapeXmlAttribute(test_info->name()).c_str(),
- test_info->should_run() ? "run" : "notrun",
- internal::FormatTimeInMillisAsSeconds(result->elapsed_time()),
- EscapeXmlAttribute(test_case_name).c_str(),
- TestPropertiesAsXmlAttributes(result).c_str());
+void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
+ const char* test_case_name,
+ const TestInfo& test_info) {
+ const TestResult& result = *test_info.result();
+ *stream << " <testcase name=\""
+ << EscapeXmlAttribute(test_info.name()).c_str()
+ << "\" status=\""
+ << (test_info.should_run() ? "run" : "notrun")
+ << "\" time=\""
+ << FormatTimeInMillisAsSeconds(result.elapsed_time())
+ << "\" classname=\"" << EscapeXmlAttribute(test_case_name).c_str()
+ << "\"" << TestPropertiesAsXmlAttributes(result).c_str();
int failures = 0;
- for (const internal::ListNode<TestPartResult>* part_node = results.Head();
- part_node != NULL;
- part_node = part_node->next()) {
- const TestPartResult& part = part_node->element();
+ for (int i = 0; i < result.total_part_count(); ++i) {
+ const TestPartResult& part = result.GetTestPartResult(i);
if (part.failed()) {
- const internal::String message =
- internal::String::Format("%s:%d\n%s", part.file_name(),
- part.line_number(), part.message());
if (++failures == 1)
- fprintf(out, ">\n");
- fprintf(out,
- " <failure message=\"%s\" type=\"\"><![CDATA[%s]]>"
- "</failure>\n",
- EscapeXmlAttribute(part.summary()).c_str(), message.c_str());
+ *stream << ">\n";
+ *stream << " <failure message=\""
+ << EscapeXmlAttribute(part.summary()).c_str()
+ << "\" type=\"\">";
+ const String message = RemoveInvalidXmlCharacters(String::Format(
+ "%s:%d\n%s",
+ part.file_name(), part.line_number(),
+ part.message()).c_str());
+ OutputXmlCDataSection(stream, message.c_str());
+ *stream << "</failure>\n";
}
}
if (failures == 0)
- fprintf(out, " />\n");
+ *stream << " />\n";
else
- fprintf(out, " </testcase>\n");
+ *stream << " </testcase>\n";
}
// Prints an XML representation of a TestCase object
void XmlUnitTestResultPrinter::PrintXmlTestCase(FILE* out,
- const TestCase* test_case) {
+ const TestCase& test_case) {
fprintf(out,
" <testsuite name=\"%s\" tests=\"%d\" failures=\"%d\" "
"disabled=\"%d\" ",
- EscapeXmlAttribute(test_case->name()).c_str(),
- test_case->total_test_count(),
- test_case->failed_test_count(),
- test_case->disabled_test_count());
+ EscapeXmlAttribute(test_case.name()).c_str(),
+ test_case.total_test_count(),
+ test_case.failed_test_count(),
+ test_case.disabled_test_count());
fprintf(out,
"errors=\"0\" time=\"%s\">\n",
- internal::FormatTimeInMillisAsSeconds(test_case->elapsed_time()));
- for (const internal::ListNode<TestInfo*>* info_node =
- test_case->test_info_list().Head();
- info_node != NULL;
- info_node = info_node->next()) {
- PrintXmlTestInfo(out, test_case->name(), info_node->element());
+ FormatTimeInMillisAsSeconds(test_case.elapsed_time()).c_str());
+ for (int i = 0; i < test_case.total_test_count(); ++i) {
+ StrStream stream;
+ OutputXmlTestInfo(&stream, test_case.name(), *test_case.GetTestInfo(i));
+ fprintf(out, "%s", StrStreamToString(&stream).c_str());
}
fprintf(out, " </testsuite>\n");
}
// Prints an XML summary of unit_test to output stream out.
void XmlUnitTestResultPrinter::PrintXmlUnitTest(FILE* out,
- const UnitTest* unit_test) {
- const internal::UnitTestImpl* const impl = unit_test->impl();
+ const UnitTest& unit_test) {
fprintf(out, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
fprintf(out,
- "<testsuite tests=\"%d\" failures=\"%d\" disabled=\"%d\" "
+ "<testsuites tests=\"%d\" failures=\"%d\" disabled=\"%d\" "
"errors=\"0\" time=\"%s\" ",
- impl->total_test_count(),
- impl->failed_test_count(),
- impl->disabled_test_count(),
- internal::FormatTimeInMillisAsSeconds(impl->elapsed_time()));
- fprintf(out, "name=\"AllTests\">\n");
- for (const internal::ListNode<TestCase*>* case_node =
- impl->test_cases()->Head();
- case_node != NULL;
- case_node = case_node->next()) {
- PrintXmlTestCase(out, case_node->element());
+ unit_test.total_test_count(),
+ unit_test.failed_test_count(),
+ unit_test.disabled_test_count(),
+ FormatTimeInMillisAsSeconds(unit_test.elapsed_time()).c_str());
+ if (GTEST_FLAG(shuffle)) {
+ fprintf(out, "random_seed=\"%d\" ", unit_test.random_seed());
}
- fprintf(out, "</testsuite>\n");
+ fprintf(out, "name=\"AllTests\">\n");
+ for (int i = 0; i < unit_test.total_test_case_count(); ++i)
+ PrintXmlTestCase(out, *unit_test.GetTestCase(i));
+ fprintf(out, "</testsuites>\n");
}
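Taken together, the printers above produce a document of this shape (values are illustrative; the random_seed attribute appears only when --gtest_shuffle is in effect):

    <?xml version="1.0" encoding="UTF-8"?>
    <testsuites tests="2" failures="1" disabled="0" errors="0" time="0.035" name="AllTests">
      <testsuite name="FooTest" tests="2" failures="1" disabled="0" errors="0" time="0.034">
        <testcase name="DoesBar" status="run" time="0.011" classname="FooTest" />
        <testcase name="DoesBaz" status="run" time="0.012" classname="FooTest">
          <failure message="foo_test.cc:42 ..." type=""><![CDATA[foo_test.cc:42 ...]]></failure>
        </testcase>
      </testsuite>
    </testsuites>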
// Produces a string representing the test properties in a result as space
// delimited XML attributes based on the property key="value" pairs.
-internal::String XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
- const internal::TestResult* result) {
- using internal::TestProperty;
+String XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
+ const TestResult& result) {
Message attributes;
- const internal::List<TestProperty>& properties = result->test_properties();
- for (const internal::ListNode<TestProperty>* property_node =
- properties.Head();
- property_node != NULL;
- property_node = property_node->next()) {
- const TestProperty& property = property_node->element();
+ for (int i = 0; i < result.test_property_count(); ++i) {
+ const TestProperty& property = result.GetTestProperty(i);
attributes << " " << property.key() << "="
<< "\"" << EscapeXmlAttribute(property.value()) << "\"";
}
@@ -3120,8 +3301,6 @@ internal::String XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
// End XmlUnitTestResultPrinter
-namespace internal {
-
// Class ScopedTrace
// Pushes the given source file location and message onto a per-thread
@@ -3164,10 +3343,85 @@ void OsStackTraceGetter::UponLeavingGTest() {
const char* const
OsStackTraceGetter::kElidedFramesMarker =
- "... " GTEST_NAME " internal frames ...";
+ "... " GTEST_NAME_ " internal frames ...";
} // namespace internal
+// class TestEventListeners
+
+TestEventListeners::TestEventListeners()
+ : repeater_(new internal::TestEventRepeater()),
+ default_result_printer_(NULL),
+ default_xml_generator_(NULL) {
+}
+
+TestEventListeners::~TestEventListeners() { delete repeater_; }
+
+// Adds a listener to the end of the list. Google Test takes ownership
+// of the listener (i.e. it will delete the listener when the test
+// program finishes).
+void TestEventListeners::Append(TestEventListener* listener) {
+ repeater_->Append(listener);
+}
+
+// Removes the given event listener from the list and returns it. It then
+// becomes the caller's responsibility to delete the listener. Returns
+// NULL if the listener is not found in the list.
+TestEventListener* TestEventListeners::Release(TestEventListener* listener) {
+ if (listener == default_result_printer_)
+ default_result_printer_ = NULL;
+ else if (listener == default_xml_generator_)
+ default_xml_generator_ = NULL;
+ return repeater_->Release(listener);
+}
+
+// Returns repeater that broadcasts the TestEventListener events to all
+// subscribers.
+TestEventListener* TestEventListeners::repeater() { return repeater_; }
+
+// Sets the default_result_printer attribute to the provided listener.
+// The listener is also added to the listener list and previous
+// default_result_printer is removed from it and deleted. The listener can
+// also be NULL in which case it will not be added to the list. Does
+// nothing if the previous and the current listener objects are the same.
+void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) {
+ if (default_result_printer_ != listener) {
+ // It is an error to pass this method a listener that is already in the
+ // list.
+ delete Release(default_result_printer_);
+ default_result_printer_ = listener;
+ if (listener != NULL)
+ Append(listener);
+ }
+}
+
+// Sets the default_xml_generator attribute to the provided listener. The
+// listener is also added to the listener list and previous
+// default_xml_generator is removed from it and deleted. The listener can
+// also be NULL in which case it will not be added to the list. Does
+// nothing if the previous and the current listener objects are the same.
+void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) {
+ if (default_xml_generator_ != listener) {
+ // It is an error to pass this method a listener that is already in the
+ // list.
+ delete Release(default_xml_generator_);
+ default_xml_generator_ = listener;
+ if (listener != NULL)
+ Append(listener);
+ }
+}
+
+// Controls whether events will be forwarded by the repeater to the
+// listeners in the list.
+bool TestEventListeners::EventForwardingEnabled() const {
+ return repeater_->forwarding_enabled();
+}
+
+void TestEventListeners::SuppressEventForwarding() {
+ repeater_->set_forwarding_enabled(false);
+}
+
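In user code this replaces the old set_result_printer mechanism: Release the default printer (taking ownership of it) and Append a replacement. A sketch, assuming the gtest 1.4-era <gtest/gtest.h> header layout and a user-defined QuietListener:

    #include <cstdio>
    #include <gtest/gtest.h>

    // Prints one line per failed test and nothing else.
    class QuietListener : public ::testing::EmptyTestEventListener {
      virtual void OnTestEnd(const ::testing::TestInfo& info) {
        if (info.result()->Failed())
          std::printf("FAILED: %s.%s\n", info.test_case_name(), info.name());
      }
    };

    int main(int argc, char** argv) {
      ::testing::InitGoogleTest(&argc, argv);
      ::testing::TestEventListeners& listeners =
          ::testing::UnitTest::GetInstance()->listeners();
      // Release() transfers ownership back to us, so delete the result.
      delete listeners.Release(listeners.default_result_printer());
      listeners.Append(new QuietListener);  // The list takes ownership.
      return RUN_ALL_TESTS();
    }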
// class UnitTest
// Gets the singleton UnitTest object. The first time this method is
@@ -3184,13 +3438,88 @@ UnitTest * UnitTest::GetInstance() {
// different implementation in this case to bypass the compiler bug.
// This implementation makes the compiler happy, at the cost of
// leaking the UnitTest object.
-#if _MSC_VER == 1310 && !defined(_DEBUG) // MSVC 7.1 and optimized build.
+
+ // CodeGear C++Builder insists on a public destructor for the
+ // default implementation. Use this implementation to keep good OO
+ // design with private destructor.
+
+#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
static UnitTest* const instance = new UnitTest;
return instance;
#else
static UnitTest instance;
return &instance;
-#endif // _MSC_VER==1310 && !defined(_DEBUG)
+#endif // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+}
+
+// Gets the number of successful test cases.
+int UnitTest::successful_test_case_count() const {
+ return impl()->successful_test_case_count();
+}
+
+// Gets the number of failed test cases.
+int UnitTest::failed_test_case_count() const {
+ return impl()->failed_test_case_count();
+}
+
+// Gets the number of all test cases.
+int UnitTest::total_test_case_count() const {
+ return impl()->total_test_case_count();
+}
+
+// Gets the number of all test cases that contain at least one test
+// that should run.
+int UnitTest::test_case_to_run_count() const {
+ return impl()->test_case_to_run_count();
+}
+
+// Gets the number of successful tests.
+int UnitTest::successful_test_count() const {
+ return impl()->successful_test_count();
+}
+
+// Gets the number of failed tests.
+int UnitTest::failed_test_count() const { return impl()->failed_test_count(); }
+
+// Gets the number of disabled tests.
+int UnitTest::disabled_test_count() const {
+ return impl()->disabled_test_count();
+}
+
+// Gets the number of all tests.
+int UnitTest::total_test_count() const { return impl()->total_test_count(); }
+
+// Gets the number of tests that should run.
+int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); }
+
+// Gets the elapsed time, in milliseconds.
+internal::TimeInMillis UnitTest::elapsed_time() const {
+ return impl()->elapsed_time();
+}
+
+// Returns true iff the unit test passed (i.e. all test cases passed).
+bool UnitTest::Passed() const { return impl()->Passed(); }
+
+// Returns true iff the unit test failed (i.e. some test case failed
+// or something outside of all tests failed).
+bool UnitTest::Failed() const { return impl()->Failed(); }
+
+// Gets the i-th test case among all the test cases. i can range from 0 to
+// total_test_case_count() - 1. If i is not in that range, returns NULL.
+const TestCase* UnitTest::GetTestCase(int i) const {
+ return impl()->GetTestCase(i);
+}
+
+// Gets the i-th test case among all the test cases. i can range from 0 to
+// total_test_case_count() - 1. If i is not in that range, returns NULL.
+TestCase* UnitTest::GetMutableTestCase(int i) {
+ return impl()->GetMutableTestCase(i);
+}
+
+// Returns the list of event listeners that can be used to track events
+// inside Google Test.
+TestEventListeners& UnitTest::listeners() {
+ return *impl()->listeners();
}
// Registers and returns a global test environment. When a test
@@ -3208,17 +3537,29 @@ Environment* UnitTest::AddEnvironment(Environment* env) {
return NULL;
}
- impl_->environments()->PushBack(env);
- impl_->environments_in_reverse_order()->PushFront(env);
+ impl_->environments().push_back(env);
return env;
}
+#if GTEST_HAS_EXCEPTIONS
+// A failed Google Test assertion will throw an exception of this type
+// when exceptions are enabled. We derive it from std::runtime_error,
+// which is for errors presumably detectable only at run time. Since
+// std::runtime_error inherits from std::exception, many testing
+// frameworks know how to extract and print the message inside it.
+class GoogleTestFailureException : public ::std::runtime_error {
+ public:
+ explicit GoogleTestFailureException(const TestPartResult& failure)
+ : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {}
+};
+#endif
+
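Because the exception type above derives from std::runtime_error, a host framework's generic handler can surface the failure text via what(). A self-contained simulation — the throwing function merely stands in for an assertion failing under --gtest_throw_on_failure:

    #include <cstdio>
    #include <exception>
    #include <stdexcept>

    // Stand-in for a Google Test assertion failing with
    // --gtest_throw_on_failure enabled.
    void StepWithFailingAssertion() {
      throw std::runtime_error("foo_test.cc:42\nExpected: ...");
    }

    int main() {
      try {
        StepWithFailingAssertion();
      } catch (const std::exception& e) {
        std::printf("caught gtest failure: %s\n", e.what());
      }
      return 0;
    }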
// Adds a TestPartResult to the current TestResult object. All Google Test
// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call
// this to report their results. The user code should use the
// assertion macros instead of calling this directly.
// L < mutex_
-void UnitTest::AddTestPartResult(TestPartResultType result_type,
+void UnitTest::AddTestPartResult(TestPartResult::Type result_type,
const char* file_name,
int line_number,
const internal::String& message,
@@ -3227,15 +3568,14 @@ void UnitTest::AddTestPartResult(TestPartResultType result_type,
msg << message;
internal::MutexLock lock(&mutex_);
- if (impl_->gtest_trace_stack()->size() > 0) {
- msg << "\n" << GTEST_NAME << " trace:";
-
- for (internal::ListNode<internal::TraceInfo>* node =
- impl_->gtest_trace_stack()->Head();
- node != NULL;
- node = node->next()) {
- const internal::TraceInfo& trace = node->element();
- msg << "\n" << trace.file << ":" << trace.line << ": " << trace.message;
+ if (impl_->gtest_trace_stack().size() > 0) {
+ msg << "\n" << GTEST_NAME_ << " trace:";
+
+ for (int i = static_cast<int>(impl_->gtest_trace_stack().size());
+ i > 0; --i) {
+ const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1];
+ msg << "\n" << internal::FormatFileLocation(trace.file, trace.line)
+ << " " << trace.message;
}
}
@@ -3249,11 +3589,30 @@ void UnitTest::AddTestPartResult(TestPartResultType result_type,
impl_->GetTestPartResultReporterForCurrentThread()->
ReportTestPartResult(result);
- // If this is a failure and the user wants the debugger to break on
- // failures ...
- if (result_type != TPRT_SUCCESS && GTEST_FLAG(break_on_failure)) {
- // ... then we generate a seg fault.
- *static_cast<int*>(NULL) = 1;
+ if (result_type != TestPartResult::kSuccess) {
+ // gtest_break_on_failure takes precedence over
+ // gtest_throw_on_failure. This allows a user to set the latter
+ // in the code (perhaps in order to use Google Test assertions
+ // with another testing framework) and specify the former on the
+ // command line for debugging.
+ if (GTEST_FLAG(break_on_failure)) {
+#if GTEST_OS_WINDOWS
+ // Using DebugBreak on Windows allows gtest to still break into a debugger
+ // when a failure happens and both the --gtest_break_on_failure and
+ // the --gtest_catch_exceptions flags are specified.
+ DebugBreak();
+#else
+ abort();
+#endif // GTEST_OS_WINDOWS
+ } else if (GTEST_FLAG(throw_on_failure)) {
+#if GTEST_HAS_EXCEPTIONS
+ throw GoogleTestFailureException(result);
+#else
+ // We cannot call abort() as it generates a pop-up in debug mode
+ // that cannot be suppressed in VC 7.1 or below.
+ exit(1);
+#endif
+ }
}
}
@@ -3261,7 +3620,7 @@ void UnitTest::AddTestPartResult(TestPartResultType result_type,
// the supplied value already exists, updates its value instead.
void UnitTest::RecordPropertyForCurrentTest(const char* key,
const char* value) {
- const internal::TestProperty test_property(key, value);
+ const TestProperty test_property(key, value);
impl_->current_test_result()->RecordProperty(test_property);
}
@@ -3271,18 +3630,48 @@ void UnitTest::RecordPropertyForCurrentTest(const char* key,
// We don't protect this under mutex_, as we only support calling it
// from the main thread.
int UnitTest::Run() {
-#if defined(GTEST_OS_WINDOWS) && !defined(__MINGW32__)
-
-#if !defined(_WIN32_WCE)
- // SetErrorMode doesn't exist on CE.
- if (GTEST_FLAG(catch_exceptions)) {
- // The user wants Google Test to catch exceptions thrown by the tests.
-
- // This lets fatal errors be handled by us, instead of causing pop-ups.
+#if GTEST_HAS_SEH
+ // Catch SEH-style exceptions.
+
+ const bool in_death_test_child_process =
+ internal::GTEST_FLAG(internal_run_death_test).length() > 0;
+
+ // Either the user wants Google Test to catch exceptions thrown by the
+ // tests or this is executing in the context of a death test child
+ // process. In either case the user does not want to see pop-up dialogs
+ // about crashes - they are expected.
+ if (GTEST_FLAG(catch_exceptions) || in_death_test_child_process) {
+#if !GTEST_OS_WINDOWS_MOBILE
+ // SetErrorMode doesn't exist on CE.
SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT |
SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX);
+#endif // !GTEST_OS_WINDOWS_MOBILE
+
+#if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE
+ // Death test children can be terminated with _abort(). On Windows,
+ // _abort() can show a dialog with a warning message. This forces the
+ // abort message to go to stderr instead.
+ _set_error_mode(_OUT_TO_STDERR);
+#endif
+
+#if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
+ // In the debug version, Visual Studio pops up a separate dialog
+ // offering a choice to debug the aborted program. We need to suppress
+ // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement
+ // executed. Google Test will notify the user of any unexpected
+ // failure via stderr.
+ //
+ // VC++ doesn't define _set_abort_behavior() prior to version 8.0.
+ // Users of prior VC versions shall suffer the agony and pain of
+ // clicking through the countless debug dialogs.
+ // TODO(vladl@google.com): find a way to suppress the abort dialog in the
+ // debug mode when compiled with VC 7.1 or lower.
+ if (!GTEST_FLAG(break_on_failure))
+ _set_abort_behavior(
+ 0x0, // Clear the following flags:
+ _WRITE_ABORT_MSG | _CALL_REPORTFAULT); // pop-up window, core dump.
+#endif
}
-#endif // _WIN32_WCE
__try {
return impl_->RunAllTests();
@@ -3293,11 +3682,10 @@ int UnitTest::Run() {
return 1;
}
-#else
- // We are on Linux, Mac OS or MingW. There is no exception of any kind.
+#else // We are on a compiler or platform that doesn't support SEH.
return impl_->RunAllTests();
-#endif // GTEST_OS_WINDOWS
+#endif // GTEST_HAS_SEH
}
// Returns the working directory when the first TEST() or TEST_F() was
@@ -3322,7 +3710,10 @@ const TestInfo* UnitTest::current_test_info() const {
return impl_->current_test_info();
}
-#ifdef GTEST_HAS_PARAM_TEST
+// Returns the random seed used at the start of the current test run.
+int UnitTest::random_seed() const { return impl_->random_seed(); }
+
+#if GTEST_HAS_PARAM_TEST
// Returns ParameterizedTestCaseRegistry object used to keep track of
// value-parameterized tests and instantiate and register them.
// L < mutex_
@@ -3347,14 +3738,14 @@ UnitTest::~UnitTest() {
// L < mutex_
void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) {
internal::MutexLock lock(&mutex_);
- impl_->gtest_trace_stack()->PushFront(trace);
+ impl_->gtest_trace_stack().push_back(trace);
}
// Pops a trace from the per-thread Google Test trace stack.
// L < mutex_
void UnitTest::PopGTestTrace() {
internal::MutexLock lock(&mutex_);
- impl_->gtest_trace_stack()->PopFront(NULL);
+ impl_->gtest_trace_stack().pop_back();
}
namespace internal {
@@ -3376,39 +3767,87 @@ UnitTestImpl::UnitTestImpl(UnitTest* parent)
&default_global_test_part_result_reporter_),
per_thread_test_part_result_reporter_(
&default_per_thread_test_part_result_reporter_),
- test_cases_(),
-#ifdef GTEST_HAS_PARAM_TEST
+#if GTEST_HAS_PARAM_TEST
parameterized_test_registry_(),
parameterized_tests_registered_(false),
#endif // GTEST_HAS_PARAM_TEST
- last_death_test_case_(NULL),
+ last_death_test_case_(-1),
current_test_case_(NULL),
current_test_info_(NULL),
ad_hoc_test_result_(),
- result_printer_(NULL),
os_stack_trace_getter_(NULL),
-#ifdef GTEST_HAS_DEATH_TEST
+ post_flag_parse_init_performed_(false),
+ random_seed_(0), // Will be overridden by the flag before first use.
+ random_(0), // Will be reseeded before first use.
+#if GTEST_HAS_DEATH_TEST
elapsed_time_(0),
internal_run_death_test_flag_(NULL),
death_test_factory_(new DefaultDeathTestFactory) {
#else
elapsed_time_(0) {
#endif // GTEST_HAS_DEATH_TEST
+ listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter);
}
UnitTestImpl::~UnitTestImpl() {
// Deletes every TestCase.
- test_cases_.ForEach(internal::Delete<TestCase>);
+ ForEach(test_cases_, internal::Delete<TestCase>);
// Deletes every Environment.
- environments_.ForEach(internal::Delete<Environment>);
-
- // Deletes the current test result printer.
- delete result_printer_;
+ ForEach(environments_, internal::Delete<Environment>);
delete os_stack_trace_getter_;
}
+#if GTEST_HAS_DEATH_TEST
+// Disables event forwarding if control is currently in a death test
+// subprocess. Must not be called before InitGoogleTest.
+void UnitTestImpl::SuppressTestEventsIfInSubprocess() {
+ if (internal_run_death_test_flag_.get() != NULL)
+ listeners()->SuppressEventForwarding();
+}
+#endif // GTEST_HAS_DEATH_TEST
+
+// Initializes event listeners performing XML output as specified by
+// UnitTestOptions. Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureXmlOutput() {
+ const String& output_format = UnitTestOptions::GetOutputFormat();
+ if (output_format == "xml") {
+ listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter(
+ UnitTestOptions::GetAbsolutePathToOutputFile().c_str()));
+ } else if (output_format != "") {
+ printf("WARNING: unrecognized output format \"%s\" ignored.\n",
+ output_format.c_str());
+ fflush(stdout);
+ }
+}
+
+// Performs initialization dependent upon flag values obtained in
+// ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to
+// ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest
+// this function is also called from RunAllTests. Since this function can be
+// called more than once, it has to be idempotent.
+void UnitTestImpl::PostFlagParsingInit() {
+ // Ensures that this function does not execute more than once.
+ if (!post_flag_parse_init_performed_) {
+ post_flag_parse_init_performed_ = true;
+
+#if GTEST_HAS_DEATH_TEST
+ InitDeathTestSubprocessControlInfo();
+ SuppressTestEventsIfInSubprocess();
+#endif // GTEST_HAS_DEATH_TEST
+
+ // Registers parameterized tests. This makes parameterized tests
+ // available to the UnitTest reflection API without running
+ // RUN_ALL_TESTS.
+ RegisterParameterizedTests();
+
+ // Configures listeners for XML output. This makes it possible for users
+ // to shut down the default XML output before invoking RUN_ALL_TESTS.
+ ConfigureXmlOutput();
+ }
+}
+
// A predicate that checks the name of a TestCase against a known
// value.
//
@@ -3433,7 +3872,9 @@ class TestCaseNameIs {
};
// Finds and returns a TestCase with the given name. If one doesn't
-// exist, creates one and returns it.
+// exist, creates one and returns it. It's the CALLER'S
+// RESPONSIBILITY to ensure that this function is only called WHEN THE
+// TESTS ARE NOT SHUFFLED.
//
// Arguments:
//
@@ -3445,34 +3886,38 @@ TestCase* UnitTestImpl::GetTestCase(const char* test_case_name,
Test::SetUpTestCaseFunc set_up_tc,
Test::TearDownTestCaseFunc tear_down_tc) {
// Can we find a TestCase with the given name?
- internal::ListNode<TestCase*>* node = test_cases_.FindIf(
- TestCaseNameIs(test_case_name));
+ const std::vector<TestCase*>::const_iterator test_case =
+ std::find_if(test_cases_.begin(), test_cases_.end(),
+ TestCaseNameIs(test_case_name));
+
+ if (test_case != test_cases_.end())
+ return *test_case;
- if (node == NULL) {
- // No. Let's create one.
- TestCase* const test_case =
+ // No. Let's create one.
+ TestCase* const new_test_case =
new TestCase(test_case_name, comment, set_up_tc, tear_down_tc);
- // Is this a death test case?
- if (internal::UnitTestOptions::MatchesFilter(String(test_case_name),
- kDeathTestCaseFilter)) {
- // Yes. Inserts the test case after the last death test case
- // defined so far.
- node = test_cases_.InsertAfter(last_death_test_case_, test_case);
- last_death_test_case_ = node;
- } else {
- // No. Appends to the end of the list.
- test_cases_.PushBack(test_case);
- node = test_cases_.Last();
- }
+ // Is this a death test case?
+ if (internal::UnitTestOptions::MatchesFilter(String(test_case_name),
+ kDeathTestCaseFilter)) {
+ // Yes. Inserts the test case after the last death test case
+ // defined so far. This only works when the test cases haven't
+ // been shuffled. Otherwise we may end up running a death test
+ // after a non-death test.
+ ++last_death_test_case_;
+ test_cases_.insert(test_cases_.begin() + last_death_test_case_,
+ new_test_case);
+ } else {
+ // No. Appends to the end of the list.
+ test_cases_.push_back(new_test_case);
}
- // Returns the TestCase found.
- return node->element();
+ test_case_indices_.push_back(static_cast<int>(test_case_indices_.size()));
+ return new_test_case;
}
// Helpers for setting up / tearing down the given environment. They
-// are for use in the List::ForEach() method.
+// are for use in the ForEach() function.
static void SetUpEnvironment(Environment* env) { env->SetUp(); }
static void TearDownEnvironment(Environment* env) { env->TearDown(); }
@@ -3482,7 +3927,7 @@ static void TearDownEnvironment(Environment* env) { env->TearDown(); }
// considered to be failed, but the rest of the tests will still be
// run. (We disable exceptions on Linux and Mac OS X, so the issue
// doesn't apply there.)
-// When parameterized tests are enabled, it explands and registers
+// When parameterized tests are enabled, it expands and registers
// parameterized tests first in RegisterParameterizedTests().
// All other functions called from RunAllTests() may safely assume that
// parameterized tests are ready to be counted and run.
@@ -3495,189 +3940,300 @@ int UnitTestImpl::RunAllTests() {
return 1;
}
- RegisterParameterizedTests();
-
- // Lists all the tests and exits if the --gtest_list_tests
- // flag was specified.
- if (GTEST_FLAG(list_tests)) {
- ListAllTests();
+ // Do not run any test if the --help flag was specified.
+ if (g_help_flag)
return 0;
- }
+
+ // Repeats the call to the post-flag parsing initialization in case the
+ // user didn't call InitGoogleTest.
+ PostFlagParsingInit();
+
+ // Even if sharding is not on, test runners may want to use the
+ // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding
+ // protocol.
+ internal::WriteToShardStatusFileIfNeeded();
// True iff we are in a subprocess for running a thread-safe-style
// death test.
bool in_subprocess_for_death_test = false;
-#ifdef GTEST_HAS_DEATH_TEST
- internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag());
+#if GTEST_HAS_DEATH_TEST
in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL);
#endif // GTEST_HAS_DEATH_TEST
- UnitTestEventListenerInterface * const printer = result_printer();
+ const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex,
+ in_subprocess_for_death_test);
// Compares the full test names with the filter to decide which
// tests to run.
- const bool has_tests_to_run = FilterTests() > 0;
+ const bool has_tests_to_run = FilterTests(should_shard
+ ? HONOR_SHARDING_PROTOCOL
+ : IGNORE_SHARDING_PROTOCOL) > 0;
+
+ // Lists the tests and exits if the --gtest_list_tests flag was specified.
+ if (GTEST_FLAG(list_tests)) {
+ // This must be called *after* FilterTests() has been called.
+ ListTestsMatchingFilter();
+ return 0;
+ }
+
+ random_seed_ = GTEST_FLAG(shuffle) ?
+ GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0;
+
// True iff at least one test has failed.
bool failed = false;
+ TestEventListener* repeater = listeners()->repeater();
+
+ repeater->OnTestProgramStart(*parent_);
+
// How many times to repeat the tests? We don't want to repeat them
// when we are inside the subprocess of a death test.
const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat);
// Repeats forever if the repeat count is negative.
const bool forever = repeat < 0;
for (int i = 0; forever || i != repeat; i++) {
- if (repeat != 1) {
- printf("\nRepeating all tests (iteration %d) . . .\n\n", i + 1);
- }
-
- // Tells the unit test event listener that the tests are about to
- // start.
- printer->OnUnitTestStart(parent_);
+ ClearResult();
const TimeInMillis start = GetTimeInMillis();
+ // Shuffles test cases and tests if requested.
+ if (has_tests_to_run && GTEST_FLAG(shuffle)) {
+ random()->Reseed(random_seed_);
+ // This should be done before calling OnTestIterationStart(),
+ // such that a test event listener can see the actual test order
+ // in the event.
+ ShuffleTests();
+ }
+
+ // Tells the unit test event listeners that the tests are about to start.
+ repeater->OnTestIterationStart(*parent_, i);
+
// Runs each test case if there is at least one test to run.
if (has_tests_to_run) {
// Sets up all environments beforehand.
- printer->OnGlobalSetUpStart(parent_);
- environments_.ForEach(SetUpEnvironment);
- printer->OnGlobalSetUpEnd(parent_);
+ repeater->OnEnvironmentsSetUpStart(*parent_);
+ ForEach(environments_, SetUpEnvironment);
+ repeater->OnEnvironmentsSetUpEnd(*parent_);
// Runs the tests only if there was no fatal failure during global
// set-up.
if (!Test::HasFatalFailure()) {
- test_cases_.ForEach(TestCase::RunTestCase);
+ for (int test_index = 0; test_index < total_test_case_count();
+ test_index++) {
+ GetMutableTestCase(test_index)->Run();
+ }
}
// Tears down all environments in reverse order afterwards.
- printer->OnGlobalTearDownStart(parent_);
- environments_in_reverse_order_.ForEach(TearDownEnvironment);
- printer->OnGlobalTearDownEnd(parent_);
+ repeater->OnEnvironmentsTearDownStart(*parent_);
+ std::for_each(environments_.rbegin(), environments_.rend(),
+ TearDownEnvironment);
+ repeater->OnEnvironmentsTearDownEnd(*parent_);
}
elapsed_time_ = GetTimeInMillis() - start;
- // Tells the unit test event listener that the tests have just
- // finished.
- printer->OnUnitTestEnd(parent_);
+ // Tells the unit test event listener that the tests have just finished.
+ repeater->OnTestIterationEnd(*parent_, i);
// Gets the result and clears it.
if (!Passed()) {
failed = true;
}
- ClearResult();
+
+ // Restores the original test order after the iteration. This
+ // allows the user to quickly repro a failure that happens in the
+ // N-th iteration without repeating the first (N - 1) iterations.
+ // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in
+ // case the user somehow changes the value of the flag somewhere
+ // (it's always safe to unshuffle the tests).
+ UnshuffleTests();
+
+ if (GTEST_FLAG(shuffle)) {
+ // Picks a new random seed for each iteration.
+ random_seed_ = GetNextRandomSeed(random_seed_);
+ }
}
+ repeater->OnTestProgramEnd(*parent_);
+
// Returns 0 if all tests passed, or 1 otherwise.
return failed ? 1 : 0;
}
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded() {
+ const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile);
+ if (test_shard_file != NULL) {
+ FILE* const file = posix::FOpen(test_shard_file, "w");
+ if (file == NULL) {
+ ColoredPrintf(COLOR_RED,
+ "Could not write to the test shard status file \"%s\" "
+ "specified by the %s environment variable.\n",
+ test_shard_file, kTestShardStatusFile);
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ }
+ fclose(file);
+ }
+}
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (i.e., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+bool ShouldShard(const char* total_shards_env,
+ const char* shard_index_env,
+ bool in_subprocess_for_death_test) {
+ if (in_subprocess_for_death_test) {
+ return false;
+ }
+
+ const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1);
+ const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1);
+
+ if (total_shards == -1 && shard_index == -1) {
+ return false;
+ } else if (total_shards == -1 && shard_index != -1) {
+ const Message msg = Message()
+ << "Invalid environment variables: you have "
+ << kTestShardIndex << " = " << shard_index
+ << ", but have left " << kTestTotalShards << " unset.\n";
+ ColoredPrintf(COLOR_RED, "%s", msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ } else if (total_shards != -1 && shard_index == -1) {
+ const Message msg = Message()
+ << "Invalid environment variables: you have "
+ << kTestTotalShards << " = " << total_shards
+ << ", but have left " << kTestShardIndex << " unset.\n";
+ ColoredPrintf(COLOR_RED, "%s", msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ } else if (shard_index < 0 || shard_index >= total_shards) {
+ const Message msg = Message()
+ << "Invalid environment variables: we require 0 <= "
+ << kTestShardIndex << " < " << kTestTotalShards
+ << ", but you have " << kTestShardIndex << "=" << shard_index
+ << ", " << kTestTotalShards << "=" << total_shards << ".\n";
+ ColoredPrintf(COLOR_RED, "%s", msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ }
+
+ return total_shards > 1;
+}
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error
+// and aborts.
+Int32 Int32FromEnvOrDie(const char* const var, Int32 default_val) {
+ const char* str_val = posix::GetEnv(var);
+ if (str_val == NULL) {
+ return default_val;
+ }
+
+ Int32 result;
+ if (!ParseInt32(Message() << "The value of environment variable " << var,
+ str_val, &result)) {
+ exit(EXIT_FAILURE);
+ }
+ return result;
+}
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) {
+ return (test_id % total_shards) == shard_index;
+}
+
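In stock Google Test the kTestTotalShards and kTestShardIndex constants name the GTEST_TOTAL_SHARDS and GTEST_SHARD_INDEX environment variables, and the modulo rule above deals runnable tests out round-robin. A standalone check of that distribution:

    #include <cstdio>

    // Same rule as ShouldRunTestOnShard above.
    bool OnShard(int total_shards, int shard_index, int test_id) {
      return (test_id % total_shards) == shard_index;
    }

    int main() {
      // With 3 shards, shard 0 runs ids 0,3,6; shard 1 runs 1,4,7; ...
      for (int shard = 0; shard < 3; ++shard) {
        std::printf("shard %d:", shard);
        for (int id = 0; id < 9; ++id)
          if (OnShard(3, shard, id)) std::printf(" %d", id);
        std::printf("\n");
      }
      return 0;
    }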
// Compares the name of each test with the user-specified filter to
// decide whether the test should be run, then records the result in
// each TestCase and TestInfo object.
+// If shard_tests == true, further filters tests based on sharding
+// variables in the environment - see
+// http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide.
// Returns the number of tests that should run.
-int UnitTestImpl::FilterTests() {
+int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) {
+ const Int32 total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ?
+ Int32FromEnvOrDie(kTestTotalShards, -1) : -1;
+ const Int32 shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ?
+ Int32FromEnvOrDie(kTestShardIndex, -1) : -1;
+
+ // num_runnable_tests is the number of tests that will run
+ // across all shards (i.e., that match the filter and are not disabled).
+ // num_selected_tests is the number of tests to be run on
+ // this shard.
int num_runnable_tests = 0;
- for (const internal::ListNode<TestCase *> *test_case_node =
- test_cases_.Head();
- test_case_node != NULL;
- test_case_node = test_case_node->next()) {
- TestCase * const test_case = test_case_node->element();
+ int num_selected_tests = 0;
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ TestCase* const test_case = test_cases_[i];
const String &test_case_name = test_case->name();
test_case->set_should_run(false);
- for (const internal::ListNode<TestInfo *> *test_info_node =
- test_case->test_info_list().Head();
- test_info_node != NULL;
- test_info_node = test_info_node->next()) {
- TestInfo * const test_info = test_info_node->element();
+ for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
+ TestInfo* const test_info = test_case->test_info_list()[j];
const String test_name(test_info->name());
// A test is disabled if test case name or test name matches
// kDisableTestFilter.
const bool is_disabled =
- internal::UnitTestOptions::MatchesFilter(test_case_name,
- kDisableTestFilter) ||
- internal::UnitTestOptions::MatchesFilter(test_name,
- kDisableTestFilter);
+ internal::UnitTestOptions::MatchesFilter(test_case_name,
+ kDisableTestFilter) ||
+ internal::UnitTestOptions::MatchesFilter(test_name,
+ kDisableTestFilter);
test_info->impl()->set_is_disabled(is_disabled);
- const bool should_run = !is_disabled &&
+ const bool matches_filter =
internal::UnitTestOptions::FilterMatchesTest(test_case_name,
test_name);
- test_info->impl()->set_should_run(should_run);
- test_case->set_should_run(test_case->should_run() || should_run);
- if (should_run) {
- num_runnable_tests++;
- }
- }
- }
- return num_runnable_tests;
-}
+ test_info->impl()->set_matches_filter(matches_filter);
-// Lists all tests by name.
-void UnitTestImpl::ListAllTests() {
- for (const internal::ListNode<TestCase*>* test_case_node = test_cases_.Head();
- test_case_node != NULL;
- test_case_node = test_case_node->next()) {
- const TestCase* const test_case = test_case_node->element();
+ const bool is_runnable =
+ (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) &&
+ matches_filter;
- // Prints the test case name following by an indented list of test nodes.
- printf("%s.\n", test_case->name());
+ const bool is_selected = is_runnable &&
+ (shard_tests == IGNORE_SHARDING_PROTOCOL ||
+ ShouldRunTestOnShard(total_shards, shard_index,
+ num_runnable_tests));
- for (const internal::ListNode<TestInfo*>* test_info_node =
- test_case->test_info_list().Head();
- test_info_node != NULL;
- test_info_node = test_info_node->next()) {
- const TestInfo* const test_info = test_info_node->element();
+ num_runnable_tests += is_runnable;
+ num_selected_tests += is_selected;
- printf(" %s\n", test_info->name());
+ test_info->impl()->set_should_run(is_selected);
+ test_case->set_should_run(test_case->should_run() || is_selected);
}
}
- fflush(stdout);
-}
-
-// Sets the unit test result printer.
-//
-// Does nothing if the input and the current printer object are the
-// same; otherwise, deletes the old printer object and makes the
-// input the current printer.
-void UnitTestImpl::set_result_printer(
- UnitTestEventListenerInterface* result_printer) {
- if (result_printer_ != result_printer) {
- delete result_printer_;
- result_printer_ = result_printer;
- }
-}
-
-// Returns the current unit test result printer if it is not NULL;
-// otherwise, creates an appropriate result printer, makes it the
-// current printer, and returns it.
-UnitTestEventListenerInterface* UnitTestImpl::result_printer() {
- if (result_printer_ != NULL) {
- return result_printer_;
- }
-
-#ifdef GTEST_HAS_DEATH_TEST
- if (internal_run_death_test_flag_.get() != NULL) {
- result_printer_ = new NullUnitTestResultPrinter;
- return result_printer_;
- }
-#endif // GTEST_HAS_DEATH_TEST
-
- UnitTestEventsRepeater *repeater = new UnitTestEventsRepeater;
- const String& output_format = internal::UnitTestOptions::GetOutputFormat();
- if (output_format == "xml") {
- repeater->AddListener(new XmlUnitTestResultPrinter(
- internal::UnitTestOptions::GetOutputFile().c_str()));
- } else if (output_format != "") {
- printf("WARNING: unrecognized output format \"%s\" ignored.\n",
- output_format.c_str());
- fflush(stdout);
+ return num_selected_tests;
+}
+
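The filter itself (GTEST_FLAG(filter), e.g. "Foo.*:Bar.*-Foo.Baz") is matched against each full test name with '*' and '?' wildcards by UnitTestOptions. A sketch of such a matcher — recursive, exponential in the worst case, which is fine for short test names (PatternMatches is a hypothetical name, not this file's function):

    #include <cstdio>

    // '?' matches one character, '*' any substring.
    bool PatternMatches(const char* pat, const char* str) {
      switch (*pat) {
        case '\0': return *str == '\0';
        case '?':  return *str != '\0' && PatternMatches(pat + 1, str + 1);
        case '*':  return (*str != '\0' && PatternMatches(pat, str + 1)) ||
                          PatternMatches(pat + 1, str);
        default:   return *pat == *str && PatternMatches(pat + 1, str + 1);
      }
    }

    int main() {
      std::printf("%d\n", PatternMatches("Foo.*", "Foo.DoesBar"));     // 1
      std::printf("%d\n", PatternMatches("*.DoesB?z", "Foo.DoesBaz")); // 1
      std::printf("%d\n", PatternMatches("Bar.*", "Foo.DoesBar"));     // 0
      return 0;
    }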
+// Prints the names of the tests matching the user-specified filter flag.
+void UnitTestImpl::ListTestsMatchingFilter() {
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ const TestCase* const test_case = test_cases_[i];
+ bool printed_test_case_name = false;
+
+ for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
+ const TestInfo* const test_info =
+ test_case->test_info_list()[j];
+ if (test_info->matches_filter()) {
+ if (!printed_test_case_name) {
+ printed_test_case_name = true;
+ printf("%s.\n", test_case->name());
+ }
+ printf(" %s\n", test_info->name());
+ }
+ }
}
- repeater->AddListener(new PrettyUnitTestResultPrinter);
- result_printer_ = repeater;
- return result_printer_;
+ fflush(stdout);
}
// Sets the OS stack trace getter.
@@ -3706,28 +4262,55 @@ OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() {
// Returns the TestResult for the test that's currently running, or
// the TestResult for the ad hoc test if no test is running.
-internal::TestResult* UnitTestImpl::current_test_result() {
+TestResult* UnitTestImpl::current_test_result() {
return current_test_info_ ?
current_test_info_->impl()->result() : &ad_hoc_test_result_;
}
+// Shuffles all test cases, and the tests within each test case,
+// making sure that death tests are still run first.
+void UnitTestImpl::ShuffleTests() {
+ // Shuffles the death test cases.
+ ShuffleRange(random(), 0, last_death_test_case_ + 1, &test_case_indices_);
+
+ // Shuffles the non-death test cases.
+ ShuffleRange(random(), last_death_test_case_ + 1,
+ static_cast<int>(test_cases_.size()), &test_case_indices_);
+
+ // Shuffles the tests inside each test case.
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ test_cases_[i]->ShuffleTests(random());
+ }
+}
+
+// Restores the test cases and tests to their order before the first shuffle.
+void UnitTestImpl::UnshuffleTests() {
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ // Unshuffles the tests in each test case.
+ test_cases_[i]->UnshuffleTests();
+ // Resets the index of each test case.
+ test_case_indices_[i] = static_cast<int>(i);
+ }
+}
+
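ShuffleRange, used above, permutes only a half-open subrange of the index vector, which is what keeps the death test block in front. A sketch of a subrange Fisher-Yates shuffle in that spirit (ShuffleIndexRange and the Random::Generate contract are assumptions, not this file's definitions):

    #include <utility>
    #include <vector>

    // Shuffles v's elements in [begin, end); Random is any type whose
    // Generate(n) returns a value uniformly drawn from [0, n).
    template <typename Random>
    void ShuffleIndexRange(Random* random, int begin, int end,
                           std::vector<int>* v) {
      for (int width = end - begin; width >= 2; --width) {
        const int last = begin + width - 1;
        const int picked = begin + static_cast<int>(random->Generate(width));
        std::swap((*v)[last], (*v)[picked]);
      }
    }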
// TestInfoImpl constructor. The new instance assumes ownership of the test
// factory object.
TestInfoImpl::TestInfoImpl(TestInfo* parent,
- const char* test_case_name,
- const char* name,
- const char* test_case_comment,
- const char* comment,
- TypeId fixture_class_id,
+ const char* a_test_case_name,
+ const char* a_name,
+ const char* a_test_case_comment,
+ const char* a_comment,
+ TypeId a_fixture_class_id,
internal::TestFactoryBase* factory) :
parent_(parent),
- test_case_name_(String(test_case_name)),
- name_(String(name)),
- test_case_comment_(String(test_case_comment)),
- comment_(String(comment)),
- fixture_class_id_(fixture_class_id),
+ test_case_name_(String(a_test_case_name)),
+ name_(String(a_name)),
+ test_case_comment_(String(a_test_case_comment)),
+ comment_(String(a_comment)),
+ fixture_class_id_(a_fixture_class_id),
should_run_(false),
is_disabled_(false),
+ matches_filter_(false),
factory_(factory) {
}
@@ -3746,15 +4329,41 @@ TestInfoImpl::~TestInfoImpl() {
// For example, if Foo() calls Bar(), which in turn calls
// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
-String GetCurrentOsStackTraceExceptTop(UnitTest* unit_test, int skip_count) {
+String GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/,
+ int skip_count) {
// We pass skip_count + 1 to skip this wrapper function in addition
// to what the user really wants to skip.
- return unit_test->impl()->CurrentOsStackTraceExceptTop(skip_count + 1);
+ return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1);
}
-// Returns the number of failed test parts in the given test result object.
-int GetFailedPartCount(const TestResult* result) {
- return result->failed_part_count();
+// Used by the GTEST_HIDE_UNREACHABLE_CODE_ macro to suppress unreachable
+// code warnings.
+namespace {
+class ClassUniqueToAlwaysTrue {};
+}
+
+bool IsTrue(bool condition) { return condition; }
+
+bool AlwaysTrue() {
+#if GTEST_HAS_EXCEPTIONS
+ // This condition is always false so AlwaysTrue() never actually throws,
+ // but it makes the compiler think that it may throw.
+ if (IsTrue(false))
+ throw ClassUniqueToAlwaysTrue();
+#endif // GTEST_HAS_EXCEPTIONS
+ return true;
+}
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+bool SkipPrefix(const char* prefix, const char** pstr) {
+ const size_t prefix_len = strlen(prefix);
+ if (strncmp(*pstr, prefix, prefix_len) == 0) {
+ *pstr += prefix_len;
+ return true;
+ }
+ return false;
}
// Parses a string as a command line flag. The string should have
@@ -3768,9 +4377,9 @@ const char* ParseFlagValue(const char* str,
// str and flag must not be NULL.
if (str == NULL || flag == NULL) return NULL;
- // The flag must start with "--" followed by GTEST_FLAG_PREFIX.
- const String flag_str = String::Format("--%s%s", GTEST_FLAG_PREFIX, flag);
- const size_t flag_len = flag_str.GetLength();
+ // The flag must start with "--" followed by GTEST_FLAG_PREFIX_.
+ const String flag_str = String::Format("--%s%s", GTEST_FLAG_PREFIX_, flag);
+ const size_t flag_len = flag_str.length();
if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL;
// Skips the flag name.
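So a well-formed Google Test flag looks like "--gtest_repeat=1000"; anything that fails the prefix comparison is left for the program's own flag handling. A minimal sketch of the value extraction (FlagValue is a hypothetical helper that hard-codes the "gtest_" prefix):

    #include <cstdio>
    #include <cstring>

    // Returns the value part of "--gtest_<flag>=<value>", or NULL.
    const char* FlagValue(const char* arg, const char* flag) {
      char prefix[64];
      std::snprintf(prefix, sizeof(prefix), "--gtest_%s=", flag);
      const size_t len = std::strlen(prefix);
      return std::strncmp(arg, prefix, len) == 0 ? arg + len : NULL;
    }

    int main() {
      const char* v = FlagValue("--gtest_repeat=1000", "repeat");
      std::printf("%s\n", v ? v : "(no match)");  // Prints: 1000
      return 0;
    }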
@@ -3846,6 +4455,127 @@ bool ParseStringFlag(const char* str, const char* flag, String* value) {
return true;
}
+// Determines whether a string has a prefix that Google Test uses for its
+// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_.
+// If Google Test detects that a command line flag has its prefix but is not
+// recognized, it will print its help message. Flags starting with
+// GTEST_INTERNAL_PREFIX_ followed by "internal_" are considered Google Test
+// internal flags and do not trigger the help message.
+static bool HasGoogleTestFlagPrefix(const char* str) {
+ return (SkipPrefix("--", &str) ||
+ SkipPrefix("-", &str) ||
+ SkipPrefix("/", &str)) &&
+ !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) &&
+ (SkipPrefix(GTEST_FLAG_PREFIX_, &str) ||
+ SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str));
+}
+
+// Prints a string containing code-encoded text. The following escape
+// sequences can be used in the string to control the text color:
+//
+// @@ prints a single '@' character.
+// @R changes the color to red.
+// @G changes the color to green.
+// @Y changes the color to yellow.
+// @D changes to the default terminal text color.
+//
+// TODO(wan@google.com): Write tests for this once we add stdout
+// capturing to Google Test.
+static void PrintColorEncoded(const char* str) {
+ GTestColor color = COLOR_DEFAULT; // The current color.
+
+ // Conceptually, we split the string into segments divided by escape
+ // sequences. Then we print one segment at a time. At the end of
+ // each iteration, the str pointer advances to the beginning of the
+ // next segment.
+ for (;;) {
+ const char* p = strchr(str, '@');
+ if (p == NULL) {
+ ColoredPrintf(color, "%s", str);
+ return;
+ }
+
+ ColoredPrintf(color, "%s", String(str, p - str).c_str());
+
+ const char ch = p[1];
+ str = p + 2;
+ if (ch == '@') {
+ ColoredPrintf(color, "@");
+ } else if (ch == 'D') {
+ color = COLOR_DEFAULT;
+ } else if (ch == 'R') {
+ color = COLOR_RED;
+ } else if (ch == 'G') {
+ color = COLOR_GREEN;
+ } else if (ch == 'Y') {
+ color = COLOR_YELLOW;
+ } else {
+ --str;
+ }
+ }
+}
+
+static const char kColorEncodedHelpMessage[] =
+"This program contains tests written using " GTEST_NAME_ ". You can use the\n"
+"following command line flags to control its behavior:\n"
+"\n"
+"Test Selection:\n"
+" @G--" GTEST_FLAG_PREFIX_ "list_tests@D\n"
+" List the names of all tests instead of running them. The name of\n"
+" TEST(Foo, Bar) is \"Foo.Bar\".\n"
+" @G--" GTEST_FLAG_PREFIX_ "filter=@YPOSTIVE_PATTERNS"
+ "[@G-@YNEGATIVE_PATTERNS]@D\n"
+" Run only the tests whose name matches one of the positive patterns but\n"
+" none of the negative patterns. '?' matches any single character; '*'\n"
+" matches any substring; ':' separates two patterns.\n"
+" @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n"
+" Run all disabled tests too.\n"
+"\n"
+"Test Execution:\n"
+" @G--" GTEST_FLAG_PREFIX_ "repeat=@Y[COUNT]@D\n"
+" Run the tests repeatedly; use a negative count to repeat forever.\n"
+" @G--" GTEST_FLAG_PREFIX_ "shuffle@D\n"
+" Randomize tests' orders on every iteration.\n"
+" @G--" GTEST_FLAG_PREFIX_ "random_seed=@Y[NUMBER]@D\n"
+" Random number seed to use for shuffling test orders (between 1 and\n"
+" 99999, or 0 to use a seed based on the current time).\n"
+"\n"
+"Test Output:\n"
+" @G--" GTEST_FLAG_PREFIX_ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n"
+" Enable/disable colored output. The default is @Gauto@D.\n"
+" -@G-" GTEST_FLAG_PREFIX_ "print_time=0@D\n"
+" Don't print the elapsed time of each test.\n"
+" @G--" GTEST_FLAG_PREFIX_ "output=xml@Y[@G:@YDIRECTORY_PATH@G"
+ GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n"
+" Generate an XML report in the given directory or with the given file\n"
+" name. @YFILE_PATH@D defaults to @Gtest_details.xml@D.\n"
+"\n"
+"Assertion Behavior:\n"
+#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+" @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n"
+" Set the default death test style.\n"
+#endif // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+" @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n"
+" Turn assertion failures into debugger break-points.\n"
+" @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n"
+" Turn assertion failures into C++ exceptions.\n"
+#if GTEST_OS_WINDOWS
+" @G--" GTEST_FLAG_PREFIX_ "catch_exceptions@D\n"
+" Suppress pop-ups caused by exceptions.\n"
+#endif // GTEST_OS_WINDOWS
+"\n"
+"Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set "
+ "the corresponding\n"
+"environment variable of a flag (all letters in upper-case). For example, to\n"
+"disable colored text output, you can either specify @G--" GTEST_FLAG_PREFIX_
+ "color=no@D or set\n"
+"the @G" GTEST_FLAG_PREFIX_UPPER_ "COLOR@D environment variable to @Gno@D.\n"
+"\n"
+"For more information, please read the " GTEST_NAME_ " documentation at\n"
+"@G" GTEST_PROJECT_URL_ "@D. If you find a bug in " GTEST_NAME_ "\n"
+"(not one in your own code or tests), please report it to\n"
+"@G<" GTEST_DEV_EMAIL_ ">@D.\n";
+
// Parses the command line for Google Test flags, without initializing
// other parts of Google Test. The type parameter CharType can be
// instantiated to either char or wchar_t.
@@ -3860,20 +4590,29 @@ void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
using internal::ParseStringFlag;
// Do we see a Google Test flag?
- if (ParseBoolFlag(arg, kBreakOnFailureFlag,
+ if (ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag,
+ &GTEST_FLAG(also_run_disabled_tests)) ||
+ ParseBoolFlag(arg, kBreakOnFailureFlag,
&GTEST_FLAG(break_on_failure)) ||
ParseBoolFlag(arg, kCatchExceptionsFlag,
&GTEST_FLAG(catch_exceptions)) ||
ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||
ParseStringFlag(arg, kDeathTestStyleFlag,
&GTEST_FLAG(death_test_style)) ||
+ ParseBoolFlag(arg, kDeathTestUseFork,
+ &GTEST_FLAG(death_test_use_fork)) ||
ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
ParseStringFlag(arg, kInternalRunDeathTestFlag,
&GTEST_FLAG(internal_run_death_test)) ||
ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||
ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||
- ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat))
+ ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) ||
+ ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) ||
+ ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||
+ ParseInt32Flag(arg, kStackTraceDepthFlag,
+ &GTEST_FLAG(stack_trace_depth)) ||
+ ParseBoolFlag(arg, kThrowOnFailureFlag, &GTEST_FLAG(throw_on_failure))
) {
// Yes. Shift the remainder of the argv list left by one. Note
// that argv has (*argc + 1) elements, the last one always being
@@ -3889,8 +4628,21 @@ void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
// We also need to decrement the iterator as we just removed
// an element.
i--;
+ } else if (arg_string == "--help" || arg_string == "-h" ||
+ arg_string == "-?" || arg_string == "/?" ||
+ HasGoogleTestFlagPrefix(arg)) {
+ // Both help flag and unrecognized Google Test flags (excluding
+ // internal ones) trigger help display.
+ g_help_flag = true;
}
}
+
+ if (g_help_flag) {
+ // We print the help here instead of in RUN_ALL_TESTS(), as the
+ // latter may not be called at all if the user is using Google
+ // Test with another testing framework.
+ PrintColorEncoded(kColorEncodedHelpMessage);
+ }
}
// Parses the command line for Google Test flags, without initializing
@@ -3917,7 +4669,7 @@ void InitGoogleTestImpl(int* argc, CharType** argv) {
internal::g_executable_path = internal::StreamableToString(argv[0]);
-#ifdef GTEST_HAS_DEATH_TEST
+#if GTEST_HAS_DEATH_TEST
g_argvs.clear();
for (int i = 0; i != *argc; i++) {
g_argvs.push_back(StreamableToString(argv[i]));
@@ -3925,6 +4677,7 @@ void InitGoogleTestImpl(int* argc, CharType** argv) {
#endif // GTEST_HAS_DEATH_TEST
ParseGoogleTestFlagsOnly(argc, argv);
+ GetUnitTestImpl()->PostFlagParsingInit();
}
} // namespace internal
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-death-test.h b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-death-test.h
index f0e109a3..121dc1f 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-death-test.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-death-test.h
@@ -49,7 +49,7 @@ namespace testing {
// after forking.
GTEST_DECLARE_string_(death_test_style);
-#ifdef GTEST_HAS_DEATH_TEST
+#if GTEST_HAS_DEATH_TEST
// The following macros are useful for writing death tests.
@@ -86,6 +86,57 @@ GTEST_DECLARE_string_(death_test_style);
//
// ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!");
//
+// On the regular expressions used in death tests:
+//
+// On POSIX-compliant systems (*nix), we use the <regex.h> library,
+// which uses the POSIX extended regex syntax.
+//
+// On other platforms (e.g. Windows), we only support a simple regex
+// syntax implemented as part of Google Test. This limited
+// implementation should be enough most of the time when writing
+// death tests, though it lacks many features you can find in PCRE
+// or POSIX extended regex syntax. For example, we don't support
+// union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and
+// repetition count ("x{5,7}"), among others.
+//
+// Below is the syntax that we do support. We chose it to be a
+// subset of both PCRE and POSIX extended regex, so it's easy to
+// learn wherever you come from. In the following: 'A' denotes a
+// literal character, period (.), or a single \\ escape sequence;
+// 'x' and 'y' denote regular expressions; 'm' and 'n' are for
+// natural numbers.
+//
+// c matches any literal character c
+// \\d matches any decimal digit
+// \\D matches any character that's not a decimal digit
+// \\f matches \f
+// \\n matches \n
+// \\r matches \r
+// \\s matches any ASCII whitespace, including \n
+// \\S matches any character that's not a whitespace
+// \\t matches \t
+// \\v matches \v
+// \\w matches any letter, _, or decimal digit
+// \\W matches any character that \\w doesn't match
+// \\c matches any literal character c, which must be a punctuation character
+// . matches any single character except \n
+// A? matches 0 or 1 occurrences of A
+// A* matches 0 or many occurrences of A
+// A+ matches 1 or many occurrences of A
+// ^ matches the beginning of a string (not that of each line)
+// $ matches the end of a string (not that of each line)
+// xy matches x followed by y
+//
+// If you accidentally use PCRE or POSIX extended regex features
+// not implemented by us, you will get a run-time failure. In that
+// case, please try to rewrite your regular expression within the
+// above syntax.
+//
+// This implementation is *not* meant to be as highly tuned or robust
+// as a compiled regex library, but should perform well enough for a
+// death test, which already incurs significant overhead by launching
+// a child process.
+//
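+// To make the supported subset concrete, here is a minimal sketch of a
+// death test that stays within it. TerminateWithError is a hypothetical
+// function, and death tests require GTEST_HAS_DEATH_TEST:
+//
+//   #include <cstdio>
+//   #include <cstdlib>
+//   #include <gtest/gtest.h>
+//
+//   // Hypothetical function under test: prints a diagnostic and aborts.
+//   static void TerminateWithError(int index) {
+//     std::fprintf(stderr, "bad index %d in lookup\n", index);
+//     std::abort();
+//   }
+//
+//   TEST(LookupDeathTest, DiesWithDiagnostic) {
+//     // Uses only the portable subset above: \\d+ and '.' (no "x|y").
+//     EXPECT_DEATH(TerminateWithError(42), "bad index \\d+ in .*");
+//   }
+//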
// Known caveats:
//
// A "threadsafe" style death test obtains the path to the test
@@ -125,23 +176,28 @@ GTEST_DECLARE_string_(death_test_style);
// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:
// Tests that an exit code describes a normal exit with a given exit code.
-class ExitedWithCode {
+class GTEST_API_ ExitedWithCode {
public:
explicit ExitedWithCode(int exit_code);
bool operator()(int exit_status) const;
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ExitedWithCode& other);
+
const int exit_code_;
};
+#if !GTEST_OS_WINDOWS
// Tests that an exit code describes an exit due to termination by a
// given signal.
-class KilledBySignal {
+class GTEST_API_ KilledBySignal {
public:
explicit KilledBySignal(int signum);
bool operator()(int exit_status) const;
private:
const int signum_;
};
+#endif // !GTEST_OS_WINDOWS
// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.
// The death testing framework causes this to have interesting semantics,
@@ -189,10 +245,10 @@ class KilledBySignal {
#ifdef NDEBUG
#define EXPECT_DEBUG_DEATH(statement, regex) \
- do { statement; } while (false)
+ do { statement; } while (::testing::internal::AlwaysFalse())
#define ASSERT_DEBUG_DEATH(statement, regex) \
- do { statement; } while (false)
+ do { statement; } while (::testing::internal::AlwaysFalse())
#else
@@ -204,6 +260,24 @@ class KilledBySignal {
#endif // NDEBUG for EXPECT_DEBUG_DEATH
#endif // GTEST_HAS_DEATH_TEST
+
+// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and
+// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if
+// death tests are supported; otherwise they just issue a warning. This is
+// useful when you are combining death test assertions with normal test
+// assertions in one test.
+#if GTEST_HAS_DEATH_TEST
+#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+ EXPECT_DEATH(statement, regex)
+#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+ ASSERT_DEATH(statement, regex)
+#else
+#define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+ GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, )
+#define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+ GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return)
+#endif
+
} // namespace testing
#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
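
A short sketch (assumed usage, not from the original change) of why the *_IF_SUPPORTED forms are convenient: they let one test mix ordinary assertions with a death assertion, degrading to a warning on platforms without death test support:

#include <cstdlib>
#include <gtest/gtest.h>

TEST(MixedTest, CombinesNormalAndDeathAssertions) {
  EXPECT_EQ(4, 2 + 2);                          // ordinary assertion
  EXPECT_DEATH_IF_SUPPORTED(std::abort(), "");  // death test if supported,
                                                // otherwise just a warning
}
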
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-message.h b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-message.h
index 7effd08..f135b69 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-message.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-message.h
@@ -46,6 +46,8 @@
#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+#include <limits>
+
#include <gtest/internal/gtest-string.h>
#include <gtest/internal/gtest-internal.h>
@@ -77,7 +79,7 @@ namespace testing {
// latter (it causes an access violation if you do). The Message
// class hides this difference by treating a NULL char pointer as
// "(null)".
-class Message {
+class GTEST_API_ Message {
private:
// The type of basic IO manipulators (endl, ends, and flush) for
// narrow streams.
@@ -89,7 +91,11 @@ class Message {
// ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's
// stack frame leading to huge stack frames in some cases; gcc does not reuse
// the stack space.
- Message() : ss_(new internal::StrStream) {}
+ Message() : ss_(new internal::StrStream) {
+ // By default, we want there to be enough precision when printing
+ // a double to a Message.
+ *ss_ << std::setprecision(std::numeric_limits<double>::digits10 + 2);
+ }
// Copy constructor.
Message(const Message& msg) : ss_(new internal::StrStream) { // NOLINT
@@ -102,7 +108,7 @@ class Message {
}
~Message() { delete ss_; }
-#ifdef GTEST_OS_SYMBIAN
+#if GTEST_OS_SYMBIAN
// Streams a value (either a pointer or not) to this object.
template <typename T>
inline Message& operator <<(const T& value) {
@@ -187,13 +193,13 @@ class Message {
}
private:
-#ifdef GTEST_OS_SYMBIAN
+#if GTEST_OS_SYMBIAN
// These are needed as the Nokia Symbian Compiler cannot decide between
// const T& and const T* in a function template. The Nokia compiler _can_
// decide between class template specializations for T and T*, so a
// tr1::type_traits-like is_pointer works, and we can overload on that.
template <typename T>
- inline void StreamHelper(internal::true_type dummy, T* pointer) {
+ inline void StreamHelper(internal::true_type /*dummy*/, T* pointer) {
if (pointer == NULL) {
*ss_ << "(null)";
} else {
@@ -201,7 +207,7 @@ class Message {
}
}
template <typename T>
- inline void StreamHelper(internal::false_type dummy, const T& value) {
+ inline void StreamHelper(internal::false_type /*dummy*/, const T& value) {
::GTestStreamToHelper(ss_, value);
}
#endif // GTEST_OS_SYMBIAN
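
The practical effect of the new Message constructor is easiest to see with a small sketch; FormatViaMessage is a hypothetical helper, not part of this header:

#include <string>
#include <gtest/gtest.h>

// Hypothetical helper: formats a double through Message to show the
// new default precision of digits10 + 2 significant digits.
std::string FormatViaMessage(double d) {
  ::testing::Message msg;
  msg << d;
  return msg.GetString().c_str();  // internal::String -> const char*
}

// FormatViaMessage(1.0 / 3.0) now keeps enough digits for the value to
// round-trip, e.g. "0.33333333333333331" rather than "0.333333".
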
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-param-test.h b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-param-test.h
index 0cf05dc..3184d07 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-param-test.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-param-test.h
@@ -133,9 +133,12 @@ INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
// in the given test case, whether their definitions come before or
// AFTER the INSTANTIATE_TEST_CASE_P statement.
//
-// Please also note that generator expressions are evaluated in
-// RUN_ALL_TESTS(), after main() has started. This allows evaluation of
-// parameter list based on command line parameters.
+// Please also note that generator expressions (including parameters to the
+// generators) are evaluated in InitGoogleTest(), after main() has started.
+// This allows the user, on the one hand, to adjust generator parameters
+// in order to determine the set of tests to run dynamically, and on the
+// other hand to inspect the generated tests with the Google Test
+// reflection API before RUN_ALL_TESTS() is executed.
//
// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
// for more examples.
@@ -146,16 +149,20 @@ INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
#endif // 0
-
-#include <utility>
-
#include <gtest/internal/gtest-port.h>
-#ifdef GTEST_HAS_PARAM_TEST
+#if !GTEST_OS_SYMBIAN
+#include <utility>
+#endif
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
#include <gtest/internal/gtest-internal.h>
#include <gtest/internal/gtest-param-util.h>
+#if GTEST_HAS_PARAM_TEST
+
namespace testing {
// Functions producing parameter generators.
@@ -1190,7 +1197,7 @@ inline internal::ParamGenerator<bool> Bool() {
return Values(false, true);
}
-#ifdef GTEST_HAS_COMBINE
+#if GTEST_HAS_COMBINE
// Combine() allows the user to combine two or more sequences to produce
// values of a Cartesian product of those sequences' elements.
//
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-spi.h b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-spi.h
index a4e387a..c41da48 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-spi.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-spi.h
@@ -48,7 +48,7 @@ namespace testing {
// generated in the same thread that created this object or it can intercept
// all generated failures. The scope of this mock object can be controlled with
// the second argument to the two arguments constructor.
-class ScopedFakeTestPartResultReporter
+class GTEST_API_ ScopedFakeTestPartResultReporter
: public TestPartResultReporterInterface {
public:
// The two possible mocking modes of this object.
@@ -93,16 +93,16 @@ namespace internal {
// TestPartResultArray contains exactly one failure that has the given
// type and contains the given substring. If that's not the case, a
// non-fatal failure will be generated.
-class SingleFailureChecker {
+class GTEST_API_ SingleFailureChecker {
public:
// The constructor remembers the arguments.
SingleFailureChecker(const TestPartResultArray* results,
- TestPartResultType type,
+ TestPartResult::Type type,
const char* substr);
~SingleFailureChecker();
private:
const TestPartResultArray* const results_;
- const TestPartResultType type_;
+ const TestPartResult::Type type_;
const String substr_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
@@ -143,14 +143,14 @@ class SingleFailureChecker {
};\
::testing::TestPartResultArray gtest_failures;\
::testing::internal::SingleFailureChecker gtest_checker(\
- &gtest_failures, ::testing::TPRT_FATAL_FAILURE, (substr));\
+ &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
{\
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
::testing::ScopedFakeTestPartResultReporter:: \
INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
GTestExpectFatalFailureHelper::Execute();\
}\
- } while (false)
+ } while (::testing::internal::AlwaysFalse())
#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
do { \
@@ -160,14 +160,14 @@ class SingleFailureChecker {
};\
::testing::TestPartResultArray gtest_failures;\
::testing::internal::SingleFailureChecker gtest_checker(\
- &gtest_failures, ::testing::TPRT_FATAL_FAILURE, (substr));\
+ &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
{\
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
::testing::ScopedFakeTestPartResultReporter:: \
INTERCEPT_ALL_THREADS, &gtest_failures);\
GTestExpectFatalFailureHelper::Execute();\
}\
- } while (false)
+ } while (::testing::internal::AlwaysFalse())
// A macro for testing Google Test assertions or code that's expected to
// generate Google Test non-fatal failures. It asserts that the given
@@ -190,32 +190,43 @@ class SingleFailureChecker {
// Note that even though the implementations of the following two
// macros are much alike, we cannot refactor them to use a common
// helper macro, due to some peculiarity in how the preprocessor
-// works. The AcceptsMacroThatExpandsToUnprotectedComma test in
-// gtest_unittest.cc will fail to compile if we do that.
+// works. If we do that, the code won't compile when the user gives
+// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that
+// expands to code containing an unprotected comma. The
+// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc
+// catches that.
+//
+// For the same reason, we have to write
+// if (::testing::internal::AlwaysTrue()) { statement; }
+// instead of
+// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+// to avoid an MSVC warning on unreachable code.
#define EXPECT_NONFATAL_FAILURE(statement, substr) \
do {\
::testing::TestPartResultArray gtest_failures;\
::testing::internal::SingleFailureChecker gtest_checker(\
- &gtest_failures, ::testing::TPRT_NONFATAL_FAILURE, (substr));\
+ &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+ (substr));\
{\
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
::testing::ScopedFakeTestPartResultReporter:: \
INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
- statement;\
+ if (::testing::internal::AlwaysTrue()) { statement; }\
}\
- } while (false)
+ } while (::testing::internal::AlwaysFalse())
#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
do {\
::testing::TestPartResultArray gtest_failures;\
::testing::internal::SingleFailureChecker gtest_checker(\
- &gtest_failures, ::testing::TPRT_NONFATAL_FAILURE, (substr));\
+ &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+ (substr));\
{\
::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS,\
&gtest_failures);\
- statement;\
+ if (::testing::internal::AlwaysTrue()) { statement; }\
}\
- } while (false)
+ } while (::testing::internal::AlwaysFalse())
#endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
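
A usage sketch (assumed, not from the original change) of the rewritten macro; the checker now works in terms of the renamed TestPartResult::kNonFatalFailure internally, but the user-facing pattern is unchanged:

#include <gtest/gtest.h>
#include <gtest/gtest-spi.h>

TEST(SpiTest, DetectsExactlyOneNonfatalFailure) {
  // The statement must produce exactly one non-fatal failure whose
  // message contains the given substring.
  EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "oops", "oops");
}
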
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-test-part.h b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-test-part.h
index 1a281af..f714759 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-test-part.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-test-part.h
@@ -34,41 +34,42 @@
#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
#include <iosfwd>
+#include <vector>
#include <gtest/internal/gtest-internal.h>
#include <gtest/internal/gtest-string.h>
namespace testing {
-// The possible outcomes of a test part (i.e. an assertion or an
-// explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
-enum TestPartResultType {
- TPRT_SUCCESS, // Succeeded.
- TPRT_NONFATAL_FAILURE, // Failed but the test can continue.
- TPRT_FATAL_FAILURE // Failed and the test should be terminated.
-};
-
// A copyable object representing the result of a test part (i.e. an
// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCESS()).
//
// Don't inherit from TestPartResult as its destructor is not virtual.
-class TestPartResult {
+class GTEST_API_ TestPartResult {
public:
+ // The possible outcomes of a test part (i.e. an assertion or an
+ // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
+ enum Type {
+ kSuccess, // Succeeded.
+ kNonFatalFailure, // Failed but the test can continue.
+ kFatalFailure // Failed and the test should be terminated.
+ };
+
// C'tor. TestPartResult does NOT have a default constructor.
// Always use this constructor (with parameters) to create a
// TestPartResult object.
- TestPartResult(TestPartResultType type,
- const char* file_name,
- int line_number,
- const char* message)
- : type_(type),
- file_name_(file_name),
- line_number_(line_number),
- summary_(ExtractSummary(message)),
- message_(message) {
+ TestPartResult(Type a_type,
+ const char* a_file_name,
+ int a_line_number,
+ const char* a_message)
+ : type_(a_type),
+ file_name_(a_file_name),
+ line_number_(a_line_number),
+ summary_(ExtractSummary(a_message)),
+ message_(a_message) {
}
// Gets the outcome of the test part.
- TestPartResultType type() const { return type_; }
+ Type type() const { return type_; }
// Gets the name of the source file where the test part took place, or
// NULL if it's unknown.
@@ -85,18 +86,18 @@ class TestPartResult {
const char* message() const { return message_.c_str(); }
// Returns true iff the test part passed.
- bool passed() const { return type_ == TPRT_SUCCESS; }
+ bool passed() const { return type_ == kSuccess; }
// Returns true iff the test part failed.
- bool failed() const { return type_ != TPRT_SUCCESS; }
+ bool failed() const { return type_ != kSuccess; }
// Returns true iff the test part non-fatally failed.
- bool nonfatally_failed() const { return type_ == TPRT_NONFATAL_FAILURE; }
+ bool nonfatally_failed() const { return type_ == kNonFatalFailure; }
// Returns true iff the test part fatally failed.
- bool fatally_failed() const { return type_ == TPRT_FATAL_FAILURE; }
+ bool fatally_failed() const { return type_ == kFatalFailure; }
private:
- TestPartResultType type_;
+ Type type_;
// Gets the summary of the failure message by omitting the stack
// trace in it.
@@ -117,15 +118,11 @@ std::ostream& operator<<(std::ostream& os, const TestPartResult& result);
// An array of TestPartResult objects.
//
-// We define this class as we cannot use STL containers when compiling
-// Google Test with MSVC 7.1 and exceptions disabled.
-//
// Don't inherit from TestPartResultArray as its destructor is not
// virtual.
-class TestPartResultArray {
+class GTEST_API_ TestPartResultArray {
public:
- TestPartResultArray();
- ~TestPartResultArray();
+ TestPartResultArray() {}
// Appends the given TestPartResult to the array.
void Append(const TestPartResult& result);
@@ -135,10 +132,9 @@ class TestPartResultArray {
// Returns the number of TestPartResult objects in the array.
int size() const;
+
private:
- // Internally we use a list to simulate the array. Yes, this means
- // that random access is O(N) in time, but it's OK for its purpose.
- internal::List<TestPartResult>* const list_;
+ std::vector<TestPartResult> array_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);
};
@@ -159,7 +155,8 @@ namespace internal {
// reported, it only delegates the reporting to the former result reporter.
// The original result reporter is restored in the destructor.
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-class HasNewFatalFailureHelper : public TestPartResultReporterInterface {
+class GTEST_API_ HasNewFatalFailureHelper
+ : public TestPartResultReporterInterface {
public:
HasNewFatalFailureHelper();
virtual ~HasNewFatalFailureHelper();
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-typed-test.h b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-typed-test.h
index dec42cf..1ec8eb8 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-typed-test.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest-typed-test.h
@@ -151,7 +151,7 @@ INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);
// Implements typed tests.
-#ifdef GTEST_HAS_TYPED_TEST
+#if GTEST_HAS_TYPED_TEST
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
@@ -159,8 +159,11 @@ INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);
// given test case.
#define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>).
#define TYPED_TEST_CASE(CaseName, Types) \
- typedef ::testing::internal::TypeList<Types>::type \
+ typedef ::testing::internal::TypeList< Types >::type \
GTEST_TYPE_PARAMS_(CaseName)
#define TYPED_TEST(CaseName, TestName) \
@@ -186,7 +189,7 @@ INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);
// Implements type-parameterized tests.
-#ifdef GTEST_HAS_TYPED_TEST_P
+#if GTEST_HAS_TYPED_TEST_P
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
@@ -241,11 +244,14 @@ INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);
GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\
__FILE__, __LINE__, #__VA_ARGS__)
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>).
#define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \
bool gtest_##Prefix##_##CaseName = \
::testing::internal::TypeParameterizedTestCase<CaseName, \
GTEST_CASE_NAMESPACE_(CaseName)::gtest_AllTests_, \
- ::testing::internal::TypeList<Types>::type>::Register(\
+ ::testing::internal::TypeList< Types >::type>::Register(\
#Prefix, #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName))
#endif // GTEST_HAS_TYPED_TEST_P
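
A sketch (assumed fixture names) of why the added spaces matter: the macros now expand to TypeList< Types >, so a type-list argument that itself ends in '>' no longer produces a '>>' token, which pre-C++11 compilers parse as a right shift:

#include <vector>
#include <gtest/gtest.h>

template <typename T>
class FooTest : public ::testing::Test {};

// Safe even though the argument ends in '>': the macro's own spaces
// keep '> >' from collapsing into '>>'.
TYPED_TEST_CASE(FooTest, ::testing::Types<int, std::vector<int> >);

TYPED_TEST(FooTest, IsDefaultConstructible) {
  TypeParam value = TypeParam();  // TypeParam is int or std::vector<int>
  (void)value;
}
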
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest.h b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest.h
index ebd3123..921fad1 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/gtest.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/gtest.h
@@ -51,17 +51,9 @@
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
#define GTEST_INCLUDE_GTEST_GTEST_H_
-// The following platform macros are used throughout Google Test:
-// _WIN32_WCE Windows CE (set in project files)
-//
-// Note that even though _MSC_VER and _WIN32_WCE really indicate a compiler
-// and a Win32 implementation, respectively, we use them to indicate the
-// combination of compiler - Win 32 API - C library, since the code currently
-// only supports:
-// Windows proper with Visual C++ and MS C library (_MSC_VER && !_WIN32_WCE) and
-// Windows Mobile with Visual C++ and no C library (_WIN32_WCE).
-
#include <limits>
+#include <vector>
+
#include <gtest/internal/gtest-internal.h>
#include <gtest/internal/gtest-string.h>
#include <gtest/gtest-death-test.h>
@@ -72,41 +64,99 @@
#include <gtest/gtest-typed-test.h>
// Depending on the platform, different string classes are available.
-// On Windows, ::std::string compiles only when exceptions are
-// enabled. On Linux, in addition to ::std::string, Google also makes
-// use of class ::string, which has the same interface as
-// ::std::string, but has a different implementation.
-//
-// The user can tell us whether ::std::string is available in his
-// environment by defining the macro GTEST_HAS_STD_STRING to either 1
-// or 0 on the compiler command line. He can also define
-// GTEST_HAS_GLOBAL_STRING to 1 to indicate that ::string is available
-// AND is a distinct type to ::std::string, or define it to 0 to
-// indicate otherwise.
+// On Linux, in addition to ::std::string, Google also makes use of
+// class ::string, which has the same interface as ::std::string, but
+// has a different implementation.
+//
+// The user can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that
+// ::string is available AND is a distinct type to ::std::string, or
+// define it to 0 to indicate otherwise.
//
// If the user's ::std::string and ::string are the same class due to
-// aliasing, he should define GTEST_HAS_STD_STRING to 1 and
-// GTEST_HAS_GLOBAL_STRING to 0.
+// aliasing, he should define GTEST_HAS_GLOBAL_STRING to 0.
//
-// If the user doesn't define GTEST_HAS_STD_STRING and/or
-// GTEST_HAS_GLOBAL_STRING, they are defined heuristically.
+// If the user doesn't define GTEST_HAS_GLOBAL_STRING, it is defined
+// heuristically.
namespace testing {
-// The upper limit for valid stack trace depths.
-const int kMaxStackTraceDepth = 100;
+// Declares the flags.
-// This flag specifies the maximum number of stack frames to be
-// printed in a failure message.
-GTEST_DECLARE_int32_(stack_trace_depth);
+// This flag temporarily enables the disabled tests.
+GTEST_DECLARE_bool_(also_run_disabled_tests);
+
+// This flag causes Google Test to break into the debugger on an
+// assertion failure.
+GTEST_DECLARE_bool_(break_on_failure);
+
+// This flag controls whether Google Test catches all test-thrown exceptions
+// and logs them as failures.
+GTEST_DECLARE_bool_(catch_exceptions);
+
+// This flag enables using colors in terminal output. Available values are
+// "yes" to enable colors, "no" (disable colors), or "auto" (the default)
+// to let Google Test decide.
+GTEST_DECLARE_string_(color);
+
+// This flag sets up the filter that selects, by name and using a glob
+// pattern, the tests to run. If the filter is not given, all tests run.
+GTEST_DECLARE_string_(filter);
+
+// This flag causes Google Test to list the available tests. None of the
+// listed tests are actually run if the flag is provided.
+GTEST_DECLARE_bool_(list_tests);
+
+// This flag controls whether Google Test emits a detailed XML report to a file
+// in addition to its normal textual output.
+GTEST_DECLARE_string_(output);
+
+// This flag controls whether Google Test prints the elapsed time for each
+// test.
+GTEST_DECLARE_bool_(print_time);
+
+// This flag specifies the random number seed.
+GTEST_DECLARE_int32_(random_seed);
+
+// This flag sets how many times the tests are repeated. The default value
+// is 1. If the value is -1, the tests are repeated forever.
+GTEST_DECLARE_int32_(repeat);
// This flag controls whether Google Test includes Google Test internal
// stack frames in failure stack traces.
GTEST_DECLARE_bool_(show_internal_stack_frames);
+// When this flag is specified, the order of the tests is randomized on
+// every iteration.
+GTEST_DECLARE_bool_(shuffle);
+
+// This flag specifies the maximum number of stack frames to be
+// printed in a failure message.
+GTEST_DECLARE_int32_(stack_trace_depth);
+
+// When this flag is specified, a failed assertion will throw an
+// exception if exceptions are enabled, or exit the program with a
+// non-zero code otherwise.
+GTEST_DECLARE_bool_(throw_on_failure);
+
+// The upper limit for valid stack trace depths.
+const int kMaxStackTraceDepth = 100;
+
namespace internal {
+class AssertHelper;
+class DefaultGlobalTestPartResultReporter;
+class ExecDeathTest;
+class NoExecDeathTest;
+class FinalSuccessChecker;
class GTestFlagSaver;
+class TestInfoImpl;
+class TestResultAccessor;
+class TestEventListenersAccessor;
+class TestEventRepeater;
+class WindowsDeathTest;
+class UnitTestImpl* GetUnitTestImpl();
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+ const String& message);
+class PrettyUnitTestResultPrinter;
+class XmlUnitTestResultPrinter;
// Converts a streamable value to a String. A NULL pointer is
// converted to "(null)". When the input value is a ::string,
@@ -124,64 +174,146 @@ String StreamableToString(const T& streamable) {
// A class for indicating whether an assertion was successful. When
// the assertion wasn't successful, the AssertionResult object
-// remembers a non-empty message that described how it failed.
+// remembers a non-empty message that describes how it failed.
//
-// This class is useful for defining predicate-format functions to be
-// used with predicate assertions (ASSERT_PRED_FORMAT*, etc).
-//
-// The constructor of AssertionResult is private. To create an
-// instance of this class, use one of the factory functions
+// To create an instance of this class, use one of the factory functions
// (AssertionSuccess() and AssertionFailure()).
//
-// For example, in order to be able to write:
+// This class is useful for two purposes:
+// 1. Defining predicate functions to be used with Boolean test assertions
+// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts
+// 2. Defining predicate-format functions to be
+// used with predicate assertions (ASSERT_PRED_FORMAT*, etc).
+//
+// For example, if you define IsEven predicate:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5)))
+// will print the message
+//
+// Value of: IsEven(Fib(5))
+// Actual: false (5 is odd)
+// Expected: true
+//
+// instead of a more opaque
+//
+// Value of: IsEven(Fib(5))
+// Actual: false
+// Expected: true
+//
+// in case IsEven is a simple Boolean predicate.
+//
+// If you expect your predicate to be reused and want to support informative
+// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up
+// about half as often as positive ones in our tests), supply messages for
+// both success and failure cases:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess() << n << " is even";
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print
+//
+// Value of: IsEven(Fib(6))
+// Actual: true (8 is even)
+// Expected: false
+//
+// NB: Predicates that support negative Boolean assertions incur a
+// performance penalty in positive ones, so be careful not to use them
+// in tests that have lots (tens of thousands) of positive Boolean
+// assertions.
+//
+// To use this class with EXPECT_PRED_FORMAT assertions such as:
//
// // Verifies that Foo() returns an even number.
// EXPECT_PRED_FORMAT1(IsEven, Foo());
//
-// you just need to define:
+// you need to define:
//
// testing::AssertionResult IsEven(const char* expr, int n) {
-// if ((n % 2) == 0) return testing::AssertionSuccess();
-//
-// Message msg;
-// msg << "Expected: " << expr << " is even\n"
-// << " Actual: it's " << n;
-// return testing::AssertionFailure(msg);
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure()
+// << "Expected: " << expr << " is even\n Actual: it's " << n;
// }
//
// If Foo() returns 5, you will see the following message:
//
// Expected: Foo() is even
// Actual: it's 5
-class AssertionResult {
+//
+class GTEST_API_ AssertionResult {
public:
- // Declares factory functions for making successful and failed
- // assertion results as friends.
- friend AssertionResult AssertionSuccess();
- friend AssertionResult AssertionFailure(const Message&);
+ // Copy constructor.
+ // Used in EXPECT_TRUE/FALSE(assertion_result).
+ AssertionResult(const AssertionResult& other);
+ // Used in the EXPECT_TRUE/FALSE(bool_expression).
+ explicit AssertionResult(bool success) : success_(success) {}
// Returns true iff the assertion succeeded.
- operator bool() const { return failure_message_.c_str() == NULL; } // NOLINT
+ operator bool() const { return success_; } // NOLINT
+
+ // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+ AssertionResult operator!() const;
+
+ // Returns the text streamed into this AssertionResult. Test assertions
+ // use it when they fail (i.e., the predicate's outcome doesn't match the
+ // assertion's expectation). When nothing has been streamed into the
+ // object, returns an empty string.
+ const char* message() const {
+ return message_.get() != NULL && message_->c_str() != NULL ?
+ message_->c_str() : "";
+ }
+ // TODO(vladl@google.com): Remove this after making sure no clients use it.
+ // Deprecated; please use message() instead.
+ const char* failure_message() const { return message(); }
- // Returns the assertion's failure message.
- const char* failure_message() const { return failure_message_.c_str(); }
+ // Streams a custom failure message into this object.
+ template <typename T> AssertionResult& operator<<(const T& value);
private:
- // The default constructor. It is used when the assertion succeeded.
- AssertionResult() {}
-
- // The constructor used when the assertion failed.
- explicit AssertionResult(const internal::String& failure_message);
-
- // Stores the assertion's failure message.
- internal::String failure_message_;
-};
+ // No implementation - we want AssertionResult to be
+ // copy-constructible but not assignable.
+ void operator=(const AssertionResult& other);
+
+ // Stores result of the assertion predicate.
+ bool success_;
+ // Stores the message describing the condition in case the expectation
+ // construct is not satisfied with the predicate's outcome.
+ // Referenced via a pointer to avoid taking too much stack frame space
+ // with test assertions.
+ internal::scoped_ptr<internal::String> message_;
+}; // class AssertionResult
+
+// Streams a custom failure message into this object.
+template <typename T>
+AssertionResult& AssertionResult::operator<<(const T& value) {
+ Message msg;
+ if (message_.get() != NULL)
+ msg << *message_;
+ msg << value;
+ message_.reset(new internal::String(msg.GetString()));
+ return *this;
+}
// Makes a successful assertion result.
-AssertionResult AssertionSuccess();
+GTEST_API_ AssertionResult AssertionSuccess();
+
+// Makes a failed assertion result.
+GTEST_API_ AssertionResult AssertionFailure();
// Makes a failed assertion result with the given failure message.
-AssertionResult AssertionFailure(const Message& msg);
+// Deprecated; use AssertionFailure() << msg.
+GTEST_API_ AssertionResult AssertionFailure(const Message& msg);
// The abstract class that all tests inherit from.
//
@@ -206,7 +338,7 @@ AssertionResult AssertionFailure(const Message& msg);
// TEST_F(FooTest, Baz) { ... }
//
// Test is not copyable.
-class Test {
+class GTEST_API_ Test {
public:
friend class internal::TestInfoImpl;
@@ -237,6 +369,13 @@ class Test {
// Returns true iff the current test has a fatal failure.
static bool HasFatalFailure();
+ // Returns true iff the current test has a non-fatal failure.
+ static bool HasNonfatalFailure();
+
+ // Returns true iff the current test has a (either fatal or
+ // non-fatal) failure.
+ static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); }
+
// Logs a property for the current test. Only the last value for a given
// key is remembered.
// These are public static so they can be called from utility functions
@@ -304,6 +443,155 @@ class Test {
GTEST_DISALLOW_COPY_AND_ASSIGN_(Test);
};
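
A sketch (assumed usage) of where the new static accessors are handy: a fixture's TearDown() can emit extra context only when the test body has already failed, fatally or not:

#include <cstdio>
#include <gtest/gtest.h>

class LoggingFixture : public ::testing::Test {
 protected:
  virtual void TearDown() {
    // HasFailure() is true after either a fatal or a non-fatal failure
    // in the current test.
    if (HasFailure())
      std::fprintf(stderr, "note: %s failed; dumping extra context\n",
                   ::testing::UnitTest::GetInstance()
                       ->current_test_info()->name());
  }
};
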
+typedef internal::TimeInMillis TimeInMillis;
+
+// A copyable object representing a user specified test property which can be
+// output as a key/value string pair.
+//
+// Don't inherit from TestProperty as its destructor is not virtual.
+class TestProperty {
+ public:
+ // C'tor. TestProperty does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestProperty object.
+ TestProperty(const char* a_key, const char* a_value) :
+ key_(a_key), value_(a_value) {
+ }
+
+ // Gets the user supplied key.
+ const char* key() const {
+ return key_.c_str();
+ }
+
+ // Gets the user supplied value.
+ const char* value() const {
+ return value_.c_str();
+ }
+
+ // Sets a new value, overriding the one supplied in the constructor.
+ void SetValue(const char* new_value) {
+ value_ = new_value;
+ }
+
+ private:
+ // The key supplied by the user.
+ internal::String key_;
+ // The value supplied by the user.
+ internal::String value_;
+};
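+
+// A sketch of how TestProperty objects typically come into being: the
+// key/value pairs below are hypothetical, and with --gtest_output=xml
+// they appear as attributes of this test's <testcase> element.
+//
+//   TEST(ReportTest, EmitsKeyValuePairs) {
+//     // Recording the same key twice updates the stored value rather
+//     // than duplicating the property.
+//     RecordProperty("revision", "r1");
+//     RecordProperty("revision", "r2");  // final value: "r2"
+//   }
+//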
+
+// The result of a single Test. This includes a list of
+// TestPartResults, a list of TestProperties, a count of how many
+// death tests there are in the Test, and how much time it took to run
+// the Test.
+//
+// TestResult is not copyable.
+class GTEST_API_ TestResult {
+ public:
+ // Creates an empty TestResult.
+ TestResult();
+
+ // D'tor. Do not inherit from TestResult.
+ ~TestResult();
+
+ // Gets the number of all test parts. This is the sum of the number
+ // of successful test parts and the number of failed test parts.
+ int total_part_count() const;
+
+ // Returns the number of the test properties.
+ int test_property_count() const;
+
+ // Returns true iff the test passed (i.e. no test part failed).
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the test failed.
+ bool Failed() const;
+
+ // Returns true iff the test fatally failed.
+ bool HasFatalFailure() const;
+
+ // Returns true iff the test has a non-fatal failure.
+ bool HasNonfatalFailure() const;
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns the i-th test part result among all the results. i can range
+ // from 0 to test_property_count() - 1. If i is not in that range, aborts
+ // the program.
+ const TestPartResult& GetTestPartResult(int i) const;
+
+ // Returns the i-th test property. i can range from 0 to
+ // test_property_count() - 1. If i is not in that range, aborts the
+ // program.
+ const TestProperty& GetTestProperty(int i) const;
+
+ private:
+ friend class TestInfo;
+ friend class UnitTest;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::ExecDeathTest;
+ friend class internal::TestInfoImpl;
+ friend class internal::TestResultAccessor;
+ friend class internal::UnitTestImpl;
+ friend class internal::WindowsDeathTest;
+
+ // Gets the vector of TestPartResults.
+ const std::vector<TestPartResult>& test_part_results() const {
+ return test_part_results_;
+ }
+
+ // Gets the vector of TestProperties.
+ const std::vector<TestProperty>& test_properties() const {
+ return test_properties_;
+ }
+
+ // Sets the elapsed time.
+ void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }
+
+ // Adds a test property to the list. The property is validated and may add
+ // a non-fatal failure if invalid (e.g., if it conflicts with reserved
+ // key names). If a property is already recorded for the same key, the
+ // value will be updated, rather than storing multiple values for the same
+ // key.
+ void RecordProperty(const TestProperty& test_property);
+
+ // Adds a failure if the key is a reserved attribute of Google Test
+ // testcase tags. Returns true if the property is valid.
+ // TODO(russr): Validate attribute names are legal and human readable.
+ static bool ValidateTestProperty(const TestProperty& test_property);
+
+ // Adds a test part result to the list.
+ void AddTestPartResult(const TestPartResult& test_part_result);
+
+ // Returns the death test count.
+ int death_test_count() const { return death_test_count_; }
+
+ // Increments the death test count, returning the new count.
+ int increment_death_test_count() { return ++death_test_count_; }
+
+ // Clears the test part results.
+ void ClearTestPartResults();
+
+ // Clears the object.
+ void Clear();
+
+ // Protects mutable state of the property vector and of owned
+ // properties, whose values may be updated.
+ internal::Mutex test_properites_mutex_;
+
+ // The vector of TestPartResults
+ std::vector<TestPartResult> test_part_results_;
+ // The vector of TestProperties
+ std::vector<TestProperty> test_properties_;
+ // Running count of death tests.
+ int death_test_count_;
+ // The elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+ // We disallow copying TestResult.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult);
+}; // class TestResult
// A TestInfo object stores the following information about a test:
//
@@ -316,7 +604,7 @@ class Test {
// The constructor of TestInfo registers itself with the UnitTest
// singleton such that the RUN_ALL_TESTS() macro knows which tests to
// run.
-class TestInfo {
+class GTEST_API_ TestInfo {
public:
// Destructs a TestInfo object. This function is not virtual, so
// don't inherit from TestInfo.
@@ -334,7 +622,9 @@ class TestInfo {
// Returns the test comment.
const char* comment() const;
- // Returns true if this test should run.
+ // Returns true if this test should run, that is if the test is not disabled
+ // (or it is disabled but the also_run_disabled_tests flag has been specified)
+ // and its full name matches the user-specified filter.
//
// Google Test allows the user to filter the tests by their full names.
// The full name of a test Bar in test case Foo is defined as
@@ -351,15 +641,16 @@ class TestInfo {
bool should_run() const;
// Returns the result of the test.
- const internal::TestResult* result() const;
+ const TestResult* result() const;
+
private:
-#ifdef GTEST_HAS_DEATH_TEST
+#if GTEST_HAS_DEATH_TEST
friend class internal::DefaultDeathTestFactory;
#endif // GTEST_HAS_DEATH_TEST
- friend class internal::TestInfoImpl;
- friend class internal::UnitTestImpl;
friend class Test;
friend class TestCase;
+ friend class internal::TestInfoImpl;
+ friend class internal::UnitTestImpl;
friend TestInfo* internal::MakeAndRegisterTestInfo(
const char* test_case_name, const char* name,
const char* test_case_comment, const char* comment,
@@ -368,6 +659,9 @@ class TestInfo {
Test::TearDownTestCaseFunc tear_down_tc,
internal::TestFactoryBase* factory);
+ // Returns true if this test matches the user-specified filter.
+ bool matches_filter() const;
+
// Increments the number of death tests encountered in this test so
// far.
int increment_death_test_count();
@@ -389,6 +683,141 @@ class TestInfo {
GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo);
};
+// A test case, which consists of a vector of TestInfos.
+//
+// TestCase is not copyable.
+class GTEST_API_ TestCase {
+ public:
+ // Creates a TestCase with the given name.
+ //
+ // TestCase does NOT have a default constructor. Always use this
+ // constructor to create a TestCase object.
+ //
+ // Arguments:
+ //
+ // name: name of the test case
+ // set_up_tc: pointer to the function that sets up the test case
+ // tear_down_tc: pointer to the function that tears down the test case
+ TestCase(const char* name, const char* comment,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc);
+
+ // Destructor of TestCase.
+ virtual ~TestCase();
+
+ // Gets the name of the TestCase.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the test case comment.
+ const char* comment() const { return comment_.c_str(); }
+
+ // Returns true if any test in this test case should run.
+ bool should_run() const { return should_run_; }
+
+ // Gets the number of successful tests in this test case.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests in this test case.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests in this test case.
+ int disabled_test_count() const;
+
+ // Get the number of tests in this test case that should run.
+ int test_to_run_count() const;
+
+ // Gets the number of all tests in this test case.
+ int total_test_count() const;
+
+ // Returns true iff the test case passed.
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the test case failed.
+ bool Failed() const { return failed_test_count() > 0; }
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ const TestInfo* GetTestInfo(int i) const;
+
+ private:
+ friend class Test;
+ friend class internal::UnitTestImpl;
+
+ // Gets the (mutable) vector of TestInfos in this TestCase.
+ std::vector<TestInfo*>& test_info_list() { return test_info_list_; }
+
+ // Gets the (immutable) vector of TestInfos in this TestCase.
+ const std::vector<TestInfo*>& test_info_list() const {
+ return test_info_list_;
+ }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ TestInfo* GetMutableTestInfo(int i);
+
+ // Sets the should_run member.
+ void set_should_run(bool should) { should_run_ = should; }
+
+ // Adds a TestInfo to this test case. Will delete the TestInfo upon
+ // destruction of the TestCase object.
+ void AddTestInfo(TestInfo * test_info);
+
+ // Clears the results of all tests in this test case.
+ void ClearResult();
+
+ // Clears the results of all tests in the given test case.
+ static void ClearTestCaseResult(TestCase* test_case) {
+ test_case->ClearResult();
+ }
+
+ // Runs every test in this TestCase.
+ void Run();
+
+ // Returns true iff test passed.
+ static bool TestPassed(const TestInfo * test_info);
+
+ // Returns true iff test failed.
+ static bool TestFailed(const TestInfo * test_info);
+
+ // Returns true iff test is disabled.
+ static bool TestDisabled(const TestInfo * test_info);
+
+ // Returns true if the given test should run.
+ static bool ShouldRunTest(const TestInfo *test_info);
+
+ // Shuffles the tests in this test case.
+ void ShuffleTests(internal::Random* random);
+
+ // Restores the test order to before the first shuffle.
+ void UnshuffleTests();
+
+ // Name of the test case.
+ internal::String name_;
+ // Comment on the test case.
+ internal::String comment_;
+ // The vector of TestInfos in their original order. It owns the
+ // elements in the vector.
+ std::vector<TestInfo*> test_info_list_;
+ // Provides a level of indirection for the test list to allow easy
+ // shuffling and restoring the test order. The i-th element in this
+ // vector is the index of the i-th test in the shuffled test list.
+ std::vector<int> test_indices_;
+ // Pointer to the function that sets up the test case.
+ Test::SetUpTestCaseFunc set_up_tc_;
+ // Pointer to the function that tears down the test case.
+ Test::TearDownTestCaseFunc tear_down_tc_;
+ // True iff any test in this test case should run.
+ bool should_run_;
+ // Elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+ // We disallow copying TestCases.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase);
+};
+
// An Environment object is capable of setting up and tearing down an
// environment. The user should subclass this to define his own
// environment(s).
@@ -420,7 +849,159 @@ class Environment {
virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
};
-// A UnitTest consists of a list of TestCases.
+// The interface for tracing execution of tests. The methods are organized in
+// the order the corresponding events are fired.
+class TestEventListener {
+ public:
+ virtual ~TestEventListener() {}
+
+ // Fired before any test activity starts.
+ virtual void OnTestProgramStart(const UnitTest& unit_test) = 0;
+
+ // Fired before each iteration of tests starts. There may be more than
+ // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration
+ // index, starting from 0.
+ virtual void OnTestIterationStart(const UnitTest& unit_test,
+ int iteration) = 0;
+
+ // Fired before environment set-up for each iteration of tests starts.
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0;
+
+ // Fired after environment set-up for each iteration of tests ends.
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0;
+
+ // Fired before the test case starts.
+ virtual void OnTestCaseStart(const TestCase& test_case) = 0;
+
+ // Fired before the test starts.
+ virtual void OnTestStart(const TestInfo& test_info) = 0;
+
+ // Fired after a failed assertion or a SUCCESS().
+ virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0;
+
+ // Fired after the test ends.
+ virtual void OnTestEnd(const TestInfo& test_info) = 0;
+
+ // Fired after the test case ends.
+ virtual void OnTestCaseEnd(const TestCase& test_case) = 0;
+
+ // Fired before environment tear-down for each iteration of tests starts.
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0;
+
+ // Fired after environment tear-down for each iteration of tests ends.
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0;
+
+ // Fired after each iteration of tests finishes.
+ virtual void OnTestIterationEnd(const UnitTest& unit_test,
+ int iteration) = 0;
+
+ // Fired after all test activities have ended.
+ virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0;
+};
+
+// The convenience class for users who need to override just one or two
+// methods, and who accept that a change to the signature of an overridden
+// method will not be caught during the build. For comments about each
+// method please see the definition of TestEventListener above.
+class EmptyTestEventListener : public TestEventListener {
+ public:
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationStart(const UnitTest& /*unit_test*/,
+ int /*iteration*/) {}
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestCaseStart(const TestCase& /*test_case*/) {}
+ virtual void OnTestStart(const TestInfo& /*test_info*/) {}
+ virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {}
+ virtual void OnTestEnd(const TestInfo& /*test_info*/) {}
+ virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {}
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/,
+ int /*iteration*/) {}
+ virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
+};
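+
+// A minimal sketch built on the classes above (FailureCounter is a
+// hypothetical name): it overrides a single event and counts failed
+// test parts using the nested TestPartResult::Type enum.
+//
+//   class FailureCounter : public ::testing::EmptyTestEventListener {
+//    public:
+//     FailureCounter() : failures_(0) {}
+//     virtual void OnTestPartResult(
+//         const ::testing::TestPartResult& result) {
+//       if (result.type() != ::testing::TestPartResult::kSuccess)
+//         ++failures_;
+//     }
+//     int failures() const { return failures_; }
+//    private:
+//     int failures_;
+//   };
+//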
+
+// TestEventListeners lets users add listeners to track events in Google Test.
+class GTEST_API_ TestEventListeners {
+ public:
+ TestEventListeners();
+ ~TestEventListeners();
+
+ // Appends an event listener to the end of the list. Google Test assumes
+ // the ownership of the listener (i.e. it will delete the listener when
+ // the test program finishes).
+ void Append(TestEventListener* listener);
+
+ // Removes the given event listener from the list and returns it. It then
+ // becomes the caller's responsibility to delete the listener. Returns
+ // NULL if the listener is not found in the list.
+ TestEventListener* Release(TestEventListener* listener);
+
+ // Returns the standard listener responsible for the default console
+ // output. Can be removed from the listeners list to shut down default
+ // console output. Note that removing this object from the listener list
+ // with Release transfers its ownership to the caller and makes this
+ // function return NULL the next time.
+ TestEventListener* default_result_printer() const {
+ return default_result_printer_;
+ }
+
+ // Returns the standard listener responsible for the default XML output
+ // controlled by the --gtest_output=xml flag. Can be removed from the
+ // listeners list by users who want to shut down the default XML output
+ // controlled by this flag and substitute it with custom one. Note that
+ // removing this object from the listener list with Release transfers its
+ // ownership to the caller and makes this function return NULL the next
+ // time.
+ TestEventListener* default_xml_generator() const {
+ return default_xml_generator_;
+ }
+
+ private:
+ friend class TestCase;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::NoExecDeathTest;
+ friend class internal::TestEventListenersAccessor;
+ friend class internal::TestInfoImpl;
+ friend class internal::UnitTestImpl;
+
+ // Returns repeater that broadcasts the TestEventListener events to all
+ // subscribers.
+ TestEventListener* repeater();
+
+ // Sets the default_result_printer attribute to the provided listener.
+ // The listener is also added to the listener list and previous
+ // default_result_printer is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultResultPrinter(TestEventListener* listener);
+
+ // Sets the default_xml_generator attribute to the provided listener. The
+ // listener is also added to the listener list and previous
+ // default_xml_generator is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultXmlGenerator(TestEventListener* listener);
+
+ // Controls whether events will be forwarded by the repeater to the
+ // listeners in the list.
+ bool EventForwardingEnabled() const;
+ void SuppressEventForwarding();
+
+ // The actual list of listeners.
+ internal::TestEventRepeater* repeater_;
+ // Listener responsible for the standard result output.
+ TestEventListener* default_result_printer_;
+ // Listener responsible for the creation of the XML output file.
+ TestEventListener* default_xml_generator_;
+
+ // We disallow copying TestEventListeners.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners);
+};
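+
+// Registration sketch, assuming the FailureCounter listener outlined
+// earlier; Append transfers ownership of the listener to Google Test,
+// which deletes it when the test program finishes.
+//
+//   int main(int argc, char** argv) {
+//     ::testing::InitGoogleTest(&argc, argv);
+//     ::testing::TestEventListeners& listeners =
+//         ::testing::UnitTest::GetInstance()->listeners();
+//     listeners.Append(new FailureCounter);
+//     return RUN_ALL_TESTS();
+//   }
+//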
+
+// A UnitTest consists of a vector of TestCases.
//
// This is a singleton class. The only instance of UnitTest is
// created when UnitTest::GetInstance() is first called. This
@@ -430,40 +1011,13 @@ class Environment {
//
// This class is thread-safe as long as the methods are called
// according to their specification.
-class UnitTest {
+class GTEST_API_ UnitTest {
public:
// Gets the singleton UnitTest object. The first time this method
// is called, a UnitTest object is constructed and returned.
// Consecutive calls will return the same object.
static UnitTest* GetInstance();
- // Registers and returns a global test environment. When a test
- // program is run, all global test environments will be set-up in
- // the order they were registered. After all tests in the program
- // have finished, all global test environments will be torn-down in
- // the *reverse* order they were registered.
- //
- // The UnitTest object takes ownership of the given environment.
- //
- // This method can only be called from the main thread.
- Environment* AddEnvironment(Environment* env);
-
- // Adds a TestPartResult to the current TestResult object. All
- // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
- // eventually call this to report their results. The user code
- // should use the assertion macros instead of calling this directly.
- //
- // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
- void AddTestPartResult(TestPartResultType result_type,
- const char* file_name,
- int line_number,
- const internal::String& message,
- const internal::String& os_stack_trace);
-
- // Adds a TestProperty to the current TestResult object. If the result already
- // contains a property with the same key, the value will be updated.
- void RecordPropertyForCurrentTest(const char* key, const char* value);
-
// Runs all tests in this UnitTest object and prints the result.
// Returns 0 if successful, or 1 otherwise.
//
@@ -484,19 +1038,107 @@ class UnitTest {
// or NULL if no test is running.
const TestInfo* current_test_info() const;
-#ifdef GTEST_HAS_PARAM_TEST
+ // Returns the random seed used at the start of the current test run.
+ int random_seed() const;
+
+#if GTEST_HAS_PARAM_TEST
// Returns the ParameterizedTestCaseRegistry object used to keep track of
// value-parameterized tests and instantiate and register them.
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
internal::ParameterizedTestCaseRegistry& parameterized_test_registry();
#endif // GTEST_HAS_PARAM_TEST
+ // Gets the number of successful test cases.
+ int successful_test_case_count() const;
+
+ // Gets the number of failed test cases.
+ int failed_test_case_count() const;
+
+ // Gets the number of all test cases.
+ int total_test_case_count() const;
+
+ // Gets the number of all test cases that contain at least one test
+ // that should run.
+ int test_case_to_run_count() const;
+
+ // Gets the number of successful tests.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests.
+ int disabled_test_count() const;
+
+ // Gets the number of all tests.
+ int total_test_count() const;
+
+ // Gets the number of tests that should run.
+ int test_to_run_count() const;
+
+ // Gets the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const;
+
+ // Returns true iff the unit test passed (i.e. all test cases passed).
+ bool Passed() const;
+
+ // Returns true iff the unit test failed (i.e. some test case failed
+ // or something outside of all tests failed).
+ bool Failed() const;
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ const TestCase* GetTestCase(int i) const;
+
+ // Returns the list of event listeners that can be used to track events
+ // inside Google Test.
+ TestEventListeners& listeners();
+
+ private:
+ // Registers and returns a global test environment. When a test
+ // program is run, all global test environments will be set-up in
+ // the order they were registered. After all tests in the program
+ // have finished, all global test environments will be torn-down in
+ // the *reverse* order they were registered.
+ //
+ // The UnitTest object takes ownership of the given environment.
+ //
+ // This method can only be called from the main thread.
+ Environment* AddEnvironment(Environment* env);
+
+ // Adds a TestPartResult to the current TestResult object. All
+ // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
+ // eventually call this to report their results. The user code
+ // should use the assertion macros instead of calling this directly.
+ void AddTestPartResult(TestPartResult::Type result_type,
+ const char* file_name,
+ int line_number,
+ const internal::String& message,
+ const internal::String& os_stack_trace);
+
+ // Adds a TestProperty to the current TestResult object. If the result already
+ // contains a property with the same key, the value will be updated.
+ void RecordPropertyForCurrentTest(const char* key, const char* value);
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ TestCase* GetMutableTestCase(int i);
+
// Accessors for the implementation object.
internal::UnitTestImpl* impl() { return impl_; }
const internal::UnitTestImpl* impl() const { return impl_; }
- private:
- // ScopedTrace is a friend as it needs to modify the per-thread
- // trace stack, which is a private member of UnitTest.
+
+  // These classes and functions are friends as they need to access private
+ // members of UnitTest.
+ friend class Test;
+ friend class internal::AssertHelper;
friend class internal::ScopedTrace;
+ friend Environment* AddGlobalTestEnvironment(Environment* env);
+ friend internal::UnitTestImpl* internal::GetUnitTestImpl();
+ friend void internal::ReportFailureInUnknownLocation(
+ TestPartResult::Type result_type,
+ const internal::String& message);
// Creates an empty UnitTest.
UnitTest();
@@ -556,36 +1198,34 @@ inline Environment* AddGlobalTestEnvironment(Environment* env) {
// updated.
//
// Calling the function for the second time has no user-visible effect.
-void InitGoogleTest(int* argc, char** argv);
+GTEST_API_ void InitGoogleTest(int* argc, char** argv);
// This overloaded version can be used in Windows programs compiled in
// UNICODE mode.
-void InitGoogleTest(int* argc, wchar_t** argv);
+GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv);
namespace internal {
// These overloaded versions handle ::std::string and ::std::wstring.
-#if GTEST_HAS_STD_STRING
-inline String FormatForFailureMessage(const ::std::string& str) {
+GTEST_API_ inline String FormatForFailureMessage(const ::std::string& str) {
return (Message() << '"' << str << '"').GetString();
}
-#endif // GTEST_HAS_STD_STRING
#if GTEST_HAS_STD_WSTRING
-inline String FormatForFailureMessage(const ::std::wstring& wstr) {
+GTEST_API_ inline String FormatForFailureMessage(const ::std::wstring& wstr) {
return (Message() << "L\"" << wstr << '"').GetString();
}
#endif // GTEST_HAS_STD_WSTRING
// These overloaded versions handle ::string and ::wstring.
#if GTEST_HAS_GLOBAL_STRING
-inline String FormatForFailureMessage(const ::string& str) {
+GTEST_API_ inline String FormatForFailureMessage(const ::string& str) {
return (Message() << '"' << str << '"').GetString();
}
#endif // GTEST_HAS_GLOBAL_STRING
#if GTEST_HAS_GLOBAL_WSTRING
-inline String FormatForFailureMessage(const ::wstring& wstr) {
+GTEST_API_ inline String FormatForFailureMessage(const ::wstring& wstr) {
return (Message() << "L\"" << wstr << '"').GetString();
}
#endif // GTEST_HAS_GLOBAL_WSTRING
@@ -614,10 +1254,20 @@ AssertionResult CmpHelperEQ(const char* expected_expression,
const char* actual_expression,
const T1& expected,
const T2& actual) {
+#ifdef _MSC_VER
+#pragma warning(push) // Saves the current warning state.
+#pragma warning(disable:4389) // Temporarily disables warning on
+ // signed/unsigned mismatch.
+#endif
+
if (expected == actual) {
return AssertionSuccess();
}
+#ifdef _MSC_VER
+#pragma warning(pop) // Restores the warning state.
+#endif
+
return EqFailure(expected_expression,
actual_expression,
FormatForComparisonFailureMessage(expected, actual),
@@ -628,10 +1278,10 @@ AssertionResult CmpHelperEQ(const char* expected_expression,
// With this overloaded version, we allow anonymous enums to be used
// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums
// can be implicitly cast to BiggestInt.
-AssertionResult CmpHelperEQ(const char* expected_expression,
- const char* actual_expression,
- BiggestInt expected,
- BiggestInt actual);
+GTEST_API_ AssertionResult CmpHelperEQ(const char* expected_expression,
+ const char* actual_expression,
+ BiggestInt expected,
+ BiggestInt actual);
// The helper class for {ASSERT|EXPECT}_EQ. The template argument
// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
@@ -688,7 +1338,7 @@ class EqHelper<true> {
template <typename T1, typename T2>
static AssertionResult Compare(const char* expected_expression,
const char* actual_expression,
- const T1& expected,
+ const T1& /* expected */,
T2* actual) {
// We already know that 'expected' is a null pointer.
return CmpHelperEQ(expected_expression, actual_expression,
@@ -720,72 +1370,72 @@ AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
return AssertionFailure(msg);\
}\
}\
-AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
- BiggestInt val1, BiggestInt val2);
+GTEST_API_ AssertionResult CmpHelper##op_name(\
+ const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2)
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
// Implements the helper function for {ASSERT|EXPECT}_NE
-GTEST_IMPL_CMP_HELPER_(NE, !=)
+GTEST_IMPL_CMP_HELPER_(NE, !=);
// Implements the helper function for {ASSERT|EXPECT}_LE
-GTEST_IMPL_CMP_HELPER_(LE, <=)
+GTEST_IMPL_CMP_HELPER_(LE, <=);
// Implements the helper function for {ASSERT|EXPECT}_LT
-GTEST_IMPL_CMP_HELPER_(LT, < )
+GTEST_IMPL_CMP_HELPER_(LT, < );
// Implements the helper function for {ASSERT|EXPECT}_GE
-GTEST_IMPL_CMP_HELPER_(GE, >=)
+GTEST_IMPL_CMP_HELPER_(GE, >=);
// Implements the helper function for {ASSERT|EXPECT}_GT
-GTEST_IMPL_CMP_HELPER_(GT, > )
+GTEST_IMPL_CMP_HELPER_(GT, > );
#undef GTEST_IMPL_CMP_HELPER_
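For illustration, roughly what one instantiation of the macro above declares; the macro body is abbreviated in this hunk, so the failure-message formatting below is a simplified sketch, not the verbatim expansion:

// Approximate expansion of GTEST_IMPL_CMP_HELPER_(LE, <=): a function
// template for arbitrary comparable types, plus the exported BiggestInt
// overload that lets anonymous enums participate.
template <typename T1, typename T2>
AssertionResult CmpHelperLE(const char* expr1, const char* expr2,
                            const T1& val1, const T2& val2) {
  if (val1 <= val2) {
    return AssertionSuccess();
  }
  Message msg;
  msg << "Expected: (" << expr1 << ") <= (" << expr2
      << "), actual: " << val1 << " vs " << val2;  // simplified formatting
  return AssertionFailure(msg);
}
GTEST_API_ AssertionResult CmpHelperLE(
    const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2);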
// The helper function for {ASSERT|EXPECT}_STREQ.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult CmpHelperSTREQ(const char* expected_expression,
- const char* actual_expression,
- const char* expected,
- const char* actual);
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual);
// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
- const char* actual_expression,
- const char* expected,
- const char* actual);
+GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual);
// The helper function for {ASSERT|EXPECT}_STRNE.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult CmpHelperSTRNE(const char* s1_expression,
- const char* s2_expression,
- const char* s1,
- const char* s2);
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
// The helper function for {ASSERT|EXPECT}_STRCASENE.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
- const char* s2_expression,
- const char* s1,
- const char* s2);
+GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
// Helper function for *_STREQ on wide strings.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult CmpHelperSTREQ(const char* expected_expression,
- const char* actual_expression,
- const wchar_t* expected,
- const wchar_t* actual);
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const wchar_t* expected,
+ const wchar_t* actual);
// Helper function for *_STRNE on wide strings.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult CmpHelperSTRNE(const char* s1_expression,
- const char* s2_expression,
- const wchar_t* s1,
- const wchar_t* s2);
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2);
} // namespace internal
@@ -797,32 +1447,30 @@ AssertionResult CmpHelperSTRNE(const char* s1_expression,
//
// The {needle,haystack}_expr arguments are the stringified
// expressions that generated the two real arguments.
-AssertionResult IsSubstring(
+GTEST_API_ AssertionResult IsSubstring(
const char* needle_expr, const char* haystack_expr,
const char* needle, const char* haystack);
-AssertionResult IsSubstring(
+GTEST_API_ AssertionResult IsSubstring(
const char* needle_expr, const char* haystack_expr,
const wchar_t* needle, const wchar_t* haystack);
-AssertionResult IsNotSubstring(
+GTEST_API_ AssertionResult IsNotSubstring(
const char* needle_expr, const char* haystack_expr,
const char* needle, const char* haystack);
-AssertionResult IsNotSubstring(
+GTEST_API_ AssertionResult IsNotSubstring(
const char* needle_expr, const char* haystack_expr,
const wchar_t* needle, const wchar_t* haystack);
-#if GTEST_HAS_STD_STRING
-AssertionResult IsSubstring(
+GTEST_API_ AssertionResult IsSubstring(
const char* needle_expr, const char* haystack_expr,
const ::std::string& needle, const ::std::string& haystack);
-AssertionResult IsNotSubstring(
+GTEST_API_ AssertionResult IsNotSubstring(
const char* needle_expr, const char* haystack_expr,
const ::std::string& needle, const ::std::string& haystack);
-#endif // GTEST_HAS_STD_STRING
#if GTEST_HAS_STD_WSTRING
-AssertionResult IsSubstring(
+GTEST_API_ AssertionResult IsSubstring(
const char* needle_expr, const char* haystack_expr,
const ::std::wstring& needle, const ::std::wstring& haystack);
-AssertionResult IsNotSubstring(
+GTEST_API_ AssertionResult IsNotSubstring(
const char* needle_expr, const char* haystack_expr,
const ::std::wstring& needle, const ::std::wstring& haystack);
#endif // GTEST_HAS_STD_WSTRING
@@ -865,35 +1513,57 @@ AssertionResult CmpHelperFloatingPointEQ(const char* expected_expression,
// Helper function for implementing ASSERT_NEAR.
//
// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
-AssertionResult DoubleNearPredFormat(const char* expr1,
- const char* expr2,
- const char* abs_error_expr,
- double val1,
- double val2,
- double abs_error);
+GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,
+ const char* expr2,
+ const char* abs_error_expr,
+ double val1,
+ double val2,
+ double abs_error);
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
// A class that enables one to stream messages to assertion macros
-class AssertHelper {
+class GTEST_API_ AssertHelper {
public:
// Constructor.
- AssertHelper(TestPartResultType type, const char* file, int line,
+ AssertHelper(TestPartResult::Type type,
+ const char* file,
+ int line,
const char* message);
+ ~AssertHelper();
+
// Message assignment is a semantic trick to enable assertion
// streaming; see the GTEST_MESSAGE_ macro below.
void operator=(const Message& message) const;
+
private:
- TestPartResultType const type_;
- const char* const file_;
- int const line_;
- String const message_;
+ // We put our data in a struct so that the size of the AssertHelper class can
+ // be as small as possible. This is important because gcc is incapable of
+ // re-using stack space even for temporary variables, so every EXPECT_EQ
+ // reserves stack space for another AssertHelper.
+ struct AssertHelperData {
+ AssertHelperData(TestPartResult::Type t,
+ const char* srcfile,
+ int line_num,
+ const char* msg)
+ : type(t), file(srcfile), line(line_num), message(msg) { }
+
+ TestPartResult::Type const type;
+ const char* const file;
+ int const line;
+ String const message;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);
+ };
+
+ AssertHelperData* const data_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
};
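A minimal sketch of the size idiom described in the comment above; the names here are illustrative, not gtest's:

struct HelperData {          // the bulky fields live behind one pointer
  int type;
  const char* file;
  int line;
};

class SmallHelper {
 public:
  SmallHelper() : data_(new HelperData) {}
  ~SmallHelper() { delete data_; }
 private:
  HelperData* const data_;   // sizeof(SmallHelper) == sizeof(void*)
};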
} // namespace internal
-#ifdef GTEST_HAS_PARAM_TEST
+#if GTEST_HAS_PARAM_TEST
// The abstract base class that all value-parameterized tests inherit from.
//
// This class adds support for accessing the test parameter value via
@@ -981,10 +1651,22 @@ const T* TestWithParam<T>::parameter_ = NULL;
#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed")
// Generates a fatal failure with a generic message.
-#define FAIL() GTEST_FATAL_FAILURE_("Failed")
+#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed")
+
+// Define this macro to 1 to omit the definition of FAIL(), which is a
+// generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_FAIL
+#define FAIL() GTEST_FAIL()
+#endif
// Generates a success with a generic message.
-#define SUCCEED() GTEST_SUCCESS_("Succeeded")
+#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded")
+
+// Define this macro to 1 to omit the definition of SUCCEED(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_SUCCEED
+#define SUCCEED() GTEST_SUCCEED()
+#endif
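A sketch of how a program that cannot afford the generic names might opt out and use the prefixed spellings instead; SomeLegacyCheck() is a hypothetical function standing in for the clashing library:

// Built with -DGTEST_DONT_DEFINE_FAIL=1 -DGTEST_DONT_DEFINE_SUCCEED=1.
bool SomeLegacyCheck();  // hypothetical, from the library that defines FAIL()

TEST(ConflictFreeTest, UsesPrefixedMacros) {
  if (SomeLegacyCheck())
    GTEST_SUCCEED();
  else
    GTEST_FAIL() << "legacy check failed";
}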
// Macros for testing exceptions.
//
@@ -1008,7 +1690,9 @@ const T* TestWithParam<T>::parameter_ = NULL;
#define ASSERT_ANY_THROW(statement) \
GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_)
-// Boolean assertions.
+// Boolean assertions. Condition can be either a Boolean expression or an
+// AssertionResult. For more information on how to use AssertionResult with
+// these macros see comments on that class.
#define EXPECT_TRUE(condition) \
GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
GTEST_NONFATAL_FAILURE_)
@@ -1181,13 +1865,13 @@ const T* TestWithParam<T>::parameter_ = NULL;
// Asserts that val1 is less than, or almost equal to, val2. Fails
// otherwise. In particular, it fails if either val1 or val2 is NaN.
-AssertionResult FloatLE(const char* expr1, const char* expr2,
- float val1, float val2);
-AssertionResult DoubleLE(const char* expr1, const char* expr2,
- double val1, double val2);
+GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,
+ float val1, float val2);
+GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,
+ double val1, double val2);
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
// Macros that test for HRESULT failure and success; these are only useful
// on Windows, and rely on Windows SDK macros and APIs to compile.
@@ -1242,6 +1926,52 @@ AssertionResult DoubleLE(const char* expr1, const char* expr2,
::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
__FILE__, __LINE__, ::testing::Message() << (message))
+namespace internal {
+
+// This template is declared, but intentionally undefined.
+template <typename T1, typename T2>
+struct StaticAssertTypeEqHelper;
+
+template <typename T>
+struct StaticAssertTypeEqHelper<T, T> {};
+
+} // namespace internal
+
+// Compile-time assertion for type equality.
+// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
+// the same type. The value it returns is not interesting.
+//
+// Instead of making StaticAssertTypeEq a class template, we make it a
+// function template that invokes a helper class template. This
+// prevents a user from misusing StaticAssertTypeEq<T1, T2> by
+// defining objects of that type.
+//
+// CAVEAT:
+//
+// When used inside a method of a class template,
+// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is
+// instantiated. For example, given:
+//
+// template <typename T> class Foo {
+// public:
+// void Bar() { testing::StaticAssertTypeEq<int, T>(); }
+// };
+//
+// the code:
+//
+// void Test1() { Foo<bool> foo; }
+//
+// will NOT generate a compiler error, as Foo<bool>::Bar() is never
+// actually instantiated. Instead, you need:
+//
+// void Test2() { Foo<bool> foo; foo.Bar(); }
+//
+// to cause a compiler error.
+template <typename T1, typename T2>
+bool StaticAssertTypeEq() {
+ internal::StaticAssertTypeEqHelper<T1, T2>();
+ return true;
+}
// Defines a test.
//
@@ -1268,10 +1998,15 @@ AssertionResult DoubleLE(const char* expr1, const char* expr2,
// code. GetTestTypeId() is guaranteed to always return the same
// value, as it always calls GetTypeId<>() from the Google Test
// framework.
-#define TEST(test_case_name, test_name)\
- GTEST_TEST_(test_case_name, test_name,\
+#define GTEST_TEST(test_case_name, test_name)\
+ GTEST_TEST_(test_case_name, test_name, \
::testing::Test, ::testing::internal::GetTestTypeId())
+// Define this macro to 1 to omit the definition of TEST(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_TEST
+#define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name)
+#endif
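Likewise for TEST itself; a sketch assuming GTEST_DONT_DEFINE_TEST=1 is set on the compiler command line:

GTEST_TEST(MathTest, AddsSmallIntegers) {
  EXPECT_EQ(4, 2 + 2);
}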
// Defines a test that uses a test fixture.
//
@@ -1300,7 +2035,7 @@ AssertionResult DoubleLE(const char* expr1, const char* expr2,
// }
#define TEST_F(test_fixture, test_name)\
- GTEST_TEST_(test_fixture, test_name, test_fixture,\
+ GTEST_TEST_(test_fixture, test_name, test_fixture, \
::testing::internal::GetTypeId<test_fixture>())
// Use this macro in main() to run all tests. It returns 0 if all
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-death-test-internal.h b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-death-test-internal.h
index 0769fca..e433084 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-death-test-internal.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-death-test-internal.h
@@ -46,9 +46,10 @@ GTEST_DECLARE_string_(internal_run_death_test);
// Names of the flags (needed for parsing Google Test flags).
const char kDeathTestStyleFlag[] = "death_test_style";
+const char kDeathTestUseFork[] = "death_test_use_fork";
const char kInternalRunDeathTestFlag[] = "internal_run_death_test";
-#ifdef GTEST_HAS_DEATH_TEST
+#if GTEST_HAS_DEATH_TEST
// DeathTest is a class that hides much of the complexity of the
// GTEST_DEATH_TEST_ macro. It is abstract; its static Create method
@@ -63,7 +64,7 @@ const char kInternalRunDeathTestFlag[] = "internal_run_death_test";
// by wait(2)
// exit code: The integer code passed to exit(3), _exit(2), or
// returned from main()
-class DeathTest {
+class GTEST_API_ DeathTest {
public:
// Create returns false if there was an error determining the
// appropriate action to take for the current death test; for example,
@@ -120,7 +121,12 @@ class DeathTest {
// the last death test.
static const char* LastMessage();
+ static void set_last_death_test_message(const String& message);
+
private:
+ // A string containing a description of the outcome of the last death test.
+ static String last_death_test_message_;
+
GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest);
};
@@ -141,13 +147,13 @@ class DefaultDeathTestFactory : public DeathTestFactory {
// Returns true if exit_status describes a process that was terminated
// by a signal, or exited normally with a nonzero exit code.
-bool ExitedUnsuccessfully(int exit_status);
+GTEST_API_ bool ExitedUnsuccessfully(int exit_status);
// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
// ASSERT_EXIT*, and EXPECT_EXIT*.
#define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
- if (true) { \
+ if (::testing::internal::AlwaysTrue()) { \
const ::testing::internal::RE& gtest_regex = (regex); \
::testing::internal::DeathTest* gtest_dt; \
if (!::testing::internal::DeathTest::Create(#statement, &gtest_regex, \
@@ -166,7 +172,7 @@ bool ExitedUnsuccessfully(int exit_status);
case ::testing::internal::DeathTest::EXECUTE_TEST: { \
::testing::internal::DeathTest::ReturnSentinel \
gtest_sentinel(gtest_dt); \
- { statement; } \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
break; \
} \
@@ -178,14 +184,35 @@ bool ExitedUnsuccessfully(int exit_status);
// The symbol "fail" here expands to something into which a message
// can be streamed.
-// A struct representing the parsed contents of the
+// A class representing the parsed contents of the
// --gtest_internal_run_death_test flag, as it existed when
// RUN_ALL_TESTS was called.
-struct InternalRunDeathTestFlag {
- String file;
- int line;
- int index;
- int status_fd;
+class InternalRunDeathTestFlag {
+ public:
+ InternalRunDeathTestFlag(const String& a_file,
+ int a_line,
+ int an_index,
+ int a_write_fd)
+ : file_(a_file), line_(a_line), index_(an_index),
+ write_fd_(a_write_fd) {}
+
+ ~InternalRunDeathTestFlag() {
+ if (write_fd_ >= 0)
+ posix::Close(write_fd_);
+ }
+
+ String file() const { return file_; }
+ int line() const { return line_; }
+ int index() const { return index_; }
+ int write_fd() const { return write_fd_; }
+
+ private:
+ String file_;
+ int line_;
+ int index_;
+ int write_fd_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag);
};
// Returns a newly created InternalRunDeathTestFlag object with fields
@@ -193,6 +220,53 @@ struct InternalRunDeathTestFlag {
// the flag is specified; otherwise returns NULL.
InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag();
+#else // GTEST_HAS_DEATH_TEST
+
+// This macro is used for implementing macros such as
+// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where
+// death tests are not supported. Those macros must compile on such systems
+// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on
+// systems that support death tests. This allows one to write such a macro
+// on a system that does not support death tests and be sure that it will
+// compile on a death-test supporting system.
+//
+// Parameters:
+// statement - A statement that a macro such as EXPECT_DEATH would test
+// for program termination. This macro has to make sure this
+// statement is compiled but not executed, to ensure that
+// EXPECT_DEATH_IF_SUPPORTED compiles with a certain
+// parameter iff EXPECT_DEATH compiles with it.
+// regex - A regex that a macro such as EXPECT_DEATH would use to test
+// the output of statement. This parameter has to be
+// compiled but not evaluated by this macro, to ensure that
+// this macro only accepts expressions that a macro such as
+// EXPECT_DEATH would accept.
+// terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED
+// and a return statement for ASSERT_DEATH_IF_SUPPORTED.
+// This ensures that ASSERT_DEATH_IF_SUPPORTED will not
+// compile inside functions where ASSERT_DEATH doesn't
+// compile.
+//
+// The branch that has an always false condition is used to ensure that
+// statement and regex are compiled (and thus syntactically correct) but
+// never executed. The unreachable code macro protects the terminator
+// statement from generating an 'unreachable code' warning in case
+// the statement unconditionally returns or throws. The Message constructor
+// at the end allows the syntax of streaming additional messages into the
+// macro, for compile-time compatibility with EXPECT_DEATH/ASSERT_DEATH.
+#define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ GTEST_LOG_(WARNING) \
+ << "Death tests are not supported on this platform.\n" \
+ << "Statement '" #statement "' cannot be verified."; \
+ } else if (::testing::internal::AlwaysFalse()) { \
+ ::testing::internal::RE::PartialMatch(".*", (regex)); \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ terminator; \
+ } else \
+ ::testing::Message()
+
#endif // GTEST_HAS_DEATH_TEST
} // namespace internal
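For context, a sketch of the user-facing macro this building block enables; EXPECT_DEATH_IF_SUPPORTED is assumed to be defined elsewhere in terms of GTEST_UNSUPPORTED_DEATH_TEST_ on platforms without death-test support (that wiring is not part of this hunk):

#include <stdlib.h>  // for abort()

void CrashOnNull(const int* p) {
  if (p == NULL)
    abort();
}

TEST(NullDeathTest, DiesOnNull) {
  // A real death test where supported; elsewhere it logs a warning while
  // still compiling both the statement and the regex.
  EXPECT_DEATH_IF_SUPPORTED(CrashOnNull(NULL), ".*");
}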
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-filepath.h b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-filepath.h
index 9a0682a..4b76d79 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-filepath.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-filepath.h
@@ -34,7 +34,7 @@
// This header file declares classes and functions used internally by
// Google Test. They are subject to change without notice.
//
-// This file is #included in testing/base/internal/gtest-internal.h
+// This file is #included in <gtest/internal/gtest-internal.h>.
// Do not include this header file separately!
#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
@@ -56,7 +56,7 @@ namespace internal {
// Names are NOT checked for syntax correctness -- no checking for illegal
// characters, malformed paths, etc.
-class FilePath {
+class GTEST_API_ FilePath {
public:
FilePath() : pathname_("") { }
FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { }
@@ -93,6 +93,12 @@ class FilePath {
int number,
const char* extension);
+ // Given directory = "dir", relative_path = "test.xml",
+ // returns "dir/test.xml".
+ // On Windows, uses \ as the separator rather than /.
+ static FilePath ConcatPaths(const FilePath& directory,
+ const FilePath& relative_path);
+
// Returns a pathname for a file that does not currently exist. The pathname
// will be directory/base_name.extension or
// directory/base_name_<number>.extension if directory/base_name.extension
@@ -164,6 +170,9 @@ class FilePath {
// root directory per disk drive.)
bool IsRootDirectory() const;
+ // Returns true if pathname describes an absolute path.
+ bool IsAbsolutePath() const;
+
private:
// Replaces multiple consecutive separators with a single separator.
// For example, "bar///foo" becomes "bar/foo". Does not eliminate other
@@ -180,9 +189,18 @@ class FilePath {
// particular, RemoveTrailingPathSeparator() only removes one separator, and
// it is called in CreateDirectoriesRecursively() assuming that it will change
// a pathname from directory syntax (trailing separator) to filename syntax.
+ //
+ // On Windows this method also replaces the alternate path separator '/' with
+ // the primary path separator '\\', so that for example "bar\\/\\foo" becomes
+ // "bar\\foo".
void Normalize();
+  // Returns a pointer to the last occurrence of a valid path separator in
+ // the FilePath. On Windows, for example, both '/' and '\' are valid path
+ // separators. Returns NULL if no path separator was found.
+ const char* FindLastPathSeparator() const;
+
String pathname_;
}; // class FilePath
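A short usage sketch of the accessors added above; the results follow the comments in the class:

FilePath dir("dir");
FilePath xml(FilePath::ConcatPaths(dir, FilePath("test.xml")));
// xml now holds "dir/test.xml" (or "dir\\test.xml" on Windows), and
// xml.IsAbsolutePath() is false for this relative result.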
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal-inl.h b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal-inl.h
index b8f67c1..855b215 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal-inl.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal-inl.h
@@ -37,50 +37,51 @@
#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_
#define GTEST_SRC_GTEST_INTERNAL_INL_H_
-// GTEST_IMPLEMENTATION is defined iff the current translation unit is
-// part of Google Test's implementation.
-#ifndef GTEST_IMPLEMENTATION
+// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is
+// part of Google Test's implementation; otherwise it's undefined.
+#if !GTEST_IMPLEMENTATION_
// A user is trying to include this from his code - just say no.
#error "gtest-internal-inl.h is part of Google Test's internal implementation."
#error "It must not be included except by Google Test itself."
-#endif // GTEST_IMPLEMENTATION
+#endif // GTEST_IMPLEMENTATION_
+#ifndef _WIN32_WCE
+#include <errno.h>
+#endif // !_WIN32_WCE
#include <stddef.h>
+#include <stdlib.h> // For strtoll/_strtoul64/malloc/free.
+#include <string.h> // For memmove.
+
+#include <algorithm>
+#include <string>
+#include <vector>
#include <gtest/internal/gtest-port.h>
-#ifdef GTEST_OS_WINDOWS
-#include <windows.h> // NOLINT
+#if GTEST_OS_WINDOWS
+#include <windows.h> // For DWORD.
#endif // GTEST_OS_WINDOWS
-#include <gtest/gtest.h>
+#include <gtest/gtest.h> // NOLINT
#include <gtest/gtest-spi.h>
namespace testing {
// Declares the flags.
//
-// We don't want the users to modify these flags in the code, but want
-// Google Test's own unit tests to be able to access them. Therefore we
-// declare them here as opposed to in gtest.h.
-GTEST_DECLARE_bool_(break_on_failure);
-GTEST_DECLARE_bool_(catch_exceptions);
-GTEST_DECLARE_string_(color);
-GTEST_DECLARE_string_(filter);
-GTEST_DECLARE_bool_(list_tests);
-GTEST_DECLARE_string_(output);
-GTEST_DECLARE_bool_(print_time);
-GTEST_DECLARE_int32_(repeat);
-GTEST_DECLARE_int32_(stack_trace_depth);
-GTEST_DECLARE_bool_(show_internal_stack_frames);
+// We don't want the users to modify this flag in the code, but want
+// Google Test's own unit tests to be able to access it. Therefore we
+// declare it here as opposed to in gtest.h.
+GTEST_DECLARE_bool_(death_test_use_fork);
namespace internal {
// The value of GetTestTypeId() as seen from within the Google Test
// library. This is solely for testing GetTestTypeId().
-extern const TypeId kTestTypeIdInGoogleTest;
+GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest;
// Names of the flags (needed for parsing Google Test flags).
+const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests";
const char kBreakOnFailureFlag[] = "break_on_failure";
const char kCatchExceptionsFlag[] = "catch_exceptions";
const char kColorFlag[] = "color";
@@ -88,7 +89,60 @@ const char kFilterFlag[] = "filter";
const char kListTestsFlag[] = "list_tests";
const char kOutputFlag[] = "output";
const char kPrintTimeFlag[] = "print_time";
+const char kRandomSeedFlag[] = "random_seed";
const char kRepeatFlag[] = "repeat";
+const char kShuffleFlag[] = "shuffle";
+const char kStackTraceDepthFlag[] = "stack_trace_depth";
+const char kThrowOnFailureFlag[] = "throw_on_failure";
+
+// A valid random seed must be in [1, kMaxRandomSeed].
+const int kMaxRandomSeed = 99999;
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+GTEST_API_ extern bool g_help_flag;
+
+// Returns the current time in milliseconds.
+GTEST_API_ TimeInMillis GetTimeInMillis();
+
+// Returns true iff Google Test should use colors in the output.
+GTEST_API_ bool ShouldUseColor(bool stdout_is_tty);
+
+// Formats the given time in milliseconds as seconds.
+GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms);
+
+// Parses a string for an Int32 flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+GTEST_API_ bool ParseInt32Flag(
+ const char* str, const char* flag, Int32* value);
+
+// Returns a random seed in range [1, kMaxRandomSeed] based on the
+// given --gtest_random_seed flag value.
+inline int GetRandomSeedFromFlag(Int32 random_seed_flag) {
+ const unsigned int raw_seed = (random_seed_flag == 0) ?
+ static_cast<unsigned int>(GetTimeInMillis()) :
+ static_cast<unsigned int>(random_seed_flag);
+
+ // Normalizes the actual seed to range [1, kMaxRandomSeed] such that
+ // it's easy to type.
+ const int normalized_seed =
+ static_cast<int>((raw_seed - 1U) %
+ static_cast<unsigned int>(kMaxRandomSeed)) + 1;
+ return normalized_seed;
+}
+
+// Returns the first valid random seed after 'seed'. The behavior is
+// undefined if 'seed' is invalid. The seed after kMaxRandomSeed is
+// considered to be 1.
+inline int GetNextRandomSeed(int seed) {
+ GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed)
+ << "Invalid random seed " << seed << " - must be in [1, "
+ << kMaxRandomSeed << "].";
+ const int next_seed = seed + 1;
+ return (next_seed > kMaxRandomSeed) ? 1 : next_seed;
+}
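A quick worked check of the arithmetic above, using kMaxRandomSeed == 99999 as declared:

// GetRandomSeedFromFlag(1)      == (1 - 1)      % 99999 + 1 == 1
// GetRandomSeedFromFlag(99999)  == (99999 - 1)  % 99999 + 1 == 99999
// GetRandomSeedFromFlag(100000) == (100000 - 1) % 99999 + 1 == 1  (wraps)
// GetNextRandomSeed(99999)      == 1  (the seed after kMaxRandomSeed)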
// This class saves the values of all Google Test flags in its c'tor, and
// restores them in its d'tor.
@@ -96,44 +150,62 @@ class GTestFlagSaver {
public:
// The c'tor.
GTestFlagSaver() {
+ also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests);
break_on_failure_ = GTEST_FLAG(break_on_failure);
catch_exceptions_ = GTEST_FLAG(catch_exceptions);
color_ = GTEST_FLAG(color);
death_test_style_ = GTEST_FLAG(death_test_style);
+ death_test_use_fork_ = GTEST_FLAG(death_test_use_fork);
filter_ = GTEST_FLAG(filter);
internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);
list_tests_ = GTEST_FLAG(list_tests);
output_ = GTEST_FLAG(output);
print_time_ = GTEST_FLAG(print_time);
+ random_seed_ = GTEST_FLAG(random_seed);
repeat_ = GTEST_FLAG(repeat);
+ shuffle_ = GTEST_FLAG(shuffle);
+ stack_trace_depth_ = GTEST_FLAG(stack_trace_depth);
+ throw_on_failure_ = GTEST_FLAG(throw_on_failure);
}
// The d'tor is not virtual. DO NOT INHERIT FROM THIS CLASS.
~GTestFlagSaver() {
+ GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_;
GTEST_FLAG(break_on_failure) = break_on_failure_;
GTEST_FLAG(catch_exceptions) = catch_exceptions_;
GTEST_FLAG(color) = color_;
GTEST_FLAG(death_test_style) = death_test_style_;
+ GTEST_FLAG(death_test_use_fork) = death_test_use_fork_;
GTEST_FLAG(filter) = filter_;
GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;
GTEST_FLAG(list_tests) = list_tests_;
GTEST_FLAG(output) = output_;
GTEST_FLAG(print_time) = print_time_;
+ GTEST_FLAG(random_seed) = random_seed_;
GTEST_FLAG(repeat) = repeat_;
+ GTEST_FLAG(shuffle) = shuffle_;
+ GTEST_FLAG(stack_trace_depth) = stack_trace_depth_;
+ GTEST_FLAG(throw_on_failure) = throw_on_failure_;
}
private:
// Fields for saving the original values of flags.
+ bool also_run_disabled_tests_;
bool break_on_failure_;
bool catch_exceptions_;
String color_;
String death_test_style_;
+ bool death_test_use_fork_;
String filter_;
String internal_run_death_test_;
bool list_tests_;
String output_;
bool print_time_;
bool pretty_;
+ internal::Int32 random_seed_;
internal::Int32 repeat_;
+ bool shuffle_;
+ internal::Int32 stack_trace_depth_;
+ bool throw_on_failure_;
} GTEST_ATTRIBUTE_UNUSED_;
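A sketch of the intended RAII usage; gtest's own tests are the expected caller, and the flag choices below are arbitrary:

void RunWithTemporaryFlags() {
  GTestFlagSaver saver;          // c'tor snapshots every flag
  GTEST_FLAG(filter) = "Foo.*";  // mutate freely within this scope
  GTEST_FLAG(repeat) = 3;
}                                // d'tor restores all saved values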
// Converts a Unicode code point to a narrow string in UTF-8 encoding.
@@ -144,7 +216,7 @@ class GTestFlagSaver {
// If the code_point is not a valid Unicode code point
// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output
// as '(Invalid Unicode 0xXXXXXXXX)'.
-char* CodePointToUtf8(UInt32 code_point, char* str);
+GTEST_API_ char* CodePointToUtf8(UInt32 code_point, char* str);
// Converts a wide string to a narrow string in UTF-8 encoding.
// The wide string is assumed to have the following encoding:
@@ -159,300 +231,95 @@ char* CodePointToUtf8(UInt32 code_point, char* str);
// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding
// and contains invalid UTF-16 surrogate pairs, values in those pairs
// will be encoded as individual Unicode characters from the Basic
// Multilingual Plane.
-String WideStringToUtf8(const wchar_t* str, int num_chars);
-
-// Returns the number of active threads, or 0 when there is an error.
-size_t GetThreadCount();
-
-// List is a simple singly-linked list container.
-//
-// We cannot use std::list as Microsoft's implementation of STL has
-// problems when exception is disabled. There is a hack to work
-// around this, but we've seen cases where the hack fails to work.
-//
-// TODO(wan): switch to std::list when we have a reliable fix for the
-// STL problem, e.g. when we upgrade to the next version of Visual
-// C++, or (more likely) switch to STLport.
-//
-// The element type must support copy constructor.
-
-// Forward declare List
-template <typename E> // E is the element type.
-class List;
-
-// ListNode is a node in a singly-linked list. It consists of an
-// element and a pointer to the next node. The last node in the list
-// has a NULL value for its next pointer.
-template <typename E> // E is the element type.
-class ListNode {
- friend class List<E>;
-
- private:
-
- E element_;
- ListNode * next_;
-
- // The c'tor is private s.t. only in the ListNode class and in its
- // friend class List we can create a ListNode object.
- //
- // Creates a node with a given element value. The next pointer is
- // set to NULL.
- //
- // ListNode does NOT have a default constructor. Always use this
- // constructor (with parameter) to create a ListNode object.
- explicit ListNode(const E & element) : element_(element), next_(NULL) {}
-
- // We disallow copying ListNode
- GTEST_DISALLOW_COPY_AND_ASSIGN_(ListNode);
-
- public:
-
- // Gets the element in this node.
- E & element() { return element_; }
- const E & element() const { return element_; }
-
- // Gets the next node in the list.
- ListNode * next() { return next_; }
- const ListNode * next() const { return next_; }
-};
-
-
-// List is a simple singly-linked list container.
-template <typename E> // E is the element type.
-class List {
- public:
-
- // Creates an empty list.
- List() : head_(NULL), last_(NULL), size_(0) {}
-
- // D'tor.
- virtual ~List();
-
- // Clears the list.
- void Clear() {
- if ( size_ > 0 ) {
- // 1. Deletes every node.
- ListNode<E> * node = head_;
- ListNode<E> * next = node->next();
- for ( ; ; ) {
- delete node;
- node = next;
- if ( node == NULL ) break;
- next = node->next();
- }
-
- // 2. Resets the member variables.
- head_ = last_ = NULL;
- size_ = 0;
- }
- }
-
- // Gets the number of elements.
- int size() const { return size_; }
-
- // Returns true if the list is empty.
- bool IsEmpty() const { return size() == 0; }
-
- // Gets the first element of the list, or NULL if the list is empty.
- ListNode<E> * Head() { return head_; }
- const ListNode<E> * Head() const { return head_; }
-
- // Gets the last element of the list, or NULL if the list is empty.
- ListNode<E> * Last() { return last_; }
- const ListNode<E> * Last() const { return last_; }
-
- // Adds an element to the end of the list. A copy of the element is
- // created using the copy constructor, and then stored in the list.
- // Changes made to the element in the list doesn't affect the source
- // object, and vice versa.
- void PushBack(const E & element) {
- ListNode<E> * new_node = new ListNode<E>(element);
-
- if ( size_ == 0 ) {
- head_ = last_ = new_node;
- size_ = 1;
- } else {
- last_->next_ = new_node;
- last_ = new_node;
- size_++;
- }
- }
-
- // Adds an element to the beginning of this list.
- void PushFront(const E& element) {
- ListNode<E>* const new_node = new ListNode<E>(element);
-
- if ( size_ == 0 ) {
- head_ = last_ = new_node;
- size_ = 1;
- } else {
- new_node->next_ = head_;
- head_ = new_node;
- size_++;
- }
- }
-
- // Removes an element from the beginning of this list. If the
- // result argument is not NULL, the removed element is stored in the
- // memory it points to. Otherwise the element is thrown away.
- // Returns true iff the list wasn't empty before the operation.
- bool PopFront(E* result) {
- if (size_ == 0) return false;
-
- if (result != NULL) {
- *result = head_->element_;
- }
-
- ListNode<E>* const old_head = head_;
- size_--;
- if (size_ == 0) {
- head_ = last_ = NULL;
- } else {
- head_ = head_->next_;
- }
- delete old_head;
-
- return true;
- }
-
- // Inserts an element after a given node in the list. It's the
- // caller's responsibility to ensure that the given node is in the
- // list. If the given node is NULL, inserts the element at the
- // front of the list.
- ListNode<E>* InsertAfter(ListNode<E>* node, const E& element) {
- if (node == NULL) {
- PushFront(element);
- return Head();
- }
-
- ListNode<E>* const new_node = new ListNode<E>(element);
- new_node->next_ = node->next_;
- node->next_ = new_node;
- size_++;
- if (node == last_) {
- last_ = new_node;
- }
-
- return new_node;
- }
-
- // Returns the number of elements that satisfy a given predicate.
- // The parameter 'predicate' is a Boolean function or functor that
- // accepts a 'const E &', where E is the element type.
- template <typename P> // P is the type of the predicate function/functor
- int CountIf(P predicate) const {
- int count = 0;
- for ( const ListNode<E> * node = Head();
- node != NULL;
- node = node->next() ) {
- if ( predicate(node->element()) ) {
- count++;
- }
- }
-
- return count;
- }
-
- // Applies a function/functor to each element in the list. The
- // parameter 'functor' is a function/functor that accepts a 'const
- // E &', where E is the element type. This method does not change
- // the elements.
- template <typename F> // F is the type of the function/functor
- void ForEach(F functor) const {
- for ( const ListNode<E> * node = Head();
- node != NULL;
- node = node->next() ) {
- functor(node->element());
- }
- }
-
- // Returns the first node whose element satisfies a given predicate,
- // or NULL if none is found. The parameter 'predicate' is a
- // function/functor that accepts a 'const E &', where E is the
- // element type. This method does not change the elements.
- template <typename P> // P is the type of the predicate function/functor.
- const ListNode<E> * FindIf(P predicate) const {
- for ( const ListNode<E> * node = Head();
- node != NULL;
- node = node->next() ) {
- if ( predicate(node->element()) ) {
- return node;
- }
- }
+GTEST_API_ String WideStringToUtf8(const wchar_t* str, int num_chars);
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded();
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (e.g., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+GTEST_API_ bool ShouldShard(const char* total_shards_str,
+ const char* shard_index_str,
+ bool in_subprocess_for_death_test);
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error
+// and aborts.
+GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val);
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+GTEST_API_ bool ShouldRunTestOnShard(
+ int total_shards, int shard_index, int test_id);
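A sketch of how these declarations combine in a shard-aware runner; the GTEST_TOTAL_SHARDS / GTEST_SHARD_INDEX variable names and the round-robin predicate are assumptions about the implementation, which this header only declares:

void RunShard() {
  // Hypothetical invocation: GTEST_TOTAL_SHARDS=5 GTEST_SHARD_INDEX=2 ./tests
  const Int32 total_shards = Int32FromEnvOrDie("GTEST_TOTAL_SHARDS", 1);
  const Int32 shard_index  = Int32FromEnvOrDie("GTEST_SHARD_INDEX", 0);
  for (int test_id = 0; test_id < 100; test_id++) {
    // A round-robin rule such as (test_id % total_shards == shard_index)
    // would satisfy the contract documented above.
    if (ShouldRunTestOnShard(total_shards, shard_index, test_id)) {
      // ... run the test with this id ...
    }
  }
}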
+
+// STL container utilities.
+
+// Returns the number of elements in the given container that satisfy
+// the given predicate.
+template <class Container, typename Predicate>
+inline int CountIf(const Container& c, Predicate predicate) {
+ return static_cast<int>(std::count_if(c.begin(), c.end(), predicate));
+}
- return NULL;
- }
+// Applies a function/functor to each element in the container.
+template <class Container, typename Functor>
+void ForEach(const Container& c, Functor functor) {
+ std::for_each(c.begin(), c.end(), functor);
+}
- template <typename P>
- ListNode<E> * FindIf(P predicate) {
- for ( ListNode<E> * node = Head();
- node != NULL;
- node = node->next() ) {
- if ( predicate(node->element() ) ) {
- return node;
- }
- }
+// Returns the i-th element of the vector, or default_value if i is not
+// in range [0, v.size()).
+template <typename E>
+inline E GetElementOr(const std::vector<E>& v, int i, E default_value) {
+ return (i < 0 || i >= static_cast<int>(v.size())) ? default_value : v[i];
+}
- return NULL;
+// Performs an in-place shuffle of a range of the vector's elements.
+// 'begin' and 'end' are element indices as an STL-style range;
+// i.e. [begin, end) are shuffled, where 'end' == size() means to
+// shuffle to the end of the vector.
+template <typename E>
+void ShuffleRange(internal::Random* random, int begin, int end,
+ std::vector<E>* v) {
+ const int size = static_cast<int>(v->size());
+ GTEST_CHECK_(0 <= begin && begin <= size)
+ << "Invalid shuffle range start " << begin << ": must be in range [0, "
+ << size << "].";
+ GTEST_CHECK_(begin <= end && end <= size)
+ << "Invalid shuffle range finish " << end << ": must be in range ["
+ << begin << ", " << size << "].";
+
+ // Fisher-Yates shuffle, from
+ // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
+ for (int range_width = end - begin; range_width >= 2; range_width--) {
+ const int last_in_range = begin + range_width - 1;
+ const int selected = begin + random->Generate(range_width);
+ std::swap((*v)[selected], (*v)[last_in_range]);
}
+}
- private:
- ListNode<E>* head_; // The first node of the list.
- ListNode<E>* last_; // The last node of the list.
- int size_; // The number of elements in the list.
-
- // We disallow copying List.
- GTEST_DISALLOW_COPY_AND_ASSIGN_(List);
-};
-
-// The virtual destructor of List.
+// Performs an in-place shuffle of the vector's elements.
template <typename E>
-List<E>::~List() {
- Clear();
+inline void Shuffle(internal::Random* random, std::vector<E>* v) {
+ ShuffleRange(random, 0, static_cast<int>(v->size()), v);
}
// A function for deleting an object. Handy for being used as a
// functor.
template <typename T>
-static void Delete(T * x) {
+static void Delete(T* x) {
delete x;
}
-// A copyable object representing a user specified test property which can be
-// output as a key/value string pair.
-//
-// Don't inherit from TestProperty as its destructor is not virtual.
-class TestProperty {
- public:
- // C'tor. TestProperty does NOT have a default constructor.
- // Always use this constructor (with parameters) to create a
- // TestProperty object.
- TestProperty(const char* key, const char* value) :
- key_(key), value_(value) {
- }
-
- // Gets the user supplied key.
- const char* key() const {
- return key_.c_str();
- }
-
- // Gets the user supplied value.
- const char* value() const {
- return value_.c_str();
- }
-
- // Sets a new value, overriding the one supplied in the constructor.
- void SetValue(const char* new_value) {
- value_ = new_value;
- }
-
- private:
- // The key supplied by the user.
- String key_;
- // The value supplied by the user.
- String value_;
-};
-
// A predicate that checks the key of a TestProperty against a known key.
//
// TestPropertyKeyIs is copyable.
@@ -473,96 +340,6 @@ class TestPropertyKeyIs {
String key_;
};
-// The result of a single Test. This includes a list of
-// TestPartResults, a list of TestProperties, a count of how many
-// death tests there are in the Test, and how much time it took to run
-// the Test.
-//
-// TestResult is not copyable.
-class TestResult {
- public:
- // Creates an empty TestResult.
- TestResult();
-
- // D'tor. Do not inherit from TestResult.
- ~TestResult();
-
- // Gets the list of TestPartResults.
- const internal::List<TestPartResult> & test_part_results() const {
- return test_part_results_;
- }
-
- // Gets the list of TestProperties.
- const internal::List<internal::TestProperty> & test_properties() const {
- return test_properties_;
- }
-
- // Gets the number of successful test parts.
- int successful_part_count() const;
-
- // Gets the number of failed test parts.
- int failed_part_count() const;
-
- // Gets the number of all test parts. This is the sum of the number
- // of successful test parts and the number of failed test parts.
- int total_part_count() const;
-
- // Returns true iff the test passed (i.e. no test part failed).
- bool Passed() const { return !Failed(); }
-
- // Returns true iff the test failed.
- bool Failed() const { return failed_part_count() > 0; }
-
- // Returns true iff the test fatally failed.
- bool HasFatalFailure() const;
-
- // Returns the elapsed time, in milliseconds.
- TimeInMillis elapsed_time() const { return elapsed_time_; }
-
- // Sets the elapsed time.
- void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }
-
- // Adds a test part result to the list.
- void AddTestPartResult(const TestPartResult& test_part_result);
-
- // Adds a test property to the list. The property is validated and may add
- // a non-fatal failure if invalid (e.g., if it conflicts with reserved
- // key names). If a property is already recorded for the same key, the
- // value will be updated, rather than storing multiple values for the same
- // key.
- void RecordProperty(const internal::TestProperty& test_property);
-
- // Adds a failure if the key is a reserved attribute of Google Test
- // testcase tags. Returns true if the property is valid.
- // TODO(russr): Validate attribute names are legal and human readable.
- static bool ValidateTestProperty(const internal::TestProperty& test_property);
-
- // Returns the death test count.
- int death_test_count() const { return death_test_count_; }
-
- // Increments the death test count, returning the new count.
- int increment_death_test_count() { return ++death_test_count_; }
-
- // Clears the object.
- void Clear();
- private:
- // Protects mutable state of the property list and of owned properties, whose
- // values may be updated.
- internal::Mutex test_properites_mutex_;
-
- // The list of TestPartResults
- internal::List<TestPartResult> test_part_results_;
- // The list of TestProperties
- internal::List<internal::TestProperty> test_properties_;
- // Running count of death tests.
- int death_test_count_;
- // The elapsed time, in milliseconds.
- TimeInMillis elapsed_time_;
-
- // We disallow copying TestResult.
- GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult);
-}; // class TestResult
-
class TestInfoImpl {
public:
TestInfoImpl(TestInfo* parent, const char* test_case_name,
@@ -583,6 +360,12 @@ class TestInfoImpl {
// Sets the is_disabled member.
void set_is_disabled(bool is) { is_disabled_ = is; }
+ // Returns true if this test matches the filter specified by the user.
+ bool matches_filter() const { return matches_filter_; }
+
+ // Sets the matches_filter member.
+ void set_matches_filter(bool matches) { matches_filter_ = matches; }
+
// Returns the test case name.
const char* test_case_name() const { return test_case_name_.c_str(); }
@@ -599,18 +382,13 @@ class TestInfoImpl {
TypeId fixture_class_id() const { return fixture_class_id_; }
// Returns the test result.
- internal::TestResult* result() { return &result_; }
- const internal::TestResult* result() const { return &result_; }
+ TestResult* result() { return &result_; }
+ const TestResult* result() const { return &result_; }
// Creates the test object, runs it, records its result, and then
// deletes it.
void Run();
- // Calls the given TestInfo object's Run() method.
- static void RunTest(TestInfo * test_info) {
- test_info->impl()->Run();
- }
-
// Clears the test result.
void ClearResult() { result_.Clear(); }
@@ -629,150 +407,18 @@ class TestInfoImpl {
const TypeId fixture_class_id_; // ID of the test fixture class
bool should_run_; // True iff this test should run
bool is_disabled_; // True iff this test is disabled
+ bool matches_filter_; // True if this test matches the
+ // user-specified filter.
internal::TestFactoryBase* const factory_; // The factory that creates
// the test object
// This field is mutable and needs to be reset before running the
// test for the second time.
- internal::TestResult result_;
+ TestResult result_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfoImpl);
};
-} // namespace internal
-
-// A test case, which consists of a list of TestInfos.
-//
-// TestCase is not copyable.
-class TestCase {
- public:
- // Creates a TestCase with the given name.
- //
- // TestCase does NOT have a default constructor. Always use this
- // constructor to create a TestCase object.
- //
- // Arguments:
- //
- // name: name of the test case
- // set_up_tc: pointer to the function that sets up the test case
- // tear_down_tc: pointer to the function that tears down the test case
- TestCase(const char* name, const char* comment,
- Test::SetUpTestCaseFunc set_up_tc,
- Test::TearDownTestCaseFunc tear_down_tc);
-
- // Destructor of TestCase.
- virtual ~TestCase();
-
- // Gets the name of the TestCase.
- const char* name() const { return name_.c_str(); }
-
- // Returns the test case comment.
- const char* comment() const { return comment_.c_str(); }
-
- // Returns true if any test in this test case should run.
- bool should_run() const { return should_run_; }
-
- // Sets the should_run member.
- void set_should_run(bool should) { should_run_ = should; }
-
- // Gets the (mutable) list of TestInfos in this TestCase.
- internal::List<TestInfo*>& test_info_list() { return *test_info_list_; }
-
- // Gets the (immutable) list of TestInfos in this TestCase.
- const internal::List<TestInfo *> & test_info_list() const {
- return *test_info_list_;
- }
-
- // Gets the number of successful tests in this test case.
- int successful_test_count() const;
-
- // Gets the number of failed tests in this test case.
- int failed_test_count() const;
-
- // Gets the number of disabled tests in this test case.
- int disabled_test_count() const;
-
- // Get the number of tests in this test case that should run.
- int test_to_run_count() const;
-
- // Gets the number of all tests in this test case.
- int total_test_count() const;
-
- // Returns true iff the test case passed.
- bool Passed() const { return !Failed(); }
-
- // Returns true iff the test case failed.
- bool Failed() const { return failed_test_count() > 0; }
-
- // Returns the elapsed time, in milliseconds.
- internal::TimeInMillis elapsed_time() const { return elapsed_time_; }
-
- // Adds a TestInfo to this test case. Will delete the TestInfo upon
- // destruction of the TestCase object.
- void AddTestInfo(TestInfo * test_info);
-
- // Finds and returns a TestInfo with the given name. If one doesn't
- // exist, returns NULL.
- TestInfo* GetTestInfo(const char* test_name);
-
- // Clears the results of all tests in this test case.
- void ClearResult();
-
- // Clears the results of all tests in the given test case.
- static void ClearTestCaseResult(TestCase* test_case) {
- test_case->ClearResult();
- }
-
- // Runs every test in this TestCase.
- void Run();
-
- // Runs every test in the given TestCase.
- static void RunTestCase(TestCase * test_case) { test_case->Run(); }
-
- // Returns true iff test passed.
- static bool TestPassed(const TestInfo * test_info) {
- const internal::TestInfoImpl* const impl = test_info->impl();
- return impl->should_run() && impl->result()->Passed();
- }
-
- // Returns true iff test failed.
- static bool TestFailed(const TestInfo * test_info) {
- const internal::TestInfoImpl* const impl = test_info->impl();
- return impl->should_run() && impl->result()->Failed();
- }
-
- // Returns true iff test is disabled.
- static bool TestDisabled(const TestInfo * test_info) {
- return test_info->impl()->is_disabled();
- }
-
- // Returns true if the given test should run.
- static bool ShouldRunTest(const TestInfo *test_info) {
- return test_info->impl()->should_run();
- }
-
- private:
- // Name of the test case.
- internal::String name_;
- // Comment on the test case.
- internal::String comment_;
- // List of TestInfos.
- internal::List<TestInfo*>* test_info_list_;
- // Pointer to the function that sets up the test case.
- Test::SetUpTestCaseFunc set_up_tc_;
- // Pointer to the function that tears down the test case.
- Test::TearDownTestCaseFunc tear_down_tc_;
- // True iff any test in this test case should run.
- bool should_run_;
- // Elapsed time, in milliseconds.
- internal::TimeInMillis elapsed_time_;
-
- // We disallow copying TestCases.
- GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase);
-};
-
-namespace internal {
-
// Class UnitTestOptions.
//
// This class contains functions for processing options the user
@@ -783,16 +429,17 @@ namespace internal {
// test filter using either GTEST_FILTER or --gtest_filter. If both
// the variable and the flag are present, the latter overrides the
// former.
-class UnitTestOptions {
+class GTEST_API_ UnitTestOptions {
public:
// Functions for processing the gtest_output flag.
// Returns the output format, or "" for normal printed output.
static String GetOutputFormat();
- // Returns the name of the requested output file, or the default if none
- // was explicitly specified.
- static String GetOutputFile();
+ // Returns the absolute path of the requested output file, or the
+ // default (test_detail.xml in the original working directory) if
+ // none was explicitly specified.
+ static String GetAbsolutePathToOutputFile();
// Functions for processing the gtest_filter flag.
@@ -808,7 +455,7 @@ class UnitTestOptions {
static bool FilterMatchesTest(const String &test_case_name,
const String &test_name);
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
// Function for supporting the gtest_catch_exception flag.
// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
@@ -824,7 +471,7 @@ class UnitTestOptions {
// Returns the current application's name, removing directory path if that
// is present. Used by UnitTestOptions::GetAbsolutePathToOutputFile.
-FilePath GetCurrentExecutableName();
+GTEST_API_ FilePath GetCurrentExecutableName();
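Many declarations in these hunks gain a GTEST_API_ prefix. That is the usual
import/export annotation needed once Google Test can be built as a shared
library on Windows. A minimal sketch of how such a macro is typically defined
(the real definition lives in gtest-port.h, outside this diff, so the guard
names here are assumptions):

#if defined(_MSC_VER) && defined(GTEST_CREATE_SHARED_LIBRARY)
# define GTEST_API_ __declspec(dllexport)  // building gtest.dll itself
#elif defined(_MSC_VER) && defined(GTEST_LINKED_AS_SHARED_LIBRARY)
# define GTEST_API_ __declspec(dllimport)  // consuming gtest.dll
#else
# define GTEST_API_                        // static builds need no decoration
#endif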
// The role interface for getting the OS stack trace as a string.
class OsStackTraceGetterInterface {
@@ -852,7 +499,7 @@ class OsStackTraceGetterInterface {
// A working implementation of the OsStackTraceGetterInterface interface.
class OsStackTraceGetter : public OsStackTraceGetterInterface {
public:
- OsStackTraceGetter() {}
+ OsStackTraceGetter() : caller_frame_(NULL) {}
virtual String CurrentStackTrace(int max_depth, int skip_count);
virtual void UponLeavingGTest();
@@ -891,6 +538,8 @@ class DefaultGlobalTestPartResultReporter
private:
UnitTestImpl* const unit_test_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter);
};
// This is the default per thread test part result reporter used in
@@ -905,13 +554,15 @@ class DefaultPerThreadTestPartResultReporter
private:
UnitTestImpl* const unit_test_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter);
};
// The private implementation of the UnitTest class. We don't protect
// the methods under a mutex, as this class is not accessible by a
// user and the UnitTest class that delegates work to this class does
// proper locking.
-class UnitTestImpl {
+class GTEST_API_ UnitTestImpl {
public:
explicit UnitTestImpl(UnitTest* parent);
virtual ~UnitTestImpl();
@@ -977,26 +628,29 @@ class UnitTestImpl {
return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed();
}
- // Returns the TestResult for the test that's currently running, or
- // the TestResult for the ad hoc test if no test is running.
- internal::TestResult* current_test_result();
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ const TestCase* GetTestCase(int i) const {
+ const int index = GetElementOr(test_case_indices_, i, -1);
+ return index < 0 ? NULL : test_cases_[index];
+ }
- // Returns the TestResult for the ad hoc test.
- const internal::TestResult* ad_hoc_test_result() const {
- return &ad_hoc_test_result_;
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ TestCase* GetMutableTestCase(int i) {
+ const int index = GetElementOr(test_case_indices_, i, -1);
+ return index < 0 ? NULL : test_cases_[index];
}
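Both accessors funnel the caller's index through GetElementOr, a small helper
whose contract can be inferred from its use here: return the i-th element of
the vector, or the supplied default when i is out of range. A sketch under
that assumption:

template <typename E>
inline E GetElementOr(const std::vector<E>& v, int i, E default_value) {
  // A negative or too-large index yields the caller-provided default.
  return (i < 0 || i >= static_cast<int>(v.size())) ? default_value : v[i];
}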
- // Sets the unit test result printer.
- //
- // Does nothing if the input and the current printer object are the
- // same; otherwise, deletes the old printer object and makes the
- // input the current printer.
- void set_result_printer(UnitTestEventListenerInterface * result_printer);
+ // Provides access to the event listener list.
+ TestEventListeners* listeners() { return &listeners_; }
+
+ // Returns the TestResult for the test that's currently running, or
+ // the TestResult for the ad hoc test if no test is running.
+ TestResult* current_test_result();
- // Returns the current unit test result printer if it is not NULL;
- // otherwise, creates an appropriate result printer, makes it the
- // current printer, and returns it.
- UnitTestEventListenerInterface* result_printer();
+ // Returns the TestResult for the ad hoc test.
+ const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; }
// Sets the OS stack trace getter.
//
@@ -1054,10 +708,8 @@ class UnitTestImpl {
// before main() is reached.
if (original_working_dir_.IsEmpty()) {
original_working_dir_.Set(FilePath::GetCurrentDir());
- if (original_working_dir_.IsEmpty()) {
- printf("%s\n", "Failed to get the current working directory.");
- abort();
- }
+ GTEST_CHECK_(!original_working_dir_.IsEmpty())
+ << "Failed to get the current working directory.";
}
GetTestCase(test_info->test_case_name(),
@@ -1066,7 +718,7 @@ class UnitTestImpl {
tear_down_tc)->AddTestInfo(test_info);
}
-#ifdef GTEST_HAS_PARAM_TEST
+#if GTEST_HAS_PARAM_TEST
// Returns ParameterizedTestCaseRegistry object used to keep track of
// value-parameterized tests and instantiate and register them.
internal::ParameterizedTestCaseRegistry& parameterized_test_registry() {
@@ -1075,15 +727,15 @@ class UnitTestImpl {
#endif // GTEST_HAS_PARAM_TEST
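Note the recurring #ifdef -> #if change throughout this diff. The implied
convention: the GTEST_HAS_* feature macros are now defined to 0 or 1 by
gtest-port.h in every configuration, and platform macros such as
GTEST_OS_WINDOWS can rely on the preprocessor evaluating undefined names as 0
inside #if, so testing the value works in both cases. A sketch of the assumed
port-header convention:

// gtest-port.h (assumed): always define feature macros to 0 or 1.
#ifndef GTEST_HAS_PARAM_TEST
# define GTEST_HAS_PARAM_TEST 1  // 0 where the needed support is missing
#endif

#if GTEST_HAS_PARAM_TEST  // reads the value; #ifdef would test mere presence
// ... value-parameterized test machinery ...
#endif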
// Sets the TestCase object for the test that's currently running.
- void set_current_test_case(TestCase* current_test_case) {
- current_test_case_ = current_test_case;
+ void set_current_test_case(TestCase* a_current_test_case) {
+ current_test_case_ = a_current_test_case;
}
// Sets the TestInfo object for the test that's currently running. If
// current_test_info is NULL, the assertion results will be stored in
// ad_hoc_test_result_.
- void set_current_test_info(TestInfo* current_test_info) {
- current_test_info_ = current_test_info;
+ void set_current_test_info(TestInfo* a_current_test_info) {
+ current_test_info_ = a_current_test_info;
}
// Registers all parameterized tests defined using TEST_P and
@@ -1104,45 +756,50 @@ class UnitTestImpl {
// Clears the results of all tests, including the ad hoc test.
void ClearResult() {
- test_cases_.ForEach(TestCase::ClearTestCaseResult);
+ ForEach(test_cases_, TestCase::ClearTestCaseResult);
ad_hoc_test_result_.Clear();
}
+ enum ReactionToSharding {
+ HONOR_SHARDING_PROTOCOL,
+ IGNORE_SHARDING_PROTOCOL
+ };
+
// Matches the full name of each test against the user-specified
// filter to decide whether the test should run, then records the
// result in each TestCase and TestInfo object.
+ // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests
+ // based on sharding variables in the environment.
// Returns the number of tests that should run.
- int FilterTests();
+ int FilterTests(ReactionToSharding shard_tests);
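HONOR_SHARDING_PROTOCOL tells FilterTests to consult the sharding environment
variables (GTEST_TOTAL_SHARDS and GTEST_SHARD_INDEX in stock Google Test) so
that N parallel workers each run a disjoint slice of the suite, while paths
that must see every test (presumably the death-test child process, for one)
pass IGNORE_SHARDING_PROTOCOL. A sketch of the selection rule, assuming the
standard round-robin protocol:

// Test number test_id (counted across all selected tests) runs on the
// shard whose index equals test_id modulo the shard count.
inline bool ShouldRunTestOnShard(int total_shards, int shard_index,
                                 int test_id) {
  return (test_id % total_shards) == shard_index;
}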
- // Lists all the tests by name.
- void ListAllTests();
+ // Prints the names of the tests matching the user-specified filter flag.
+ void ListTestsMatchingFilter();
const TestCase* current_test_case() const { return current_test_case_; }
TestInfo* current_test_info() { return current_test_info_; }
const TestInfo* current_test_info() const { return current_test_info_; }
- // Returns the list of environments that need to be set-up/torn-down
+ // Returns the vector of environments that need to be set-up/torn-down
// before/after the tests are run.
- internal::List<Environment*>* environments() { return &environments_; }
- internal::List<Environment*>* environments_in_reverse_order() {
- return &environments_in_reverse_order_;
- }
-
- internal::List<TestCase*>* test_cases() { return &test_cases_; }
- const internal::List<TestCase*>* test_cases() const { return &test_cases_; }
+ std::vector<Environment*>& environments() { return environments_; }
// Getters for the per-thread Google Test trace stack.
- internal::List<TraceInfo>* gtest_trace_stack() {
- return gtest_trace_stack_.pointer();
+ std::vector<TraceInfo>& gtest_trace_stack() {
+ return *(gtest_trace_stack_.pointer());
}
- const internal::List<TraceInfo>* gtest_trace_stack() const {
- return gtest_trace_stack_.pointer();
+ const std::vector<TraceInfo>& gtest_trace_stack() const {
+ return gtest_trace_stack_.get();
}
-#ifdef GTEST_HAS_DEATH_TEST
+#if GTEST_HAS_DEATH_TEST
+ void InitDeathTestSubprocessControlInfo() {
+ internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag());
+ }
// Returns a pointer to the parsed --gtest_internal_run_death_test
// flag, or NULL if that flag was not specified.
// This information is useful only in a death test child process.
+ // Must not be called before a call to InitGoogleTest.
const InternalRunDeathTestFlag* internal_run_death_test_flag() const {
return internal_run_death_test_flag_.get();
}
@@ -1152,9 +809,35 @@ class UnitTestImpl {
return death_test_factory_.get();
}
+ void SuppressTestEventsIfInSubprocess();
+
friend class ReplaceDeathTestFactory;
#endif // GTEST_HAS_DEATH_TEST
+ // Initializes the event listener performing XML output as specified by
+ // UnitTestOptions. Must not be called before InitGoogleTest.
+ void ConfigureXmlOutput();
+
+ // Performs initialization dependent upon flag values obtained in
+ // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to
+ // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest
+ // this function is also called from RunAllTests. Since this function can be
+ // called more than once, it has to be idempotent.
+ void PostFlagParsingInit();
+
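Because PostFlagParsingInit can be reached both from InitGoogleTest and from
RunAllTests, a natural implementation guards itself with the
post_flag_parse_init_performed_ member introduced further down in this diff.
A sketch (the real body is in gtest.cc, outside this diff):

void UnitTestImpl::PostFlagParsingInit() {
  if (!post_flag_parse_init_performed_) {
    post_flag_parse_init_performed_ = true;  // makes repeat calls no-ops
    // ... e.g. SuppressTestEventsIfInSubprocess(), ConfigureXmlOutput() ...
  }
}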
+ // Gets the random seed used at the start of the current test iteration.
+ int random_seed() const { return random_seed_; }
+
+ // Gets the random number generator.
+ internal::Random* random() { return &random_; }
+
+ // Shuffles all test cases, and the tests within each test case,
+ // making sure that death tests are still run first.
+ void ShuffleTests();
+
+ // Restores the test cases and tests to their order before the first shuffle.
+ void UnshuffleTests();
+
private:
friend class ::testing::UnitTest;
@@ -1180,15 +863,21 @@ class UnitTestImpl {
internal::ThreadLocal<TestPartResultReporterInterface*>
per_thread_test_part_result_reporter_;
- // The list of environments that need to be set-up/torn-down
- // before/after the tests are run. environments_in_reverse_order_
- // simply mirrors environments_ in reverse order.
- internal::List<Environment*> environments_;
- internal::List<Environment*> environments_in_reverse_order_;
+ // The vector of environments that need to be set-up/torn-down
+ // before/after the tests are run.
+ std::vector<Environment*> environments_;
+
+ // The vector of TestCases in their original order. It owns the
+ // elements in the vector.
+ std::vector<TestCase*> test_cases_;
- internal::List<TestCase*> test_cases_; // The list of TestCases.
+ // Provides a level of indirection for the test case list to allow
+ // easy shuffling and restoring the test case order. The i-th
+ // element of this vector is the index of the i-th test case in the
+ // shuffled order.
+ std::vector<int> test_case_indices_;
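Shuffling never moves the TestCase objects themselves: ShuffleTests presumably
permutes only the test_case_indices_ vector declared just above, and
UnshuffleTests refills it with 0, 1, 2, ... A minimal sketch of the idea,
using the Random member added elsewhere in this diff:

// Fisher-Yates over the index vector; test_cases_ itself stays put.
inline void ShuffleIndices(internal::Random* random,
                           std::vector<int>* indices) {
  for (int i = static_cast<int>(indices->size()) - 1; i > 0; i--) {
    const int j = static_cast<int>(random->Generate(i + 1));  // in [0, i]
    std::swap((*indices)[i], (*indices)[j]);
  }
}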
-#ifdef GTEST_HAS_PARAM_TEST
+#if GTEST_HAS_PARAM_TEST
// ParameterizedTestRegistry object used to register value-parameterized
// tests.
internal::ParameterizedTestCaseRegistry parameterized_test_registry_;
@@ -1197,13 +886,13 @@ class UnitTestImpl {
bool parameterized_tests_registered_;
#endif // GTEST_HAS_PARAM_TEST
- // Points to the last death test case registered. Initially NULL.
- internal::ListNode<TestCase*>* last_death_test_case_;
+ // Index of the last death test case registered. Initially -1.
+ int last_death_test_case_;
// This points to the TestCase for the currently running test. It
// changes as Google Test goes through one test case after another.
// When no test is running, this is set to NULL and Google Test
- // stores assertion results in ad_hoc_test_result_. Initally NULL.
+ // stores assertion results in ad_hoc_test_result_. Initially NULL.
TestCase* current_test_case_;
// This points to the TestInfo for the currently running test. It
@@ -1220,13 +909,11 @@ class UnitTestImpl {
// If an assertion is encountered when no TEST or TEST_F is running,
// Google Test attributes the assertion result to an imaginary "ad hoc"
// test, and records the result in ad_hoc_test_result_.
- internal::TestResult ad_hoc_test_result_;
+ TestResult ad_hoc_test_result_;
- // The unit test result printer. Will be deleted when the UnitTest
- // object is destructed. By default, a plain text printer is used,
- // but the user can set this field to use a custom printer if that
- // is desired.
- UnitTestEventListenerInterface* result_printer_;
+ // The list of event listeners that can be used to track events inside
+ // Google Test.
+ TestEventListeners listeners_;
// The OS stack trace getter. Will be deleted when the UnitTest
// object is destructed. By default, an OsStackTraceGetter is used,
@@ -1234,10 +921,19 @@ class UnitTestImpl {
// desired.
OsStackTraceGetterInterface* os_stack_trace_getter_;
+ // True iff PostFlagParsingInit() has been called.
+ bool post_flag_parse_init_performed_;
+
+ // The random number seed used at the beginning of the test run.
+ int random_seed_;
+
+ // Our random number generator.
+ internal::Random random_;
+
// How long the test took to run, in milliseconds.
TimeInMillis elapsed_time_;
-#ifdef GTEST_HAS_DEATH_TEST
+#if GTEST_HAS_DEATH_TEST
// The decomposed components of the gtest_internal_run_death_test flag,
// parsed when RUN_ALL_TESTS is called.
internal::scoped_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_;
@@ -1245,7 +941,7 @@ class UnitTestImpl {
#endif // GTEST_HAS_DEATH_TEST
// A per-thread stack of traces created by the SCOPED_TRACE() macro.
- internal::ThreadLocal<internal::List<TraceInfo> > gtest_trace_stack_;
+ internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_;
GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl);
}; // class UnitTestImpl
@@ -1256,10 +952,121 @@ inline UnitTestImpl* GetUnitTestImpl() {
return UnitTest::GetInstance()->impl();
}
+// Internal helper functions for implementing the simple regular
+// expression matcher.
+GTEST_API_ bool IsInSet(char ch, const char* str);
+GTEST_API_ bool IsDigit(char ch);
+GTEST_API_ bool IsPunct(char ch);
+GTEST_API_ bool IsRepeat(char ch);
+GTEST_API_ bool IsWhiteSpace(char ch);
+GTEST_API_ bool IsWordChar(char ch);
+GTEST_API_ bool IsValidEscape(char ch);
+GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch);
+GTEST_API_ bool ValidateRegex(const char* regex);
+GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str);
+GTEST_API_ bool MatchRepetitionAndRegexAtHead(
+ bool escaped, char ch, char repeat, const char* regex, const char* str);
+GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str);
+
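These declarations make up Google Test's deliberately tiny regular-expression
matcher, used where POSIX regex is unavailable (death-test matchers in
particular). Judging from the helper names, the dialect covers
single-character atoms, escapes such as \d and \w, the repetition operators
?, * and +, and head-anchored matching. Some illustrative expectations
(hedged; inferred from the names above, not from this diff):

// MatchRegexAtHead("ab", "abc")        -> true  (match must start at str)
// MatchRegexAnywhere("a.c", "xxabcxx") -> true  ('.' matches any character)
// MatchRegexAnywhere("\\d+", "abc")    -> false (no digit to repeat)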
// Parses the command line for Google Test flags, without initializing
// other parts of Google Test.
-void ParseGoogleTestFlagsOnly(int* argc, char** argv);
-void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv);
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);
+
+#if GTEST_HAS_DEATH_TEST
+
+// Returns the message describing the last system error, regardless of the
+// platform.
+String GetLastErrnoDescription();
+
+#if GTEST_OS_WINDOWS
+// Provides leak-safe Windows kernel handle ownership.
+class AutoHandle {
+ public:
+ AutoHandle() : handle_(INVALID_HANDLE_VALUE) {}
+ explicit AutoHandle(HANDLE handle) : handle_(handle) {}
+
+ ~AutoHandle() { Reset(); }
+
+ HANDLE Get() const { return handle_; }
+ void Reset() { Reset(INVALID_HANDLE_VALUE); }
+ void Reset(HANDLE handle) {
+ if (handle != handle_) {
+ if (handle_ != INVALID_HANDLE_VALUE)
+ ::CloseHandle(handle_);
+ handle_ = handle;
+ }
+ }
+
+ private:
+ HANDLE handle_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle);
+};
+#endif // GTEST_OS_WINDOWS
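AutoHandle is a small RAII wrapper over raw Windows kernel handles, so a
handle acquired in a death-test helper cannot leak on early return. A usage
sketch (the event example is illustrative, not from this diff):

void SignalAnEvent() {
  // CreateEvent returns NULL on failure, so check Get() before use.
  AutoHandle event(::CreateEvent(NULL, FALSE, FALSE, NULL));
  if (event.Get() != NULL)
    ::SetEvent(event.Get());
}  // ~AutoHandle closes the handle here via Reset()

Reset(new_handle) also closes the previously held handle first, so the same
AutoHandle can safely adopt a replacement.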
+
+// Attempts to parse a string into a positive integer pointed to by the
+// number parameter. Returns true if that is possible.
+// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use
+// it here.
+template <typename Integer>
+bool ParseNaturalNumber(const ::std::string& str, Integer* number) {
+ // Fail fast if the given string does not begin with a digit;
+ // this bypasses strtoXXX's "optional leading whitespace and plus
+ // or minus sign" semantics, which are undesirable here.
+ if (str.empty() || !isdigit(static_cast<unsigned char>(str[0]))) {
+ return false;
+ }
+ errno = 0;
+
+ char* end;
+ // BiggestConvertible is the largest integer type that system-provided
+ // string-to-number conversion routines can return.
+#if GTEST_OS_WINDOWS && !defined(__GNUC__)
+ // MSVC and C++ Builder define __int64 instead of the standard long long.
+ typedef unsigned __int64 BiggestConvertible;
+ const BiggestConvertible parsed = _strtoui64(str.c_str(), &end, 10);
+#else
+ typedef unsigned long long BiggestConvertible; // NOLINT
+ const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10);
+#endif // GTEST_OS_WINDOWS && !defined(__GNUC__)
+ const bool parse_success = *end == '\0' && errno == 0;
+
+ // TODO(vladl@google.com): Convert this to compile time assertion when it is
+ // available.
+ GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed));
+
+ const Integer result = static_cast<Integer>(parsed);
+ if (parse_success && static_cast<BiggestConvertible>(result) == parsed) {
+ *number = result;
+ return true;
+ }
+ return false;
+}
+#endif // GTEST_HAS_DEATH_TEST
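A few worked cases for ParseNaturalNumber, showing how the explicit digit
check and the round-trip cast reject signs, whitespace and overflow:

inline void ParseNaturalNumberExamples() {
  size_t n = 0;
  bool ok = ParseNaturalNumber(::std::string("42"), &n);   // true, n == 42
  ok = ParseNaturalNumber(::std::string("-1"), &n);        // false: sign
  ok = ParseNaturalNumber(::std::string(" 7"), &n);        // false: space
  unsigned char small = 0;
  ok = ParseNaturalNumber(::std::string("300"), &small);   // false: the
  (void)ok;  // cast back to BiggestConvertible no longer compares equal
}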
+
+// TestResult contains some private methods that should be hidden from
+// Google Test users but are required for testing. This class allows our
+// tests to access them.
+//
+// This class is supplied only for the purpose of testing Google Test's own
+// constructs. Do not use it in user tests, either directly or indirectly.
+class TestResultAccessor {
+ public:
+ static void RecordProperty(TestResult* test_result,
+ const TestProperty& property) {
+ test_result->RecordProperty(property);
+ }
+
+ static void ClearTestPartResults(TestResult* test_result) {
+ test_result->ClearTestPartResults();
+ }
+
+ static const std::vector<testing::TestPartResult>& test_part_results(
+ const TestResult& test_result) {
+ return test_result.test_part_results();
+ }
+};
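TestResultAccessor is the classic "test peer" pattern: rather than widening
TestResult's public API, its private methods are exposed through this one
internal class that Google Test's own tests call. From the calling side it
looks roughly like this (a sketch; the TestProperty constructor arguments
are an assumption):

void TestPeerSketch(TestResult* result) {
  TestResultAccessor::RecordProperty(result, TestProperty("key", "1"));
  TestResultAccessor::ClearTestPartResults(result);
}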
} // namespace internal
} // namespace testing
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal.h b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal.h
index 242ffea..0b90132 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-internal.h
@@ -39,7 +39,7 @@
#include <gtest/internal/gtest-port.h>
-#ifdef GTEST_OS_LINUX
+#if GTEST_OS_LINUX
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
@@ -121,32 +121,26 @@ namespace testing {
// Forward declaration of classes.
+class AssertionResult; // Result of an assertion.
class Message; // Represents a failure message.
class Test; // Represents a test.
-class TestCase; // A collection of related tests.
-class TestPartResult; // Result of a test part.
class TestInfo; // Information about a test.
+class TestPartResult; // Result of a test part.
class UnitTest; // A collection of test cases.
-class UnitTestEventListenerInterface; // Listens to Google Test events.
-class AssertionResult; // Result of an assertion.
namespace internal {
struct TraceInfo; // Information about a trace point.
class ScopedTrace; // Implements scoped trace.
class TestInfoImpl; // Opaque implementation of TestInfo
-class TestResult; // Result of a single Test.
class UnitTestImpl; // Opaque implementation of UnitTest
-template <typename E> class List; // A generic list.
-template <typename E> class ListNode; // A node in a generic list.
-
// How many times InitGoogleTest() has been called.
extern int g_init_gtest_count;
// The text used in failure messages to indicate the start of the
// stack trace.
-extern const char kStackTraceMarker[];
+GTEST_API_ extern const char kStackTraceMarker[];
// A secret type that Google Test users don't know about. It has no
// definition on purpose. Therefore it's impossible to create a
@@ -173,24 +167,21 @@ char (&IsNullLiteralHelper(...))[2]; // NOLINT
// A compile-time bool constant that is true if and only if x is a
// null pointer literal (i.e. NULL or any 0-valued compile-time
// integral constant).
-#ifdef GTEST_ELLIPSIS_NEEDS_COPY_
-// Passing non-POD classes through ellipsis (...) crashes the ARM
-// compiler. The Nokia Symbian and the IBM XL C/C++ compiler try to
-// instantiate a copy constructor for objects passed through ellipsis
-// (...), failing for uncopyable objects. Hence we define this to
-// false (and lose support for NULL detection).
+#ifdef GTEST_ELLIPSIS_NEEDS_POD_
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
#define GTEST_IS_NULL_LITERAL_(x) false
#else
#define GTEST_IS_NULL_LITERAL_(x) \
(sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1)
-#endif // GTEST_ELLIPSIS_NEEDS_COPY_
+#endif // GTEST_ELLIPSIS_NEEDS_POD_
// Appends the user-supplied message to the Google-Test-generated message.
-String AppendUserMessage(const String& gtest_msg,
- const Message& user_msg);
+GTEST_API_ String AppendUserMessage(const String& gtest_msg,
+ const Message& user_msg);
// A helper class for creating scoped traces in user programs.
-class ScopedTrace {
+class GTEST_API_ ScopedTrace {
public:
// The c'tor pushes the given source file location and message onto
// a trace stack maintained by Google Test.
@@ -231,13 +222,13 @@ String StreamableToString(const T& streamable);
// This overload makes sure that all pointers (including
// those to char or wchar_t) are printed as raw pointers.
template <typename T>
-inline String FormatValueForFailureMessage(internal::true_type dummy,
+inline String FormatValueForFailureMessage(internal::true_type /*dummy*/,
T* pointer) {
return StreamableToString(static_cast<const void*>(pointer));
}
template <typename T>
-inline String FormatValueForFailureMessage(internal::false_type dummy,
+inline String FormatValueForFailureMessage(internal::false_type /*dummy*/,
const T& value) {
return StreamableToString(value);
}
@@ -269,8 +260,8 @@ inline String FormatForFailureMessage(T* pointer) {
#endif // GTEST_NEEDS_IS_POINTER_
// These overloaded versions handle narrow and wide characters.
-String FormatForFailureMessage(char ch);
-String FormatForFailureMessage(wchar_t wchar);
+GTEST_API_ String FormatForFailureMessage(char ch);
+GTEST_API_ String FormatForFailureMessage(wchar_t wchar);
// When this operand is a const char* or char*, and the other operand
// is a ::std::string or ::string, we print this operand as a C string
@@ -287,9 +278,7 @@ inline String FormatForComparisonFailureMessage(\
return operand1_printer(str);\
}
-#if GTEST_HAS_STD_STRING
GTEST_FORMAT_IMPL_(::std::string, String::ShowCStringQuoted)
-#endif // GTEST_HAS_STD_STRING
#if GTEST_HAS_STD_WSTRING
GTEST_FORMAT_IMPL_(::std::wstring, String::ShowWideCStringQuoted)
#endif // GTEST_HAS_STD_WSTRING
@@ -318,12 +307,18 @@ GTEST_FORMAT_IMPL_(::wstring, String::ShowWideCStringQuoted)
// The ignoring_case parameter is true iff the assertion is a
// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will
// be inserted into the message.
-AssertionResult EqFailure(const char* expected_expression,
- const char* actual_expression,
- const String& expected_value,
- const String& actual_value,
- bool ignoring_case);
-
+GTEST_API_ AssertionResult EqFailure(const char* expected_expression,
+ const char* actual_expression,
+ const String& expected_value,
+ const String& actual_value,
+ bool ignoring_case);
+
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+GTEST_API_ String GetBoolAssertionFailureMessage(
+ const AssertionResult& assertion_result,
+ const char* expression_text,
+ const char* actual_predicate_value,
+ const char* expected_predicate_value);
// This template class represents an IEEE floating-point number
// (either single-precision or double-precision, depending on the
@@ -403,7 +398,7 @@ class FloatingPoint {
// around may change its bits, although the new value is guaranteed
// to be also a NAN. Therefore, don't expect this constructor to
// preserve the bits in x when x is a NAN.
- explicit FloatingPoint(const RawType& x) : value_(x) {}
+ explicit FloatingPoint(const RawType& x) { u_.value_ = x; }
// Static methods
@@ -412,8 +407,8 @@ class FloatingPoint {
// This function is needed to test the AlmostEquals() method.
static RawType ReinterpretBits(const Bits bits) {
FloatingPoint fp(0);
- fp.bits_ = bits;
- return fp.value_;
+ fp.u_.bits_ = bits;
+ return fp.u_.value_;
}
// Returns the floating-point number that represent positive infinity.
@@ -424,16 +419,16 @@ class FloatingPoint {
// Non-static methods
// Returns the bits that represents this number.
- const Bits &bits() const { return bits_; }
+ const Bits &bits() const { return u_.bits_; }
// Returns the exponent bits of this number.
- Bits exponent_bits() const { return kExponentBitMask & bits_; }
+ Bits exponent_bits() const { return kExponentBitMask & u_.bits_; }
// Returns the fraction bits of this number.
- Bits fraction_bits() const { return kFractionBitMask & bits_; }
+ Bits fraction_bits() const { return kFractionBitMask & u_.bits_; }
// Returns the sign bit of this number.
- Bits sign_bit() const { return kSignBitMask & bits_; }
+ Bits sign_bit() const { return kSignBitMask & u_.bits_; }
// Returns true iff this is NAN (not a number).
bool is_nan() const {
@@ -453,10 +448,17 @@ class FloatingPoint {
// a NAN must return false.
if (is_nan() || rhs.is_nan()) return false;
- return DistanceBetweenSignAndMagnitudeNumbers(bits_, rhs.bits_) <= kMaxUlps;
+ return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_)
+ <= kMaxUlps;
}
private:
+ // The data type used to store the actual floating-point number.
+ union FloatingPointUnion {
+ RawType value_; // The raw floating-point number.
+ Bits bits_; // The bits that represent the number.
+ };
+
// Converts an integer from the sign-and-magnitude representation to
// the biased representation. More precisely, let N be 2 to the
// power of (kBitCount - 1), an integer x is represented by the
@@ -491,10 +493,7 @@ class FloatingPoint {
return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
}
- union {
- RawType value_; // The raw floating-point number.
- Bits bits_; // The bits that represent the number.
- };
+ FloatingPointUnion u_;
};
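The change above swaps an anonymous union for the named member u_ of type
FloatingPointUnion; behavior is unchanged, and the union remains the
mechanism for the bit-level work: writing the RawType member and reading the
Bits member reinterprets the float's bits without a cast, which is what the
ULP-distance comparison needs. A usage sketch:

inline bool UlpExample() {
  const FloatingPoint<float> lhs(1.0f);
  const FloatingPoint<float> rhs(1.0f + 1e-7f);  // about one ULP away
  // True iff the two bit patterns are within kMaxUlps representable
  // floats of each other; NANs always compare unequal.
  return lhs.AlmostEquals(rhs);
}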
// Typedefs the instances of the FloatingPoint template class that we
@@ -539,7 +538,7 @@ TypeId GetTypeId() {
// ::testing::Test, as the latter may give the wrong result due to a
// suspected linker bug when compiling Google Test as a Mac OS X
// framework.
-TypeId GetTestTypeId();
+GTEST_API_ TypeId GetTestTypeId();
// Defines the abstract factory interface that creates instances
// of a Test object.
@@ -566,14 +565,16 @@ class TestFactoryImpl : public TestFactoryBase {
virtual Test* CreateTest() { return new TestClass; }
};
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
// Predicate-formatters for implementing the HRESULT checking macros
// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}
// We pass a long instead of HRESULT to avoid causing an
// include dependency for the HRESULT type.
-AssertionResult IsHRESULTSuccess(const char* expr, long hr); // NOLINT
-AssertionResult IsHRESULTFailure(const char* expr, long hr); // NOLINT
+GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr,
+ long hr); // NOLINT
+GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr,
+ long hr); // NOLINT
#endif // GTEST_OS_WINDOWS
@@ -612,7 +613,7 @@ typedef void (*TearDownTestCaseFunc)();
// factory: pointer to the factory that creates a test object.
// The newly created TestInfo instance will assume
// ownership of the factory object.
-TestInfo* MakeAndRegisterTestInfo(
+GTEST_API_ TestInfo* MakeAndRegisterTestInfo(
const char* test_case_name, const char* name,
const char* test_case_comment, const char* comment,
TypeId fixture_class_id,
@@ -620,10 +621,15 @@ TestInfo* MakeAndRegisterTestInfo(
TearDownTestCaseFunc tear_down_tc,
TestFactoryBase* factory);
-#if defined(GTEST_HAS_TYPED_TEST) || defined(GTEST_HAS_TYPED_TEST_P)
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+bool SkipPrefix(const char* prefix, const char** pstr);
+
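SkipPrefix is a tiny parsing helper: on success it advances *pstr past the
prefix, on failure it leaves *pstr untouched. A usage sketch (the flag name
is illustrative):

inline const char* FlagBody(const char* arg) {
  // For arg == "--gtest_filter=Foo.*" this returns "filter=Foo.*";
  // for anything without the prefix it returns NULL.
  return SkipPrefix("--gtest_", &arg) ? arg : NULL;
}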
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
// State of the definition of a type-parameterized test case.
-class TypedTestCasePState {
+class GTEST_API_ TypedTestCasePState {
public:
TypedTestCasePState() : registered_(false) {}
@@ -636,7 +642,8 @@ class TypedTestCasePState {
fprintf(stderr, "%s Test %s must be defined before "
"REGISTER_TYPED_TEST_CASE_P(%s, ...).\n",
FormatFileLocation(file, line).c_str(), test_name, case_name);
- abort();
+ fflush(stderr);
+ posix::Abort();
}
defined_test_names_.insert(test_name);
return true;
@@ -745,8 +752,8 @@ class TypeParameterizedTestCase {
template <GTEST_TEMPLATE_ Fixture, typename Types>
class TypeParameterizedTestCase<Fixture, Templates0, Types> {
public:
- static bool Register(const char* prefix, const char* case_name,
- const char* test_names) {
+ static bool Register(const char* /*prefix*/, const char* /*case_name*/,
+ const char* /*test_names*/) {
return true;
}
};
@@ -763,10 +770,39 @@ class TypeParameterizedTestCase<Fixture, Templates0, Types> {
// For example, if Foo() calls Bar(), which in turn calls
// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
-String GetCurrentOsStackTraceExceptTop(UnitTest* unit_test, int skip_count);
+GTEST_API_ String GetCurrentOsStackTraceExceptTop(UnitTest* unit_test,
+ int skip_count);
+
+// Helpers for suppressing warnings on unreachable code or constant
+// condition.
+
+// Always returns true.
+GTEST_API_ bool AlwaysTrue();
+
+// Always returns false.
+inline bool AlwaysFalse() { return !AlwaysTrue(); }
+
+// A simple Linear Congruential Generator for generating random
+// numbers with a uniform distribution. Unlike rand() and srand(), it
+// doesn't use global state (and therefore can't interfere with user
+// code). Unlike rand_r(), it's portable. An LCG isn't very random,
+// but it's good enough for our purposes.
+class GTEST_API_ Random {
+ public:
+ static const UInt32 kMaxRange = 1u << 31;
+
+ explicit Random(UInt32 seed) : state_(seed) {}
-// Returns the number of failed test parts in the given test result object.
-int GetFailedPartCount(const TestResult* result);
+ void Reseed(UInt32 seed) { state_ = seed; }
+
+ // Generates a random number from [0, range). Crashes if 'range' is
+ // 0 or greater than kMaxRange.
+ UInt32 Generate(UInt32 range);
+
+ private:
+ UInt32 state_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Random);
+};
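Generate(range) is declared here but defined in gtest.cc; a plausible body,
given the classic LCG constants (an assumption, since the definition is not
part of this diff):

UInt32 Random::Generate(UInt32 range) {
  // Classic linear congruential step, truncated to 31 bits by kMaxRange.
  state_ = (1103515245U * state_ + 12345U) % kMaxRange;
  GTEST_CHECK_(range > 0 && range <= kMaxRange)
      << "range must be in (0, kMaxRange].";
  // Modulo is slightly biased unless range divides kMaxRange evenly,
  // which is acceptable for shuffling tests.
  return state_ % range;
}

Determinism is the point: reseeding with the value recorded in random_seed_
replays the exact shuffle order of a previous run.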
} // namespace internal
} // namespace testing
@@ -776,20 +812,26 @@ int GetFailedPartCount(const TestResult* result);
= ::testing::Message()
#define GTEST_FATAL_FAILURE_(message) \
- return GTEST_MESSAGE_(message, ::testing::TPRT_FATAL_FAILURE)
+ return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure)
#define GTEST_NONFATAL_FAILURE_(message) \
- GTEST_MESSAGE_(message, ::testing::TPRT_NONFATAL_FAILURE)
+ GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure)
#define GTEST_SUCCESS_(message) \
- GTEST_MESSAGE_(message, ::testing::TPRT_SUCCESS)
+ GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess)
+
+// Suppresses MSVC warning 4702 (unreachable code) for the code following
+// statement if it returns or throws (or doesn't return or throw in some
+// situations).
+#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \
+ if (::testing::internal::AlwaysTrue()) { statement; }
#define GTEST_TEST_THROW_(statement, expected_exception, fail) \
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
if (const char* gtest_msg = "") { \
bool gtest_caught_expected = false; \
try { \
- statement; \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
} \
catch (expected_exception const&) { \
gtest_caught_expected = true; \
@@ -813,7 +855,7 @@ int GetFailedPartCount(const TestResult* result);
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
if (const char* gtest_msg = "") { \
try { \
- statement; \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
} \
catch (...) { \
gtest_msg = "Expected: " #statement " doesn't throw an exception.\n" \
@@ -829,7 +871,7 @@ int GetFailedPartCount(const TestResult* result);
if (const char* gtest_msg = "") { \
bool gtest_caught_any = false; \
try { \
- statement; \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
} \
catch (...) { \
gtest_caught_any = true; \
@@ -844,18 +886,23 @@ int GetFailedPartCount(const TestResult* result);
fail(gtest_msg)
-#define GTEST_TEST_BOOLEAN_(boolexpr, booltext, actual, expected, fail) \
+// Implements Boolean test assertions such as EXPECT_TRUE. expression can be
+// either a boolean expression or an AssertionResult. text is a textual
+// representation of expression as it was passed into EXPECT_TRUE.
+#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
- if (boolexpr) \
+ if (const ::testing::AssertionResult gtest_ar_ = \
+ ::testing::AssertionResult(expression)) \
; \
else \
- fail("Value of: " booltext "\n Actual: " #actual "\nExpected: " #expected)
+ fail(::testing::internal::GetBoolAssertionFailureMessage(\
+ gtest_ar_, text, #actual, #expected).c_str())
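The boolean macro now routes its argument through ::testing::AssertionResult,
which is what lets EXPECT_TRUE accept predicate functions that explain their
own failures. An illustrative predicate (standard gtest API; hedged, since
only the macro side appears in this diff):

::testing::AssertionResult IsEven(int n) {
  if ((n % 2) == 0)
    return ::testing::AssertionSuccess();
  return ::testing::AssertionFailure(::testing::Message() << n << " is odd");
}

// EXPECT_TRUE(IsEven(3)) then fails with "3 is odd" woven into the text
// built by GetBoolAssertionFailureMessage() above.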
#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
if (const char* gtest_msg = "") { \
::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \
- { statement; } \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \
gtest_msg = "Expected: " #statement " doesn't generate new fatal " \
"failures in the current thread.\n" \
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-linked_ptr.h b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-linked_ptr.h
index d4c7a39..2404ea8 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-linked_ptr.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-linked_ptr.h
@@ -77,7 +77,7 @@ namespace testing {
namespace internal {
// Protects copying of all linked_ptr objects.
-extern Mutex g_linked_ptr_mutex;
+GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex);
// This is used internally by all instances of linked_ptr<>. It needs to be
// a non-template class because different types of linked_ptr<> can refer to
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util-generated.h b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util-generated.h
index 17f3f7b..ab4ab56 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util-generated.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util-generated.h
@@ -44,13 +44,30 @@
#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+#include <gtest/internal/gtest-param-util.h>
#include <gtest/internal/gtest-port.h>
-#ifdef GTEST_HAS_PARAM_TEST
-
-#include <gtest/internal/gtest-param-util.h>
+#if GTEST_HAS_PARAM_TEST
namespace testing {
+
+// Forward declarations of ValuesIn(), which is implemented in
+// include/gtest/gtest-param-test.h.
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename ::std::iterator_traits<ForwardIterator>::value_type> ValuesIn(
+ ForwardIterator begin, ForwardIterator end);
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]);
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container);
+
namespace internal {
// Used in the Values() function to provide polymorphic capabilities.
@@ -63,6 +80,9 @@ class ValueArray1 {
operator ParamGenerator<T>() const { return ValuesIn(&v1_, &v1_ + 1); }
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray1& other);
+
const T1 v1_;
};
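Every ValueArrayN (and the CartesianProduct* classes later in this file)
gains a private, never-defined operator=. With const data members the
compiler could not synthesize a usable assignment operator anyway; declaring
it private and leaving it unimplemented silences warnings about that and
turns any accidental use into a compile- or link-time error. It is the
pre-C++11 spelling of today's '= delete':

class NonAssignable {
 public:
  explicit NonAssignable(int v) : v_(v) {}
 private:
  // C++98 idiom: declare but never define. C++11 would write
  //   NonAssignable& operator=(const NonAssignable&) = delete;
  void operator=(const NonAssignable& other);
  const int v_;
};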
@@ -78,6 +98,9 @@ class ValueArray2 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray2& other);
+
const T1 v1_;
const T2 v2_;
};
@@ -94,6 +117,9 @@ class ValueArray3 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray3& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -112,6 +138,9 @@ class ValueArray4 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray4& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -131,6 +160,9 @@ class ValueArray5 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray5& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -152,6 +184,9 @@ class ValueArray6 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray6& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -174,6 +209,9 @@ class ValueArray7 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray7& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -198,6 +236,9 @@ class ValueArray8 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray8& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -223,6 +264,9 @@ class ValueArray9 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray9& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -249,6 +293,9 @@ class ValueArray10 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray10& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -277,6 +324,9 @@ class ValueArray11 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray11& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -307,6 +357,9 @@ class ValueArray12 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray12& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -339,6 +392,9 @@ class ValueArray13 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray13& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -372,6 +428,9 @@ class ValueArray14 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray14& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -406,6 +465,9 @@ class ValueArray15 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray15& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -443,6 +505,9 @@ class ValueArray16 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray16& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -481,6 +546,9 @@ class ValueArray17 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray17& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -520,6 +588,9 @@ class ValueArray18 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray18& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -560,6 +631,9 @@ class ValueArray19 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray19& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -602,6 +676,9 @@ class ValueArray20 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray20& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -646,6 +723,9 @@ class ValueArray21 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray21& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -691,6 +771,9 @@ class ValueArray22 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray22& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -739,6 +822,9 @@ class ValueArray23 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray23& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -788,6 +874,9 @@ class ValueArray24 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray24& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -838,6 +927,9 @@ class ValueArray25 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray25& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -890,6 +982,9 @@ class ValueArray26 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray26& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -944,6 +1039,9 @@ class ValueArray27 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray27& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -999,6 +1097,9 @@ class ValueArray28 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray28& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1055,6 +1156,9 @@ class ValueArray29 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray29& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1113,6 +1217,9 @@ class ValueArray30 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray30& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1173,6 +1280,9 @@ class ValueArray31 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray31& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1234,6 +1344,9 @@ class ValueArray32 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray32& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1297,6 +1410,9 @@ class ValueArray33 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray33& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1361,6 +1477,9 @@ class ValueArray34 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray34& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1427,6 +1546,9 @@ class ValueArray35 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray35& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1495,6 +1617,9 @@ class ValueArray36 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray36& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1565,6 +1690,9 @@ class ValueArray37 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray37& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1636,6 +1764,9 @@ class ValueArray38 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray38& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1708,6 +1839,9 @@ class ValueArray39 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray39& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1782,6 +1916,9 @@ class ValueArray40 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray40& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1858,6 +1995,9 @@ class ValueArray41 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray41& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -1935,6 +2075,9 @@ class ValueArray42 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray42& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -2013,6 +2156,9 @@ class ValueArray43 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray43& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -2093,6 +2239,9 @@ class ValueArray44 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray44& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -2174,6 +2323,9 @@ class ValueArray45 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray45& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -2257,6 +2409,9 @@ class ValueArray46 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray46& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -2343,6 +2498,9 @@ class ValueArray47 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray47& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -2430,6 +2588,9 @@ class ValueArray48 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray48& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -2518,6 +2679,9 @@ class ValueArray49 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray49& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -2607,6 +2771,9 @@ class ValueArray50 {
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray50& other);
+
const T1 v1_;
const T2 v2_;
const T3 v3_;
@@ -2659,7 +2826,7 @@ class ValueArray50 {
const T50 v50_;
};
-#ifdef GTEST_HAS_COMBINE
+#if GTEST_HAS_COMBINE
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// Generates values from the Cartesian product of values produced
@@ -2757,6 +2924,9 @@ class CartesianProductGenerator2
current2_ == end2_;
}
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
const ParamGeneratorInterface<ParamType>* const base_;
// begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
// current[i]_ is the actual traversing iterator.
@@ -2767,11 +2937,14 @@ class CartesianProductGenerator2
const typename ParamGenerator<T2>::iterator end2_;
typename ParamGenerator<T2>::iterator current2_;
ParamType current_value_;
- };
+ }; // class CartesianProductGenerator2::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator2& other);
const ParamGenerator<T1> g1_;
const ParamGenerator<T2> g2_;
-};
+}; // class CartesianProductGenerator2
template <typename T1, typename T2, typename T3>
@@ -2879,6 +3052,9 @@ class CartesianProductGenerator3
current3_ == end3_;
}
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
const ParamGeneratorInterface<ParamType>* const base_;
// begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
// current[i]_ is the actual traversing iterator.
@@ -2892,12 +3068,15 @@ class CartesianProductGenerator3
const typename ParamGenerator<T3>::iterator end3_;
typename ParamGenerator<T3>::iterator current3_;
ParamType current_value_;
- };
+ }; // class CartesianProductGenerator3::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator3& other);
const ParamGenerator<T1> g1_;
const ParamGenerator<T2> g2_;
const ParamGenerator<T3> g3_;
-};
+}; // class CartesianProductGenerator3
template <typename T1, typename T2, typename T3, typename T4>
@@ -3020,6 +3199,9 @@ class CartesianProductGenerator4
current4_ == end4_;
}
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
const ParamGeneratorInterface<ParamType>* const base_;
// begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
// current[i]_ is the actual traversing iterator.
@@ -3036,13 +3218,16 @@ class CartesianProductGenerator4
const typename ParamGenerator<T4>::iterator end4_;
typename ParamGenerator<T4>::iterator current4_;
ParamType current_value_;
- };
+ }; // class CartesianProductGenerator4::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator4& other);
const ParamGenerator<T1> g1_;
const ParamGenerator<T2> g2_;
const ParamGenerator<T3> g3_;
const ParamGenerator<T4> g4_;
-};
+}; // class CartesianProductGenerator4
template <typename T1, typename T2, typename T3, typename T4, typename T5>
@@ -3177,6 +3362,9 @@ class CartesianProductGenerator5
current5_ == end5_;
}
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
const ParamGeneratorInterface<ParamType>* const base_;
// begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
// current[i]_ is the actual traversing iterator.
@@ -3196,14 +3384,17 @@ class CartesianProductGenerator5
const typename ParamGenerator<T5>::iterator end5_;
typename ParamGenerator<T5>::iterator current5_;
ParamType current_value_;
- };
+ }; // class CartesianProductGenerator5::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator5& other);
const ParamGenerator<T1> g1_;
const ParamGenerator<T2> g2_;
const ParamGenerator<T3> g3_;
const ParamGenerator<T4> g4_;
const ParamGenerator<T5> g5_;
-};
+}; // class CartesianProductGenerator5
template <typename T1, typename T2, typename T3, typename T4, typename T5,
@@ -3353,6 +3544,9 @@ class CartesianProductGenerator6
current6_ == end6_;
}
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
const ParamGeneratorInterface<ParamType>* const base_;
// begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
// current[i]_ is the actual traversing iterator.
@@ -3375,7 +3569,10 @@ class CartesianProductGenerator6
const typename ParamGenerator<T6>::iterator end6_;
typename ParamGenerator<T6>::iterator current6_;
ParamType current_value_;
- };
+ }; // class CartesianProductGenerator6::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator6& other);
const ParamGenerator<T1> g1_;
const ParamGenerator<T2> g2_;
@@ -3383,7 +3580,7 @@ class CartesianProductGenerator6
const ParamGenerator<T4> g4_;
const ParamGenerator<T5> g5_;
const ParamGenerator<T6> g6_;
-};
+}; // class CartesianProductGenerator6
template <typename T1, typename T2, typename T3, typename T4, typename T5,
@@ -3546,6 +3743,9 @@ class CartesianProductGenerator7
current7_ == end7_;
}
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
const ParamGeneratorInterface<ParamType>* const base_;
// begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
// current[i]_ is the actual traversing iterator.
@@ -3571,7 +3771,10 @@ class CartesianProductGenerator7
const typename ParamGenerator<T7>::iterator end7_;
typename ParamGenerator<T7>::iterator current7_;
ParamType current_value_;
- };
+ }; // class CartesianProductGenerator7::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator7& other);
const ParamGenerator<T1> g1_;
const ParamGenerator<T2> g2_;
@@ -3580,7 +3783,7 @@ class CartesianProductGenerator7
const ParamGenerator<T5> g5_;
const ParamGenerator<T6> g6_;
const ParamGenerator<T7> g7_;
-};
+}; // class CartesianProductGenerator7
template <typename T1, typename T2, typename T3, typename T4, typename T5,
@@ -3758,6 +3961,9 @@ class CartesianProductGenerator8
current8_ == end8_;
}
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
const ParamGeneratorInterface<ParamType>* const base_;
// begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
// current[i]_ is the actual traversing iterator.
@@ -3786,7 +3992,10 @@ class CartesianProductGenerator8
const typename ParamGenerator<T8>::iterator end8_;
typename ParamGenerator<T8>::iterator current8_;
ParamType current_value_;
- };
+ }; // class CartesianProductGenerator8::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator8& other);
const ParamGenerator<T1> g1_;
const ParamGenerator<T2> g2_;
@@ -3796,7 +4005,7 @@ class CartesianProductGenerator8
const ParamGenerator<T6> g6_;
const ParamGenerator<T7> g7_;
const ParamGenerator<T8> g8_;
-};
+}; // class CartesianProductGenerator8
template <typename T1, typename T2, typename T3, typename T4, typename T5,
@@ -3987,6 +4196,9 @@ class CartesianProductGenerator9
current9_ == end9_;
}
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
const ParamGeneratorInterface<ParamType>* const base_;
// begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
// current[i]_ is the actual traversing iterator.
@@ -4018,7 +4230,10 @@ class CartesianProductGenerator9
const typename ParamGenerator<T9>::iterator end9_;
typename ParamGenerator<T9>::iterator current9_;
ParamType current_value_;
- };
+ }; // class CartesianProductGenerator9::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator9& other);
const ParamGenerator<T1> g1_;
const ParamGenerator<T2> g2_;
@@ -4029,7 +4244,7 @@ class CartesianProductGenerator9
const ParamGenerator<T7> g7_;
const ParamGenerator<T8> g8_;
const ParamGenerator<T9> g9_;
-};
+}; // class CartesianProductGenerator9
template <typename T1, typename T2, typename T3, typename T4, typename T5,
@@ -4233,6 +4448,9 @@ class CartesianProductGenerator10
current10_ == end10_;
}
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
const ParamGeneratorInterface<ParamType>* const base_;
// begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
// current[i]_ is the actual traversing iterator.
@@ -4267,7 +4485,10 @@ class CartesianProductGenerator10
const typename ParamGenerator<T10>::iterator end10_;
typename ParamGenerator<T10>::iterator current10_;
ParamType current_value_;
- };
+ }; // class CartesianProductGenerator10::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator10& other);
const ParamGenerator<T1> g1_;
const ParamGenerator<T2> g2_;
@@ -4279,7 +4500,7 @@ class CartesianProductGenerator10
const ParamGenerator<T8> g8_;
const ParamGenerator<T9> g9_;
const ParamGenerator<T10> g10_;
-};
+}; // class CartesianProductGenerator10
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
@@ -4302,9 +4523,12 @@ CartesianProductHolder2(const Generator1& g1, const Generator2& g2)
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder2& other);
+
const Generator1 g1_;
const Generator2 g2_;
-};
+}; // class CartesianProductHolder2
template <class Generator1, class Generator2, class Generator3>
class CartesianProductHolder3 {
@@ -4322,10 +4546,13 @@ CartesianProductHolder3(const Generator1& g1, const Generator2& g2,
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder3& other);
+
const Generator1 g1_;
const Generator2 g2_;
const Generator3 g3_;
-};
+}; // class CartesianProductHolder3
template <class Generator1, class Generator2, class Generator3,
class Generator4>
@@ -4345,11 +4572,14 @@ CartesianProductHolder4(const Generator1& g1, const Generator2& g2,
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder4& other);
+
const Generator1 g1_;
const Generator2 g2_;
const Generator3 g3_;
const Generator4 g4_;
-};
+}; // class CartesianProductHolder4
template <class Generator1, class Generator2, class Generator3,
class Generator4, class Generator5>
@@ -4370,12 +4600,15 @@ CartesianProductHolder5(const Generator1& g1, const Generator2& g2,
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder5& other);
+
const Generator1 g1_;
const Generator2 g2_;
const Generator3 g3_;
const Generator4 g4_;
const Generator5 g5_;
-};
+}; // class CartesianProductHolder5
template <class Generator1, class Generator2, class Generator3,
class Generator4, class Generator5, class Generator6>
@@ -4399,13 +4632,16 @@ CartesianProductHolder6(const Generator1& g1, const Generator2& g2,
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder6& other);
+
const Generator1 g1_;
const Generator2 g2_;
const Generator3 g3_;
const Generator4 g4_;
const Generator5 g5_;
const Generator6 g6_;
-};
+}; // class CartesianProductHolder6
template <class Generator1, class Generator2, class Generator3,
class Generator4, class Generator5, class Generator6, class Generator7>
@@ -4431,6 +4667,9 @@ CartesianProductHolder7(const Generator1& g1, const Generator2& g2,
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder7& other);
+
const Generator1 g1_;
const Generator2 g2_;
const Generator3 g3_;
@@ -4438,7 +4677,7 @@ CartesianProductHolder7(const Generator1& g1, const Generator2& g2,
const Generator5 g5_;
const Generator6 g6_;
const Generator7 g7_;
-};
+}; // class CartesianProductHolder7
template <class Generator1, class Generator2, class Generator3,
class Generator4, class Generator5, class Generator6, class Generator7,
@@ -4467,6 +4706,9 @@ CartesianProductHolder8(const Generator1& g1, const Generator2& g2,
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder8& other);
+
const Generator1 g1_;
const Generator2 g2_;
const Generator3 g3_;
@@ -4475,7 +4717,7 @@ CartesianProductHolder8(const Generator1& g1, const Generator2& g2,
const Generator6 g6_;
const Generator7 g7_;
const Generator8 g8_;
-};
+}; // class CartesianProductHolder8
template <class Generator1, class Generator2, class Generator3,
class Generator4, class Generator5, class Generator6, class Generator7,
@@ -4507,6 +4749,9 @@ CartesianProductHolder9(const Generator1& g1, const Generator2& g2,
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder9& other);
+
const Generator1 g1_;
const Generator2 g2_;
const Generator3 g3_;
@@ -4516,7 +4761,7 @@ CartesianProductHolder9(const Generator1& g1, const Generator2& g2,
const Generator7 g7_;
const Generator8 g8_;
const Generator9 g9_;
-};
+}; // class CartesianProductHolder9
template <class Generator1, class Generator2, class Generator3,
class Generator4, class Generator5, class Generator6, class Generator7,
@@ -4550,6 +4795,9 @@ CartesianProductHolder10(const Generator1& g1, const Generator2& g2,
}
private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder10& other);
+
const Generator1 g1_;
const Generator2 g2_;
const Generator3 g3_;
@@ -4560,7 +4808,7 @@ CartesianProductHolder10(const Generator1& g1, const Generator2& g2,
const Generator8 g8_;
const Generator9 g9_;
const Generator10 g10_;
-};
+}; // class CartesianProductHolder10
#endif // GTEST_HAS_COMBINE
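For orientation, a minimal sketch of how the internal generators above surface to users through the public Combine() API; the test name and parameter choices are illustrative, and GTEST_HAS_COMBINE must be defined:

    #include <gtest/gtest.h>

    class FlagTest : public ::testing::TestWithParam<
        ::std::tr1::tuple<bool, int> > {};

    TEST_P(FlagTest, AcceptsEveryCombination) {
      const bool flag  = ::std::tr1::get<0>(GetParam());
      const int  value = ::std::tr1::get<1>(GetParam());
      EXPECT_TRUE(flag || value >= 0);
    }

    // Runs the test over the Cartesian product {false, true} x {0, 1, 2},
    // which CartesianProductGenerator2 enumerates behind the scenes.
    INSTANTIATE_TEST_CASE_P(BoolAndInt, FlagTest,
                            ::testing::Combine(::testing::Bool(),
                                               ::testing::Values(0, 1, 2)));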
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util.h b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util.h
index 3bb07ec..0cbb58c 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-param-util.h
@@ -38,16 +38,14 @@
#include <utility>
#include <vector>
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+#include <gtest/internal/gtest-internal.h>
+#include <gtest/internal/gtest-linked_ptr.h>
#include <gtest/internal/gtest-port.h>
-#ifdef GTEST_HAS_PARAM_TEST
-
-#if GTEST_HAS_RTTI
-#include <typeinfo>
-#endif // GTEST_HAS_RTTI
-
-#include <gtest/internal/gtest-linked_ptr.h>
-#include <gtest/internal/gtest-internal.h>
+#if GTEST_HAS_PARAM_TEST
namespace testing {
namespace internal {
@@ -58,26 +56,8 @@ namespace internal {
// fixture class for the same test case. This may happen when
// TEST_P macro is used to define two tests with the same name
// but in different namespaces.
-void ReportInvalidTestCaseType(const char* test_case_name,
- const char* file, int line);
-
-// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
-//
-// Downcasts the pointer of type Base to Derived.
-// Derived must be a subclass of Base. The parameter MUST
-// point to a class of type Derived, not any subclass of it.
-// When RTTI is available, the function performs a runtime
-// check to enforce this.
-template <class Derived, class Base>
-Derived* CheckedDowncastToActualType(Base* base) {
-#if GTEST_HAS_RTTI
- GTEST_CHECK_(typeid(*base) == typeid(Derived));
- Derived* derived = dynamic_cast<Derived*>(base); // NOLINT
-#else
- Derived* derived = static_cast<Derived*>(base); // Poor man's downcast.
-#endif // GTEST_HAS_RTTI
- return derived;
-}
+GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name,
+ const char* file, int line);
template <typename> class ParamGeneratorInterface;
template <typename> class ParamGenerator;
@@ -169,7 +149,7 @@ class ParamGeneratorInterface {
virtual ParamIteratorInterface<T>* End() const = 0;
};
-// Wraps ParamGeneratorInetrface<T> and provides general generator syntax
+// Wraps ParamGeneratorInterface<T> and provides general generator syntax
// compatible with the STL Container concept.
// This class implements copy initialization semantics and the contained
// ParamGeneratorInterface<T> instance is shared among all copies
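As a rough illustration of that container-like interface (assuming the public Range() factory from <gtest/gtest.h>), a ParamGenerator can be traversed much like an STL sequence:

    void Sketch() {
      ::testing::internal::ParamGenerator<int> gen = ::testing::Range(0, 3);
      for (::testing::internal::ParamGenerator<int>::iterator it = gen.begin();
           it != gen.end(); ++it) {
        printf("%d\n", *it);  // Prints 0, 1, and 2; copies of 'gen' would
      }                       // share the same underlying generator object.
    }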
@@ -245,9 +225,13 @@ class RangeGenerator : public ParamGeneratorInterface<T> {
private:
Iterator(const Iterator& other)
- : base_(other.base_), value_(other.value_), index_(other.index_),
+ : ParamIteratorInterface<T>(),
+ base_(other.base_), value_(other.value_), index_(other.index_),
step_(other.step_) {}
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
const ParamGeneratorInterface<T>* const base_;
T value_;
int index_;
@@ -263,6 +247,9 @@ class RangeGenerator : public ParamGeneratorInterface<T> {
return end_index;
}
+ // No implementation - assignment is unsupported.
+ void operator=(const RangeGenerator& other);
+
const T begin_;
const T end_;
const IncrementT step_;
@@ -349,7 +336,10 @@ class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
// Use of scoped_ptr helps manage cached value's lifetime,
// which is bound by the lifespan of the iterator itself.
mutable scoped_ptr<const T> value_;
- };
+ }; // class ValuesInIteratorRangeGenerator::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const ValuesInIteratorRangeGenerator& other);
const ContainerType container_;
}; // class ValuesInIteratorRangeGenerator
@@ -483,8 +473,8 @@ class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
// about a generator.
int AddTestCaseInstantiation(const char* instantiation_name,
GeneratorCreationFunc* func,
- const char* file,
- int line) {
+ const char* /* file */,
+ int /* line */) {
instantiations_.push_back(::std::make_pair(instantiation_name, func));
return 0; // Return value used only to run this method in namespace scope.
}
@@ -533,12 +523,12 @@ class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
// LocalTestInfo structure keeps information about a single test registered
// with TEST_P macro.
struct TestInfo {
- TestInfo(const char* test_case_base_name,
- const char* test_base_name,
- TestMetaFactoryBase<ParamType>* test_meta_factory) :
- test_case_base_name(test_case_base_name),
- test_base_name(test_base_name),
- test_meta_factory(test_meta_factory) {}
+ TestInfo(const char* a_test_case_base_name,
+ const char* a_test_base_name,
+ TestMetaFactoryBase<ParamType>* a_test_meta_factory) :
+ test_case_base_name(a_test_case_base_name),
+ test_base_name(a_test_base_name),
+ test_meta_factory(a_test_meta_factory) {}
const String test_case_base_name;
const String test_base_name;
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-port.h b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-port.h
index 20a95c9..9683271 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-port.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-port.h
@@ -42,6 +42,8 @@
//
// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2)
// is/isn't available.
+// GTEST_HAS_EXCEPTIONS - Define it to 1/0 to indicate that exceptions
+// are enabled.
// GTEST_HAS_GLOBAL_STRING - Define it to 1/0 to indicate that ::string
// is/isn't available (some systems define
// ::string, which is different to std::string).
@@ -52,32 +54,42 @@
// is/isn't available.
// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't
// enabled.
-// GTEST_HAS_STD_STRING - Define it to 1/0 to indicate that
-// std::string does/doesn't work (Google Test can
-// be used where std::string is unavailable).
// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that
// std::wstring does/doesn't work (Google Test can
// be used where std::wstring is unavailable).
-// GTEST_HAS_TR1_TUPLE 1 - Define it to 1/0 to indicate tr1::tuple
+// GTEST_HAS_TR1_TUPLE - Define it to 1/0 to indicate tr1::tuple
// is/isn't available.
+// GTEST_HAS_SEH - Define it to 1/0 to indicate whether the
+// compiler supports Microsoft's "Structured
+// Exception Handling".
+// GTEST_USE_OWN_TR1_TUPLE - Define it to 1/0 to indicate whether Google
+// Test's own tr1 tuple implementation should be
+// used. Unused when the user sets
+// GTEST_HAS_TR1_TUPLE to 0.
+// GTEST_LINKED_AS_SHARED_LIBRARY
+// - Define to 1 when compiling tests that use
+// Google Test as a shared library (known as
+// DLL on Windows).
+// GTEST_CREATE_SHARED_LIBRARY
+// - Define to 1 when compiling Google Test itself
+// as a shared library.
// This header defines the following utilities:
//
-// Macros indicating the name of the Google C++ Testing Framework project:
-// GTEST_NAME - a string literal of the project name.
-// GTEST_FLAG_PREFIX - a string literal of the prefix all Google
-// Test flag names share.
-// GTEST_FLAG_PREFIX_UPPER - a string literal of the prefix all Google
-// Test flag names share, in upper case.
-//
-// Macros indicating the current platform:
-// GTEST_OS_CYGWIN - defined iff compiled on Cygwin.
-// GTEST_OS_LINUX - defined iff compiled on Linux.
-// GTEST_OS_MAC - defined iff compiled on Mac OS X.
-// GTEST_OS_SOLARIS - defined iff compiled on Sun Solaris.
-// GTEST_OS_SYMBIAN - defined iff compiled for Symbian.
-// GTEST_OS_WINDOWS - defined iff compiled on Windows.
-// GTEST_OS_ZOS - defined iff compiled on IBM z/OS.
+// Macros indicating the current platform (defined to 1 if compiled on
+// the given platform; otherwise undefined):
+// GTEST_OS_AIX - IBM AIX
+// GTEST_OS_CYGWIN - Cygwin
+// GTEST_OS_HAIKU - Haiku
+// GTEST_OS_LINUX - Linux
+// GTEST_OS_MAC - Mac OS X
+// GTEST_OS_SOLARIS - Sun Solaris
+// GTEST_OS_SYMBIAN - Symbian
+// GTEST_OS_WINDOWS - Windows (Desktop, MinGW, or Mobile)
+// GTEST_OS_WINDOWS_DESKTOP - Windows Desktop
+// GTEST_OS_WINDOWS_MINGW - MinGW
+// GTEST_OS_WINDOWS_MOBILE - Windows Mobile
+// GTEST_OS_ZOS - z/OS
//
// Among the platforms, Cygwin, Linux, Mac OS X, and Windows have the
// most stable support. Since core members of the Google Test project
@@ -86,22 +98,26 @@
// googletestframework@googlegroups.com (patches for fixing them are
// even more welcome!).
//
-// Note that it is possible that none of the GTEST_OS_ macros are defined.
+// Note that it is possible that none of the GTEST_OS_* macros are defined.
//
-// Macros indicating available Google Test features:
-// GTEST_HAS_COMBINE - defined iff Combine construct is supported
-// in value-parameterized tests.
-// GTEST_HAS_DEATH_TEST - defined iff death tests are supported.
-// GTEST_HAS_PARAM_TEST - defined iff value-parameterized tests are
-// supported.
-// GTEST_HAS_TYPED_TEST - defined iff typed tests are supported.
-// GTEST_HAS_TYPED_TEST_P - defined iff type-parameterized tests are
-// supported.
+// Macros indicating available Google Test features (defined to 1 if
+// the corresponding feature is supported; otherwise undefined):
+// GTEST_HAS_COMBINE - the Combine() function (for value-parameterized
+// tests)
+// GTEST_HAS_DEATH_TEST - death tests
+// GTEST_HAS_PARAM_TEST - value-parameterized tests
+// GTEST_HAS_TYPED_TEST - typed tests
+// GTEST_HAS_TYPED_TEST_P - type-parameterized tests
+// GTEST_USES_POSIX_RE - enhanced POSIX regex is used.
+// GTEST_USES_SIMPLE_RE - our own simple regex is used;
+// the above two are mutually exclusive.
+// GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ().
//
// Macros for basic C++ coding:
// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning.
-// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances don't have to
-// be used.
+// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a
+// variable don't have to be used.
+// GTEST_DISALLOW_ASSIGN_ - disables operator=.
// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=.
// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used.
//
@@ -128,7 +144,10 @@
// LogToStderr() - directs all log messages to stderr.
// FlushInfoLog() - flushes informational log messages.
//
-// Stderr capturing:
+// Stdout and stderr capturing:
+// CaptureStdout() - starts capturing stdout.
+// GetCapturedStdout() - stops capturing stdout and returns the captured
+// string.
// CaptureStderr() - starts capturing stderr.
// GetCapturedStderr() - stops capturing stderr and returns the captured
// string.
@@ -151,13 +170,24 @@
// Int32FromGTestEnv() - parses an Int32 environment variable.
// StringFromGTestEnv() - parses a string environment variable.
+#include <stddef.h> // For ptrdiff_t
#include <stdlib.h>
#include <stdio.h>
-#include <iostream> // Used for GTEST_CHECK_
+#include <string.h>
+#ifndef _WIN32_WCE
+#include <sys/stat.h>
+#endif // !_WIN32_WCE
+
+#include <iostream> // NOLINT
+#include <sstream> // NOLINT
+#include <string> // NOLINT
-#define GTEST_NAME "Google Test"
-#define GTEST_FLAG_PREFIX "gtest_"
-#define GTEST_FLAG_PREFIX_UPPER "GTEST_"
+#define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com"
+#define GTEST_FLAG_PREFIX_ "gtest_"
+#define GTEST_FLAG_PREFIX_DASH_ "gtest-"
+#define GTEST_FLAG_PREFIX_UPPER_ "GTEST_"
+#define GTEST_NAME_ "Google Test"
+#define GTEST_PROJECT_URL_ "http://code.google.com/p/googletest/"
// Determines the version of gcc that is used to compile this.
#ifdef __GNUC__
@@ -168,50 +198,104 @@
// Determines the platform on which Google Test is compiled.
#ifdef __CYGWIN__
-#define GTEST_OS_CYGWIN
-#elif __SYMBIAN32__
-#define GTEST_OS_SYMBIAN
-#elif defined _MSC_VER
-// TODO(kenton@google.com): GTEST_OS_WINDOWS is currently used to mean
-// both "The OS is Windows" and "The compiler is MSVC". These
-// meanings really should be separated in order to better support
-// Windows compilers other than MSVC.
-#define GTEST_OS_WINDOWS
+#define GTEST_OS_CYGWIN 1
+#elif defined __SYMBIAN32__
+#define GTEST_OS_SYMBIAN 1
+#elif defined _WIN32
+#define GTEST_OS_WINDOWS 1
+#ifdef _WIN32_WCE
+#define GTEST_OS_WINDOWS_MOBILE 1
+#elif defined(__MINGW__) || defined(__MINGW32__)
+#define GTEST_OS_WINDOWS_MINGW 1
+#else
+#define GTEST_OS_WINDOWS_DESKTOP 1
+#endif // _WIN32_WCE
#elif defined __APPLE__
-#define GTEST_OS_MAC
+#define GTEST_OS_MAC 1
#elif defined __linux__
-#define GTEST_OS_LINUX
+#define GTEST_OS_LINUX 1
#elif defined __MVS__
-#define GTEST_OS_ZOS
+#define GTEST_OS_ZOS 1
#elif defined(__sun) && defined(__SVR4)
-#define GTEST_OS_SOLARIS
+#define GTEST_OS_SOLARIS 1
+#elif defined(_AIX)
+#define GTEST_OS_AIX 1
#elif defined(__HAIKU__)
-#define GTEST_OS_HAIKU
-#endif // _MSC_VER
+#define GTEST_OS_HAIKU 1
+#endif // __CYGWIN__
+
+#if GTEST_OS_CYGWIN || GTEST_OS_HAIKU || GTEST_OS_LINUX || GTEST_OS_MAC || \
+ GTEST_OS_SYMBIAN || GTEST_OS_SOLARIS || GTEST_OS_AIX
+
+// On some platforms, <regex.h> needs someone to define size_t, and
+// won't compile otherwise. We can #include it here as we already
+// included <stdlib.h>, which is guaranteed to define size_t through
+// <stddef.h>.
+#include <regex.h> // NOLINT
+#include <strings.h> // NOLINT
+#include <sys/types.h> // NOLINT
+#include <time.h> // NOLINT
+#include <unistd.h> // NOLINT
-// Determines whether ::std::string and ::string are available.
+#define GTEST_USES_POSIX_RE 1
+
+#elif GTEST_OS_WINDOWS
+
+#if !GTEST_OS_WINDOWS_MOBILE
+#include <direct.h> // NOLINT
+#include <io.h> // NOLINT
+#endif
-#ifndef GTEST_HAS_STD_STRING
-// The user didn't tell us whether ::std::string is available, so we
-// need to figure it out.
+// <regex.h> is not available on Windows. Use our own simple regex
+// implementation instead.
+#define GTEST_USES_SIMPLE_RE 1
-#ifdef GTEST_OS_WINDOWS
+#else
+
+// <regex.h> may not be available on this platform. Use our own
+// simple regex implementation instead.
+#define GTEST_USES_SIMPLE_RE 1
+
+#endif  // GTEST_OS_CYGWIN || GTEST_OS_HAIKU || GTEST_OS_LINUX || GTEST_OS_MAC ||
+        // GTEST_OS_SYMBIAN || GTEST_OS_SOLARIS || GTEST_OS_AIX
+
+#ifndef GTEST_HAS_EXCEPTIONS
+// The user didn't tell us whether exceptions are enabled, so we need
+// to figure it out.
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS
+// macro to enable exceptions, so we'll do the same.
// Assumes that exceptions are enabled by default.
#ifndef _HAS_EXCEPTIONS
#define _HAS_EXCEPTIONS 1
#endif // _HAS_EXCEPTIONS
-// GTEST_HAS_EXCEPTIONS is non-zero iff exceptions are enabled. It is
-// always defined, while _HAS_EXCEPTIONS is defined only on Windows.
#define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
-// On Windows, we can use ::std::string if the compiler version is VS
-// 2005 or above, or if exceptions are enabled.
-#define GTEST_HAS_STD_STRING ((_MSC_VER >= 1400) || GTEST_HAS_EXCEPTIONS)
-#else // We are on Linux or Mac OS.
+#elif defined(__GNUC__) && __EXCEPTIONS
+// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled.
+#define GTEST_HAS_EXCEPTIONS 1
+#elif defined(__SUNPRO_CC)
+// Sun Pro CC supports exceptions. However, there is no compile-time way of
+// detecting whether they are enabled or not. Therefore, we assume that
+// they are enabled unless the user tells us otherwise.
+#define GTEST_HAS_EXCEPTIONS 1
+#elif defined(__IBMCPP__) && __EXCEPTIONS
+// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled.
+#define GTEST_HAS_EXCEPTIONS 1
+#else
+// For other compilers, we assume exceptions are disabled to be
+// conservative.
#define GTEST_HAS_EXCEPTIONS 0
-#define GTEST_HAS_STD_STRING 1
-#endif // GTEST_OS_WINDOWS
+#endif // defined(_MSC_VER) || defined(__BORLANDC__)
+#endif // GTEST_HAS_EXCEPTIONS
-#endif // GTEST_HAS_STD_STRING
+#if !defined(GTEST_HAS_STD_STRING)
+// Even though we don't use this macro any longer, we keep it in case
+// some clients still depend on it.
+#define GTEST_HAS_STD_STRING 1
+#elif !GTEST_HAS_STD_STRING
+// The user told us that ::std::string isn't available.
+#error "Google Test cannot be used where ::std::string isn't available."
+#endif // !defined(GTEST_HAS_STD_STRING)
#ifndef GTEST_HAS_GLOBAL_STRING
// The user didn't tell us whether ::string is available, so we need
@@ -227,35 +311,21 @@
// TODO(wan@google.com): uses autoconf to detect whether ::std::wstring
// is available.
-#if defined(GTEST_OS_CYGWIN) || defined(GTEST_OS_SOLARIS) || defined(GTEST_OS_HAIKU) || defined(_MINIX)
-// At least some versions of cygwin don't support ::std::wstring.
+// Cygwin 1.5 and below doesn't support ::std::wstring.
+// Cygwin 1.7 might add wstring support; this should be updated when clear.
// Solaris' libc++ doesn't support it either.
// Minix currently doesn't support it either.
-#define GTEST_HAS_STD_WSTRING 0
-#else
-#define GTEST_HAS_STD_WSTRING GTEST_HAS_STD_STRING
-#endif // defined(GTEST_OS_CYGWIN) || defined(GTEST_OS_SOLARIS)
+#define GTEST_HAS_STD_WSTRING \
+    (!(GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || GTEST_OS_HAIKU || defined(_MINIX)))
#endif // GTEST_HAS_STD_WSTRING
#ifndef GTEST_HAS_GLOBAL_WSTRING
// The user didn't tell us whether ::wstring is available, so we need
// to figure it out.
-#define GTEST_HAS_GLOBAL_WSTRING GTEST_HAS_GLOBAL_STRING
+#define GTEST_HAS_GLOBAL_WSTRING \
+ (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING)
#endif // GTEST_HAS_GLOBAL_WSTRING
-#if GTEST_HAS_STD_STRING || GTEST_HAS_GLOBAL_STRING || \
- GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
-#include <string> // NOLINT
-#endif // GTEST_HAS_STD_STRING || GTEST_HAS_GLOBAL_STRING ||
- // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
-
-#if GTEST_HAS_STD_STRING
-#include <sstream> // NOLINT
-#else
-#include <strstream> // NOLINT
-#endif // GTEST_HAS_STD_STRING
-
// Determines whether RTTI is available.
#ifndef GTEST_HAS_RTTI
// The user didn't tell us whether RTTI is enabled, so we need to
@@ -267,72 +337,128 @@
#define GTEST_HAS_RTTI 1
#else
#define GTEST_HAS_RTTI 0
-#endif // _CPPRTTI
-
-#elif defined(__GNUC__)
+#endif
// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled.
-#if GTEST_GCC_VER_ >= 40302
+#elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302)
+
#ifdef __GXX_RTTI
#define GTEST_HAS_RTTI 1
#else
#define GTEST_HAS_RTTI 0
#endif // __GXX_RTTI
-#else
-// For gcc versions smaller than 4.3.2, we assume RTTI is enabled.
+
+// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if
+// both the typeid and dynamic_cast features are present.
+#elif defined(__IBMCPP__) && (__IBMCPP__ >= 900)
+
+#ifdef __RTTI_ALL__
#define GTEST_HAS_RTTI 1
-#endif // GTEST_GCC_VER >= 40302
+#else
+#define GTEST_HAS_RTTI 0
+#endif
#else
-// Unknown compiler - assume RTTI is enabled.
+// For all other compilers, we assume RTTI is enabled.
#define GTEST_HAS_RTTI 1
#endif // _MSC_VER
#endif // GTEST_HAS_RTTI
-// Determines whether <pthread.h> is available.
-#ifndef GTEST_HAS_PTHREAD
-// The user didn't tell us, so we need to figure it out.
-
-#if defined(GTEST_OS_LINUX) || defined(GTEST_OS_MAC)
-#define GTEST_HAS_PTHREAD 1
-#else
-#define GTEST_HAS_PTHREAD 0
-#endif // GTEST_OS_LINUX || GTEST_OS_MAC
+// It's this header's responsibility to #include <typeinfo> when RTTI
+// is enabled.
+#if GTEST_HAS_RTTI
+#include <typeinfo>
+#endif
+// Determines whether Google Test can use the pthreads library.
+#ifndef GTEST_HAS_PTHREAD
+// The user didn't tell us explicitly, so we assume pthreads support is
+// available on Linux and Mac.
+//
+// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0
+// to your compiler flags.
+#define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC)
#endif // GTEST_HAS_PTHREAD
-// Determines whether tr1/tuple is available. If you have tr1/tuple
-// on your platform, define GTEST_HAS_TR1_TUPLE=1 for both the Google
-// Test project and your tests. If you would like Google Test to detect
-// tr1/tuple on your platform automatically, please open an issue
-// ticket at http://code.google.com/p/googletest.
+// Determines whether Google Test can use tr1/tuple. You can define
+// this macro to 0 to prevent Google Test from using tuple (any
+// feature depending on tuple will be disabled in this mode).
#ifndef GTEST_HAS_TR1_TUPLE
+// The user didn't tell us not to do it, so we assume it's OK.
+#define GTEST_HAS_TR1_TUPLE 1
+#endif // GTEST_HAS_TR1_TUPLE
+
+// Determines whether Google Test's own tr1 tuple implementation
+// should be used.
+#ifndef GTEST_USE_OWN_TR1_TUPLE
// The user didn't tell us, so we need to figure it out.
-// GCC provides <tr1/tuple> since 4.0.0.
-#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000)
-#define GTEST_HAS_TR1_TUPLE 1
+// We use our own TR1 tuple if we aren't sure the user has an
+// implementation of it already. At this time, GCC 4.0.0+ and MSVC
+// 2010 are the only mainstream compilers that come with a TR1 tuple
+// implementation. NVIDIA's CUDA NVCC compiler pretends to be GCC by
+// defining __GNUC__ and friends, but cannot compile GCC's tuple
+// implementation. MSVC 2008 (9.0) provides TR1 tuple in a 323 MB
+// Feature Pack download, which we cannot assume the user has.
+#if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000)) \
+ || _MSC_VER >= 1600
+#define GTEST_USE_OWN_TR1_TUPLE 0
#else
-#define GTEST_HAS_TR1_TUPLE 0
-#endif // __GNUC__
-#endif // GTEST_HAS_TR1_TUPLE
+#define GTEST_USE_OWN_TR1_TUPLE 1
+#endif
+
+#endif // GTEST_USE_OWN_TR1_TUPLE
// To avoid conditional compilation everywhere, we make it
// gtest-port.h's responsibility to #include the header implementing
// tr1/tuple.
#if GTEST_HAS_TR1_TUPLE
-#if defined(__GNUC__)
-// GCC implements tr1/tuple in the <tr1/tuple> header. This does not
-// conform to the TR1 spec, which requires the header to be <tuple>.
+
+#if GTEST_USE_OWN_TR1_TUPLE
+#include <gtest/internal/gtest-tuple.h>
+#elif GTEST_OS_SYMBIAN
+
+// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to
+// use STLport's tuple implementation, which unfortunately doesn't
+// work as the copy of STLport distributed with Symbian is incomplete.
+// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to
+// use its own tuple implementation.
+#ifdef BOOST_HAS_TR1_TUPLE
+#undef BOOST_HAS_TR1_TUPLE
+#endif // BOOST_HAS_TR1_TUPLE
+
+// This prevents <boost/tr1/detail/config.hpp>, which defines
+// BOOST_HAS_TR1_TUPLE, from being #included by Boost's <tuple>.
+#define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED
+#include <tuple>
+
+#elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000)
+// GCC 4.0+ implements tr1/tuple in the <tr1/tuple> header. This does
+// not conform to the TR1 spec, which requires the header to be <tuple>.
+
+#if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+// Until version 4.3.2, gcc has a bug that causes <tr1/functional>,
+// which is #included by <tr1/tuple>, to not compile when RTTI is
+// disabled. _TR1_FUNCTIONAL is the header guard for
+// <tr1/functional>. Hence the following #define is a hack to prevent
+// <tr1/functional> from being included.
+#define _TR1_FUNCTIONAL 1
#include <tr1/tuple>
+#undef _TR1_FUNCTIONAL // Allows the user to #include
+ // <tr1/functional> if he chooses to.
#else
-// If the compiler is not GCC, we assume the user is using a
+#include <tr1/tuple> // NOLINT
+#endif // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+
+#else
+// If the compiler is not GCC 4.0+, we assume the user is using a
// spec-conforming TR1 implementation.
-#include <tuple>
-#endif // __GNUC__
+#include <tuple> // NOLINT
+#endif // GTEST_USE_OWN_TR1_TUPLE
+
#endif // GTEST_HAS_TR1_TUPLE
// Determines whether clone(2) is supported.
@@ -342,55 +468,57 @@
#ifndef GTEST_HAS_CLONE
// The user didn't tell us, so we need to figure it out.
-#if defined(GTEST_OS_LINUX) && !defined(__ia64__)
+#if GTEST_OS_LINUX && !defined(__ia64__)
#define GTEST_HAS_CLONE 1
#else
#define GTEST_HAS_CLONE 0
-#endif // defined(GTEST_OS_LINUX) && !defined(__ia64__)
+#endif // GTEST_OS_LINUX && !defined(__ia64__)
#endif // GTEST_HAS_CLONE
-// Determines whether to support death tests.
-#if GTEST_HAS_STD_STRING && GTEST_HAS_CLONE
-#define GTEST_HAS_DEATH_TEST
-// On some platforms, <regex.h> needs someone to define size_t, and
-// won't compile otherwise. We can #include it here as we already
-// included <stdlib.h>, which is guaranteed to define size_t through
-// <stddef.h>.
-#include <regex.h>
-#include <vector>
-#include <fcntl.h>
-#include <sys/mman.h>
-#endif // GTEST_HAS_STD_STRING && GTEST_HAS_CLONE
+// Determines whether to support stream redirection. This is used to test
+// output correctness and to implement death tests.
+#if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN
+#define GTEST_HAS_STREAM_REDIRECTION_ 1
+#endif // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN
-// Determines whether to support value-parameterized tests.
+// Determines whether to support death tests.
+// Google Test does not support death tests for VC 7.1 and earlier as
+// abort() in a VC 7.1 application compiled as GUI in debug config
+// pops up a dialog window that cannot be suppressed programmatically.
+#if (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+ (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \
+ GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX)
+#define GTEST_HAS_DEATH_TEST 1
+#include <vector> // NOLINT
+#endif
-#if defined(__GNUC__) || (_MSC_VER >= 1400)
-// TODO(vladl@google.com): get the implementation rid of vector and list
-// to compile on MSVC 7.1.
-#define GTEST_HAS_PARAM_TEST
-#endif // defined(__GNUC__) || (_MSC_VER >= 1400)
+// We don't support MSVC 7.1 with exceptions disabled now. Therefore
+// all the compilers we care about are adequate for supporting
+// value-parameterized tests.
+#define GTEST_HAS_PARAM_TEST 1
// Determines whether to support type-driven tests.
-// Typed tests need <typeinfo> and variadic macros, which gcc and VC
-// 8.0+ support.
-#if defined(__GNUC__) || (_MSC_VER >= 1400)
-#define GTEST_HAS_TYPED_TEST
-#define GTEST_HAS_TYPED_TEST_P
-#endif // defined(__GNUC__) || (_MSC_VER >= 1400)
+// Typed tests need <typeinfo> and variadic macros, which GCC, VC++ 8.0,
+// Sun Pro CC, and IBM Visual Age support.
+#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \
+ defined(__IBMCPP__)
+#define GTEST_HAS_TYPED_TEST 1
+#define GTEST_HAS_TYPED_TEST_P 1
+#endif
// Determines whether to support Combine(). This only makes sense when
-// value-parameterized tests are enabled.
-#if defined(GTEST_HAS_PARAM_TEST) && GTEST_HAS_TR1_TUPLE
-#define GTEST_HAS_COMBINE
-#endif // defined(GTEST_HAS_PARAM_TEST) && GTEST_HAS_TR1_TUPLE
+// value-parameterized tests are enabled. The implementation doesn't
+// work on Sun Studio since it doesn't understand templated conversion
+// operators.
+#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC)
+#define GTEST_HAS_COMBINE 1
+#endif
// Determines whether the system compiler uses UTF-16 for encoding wide strings.
-#if defined(GTEST_OS_WINDOWS) || defined(GTEST_OS_CYGWIN) || \
- defined(GTEST_OS_SYMBIAN)
-#define GTEST_WIDE_STRING_USES_UTF16_ 1
-#endif
+#define GTEST_WIDE_STRING_USES_UTF16_ \
+ (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX)
// Defines some utility macros.
@@ -408,7 +536,7 @@
#define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: // NOLINT
#endif
-// Use this annotation at the end of a struct / class definition to
+// Use this annotation at the end of a struct/class definition to
// prevent the compiler from optimizing away instances that are never
// used. This is useful when all interesting logic happens inside the
// c'tor and / or d'tor. Example:
@@ -416,17 +544,25 @@
// struct Foo {
// Foo() { ... }
// } GTEST_ATTRIBUTE_UNUSED_;
+//
+// Also use it after a variable or parameter declaration to tell the
+// compiler the variable/parameter does not have to be used.
#if defined(__GNUC__) && !defined(COMPILER_ICC)
#define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
#else
#define GTEST_ATTRIBUTE_UNUSED_
#endif
-// A macro to disallow the evil copy constructor and operator= functions
+// A macro to disallow operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_ASSIGN_(type)\
+ void operator=(type const &)
+
+// A macro to disallow copy constructor and operator=
// This should be used in the private: declarations for a class.
#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\
- type(const type &);\
- void operator=(const type &)
+ type(type const &);\
+ GTEST_DISALLOW_ASSIGN_(type)
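A minimal usage sketch (the Port class is illustrative): placed in the private section, the macros declare but never define the members, so stray copies fail to compile or link:

    class Port {
     public:
      explicit Port(int number) : number_(number) {}

     private:
      int number_;
      GTEST_DISALLOW_COPY_AND_ASSIGN_(Port);  // 'Port p2(p1);' and 'p2 = p1;'
    };                                        // are both rejected.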
// Tell the compiler to warn about unused return values for functions declared
// with this macro. The macro should be used on function declarations
@@ -439,6 +575,36 @@
#define GTEST_MUST_USE_RESULT_
#endif // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC
+// Determine whether the compiler supports Microsoft's Structured Exception
+// Handling. This is supported by several Windows compilers but generally
+// does not exist on any other system.
+#ifndef GTEST_HAS_SEH
+// The user didn't tell us, so we need to figure it out.
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// These two compilers are known to support SEH.
+#define GTEST_HAS_SEH 1
+#else
+// Assume no SEH.
+#define GTEST_HAS_SEH 0
+#endif
+
+#endif // GTEST_HAS_SEH
+
+#ifdef _MSC_VER
+
+#if GTEST_LINKED_AS_SHARED_LIBRARY
+#define GTEST_API_ __declspec(dllimport)
+#elif GTEST_CREATE_SHARED_LIBRARY
+#define GTEST_API_ __declspec(dllexport)
+#endif
+
+#endif // _MSC_VER
+
+#ifndef GTEST_API_
+#define GTEST_API_
+#endif
+
namespace testing {
class Message;
@@ -447,15 +613,11 @@ namespace internal {
class String;
-// std::strstream is deprecated. However, we have to use it on
-// Windows as std::stringstream won't compile on Windows when
-// exceptions are disabled. We use std::stringstream on other
-// platforms to avoid compiler warnings there.
-#if GTEST_HAS_STD_STRING
typedef ::std::stringstream StrStream;
-#else
-typedef ::std::strstream StrStream;
-#endif // GTEST_HAS_STD_STRING
+
+// A helper for suppressing warnings on constant condition. It just
+// returns 'condition'.
+GTEST_API_ bool IsTrue(bool condition);
// Defines scoped_ptr.
@@ -464,6 +626,8 @@ typedef ::std::strstream StrStream;
template <typename T>
class scoped_ptr {
public:
+ typedef T element_type;
+
explicit scoped_ptr(T* p = NULL) : ptr_(p) {}
~scoped_ptr() { reset(); }
@@ -479,7 +643,7 @@ class scoped_ptr {
void reset(T* p = NULL) {
if (p != ptr_) {
- if (sizeof(T) > 0) { // Makes sure T is a complete type.
+ if (IsTrue(sizeof(T) > 0)) { // Makes sure T is a complete type.
delete ptr_;
}
ptr_ = p;
@@ -491,18 +655,18 @@ class scoped_ptr {
GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr);
};
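A usage sketch for the class above (assuming its usual get()/operator* accessors): ownership is scoped, and reset() deletes the old object before adopting a new one:

    void Sketch() {
      scoped_ptr<int> value(new int(42));
      *value += 1;           // Dereferences like a raw pointer.
      value.reset(new int);  // Deletes the 42, takes ownership of the new int.
    }                        // The remaining int is deleted here.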
-#ifdef GTEST_HAS_DEATH_TEST
-
// Defines RE.
-// A simple C++ wrapper for <regex.h>. It uses the POSIX Enxtended
+// A simple C++ wrapper for <regex.h>. It uses the POSIX Extended
// Regular Expression syntax.
-class RE {
+class GTEST_API_ RE {
public:
+ // A copy constructor is required by the Standard to initialize object
+ // references from r-values.
+ RE(const RE& other) { Init(other.pattern()); }
+
// Constructs an RE from a string.
-#if GTEST_HAS_STD_STRING
RE(const ::std::string& regex) { Init(regex.c_str()); } // NOLINT
-#endif // GTEST_HAS_STD_STRING
#if GTEST_HAS_GLOBAL_STRING
RE(const ::string& regex) { Init(regex.c_str()); } // NOLINT
@@ -521,14 +685,12 @@ class RE {
//
// TODO(wan@google.com): make FullMatch() and PartialMatch() work
// when str contains NUL characters.
-#if GTEST_HAS_STD_STRING
static bool FullMatch(const ::std::string& str, const RE& re) {
return FullMatch(str.c_str(), re);
}
static bool PartialMatch(const ::std::string& str, const RE& re) {
return PartialMatch(str.c_str(), re);
}
-#endif // GTEST_HAS_STD_STRING
#if GTEST_HAS_GLOBAL_STRING
static bool FullMatch(const ::string& str, const RE& re) {
@@ -550,15 +712,20 @@ class RE {
// String type here, in order to simplify dependencies between the
// files.
const char* pattern_;
+ bool is_valid_;
+#if GTEST_USES_POSIX_RE
regex_t full_regex_; // For FullMatch().
regex_t partial_regex_; // For PartialMatch().
- bool is_valid_;
-};
+#else // GTEST_USES_SIMPLE_RE
+ const char* full_pattern_; // For FullMatch();
+#endif
-#endif // GTEST_HAS_DEATH_TEST
+ GTEST_DISALLOW_ASSIGN_(RE);
+};
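A usage sketch: the pattern is compiled once in the constructor, and the static matchers test whole strings or substrings against it in POSIX Extended syntax (or the built-in simple engine on platforms without <regex.h>):

    void Sketch() {
      const RE re("a.*z");
      const bool full    = RE::FullMatch("abcz", re);       // true: whole string.
      const bool partial = RE::PartialMatch("xxazxx", re);  // true: substring "az".
      (void)full;
      (void)partial;
    }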
// Defines logging utilities:
-// GTEST_LOG_() - logs messages at the specified severity level.
+// GTEST_LOG_(severity) - logs messages at the specified severity level. The
+// message itself is streamed into the macro.
// LogToStderr() - directs all log messages to stderr.
// FlushInfoLog() - flushes informational log messages.
@@ -569,35 +736,439 @@ enum GTestLogSeverity {
GTEST_FATAL
};
-void GTestLog(GTestLogSeverity severity, const char* file,
- int line, const char* msg);
+// Formats log entry severity, provides a stream object for streaming the
+// log message, and terminates the message with a newline when going out of
+// scope.
+class GTEST_API_ GTestLog {
+ public:
+ GTestLog(GTestLogSeverity severity, const char* file, int line);
+
+ // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+ ~GTestLog();
-#define GTEST_LOG_(severity, msg)\
- ::testing::internal::GTestLog(\
- ::testing::internal::GTEST_##severity, __FILE__, __LINE__, \
- (::testing::Message() << (msg)).GetString().c_str())
+ ::std::ostream& GetStream() { return ::std::cerr; }
+
+ private:
+ const GTestLogSeverity severity_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog);
+};
+
+#define GTEST_LOG_(severity) \
+ ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \
+ __FILE__, __LINE__).GetStream()
inline void LogToStderr() {}
inline void FlushInfoLog() { fflush(NULL); }
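A usage sketch: the severity becomes part of the macro name and the message is streamed in; the temporary GTestLog appends the newline (and, for GTEST_FATAL, aborts) in its destructor:

    void Sketch() {
      GTEST_LOG_(WARNING) << "falling back to the simple regex engine";
    }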
+// INTERNAL IMPLEMENTATION - DO NOT USE.
+//
+// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
+// is not satisfied.
+// Synopsis:
+// GTEST_CHECK_(boolean_condition);
+// or
+// GTEST_CHECK_(boolean_condition) << "Additional message";
+//
+// This checks the condition and if the condition is not satisfied
+// it prints message about the condition violation, including the
+// condition itself, plus additional message streamed into it, if any,
+// and then it aborts the program. It aborts the program irrespective of
+// whether it is built in the debug mode or not.
+#define GTEST_CHECK_(condition) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::IsTrue(condition)) \
+ ; \
+ else \
+ GTEST_LOG_(FATAL) << "Condition " #condition " failed. "
+
+// An all-mode assert to verify that the given POSIX-style function
+// call returns 0 (indicating success). Known limitation: this
+// doesn't expand to a balanced 'if' statement, so enclose the macro
+// in {} if you need to use it as the only statement in an 'if'
+// branch.
+#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \
+ if (const int gtest_error = (posix_call)) \
+    GTEST_LOG_(FATAL) << #posix_call << " failed with error " \
+ << gtest_error
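
Usage sketches for the two asserts above (assuming the <stdlib.h> and <pthread.h> headers this file already pulls in where available):

    void Sketch() {
      void* const buffer = malloc(64);
      GTEST_CHECK_(buffer != NULL) << "allocation of 64 bytes failed";
      free(buffer);

      pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
      GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mu));  // FATAL on non-zero.
      pthread_mutex_unlock(&mu);
    }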
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Downcasts the pointer of type Base to Derived.
+// Derived must be a subclass of Base. The parameter MUST
+// point to a class of type Derived, not any subclass of it.
+// When RTTI is available, the function performs a runtime
+// check to enforce this.
+template <class Derived, class Base>
+Derived* CheckedDowncastToActualType(Base* base) {
+#if GTEST_HAS_RTTI
+ GTEST_CHECK_(typeid(*base) == typeid(Derived));
+ return dynamic_cast<Derived*>(base); // NOLINT
+#else
+ return static_cast<Derived*>(base); // Poor man's downcast.
+#endif
+}
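A usage sketch with an illustrative hierarchy; with RTTI enabled, the GTEST_CHECK_ fires if *a were some Animal other than a Dog, while without RTTI the call degrades to a plain static_cast:

    class Animal {
     public:
      virtual ~Animal() {}
    };
    class Dog : public Animal {};

    void Sketch() {
      Animal* const a = new Dog;
      Dog* const d = CheckedDowncastToActualType<Dog>(a);  // OK: *a is a Dog.
      delete d;
    }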
+
+#if GTEST_HAS_STREAM_REDIRECTION_
+
// Defines the stderr capturer:
+// CaptureStdout - starts capturing stdout.
+// GetCapturedStdout - stops capturing stdout and returns the captured string.
// CaptureStderr - starts capturing stderr.
// GetCapturedStderr - stops capturing stderr and returns the captured string.
+//
+GTEST_API_ void CaptureStdout();
+GTEST_API_ String GetCapturedStdout();
+GTEST_API_ void CaptureStderr();
+GTEST_API_ String GetCapturedStderr();
+
+#endif // GTEST_HAS_STREAM_REDIRECTION_
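A usage sketch: everything written to stdout between the two calls is returned as a single String:

    void Sketch() {
      CaptureStdout();
      printf("hello from the code under test\n");
      const String output = GetCapturedStdout();
      (void)output;  // Holds "hello from the code under test\n".
    }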
+
-#ifdef GTEST_HAS_DEATH_TEST
+#if GTEST_HAS_DEATH_TEST
// A copy of all command line arguments. Set by InitGoogleTest().
extern ::std::vector<String> g_argvs;
-void CaptureStderr();
// GTEST_HAS_DEATH_TEST implies we have ::std::string.
-::std::string GetCapturedStderr();
const ::std::vector<String>& GetArgvs();
#endif // GTEST_HAS_DEATH_TEST
// Defines synchronization primitives.
+#if GTEST_HAS_PTHREAD
+
+// Sleeps for (roughly) n milli-seconds. This function is only for
+// testing Google Test's own constructs. Don't use it in user tests,
+// either directly or indirectly.
+inline void SleepMilliseconds(int n) {
+ const timespec time = {
+ 0, // 0 seconds.
+ n * 1000L * 1000L, // And n ms.
+ };
+ nanosleep(&time, NULL);
+}
+
+// Allows a controller thread to pause execution of newly created
+// threads until notified. Instances of this class must be created
+// and destroyed in the controller thread.
+//
+// This class is only for testing Google Test's own constructs. Do not
+// use it in user tests, either directly or indirectly.
+class Notification {
+ public:
+ Notification() : notified_(false) {}
+
+ // Notifies all threads created with this notification to start. Must
+ // be called from the controller thread.
+ void Notify() { notified_ = true; }
+
+ // Blocks until the controller thread notifies. Must be called from a test
+ // thread.
+ void WaitForNotification() {
+    while (!notified_) {
+ SleepMilliseconds(10);
+ }
+ }
+
+ private:
+ volatile bool notified_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
+};
+
+// As a C-function, ThreadFuncWithCLinkage cannot be templated itself.
+// Consequently, it cannot select a correct instantiation of ThreadWithParam
+// in order to call its Run(). Introducing ThreadWithParamBase as a
+// non-templated base class for ThreadWithParam allows us to bypass this
+// problem.
+class ThreadWithParamBase {
+ public:
+ virtual ~ThreadWithParamBase() {}
+ virtual void Run() = 0;
+};
+
+// pthread_create() accepts a pointer to a function type with the C linkage.
+// According to the Standard (7.5/1), function types with different linkages
+// are different even if they are otherwise identical. Some compilers (for
+// example, SunStudio) treat them as different types. Since class methods
+// cannot be defined with C-linkage we need to define a free C-function to
+// pass into pthread_create().
+extern "C" inline void* ThreadFuncWithCLinkage(void* thread) {
+ static_cast<ThreadWithParamBase*>(thread)->Run();
+ return NULL;
+}
+
+// Helper class for testing Google Test's multi-threading constructs.
+// To use it, write:
+//
+// void ThreadFunc(int param) { /* Do things with param */ }
+// Notification thread_can_start;
+// ...
+// // The thread_can_start parameter is optional; you can supply NULL.
+// ThreadWithParam<int> thread(&ThreadFunc, 5, &thread_can_start);
+// thread_can_start.Notify();
+//
+// These classes are only for testing Google Test's own constructs. Do
+// not use them in user tests, either directly or indirectly.
+template <typename T>
+class ThreadWithParam : public ThreadWithParamBase {
+ public:
+ typedef void (*UserThreadFunc)(T);
+
+ ThreadWithParam(
+ UserThreadFunc func, T param, Notification* thread_can_start)
+ : func_(func),
+ param_(param),
+ thread_can_start_(thread_can_start),
+ finished_(false) {
+ ThreadWithParamBase* const base = this;
+ // The thread can be created only after all fields except thread_
+ // have been initialized.
+ GTEST_CHECK_POSIX_SUCCESS_(
+ pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base));
+ }
+ ~ThreadWithParam() { Join(); }
+
+ void Join() {
+ if (!finished_) {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0));
+ finished_ = true;
+ }
+ }
+
+ virtual void Run() {
+ if (thread_can_start_ != NULL)
+ thread_can_start_->WaitForNotification();
+ func_(param_);
+ }
+
+ private:
+ const UserThreadFunc func_; // User-supplied thread function.
+ const T param_; // User-supplied parameter to the thread function.
+ // When non-NULL, used to block execution until the controller thread
+ // notifies.
+ Notification* const thread_can_start_;
+ bool finished_; // true iff we know that the thread function has finished.
+ pthread_t thread_; // The native thread object.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
+};
+
+// gtest-port.h guarantees to #include <pthread.h> when GTEST_HAS_PTHREAD is
+// true.
+#include <pthread.h>
+
+// MutexBase and Mutex implement mutex on pthreads-based platforms. They
+// are used in conjunction with class MutexLock:
+//
+// Mutex mutex;
+// ...
+// MutexLock lock(&mutex); // Acquires the mutex and releases it at the end
+// // of the current scope.
+//
+// MutexBase implements behavior for both statically and dynamically
+// allocated mutexes. Do not use MutexBase directly. Instead, write
+// the following to define a static mutex:
+//
+// GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex);
+//
+// You can forward declare a static mutex like this:
+//
+// GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex);
+//
+// To create a dynamic mutex, just define an object of type Mutex.
+class MutexBase {
+ public:
+ // Acquires this mutex.
+ void Lock() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));
+ owner_ = pthread_self();
+ }
+
+ // Releases this mutex.
+ void Unlock() {
+ // We don't protect writing to owner_ here, as it's the caller's
+ // responsibility to ensure that the current thread holds the
+ // mutex when this is called.
+ owner_ = 0;
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_));
+ }
+
+ // Does nothing if the current thread holds the mutex. Otherwise, crashes
+ // with high probability.
+ void AssertHeld() const {
+ GTEST_CHECK_(owner_ == pthread_self())
+ << "The current thread is not holding the mutex @" << this;
+ }
+
+ // A static mutex may be used before main() is entered. It may even
+ // be used before the dynamic initialization stage. Therefore we
+ // must be able to initialize a static mutex object at link time.
+ // This means MutexBase has to be a POD and its member variables
+ // have to be public.
+ public:
+ pthread_mutex_t mutex_; // The underlying pthread mutex.
+ pthread_t owner_; // The thread holding the mutex; 0 means no one holds it.
+};
+
+// Forward-declares a static mutex.
+#define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::MutexBase mutex
+
+// Defines and statically (i.e. at link time) initializes a static mutex.
+#define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
+ ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, 0 }
+
+// The Mutex class can only be used for mutexes created at runtime. It
+// shares its API with MutexBase otherwise.
+class Mutex : public MutexBase {
+ public:
+ Mutex() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+ owner_ = 0;
+ }
+ ~Mutex() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_));
+ }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
+};
+
+// We cannot name this class MutexLock as the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+ explicit GTestMutexLock(MutexBase* mutex)
+ : mutex_(mutex) { mutex_->Lock(); }
+
+ ~GTestMutexLock() { mutex_->Unlock(); }
+
+ private:
+ MutexBase* const mutex_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
+};
+
+typedef GTestMutexLock MutexLock;
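Putting the pieces together, a sketch of the intended pattern (g_demo_mutex and g_counter are illustrative names):

    GTEST_DEFINE_STATIC_MUTEX_(g_demo_mutex);  // Initialized at link time.
    int g_counter = 0;

    void Increment() {
      MutexLock lock(&g_demo_mutex);  // Acquired here, released at scope exit.
      ++g_counter;
    }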
+
+// Helpers for ThreadLocal.
+
+// pthread_key_create() requires DeleteThreadLocalValue() to have
+// C-linkage. Therefore it cannot be templatized to access
+// ThreadLocal<T>. Hence the need for class
+// ThreadLocalValueHolderBase.
+class ThreadLocalValueHolderBase {
+ public:
+ virtual ~ThreadLocalValueHolderBase() {}
+};
+
+// Called by pthread to delete thread-local data stored by
+// pthread_setspecific().
+extern "C" inline void DeleteThreadLocalValue(void* value_holder) {
+ delete static_cast<ThreadLocalValueHolderBase*>(value_holder);
+}
+
+// Implements thread-local storage on pthreads-based systems.
+//
+// // Thread 1
+// ThreadLocal<int> tl(100); // 100 is the default value for each thread.
+//
+// // Thread 2
+// tl.set(150); // Changes the value for thread 2 only.
+// EXPECT_EQ(150, tl.get());
+//
+// // Thread 1
+// EXPECT_EQ(100, tl.get()); // In thread 1, tl has the original value.
+// tl.set(200);
+// EXPECT_EQ(200, tl.get());
+//
+// The template type argument T must have a public copy constructor.
+// In addition, the default ThreadLocal constructor requires T to have
+// a public default constructor.
+//
+// An object managed for a thread by a ThreadLocal instance is deleted
+// when the thread exits. Or, if the ThreadLocal instance dies in
+// that thread, when the ThreadLocal dies. It's the user's
+// responsibility to ensure that all other threads using a ThreadLocal
+// have exited when it dies, or the per-thread objects for those
+// threads will not be deleted.
+//
+// Google Test only uses global ThreadLocal objects. That means they
+// will die after main() has returned. Therefore, no per-thread
+// object managed by Google Test will be leaked as long as all threads
+// using Google Test have exited when main() returns.
+template <typename T>
+class ThreadLocal {
+ public:
+ ThreadLocal() : key_(CreateKey()),
+ default_() {}
+ explicit ThreadLocal(const T& value) : key_(CreateKey()),
+ default_(value) {}
+
+ ~ThreadLocal() {
+ // Destroys the managed object for the current thread, if any.
+ DeleteThreadLocalValue(pthread_getspecific(key_));
+
+ // Releases resources associated with the key. This will *not*
+ // delete managed objects for other threads.
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_));
+ }
+
+ T* pointer() { return GetOrCreateValue(); }
+ const T* pointer() const { return GetOrCreateValue(); }
+ const T& get() const { return *pointer(); }
+ void set(const T& value) { *pointer() = value; }
+
+ private:
+ // Holds a value of type T.
+ class ValueHolder : public ThreadLocalValueHolderBase {
+ public:
+ explicit ValueHolder(const T& value) : value_(value) {}
+
+ T* pointer() { return &value_; }
+
+ private:
+ T value_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
+ };
+
+ static pthread_key_t CreateKey() {
+ pthread_key_t key;
+ // When a thread exits, DeleteThreadLocalValue() will be called on
+ // the object managed for that thread.
+ GTEST_CHECK_POSIX_SUCCESS_(
+ pthread_key_create(&key, &DeleteThreadLocalValue));
+ return key;
+ }
+
+ T* GetOrCreateValue() const {
+ ThreadLocalValueHolderBase* const holder =
+ static_cast<ThreadLocalValueHolderBase*>(pthread_getspecific(key_));
+ if (holder != NULL) {
+ return CheckedDowncastToActualType<ValueHolder>(holder)->pointer();
+ }
+
+ ValueHolder* const new_holder = new ValueHolder(default_);
+ ThreadLocalValueHolderBase* const holder_base = new_holder;
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base));
+ return new_holder->pointer();
+ }
+
+ // A key pthreads uses for looking up per-thread values.
+ const pthread_key_t key_;
+ const T default_; // The default value for each thread.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
+};
+
+#define GTEST_IS_THREADSAFE 1
+
+#else // GTEST_HAS_PTHREAD
+
// A dummy implementation of synchronization primitives (mutex, lock,
// and thread-local variable). Necessary for compiling Google Test where
// mutex is not supported - using Google Test in multiple threads is not
@@ -606,14 +1177,14 @@ const ::std::vector<String>& GetArgvs();
class Mutex {
public:
Mutex() {}
- explicit Mutex(int /*unused*/) {}
void AssertHeld() const {}
- enum { NO_CONSTRUCTOR_NEEDED_FOR_STATIC_MUTEX = 0 };
};
-// We cannot call it MutexLock directly as the ctor declaration would
-// conflict with a macro named MutexLock, which is defined on some
-// platforms. Hence the typedef trick below.
+#define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::Mutex mutex
+
+#define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex
+
class GTestMutexLock {
public:
explicit GTestMutexLock(Mutex*) {} // NOLINT
@@ -634,30 +1205,37 @@ class ThreadLocal {
T value_;
};
-// There's no portable way to detect the number of threads, so we just
-// return 0 to indicate that we cannot detect it.
-inline size_t GetThreadCount() { return 0; }
-
// The above synchronization primitives have dummy implementations.
// Therefore Google Test is not thread-safe.
#define GTEST_IS_THREADSAFE 0
-#if defined(__SYMBIAN32__) || defined(__IBMCPP__)
+#endif // GTEST_HAS_PTHREAD
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+GTEST_API_ size_t GetThreadCount();
// Passing non-POD classes through ellipsis (...) crashes the ARM
-// compiler. The Nokia Symbian and the IBM XL C/C++ compiler try to
-// instantiate a copy constructor for objects passed through ellipsis
-// (...), failing for uncopyable objects. We define this to indicate
-// the fact.
-#define GTEST_ELLIPSIS_NEEDS_COPY_ 1
+// compiler and generates a warning in Sun Studio. The Nokia Symbian
+// and the IBM XL C/C++ compiler try to instantiate a copy constructor
+// for objects passed through ellipsis (...), failing for uncopyable
+// objects. We define this to ensure that only POD is passed through
+// ellipsis on these systems.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC)
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
+#define GTEST_ELLIPSIS_NEEDS_POD_ 1
+#else
+#define GTEST_CAN_COMPARE_NULL 1
+#endif
// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between
// const T& and const T* in a function template. These compilers
// _can_ decide between class template specializations for T and T*,
// so a tr1::type_traits-like is_pointer works.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__)
#define GTEST_NEEDS_IS_POINTER_ 1
-
-#endif // defined(__SYMBIAN32__) || defined(__IBMCPP__)
+#endif
template <bool bool_value>
struct bool_constant {
@@ -675,15 +1253,146 @@ struct is_pointer : public false_type {};
template <typename T>
struct is_pointer<T*> : public true_type {};
-// Defines BiggestInt as the biggest signed integer type the compiler
-// supports.
-
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
+#define GTEST_PATH_SEP_ "\\"
+#define GTEST_HAS_ALT_PATH_SEP_ 1
+// The biggest signed integer type the compiler supports.
typedef __int64 BiggestInt;
#else
+#define GTEST_PATH_SEP_ "/"
+#define GTEST_HAS_ALT_PATH_SEP_ 0
typedef long long BiggestInt; // NOLINT
#endif // GTEST_OS_WINDOWS
+// The testing::internal::posix namespace holds wrappers for common
+// POSIX functions. These wrappers hide the differences between
+// Windows/MSVC and POSIX systems. Since some compilers define these
+// standard functions as macros, the wrapper cannot have the same name
+// as the wrapped function.
+
+namespace posix {
+
+// Functions with a different name on Windows.
+
+#if GTEST_OS_WINDOWS
+
+typedef struct _stat StatStruct;
+
+#ifdef __BORLANDC__
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+#else // !__BORLANDC__
+#if GTEST_OS_WINDOWS_MOBILE
+inline int IsATTY(int /* fd */) { return 0; }
+#else
+inline int IsATTY(int fd) { return _isatty(fd); }
+#endif // GTEST_OS_WINDOWS_MOBILE
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return _stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return _strdup(src); }
+#endif // __BORLANDC__
+
+#if GTEST_OS_WINDOWS_MOBILE
+inline int FileNo(FILE* file) { return reinterpret_cast<int>(_fileno(file)); }
+// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this
+// time and thus not defined there.
+#else
+inline int FileNo(FILE* file) { return _fileno(file); }
+inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); }
+inline int RmDir(const char* dir) { return _rmdir(dir); }
+inline bool IsDir(const StatStruct& st) {
+ return (_S_IFDIR & st.st_mode) != 0;
+}
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+#else
+
+typedef struct stat StatStruct;
+
+inline int FileNo(FILE* file) { return fileno(file); }
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return strcasecmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+inline int RmDir(const char* dir) { return rmdir(dir); }
+inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); }
+
+#endif // GTEST_OS_WINDOWS
+
+// Functions deprecated by MSVC 8.0.
+
+#ifdef _MSC_VER
+// Temporarily disable warning 4996 (deprecated function).
+#pragma warning(push)
+#pragma warning(disable:4996)
+#endif
+
+inline const char* StrNCpy(char* dest, const char* src, size_t n) {
+ return strncpy(dest, src, n);
+}
+
+// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and
+// StrError() aren't needed on Windows CE at this time and thus not
+// defined there.
+
+#if !GTEST_OS_WINDOWS_MOBILE
+inline int ChDir(const char* dir) { return chdir(dir); }
+#endif
+inline FILE* FOpen(const char* path, const char* mode) {
+ return fopen(path, mode);
+}
+#if !GTEST_OS_WINDOWS_MOBILE
+inline FILE *FReopen(const char* path, const char* mode, FILE* stream) {
+ return freopen(path, mode, stream);
+}
+inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); }
+#endif
+inline int FClose(FILE* fp) { return fclose(fp); }
+#if !GTEST_OS_WINDOWS_MOBILE
+inline int Read(int fd, void* buf, unsigned int count) {
+ return static_cast<int>(read(fd, buf, count));
+}
+inline int Write(int fd, const void* buf, unsigned int count) {
+ return static_cast<int>(write(fd, buf, count));
+}
+inline int Close(int fd) { return close(fd); }
+inline const char* StrError(int errnum) { return strerror(errnum); }
+#endif
+inline const char* GetEnv(const char* name) {
+#if GTEST_OS_WINDOWS_MOBILE
+ // We are on Windows CE, which has no environment variables.
+ return NULL;
+#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9)
+ // Environment variables which we programmatically clear will be set to the
+ // empty string rather than unset (NULL). Handle that case.
+ const char* const env = getenv(name);
+ return (env != NULL && env[0] != '\0') ? env : NULL;
+#else
+ return getenv(name);
+#endif
+}
+
+#ifdef _MSC_VER
+#pragma warning(pop) // Restores the warning state.
+#endif
+
+#if GTEST_OS_WINDOWS_MOBILE
+// Windows CE has no C library. The abort() function is used in
+// several places in Google Test. This implementation provides a reasonable
+// imitation of standard behaviour.
+void Abort();
+#else
+inline void Abort() { abort(); }
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+} // namespace posix
+
// The maximum number a BiggestInt can represent. This definition
// works no matter whether BiggestInt is represented in one's
// complement or two's complement.
@@ -736,7 +1445,7 @@ class TypeWithSize<4> {
template <>
class TypeWithSize<8> {
public:
-#ifdef GTEST_OS_WINDOWS
+#if GTEST_OS_WINDOWS
typedef __int64 Int;
typedef unsigned __int64 UInt;
#else
@@ -754,96 +1463,23 @@ typedef TypeWithSize<8>::Int TimeInMillis; // Represents time in milliseconds.
// Utilities for command line flags and environment variables.
-// A wrapper for getenv() that works on Linux, Windows, and Mac OS.
-inline const char* GetEnv(const char* name) {
-#ifdef _WIN32_WCE // We are on Windows CE.
- // CE has no environment variables.
- return NULL;
-#elif defined(GTEST_OS_WINDOWS) // We are on Windows proper.
- // MSVC 8 deprecates getenv(), so we want to suppress warning 4996
- // (deprecated function) there.
-#pragma warning(push) // Saves the current warning state.
-#pragma warning(disable:4996) // Temporarily disables warning 4996.
- return getenv(name);
-#pragma warning(pop) // Restores the warning state.
-#else // We are on Linux or Mac OS.
- return getenv(name);
-#endif
-}
-
-#ifdef _WIN32_WCE
-// Windows CE has no C library. The abort() function is used in
-// several places in Google Test. This implementation provides a reasonable
-// imitation of standard behaviour.
-void abort();
-#else
-inline void abort() { ::abort(); }
-#endif // _WIN32_WCE
-
-// INTERNAL IMPLEMENTATION - DO NOT USE.
-//
-// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
-// is not satisfied.
-// Synopsis:
-// GTEST_CHECK_(boolean_condition);
-// or
-// GTEST_CHECK_(boolean_condition) << "Additional message";
-//
-// This checks the condition and if the condition is not satisfied
-// it prints a message about the condition violation, including the
-// condition itself, plus additional message streamed into it, if any,
-// and then it aborts the program. It aborts the program irrespective of
-// whether it is built in the debug mode or not.
-class GTestCheckProvider {
- public:
- GTestCheckProvider(const char* condition, const char* file, int line) {
- FormatFileLocation(file, line);
- ::std::cerr << " ERROR: Condition " << condition << " failed. ";
- }
- ~GTestCheckProvider() {
- ::std::cerr << ::std::endl;
- abort();
- }
- void FormatFileLocation(const char* file, int line) {
- if (file == NULL)
- file = "unknown file";
- if (line < 0) {
- ::std::cerr << file << ":";
- } else {
-#if _MSC_VER
- ::std::cerr << file << "(" << line << "):";
-#else
- ::std::cerr << file << ":" << line << ":";
-#endif
- }
- }
- ::std::ostream& GetStream() { return ::std::cerr; }
-};
-#define GTEST_CHECK_(condition) \
- GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
- if (condition) \
- ; \
- else \
- ::testing::internal::GTestCheckProvider(\
- #condition, __FILE__, __LINE__).GetStream()
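
The removed macro builds the provider only on failure: the inverted if/else skips construction when the condition holds, and GTEST_AMBIGUOUS_ELSE_BLOCKER_ (defined elsewhere in gtest-port.h as a switch (0) case 0: default: prefix) keeps the macro safe under an unbraced if. A self-contained sketch of the same pattern, with CheckProvider and MY_CHECK as invented stand-ins:

    #include <cstdlib>
    #include <iostream>

    // Stand-in for GTestCheckProvider: report on construction, abort on
    // destruction, i.e. after any extra message has been streamed in.
    class CheckProvider {
     public:
      CheckProvider(const char* condition, const char* file, int line) {
        std::cerr << file << ":" << line << ": ERROR: Condition "
                  << condition << " failed. ";
      }
      ~CheckProvider() {
        std::cerr << std::endl;
        std::abort();
      }
      std::ostream& GetStream() { return std::cerr; }
    };

    // The leading switch swallows a dangling else from enclosing code.
    #define MY_CHECK(condition) \
      switch (0) case 0: default: \
      if (condition) \
        ; \
      else \
        CheckProvider(#condition, __FILE__, __LINE__).GetStream()

    int main() {
      const int fd = -1;
      MY_CHECK(fd >= 0) << "expected a valid descriptor, got " << fd;
      return 0;  // not reached: the check above fails and aborts
    }
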
-
// Macro for referencing flags.
#define GTEST_FLAG(name) FLAGS_gtest_##name
// Macros for declaring flags.
-#define GTEST_DECLARE_bool_(name) extern bool GTEST_FLAG(name)
+#define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name)
#define GTEST_DECLARE_int32_(name) \
- extern ::testing::internal::Int32 GTEST_FLAG(name)
+ GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name)
#define GTEST_DECLARE_string_(name) \
- extern ::testing::internal::String GTEST_FLAG(name)
+ GTEST_API_ extern ::testing::internal::String GTEST_FLAG(name)
// Macros for defining flags.
#define GTEST_DEFINE_bool_(name, default_val, doc) \
- bool GTEST_FLAG(name) = (default_val)
+ GTEST_API_ bool GTEST_FLAG(name) = (default_val)
#define GTEST_DEFINE_int32_(name, default_val, doc) \
- ::testing::internal::Int32 GTEST_FLAG(name) = (default_val)
+ GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val)
#define GTEST_DEFINE_string_(name, default_val, doc) \
- ::testing::internal::String GTEST_FLAG(name) = (default_val)
+ GTEST_API_ ::testing::internal::String GTEST_FLAG(name) = (default_val)
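
For illustration, declaring, defining, and reading a boolean flag with these macros expands roughly as follows (the flag name "shuffle" is just an example):

    // In a header:
    //   GTEST_DECLARE_bool_(shuffle);
    //     => GTEST_API_ extern bool FLAGS_gtest_shuffle;
    //
    // In one .cc file:
    //   GTEST_DEFINE_bool_(shuffle, false, "Shuffles test order if true.");
    //     => GTEST_API_ bool FLAGS_gtest_shuffle = (false);  // doc is dropped
    //
    // At a use site:
    //   if (GTEST_FLAG(shuffle)) { ... }
    //     => if (FLAGS_gtest_shuffle) { ... }
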
// Parses 'str' for a 32-bit signed integer. If successful, writes the result
// to *value and returns true; otherwise leaves *value unchanged and returns
@@ -856,7 +1492,7 @@ bool ParseInt32(const Message& src_text, const char* str, Int32* value);
// Parses a bool/Int32/string from the environment variable
// corresponding to the given Google Test flag.
bool BoolFromGTestEnv(const char* flag, bool default_val);
-Int32 Int32FromGTestEnv(const char* flag, Int32 default_val);
+GTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val);
const char* StringFromGTestEnv(const char* flag, const char* default_val);
} // namespace internal
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-string.h b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-string.h
index 178f14e..aff093d 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-string.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-string.h
@@ -35,32 +35,34 @@
// Google Test. They are subject to change without notice. They should not be used
// by code external to Google Test.
//
-// This header file is #included by testing/base/internal/gtest-internal.h.
+// This header file is #included by <gtest/internal/gtest-internal.h>.
// It should not be #included by other files.
#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+#ifdef __BORLANDC__
+// string.h is not guaranteed to provide strcpy on C++ Builder.
+#include <mem.h>
+#endif
+
#include <string.h>
#include <gtest/internal/gtest-port.h>
-#if GTEST_HAS_GLOBAL_STRING || GTEST_HAS_STD_STRING
#include <string>
-#endif // GTEST_HAS_GLOBAL_STRING || GTEST_HAS_STD_STRING
namespace testing {
namespace internal {
// String - a UTF-8 string class.
//
-// We cannot use std::string as Microsoft's STL implementation in
-// Visual C++ 7.1 has problems when exception is disabled. There is a
-// hack to work around this, but we've seen cases where the hack fails
-// to work.
+// For historic reasons, we don't use std::string.
+//
+// TODO(wan@google.com): replace this class with std::string or
+// implement it in terms of the latter.
//
-// Also, String is different from std::string in that it can represent
-// both NULL and the empty string, while std::string cannot represent
-// NULL.
+// Note that String can represent both NULL and the empty string,
+// while std::string cannot represent NULL.
//
// NULL and the empty string are considered different. NULL is less
// than anything (including the empty string) except itself.
@@ -76,23 +78,10 @@ namespace internal {
//
// In order to make the representation efficient, the d'tor of String
// is not virtual. Therefore DO NOT INHERIT FROM String.
-class String {
+class GTEST_API_ String {
public:
// Static utility methods
- // Returns the input if it's not NULL, otherwise returns "(null)".
- // This function serves two purposes:
- //
- // 1. ShowCString(NULL) has type 'const char *', instead of the
- // type of NULL (which is int).
- //
- // 2. In MSVC, streaming a null char pointer to StrStream generates
- // an access violation, so we need to convert NULL to "(null)"
- // before streaming it.
- static inline const char* ShowCString(const char* c_str) {
- return c_str ? c_str : "(null)";
- }
-
// Returns the input enclosed in double quotes if it's not NULL;
// otherwise returns "(null)". For example, "\"Hello\"" is returned
// for input "Hello".
@@ -111,7 +100,7 @@ class String {
// memory using malloc().
static const char* CloneCString(const char* c_str);
-#ifdef _WIN32_WCE
+#if GTEST_OS_WINDOWS_MOBILE
// Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
// able to pass strings to Win32 APIs on CE we need to convert them
// to 'Unicode', UTF-16.
@@ -200,22 +189,29 @@ class String {
// C'tors
// The default c'tor constructs a NULL string.
- String() : c_str_(NULL) {}
+ String() : c_str_(NULL), length_(0) {}
// Constructs a String by cloning a 0-terminated C string.
- String(const char* c_str) : c_str_(NULL) { // NOLINT
- *this = c_str;
+ String(const char* a_c_str) { // NOLINT
+ if (a_c_str == NULL) {
+ c_str_ = NULL;
+ length_ = 0;
+ } else {
+ ConstructNonNull(a_c_str, strlen(a_c_str));
+ }
}
// Constructs a String by copying a given number of chars from a
- // buffer. E.g. String("hello", 3) will create the string "hel".
- String(const char* buffer, size_t len);
+ // buffer. E.g. String("hello", 3) creates the string "hel",
+ // String("a\0bcd", 4) creates "a\0bc", String(NULL, 0) creates "",
+ // and String(NULL, 1) results in an access violation.
+ String(const char* buffer, size_t a_length) {
+ ConstructNonNull(buffer, a_length);
+ }
// The copy c'tor creates a new copy of the string. The two
// String objects do not share content.
- String(const String& str) : c_str_(NULL) {
- *this = str;
- }
+ String(const String& str) : c_str_(NULL), length_(0) { *this = str; }
// D'tor. String is intended to be a final class, so the d'tor
// doesn't need to be virtual.
@@ -227,22 +223,22 @@ class String {
// Converting a ::std::string or ::string containing an embedded NUL
// character to a String will result in the prefix up to the first
// NUL character.
-#if GTEST_HAS_STD_STRING
- String(const ::std::string& str) : c_str_(NULL) { *this = str.c_str(); }
+ String(const ::std::string& str) {
+ ConstructNonNull(str.c_str(), str.length());
+ }
- operator ::std::string() const { return ::std::string(c_str_); }
-#endif // GTEST_HAS_STD_STRING
+ operator ::std::string() const { return ::std::string(c_str(), length()); }
#if GTEST_HAS_GLOBAL_STRING
- String(const ::string& str) : c_str_(NULL) { *this = str.c_str(); }
+ String(const ::string& str) {
+ ConstructNonNull(str.c_str(), str.length());
+ }
- operator ::string() const { return ::string(c_str_); }
+ operator ::string() const { return ::string(c_str(), length()); }
#endif // GTEST_HAS_GLOBAL_STRING
// Returns true iff this is an empty string (i.e. "").
- bool empty() const {
- return (c_str_ != NULL) && (*c_str_ == '\0');
- }
+ bool empty() const { return (c_str() != NULL) && (length() == 0); }
// Compares this with another String.
// Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0
@@ -251,19 +247,15 @@ class String {
// Returns true iff this String equals the given C string. A NULL
// string and a non-NULL string are considered not equal.
- bool operator==(const char* c_str) const {
- return CStringEquals(c_str_, c_str);
- }
+ bool operator==(const char* a_c_str) const { return Compare(a_c_str) == 0; }
- // Returns true iff this String is less than the given C string. A NULL
- // string is considered less than "".
+ // Returns true iff this String is less than the given String. A
+ // NULL string is considered less than "".
bool operator<(const String& rhs) const { return Compare(rhs) < 0; }
// Returns true iff this String doesn't equal the given C string. A NULL
// string and a non-NULL string are considered not equal.
- bool operator!=(const char* c_str) const {
- return !CStringEquals(c_str_, c_str);
- }
+ bool operator!=(const char* a_c_str) const { return !(*this == a_c_str); }
// Returns true iff this String ends with the given suffix. *Any*
// String is considered to end with a NULL or empty suffix.
@@ -273,50 +265,73 @@ class String {
// case. Any String is considered to end with a NULL or empty suffix.
bool EndsWithCaseInsensitive(const char* suffix) const;
- // Returns the length of the encapsulated string, or -1 if the
+ // Returns the length of the encapsulated string, or 0 if the
// string is NULL.
- int GetLength() const {
- return c_str_ ? static_cast<int>(strlen(c_str_)) : -1;
- }
+ size_t length() const { return length_; }
// Gets the 0-terminated C string this String object represents.
// The String object still owns the string. Therefore the caller
// should NOT delete the return value.
const char* c_str() const { return c_str_; }
- // Sets the 0-terminated C string this String object represents.
- // The old string in this object is deleted, and this object will
- // own a clone of the input string. This function copies only up to
- // length bytes (plus a terminating null byte), or until the first
- // null byte, whichever comes first.
- //
- // This function works even when the c_str parameter has the same
- // value as that of the c_str_ field.
- void Set(const char* c_str, size_t length);
-
// Assigns a C string to this object. Self-assignment works.
- const String& operator=(const char* c_str);
+ const String& operator=(const char* a_c_str) {
+ return *this = String(a_c_str);
+ }
// Assigns a String object to this object. Self-assignment works.
- const String& operator=(const String &rhs) {
- *this = rhs.c_str_;
+ const String& operator=(const String& rhs) {
+ if (this != &rhs) {
+ delete[] c_str_;
+ if (rhs.c_str() == NULL) {
+ c_str_ = NULL;
+ length_ = 0;
+ } else {
+ ConstructNonNull(rhs.c_str(), rhs.length());
+ }
+ }
+
return *this;
}
private:
- const char* c_str_;
-};
+ // Constructs a non-NULL String from the given content. This
+ // function can only be called when c_str_ has not been allocated.
+ // ConstructNonNull(NULL, 0) results in an empty string ("").
+ // ConstructNonNull(NULL, non_zero) is undefined behavior.
+ void ConstructNonNull(const char* buffer, size_t a_length) {
+ char* const str = new char[a_length + 1];
+ memcpy(str, buffer, a_length);
+ str[a_length] = '\0';
+ c_str_ = str;
+ length_ = a_length;
+ }
-// Streams a String to an ostream.
-inline ::std::ostream& operator <<(::std::ostream& os, const String& str) {
- // We call String::ShowCString() to convert NULL to "(null)".
- // Otherwise we'll get an access violation on Windows.
- return os << String::ShowCString(str.c_str());
+ const char* c_str_;
+ size_t length_;
+}; // class String
+
+// Streams a String to an ostream. Each '\0' character in the String
+// is replaced with "\\0".
+inline ::std::ostream& operator<<(::std::ostream& os, const String& str) {
+ if (str.c_str() == NULL) {
+ os << "(null)";
+ } else {
+ const char* const c_str = str.c_str();
+ for (size_t i = 0; i != str.length(); i++) {
+ if (c_str[i] == '\0') {
+ os << "\\0";
+ } else {
+ os << c_str[i];
+ }
+ }
+ }
+ return os;
}
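
A hedged usage sketch of the operator above; it includes the internal header directly, which client code normally should not do:

    #include <cstddef>
    #include <iostream>
    #include <gtest/internal/gtest-string.h>  // assumes gtest headers on the include path

    int main() {
      using testing::internal::String;
      std::cout << String("a\0bc", 4) << "\n";  // prints: a\0bc ('\0' shown as "\0")
      std::cout << String(NULL) << "\n";        // prints: (null)
      return 0;
    }
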
// Gets the content of the StrStream's buffer as a String. Each '\0'
// character in the buffer is replaced with "\\0".
-String StrStreamToString(StrStream* stream);
+GTEST_API_ String StrStreamToString(StrStream* stream);
// Converts a streamable value to a String. A NULL pointer is
// converted to "(null)". When the input value is a ::string,
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-tuple.h b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-tuple.h
new file mode 100644
index 0000000..16178fc
--- /dev/null
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-tuple.h
@@ -0,0 +1,968 @@
+// This file was GENERATED by a script. DO NOT EDIT BY HAND!!!
+
+// Copyright 2009 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Implements a subset of TR1 tuple needed by Google Test and Google Mock.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+
+#include <utility> // For ::std::pair.
+
+// The compiler used in Symbian has a bug that prevents us from declaring the
+// tuple template as a friend (it complains that tuple is redefined). This
+// hack bypasses the bug by declaring the members that should otherwise be
+// private as public.
+// Sun Studio versions < 12 also have the above bug.
+#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590)
+#define GTEST_DECLARE_TUPLE_AS_FRIEND_ public:
+#else
+#define GTEST_DECLARE_TUPLE_AS_FRIEND_ \
+ template <GTEST_10_TYPENAMES_(U)> friend class tuple; \
+ private:
+#endif
+
+// GTEST_n_TUPLE_(T) is the type of an n-tuple.
+#define GTEST_0_TUPLE_(T) tuple<>
+#define GTEST_1_TUPLE_(T) tuple<T##0, void, void, void, void, void, void, \
+ void, void, void>
+#define GTEST_2_TUPLE_(T) tuple<T##0, T##1, void, void, void, void, void, \
+ void, void, void>
+#define GTEST_3_TUPLE_(T) tuple<T##0, T##1, T##2, void, void, void, void, \
+ void, void, void>
+#define GTEST_4_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, void, void, void, \
+ void, void, void>
+#define GTEST_5_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, void, void, \
+ void, void, void>
+#define GTEST_6_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, void, \
+ void, void, void>
+#define GTEST_7_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ void, void, void>
+#define GTEST_8_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, void, void>
+#define GTEST_9_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, T##8, void>
+#define GTEST_10_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, T##8, T##9>
+
+// GTEST_n_TYPENAMES_(T) declares a list of n typenames.
+#define GTEST_0_TYPENAMES_(T)
+#define GTEST_1_TYPENAMES_(T) typename T##0
+#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1
+#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2
+#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3
+#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4
+#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5
+#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6
+#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, typename T##7
+#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, \
+ typename T##7, typename T##8
+#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, \
+ typename T##7, typename T##8, typename T##9
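
To make the token pasting concrete, with prefix T the n = 2 case expands as shown in the comments below:

    // GTEST_2_TYPENAMES_(T)  =>  typename T0, typename T1
    // GTEST_2_TUPLE_(T)      =>  tuple<T0, T1, void, void, void, void,
    //                                  void, void, void, void>
    //
    // which is exactly the shape of the partial specializations that follow:
    //
    //   template <GTEST_2_TYPENAMES_(T)>  // template <typename T0, typename T1>
    //   class GTEST_2_TUPLE_(T) { ... };  // class tuple<T0, T1, void, ...>
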
+
+// In theory, defining stuff in the ::std namespace is undefined
+// behavior. We can do this as we are playing the role of a standard
+// library vendor.
+namespace std {
+namespace tr1 {
+
+template <typename T0 = void, typename T1 = void, typename T2 = void,
+ typename T3 = void, typename T4 = void, typename T5 = void,
+ typename T6 = void, typename T7 = void, typename T8 = void,
+ typename T9 = void>
+class tuple;
+
+// Anything in namespace gtest_internal is Google Test's INTERNAL
+// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code.
+namespace gtest_internal {
+
+// ByRef<T>::type is T if T is a reference; otherwise it's const T&.
+template <typename T>
+struct ByRef { typedef const T& type; }; // NOLINT
+template <typename T>
+struct ByRef<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper for ByRef.
+#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef<T>::type
+
+// AddRef<T>::type is T if T is a reference; otherwise it's T&. This
+// is the same as tr1::add_reference<T>::type.
+template <typename T>
+struct AddRef { typedef T& type; }; // NOLINT
+template <typename T>
+struct AddRef<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper for AddRef.
+#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef<T>::type
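
A minimal standalone check of the two traits; they are restated locally so the snippet compiles on its own, and the negative-array-size typedefs act as C++98-style static assertions:

    // Local restatement of the traits above (names mirror the gtest ones).
    template <typename T> struct ByRef      { typedef const T& type; };
    template <typename T> struct ByRef<T&>  { typedef T& type; };
    template <typename T> struct AddRef     { typedef T& type; };
    template <typename T> struct AddRef<T&> { typedef T& type; };

    template <typename A, typename B> struct SameType { enum { value = 0 }; };
    template <typename A> struct SameType<A, A> { enum { value = 1 }; };

    // A negative array size fails to compile, so these lines verify the
    // claimed results: const T& for values, T& once a reference is involved.
    typedef char AssertByRefValue[SameType<ByRef<int>::type, const int&>::value ? 1 : -1];
    typedef char AssertByRefRef[SameType<ByRef<int&>::type, int&>::value ? 1 : -1];
    typedef char AssertAddRef[SameType<AddRef<int>::type, int&>::value ? 1 : -1];
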
+
+// A helper for implementing get<k>().
+template <int k> class Get;
+
+// A helper for implementing tuple_element<k, T>. kIndexValid is true
+// iff k < the number of fields in tuple type T.
+template <bool kIndexValid, int kIndex, class Tuple>
+struct TupleElement;
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 0, GTEST_10_TUPLE_(T)> { typedef T0 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 1, GTEST_10_TUPLE_(T)> { typedef T1 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 2, GTEST_10_TUPLE_(T)> { typedef T2 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 3, GTEST_10_TUPLE_(T)> { typedef T3 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 4, GTEST_10_TUPLE_(T)> { typedef T4 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 5, GTEST_10_TUPLE_(T)> { typedef T5 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 6, GTEST_10_TUPLE_(T)> { typedef T6 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 7, GTEST_10_TUPLE_(T)> { typedef T7 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 8, GTEST_10_TUPLE_(T)> { typedef T8 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 9, GTEST_10_TUPLE_(T)> { typedef T9 type; };
+
+} // namespace gtest_internal
+
+template <>
+class tuple<> {
+ public:
+ tuple() {}
+ tuple(const tuple& /* t */) {}
+ tuple& operator=(const tuple& /* t */) { return *this; }
+};
+
+template <GTEST_1_TYPENAMES_(T)>
+class GTEST_1_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {}
+
+ tuple(const tuple& t) : f0_(t.f0_) {}
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_1_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ return *this;
+ }
+
+ T0 f0_;
+};
+
+template <GTEST_2_TYPENAMES_(T)>
+class GTEST_2_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0),
+ f1_(f1) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {}
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {}
+ template <typename U0, typename U1>
+ tuple(const ::std::pair<U0, U1>& p) : f0_(p.first), f1_(p.second) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_2_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+ template <typename U0, typename U1>
+ tuple& operator=(const ::std::pair<U0, U1>& p) {
+ f0_ = p.first;
+ f1_ = p.second;
+ return *this;
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+};
+
+template <GTEST_3_TYPENAMES_(T)>
+class GTEST_3_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_3_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+};
+
+template <GTEST_4_TYPENAMES_(T)>
+class GTEST_4_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {}
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_4_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+};
+
+template <GTEST_5_TYPENAMES_(T)>
+class GTEST_5_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3,
+ GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_) {}
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_5_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+};
+
+template <GTEST_6_TYPENAMES_(T)>
+class GTEST_6_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_) {}
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_6_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+};
+
+template <GTEST_7_TYPENAMES_(T)>
+class GTEST_7_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3), f4_(f4), f5_(f5), f6_(f6) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_7_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+};
+
+template <GTEST_8_TYPENAMES_(T)>
+class GTEST_8_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6,
+ GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5), f6_(f6), f7_(f7) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_8_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+};
+
+template <GTEST_9_TYPENAMES_(T)>
+class GTEST_9_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+ GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5), f6_(f6), f7_(f7), f8_(f8) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_9_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ f8_ = t.f8_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+ T8 f8_;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+class tuple {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(),
+ f9_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+ GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {}
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_),
+ f9_(t.f9_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_10_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ f8_ = t.f8_;
+ f9_ = t.f9_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+ T8 f8_;
+ T9 f9_;
+};
+
+// 6.1.3.2 Tuple creation functions.
+
+// Known limitations: we don't support passing an
+// std::tr1::reference_wrapper<T> to make_tuple(). And we don't
+// implement tie().
+
+inline tuple<> make_tuple() { return tuple<>(); }
+
+template <GTEST_1_TYPENAMES_(T)>
+inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) {
+ return GTEST_1_TUPLE_(T)(f0);
+}
+
+template <GTEST_2_TYPENAMES_(T)>
+inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) {
+ return GTEST_2_TUPLE_(T)(f0, f1);
+}
+
+template <GTEST_3_TYPENAMES_(T)>
+inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) {
+ return GTEST_3_TUPLE_(T)(f0, f1, f2);
+}
+
+template <GTEST_4_TYPENAMES_(T)>
+inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3) {
+ return GTEST_4_TUPLE_(T)(f0, f1, f2, f3);
+}
+
+template <GTEST_5_TYPENAMES_(T)>
+inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4) {
+ return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4);
+}
+
+template <GTEST_6_TYPENAMES_(T)>
+inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5) {
+ return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5);
+}
+
+template <GTEST_7_TYPENAMES_(T)>
+inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6) {
+ return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6);
+}
+
+template <GTEST_8_TYPENAMES_(T)>
+inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) {
+ return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7);
+}
+
+template <GTEST_9_TYPENAMES_(T)>
+inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+ const T8& f8) {
+ return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8);
+}
+
+template <GTEST_10_TYPENAMES_(T)>
+inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+ const T8& f8, const T9& f9) {
+ return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9);
+}
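
Typical use of these factories, assuming the header is on the include path; types are deduced from the arguments and each parameter becomes a by-value field:

    #include <string>
    #include <gtest/internal/gtest-tuple.h>

    void Demo() {
      // Trailing fields default to void, so the result of make_tuple(int,
      // std::string) is the same type as tuple<int, std::string>.
      ::std::tr1::tuple<int, ::std::string> t =
          ::std::tr1::make_tuple(42, ::std::string("forty-two"));
      ::std::tr1::tuple<> empty = ::std::tr1::make_tuple();
      (void)t;
      (void)empty;
    }
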
+
+// 6.1.3.3 Tuple helper classes.
+
+template <typename Tuple> struct tuple_size;
+
+template <GTEST_0_TYPENAMES_(T)>
+struct tuple_size<GTEST_0_TUPLE_(T)> { static const int value = 0; };
+
+template <GTEST_1_TYPENAMES_(T)>
+struct tuple_size<GTEST_1_TUPLE_(T)> { static const int value = 1; };
+
+template <GTEST_2_TYPENAMES_(T)>
+struct tuple_size<GTEST_2_TUPLE_(T)> { static const int value = 2; };
+
+template <GTEST_3_TYPENAMES_(T)>
+struct tuple_size<GTEST_3_TUPLE_(T)> { static const int value = 3; };
+
+template <GTEST_4_TYPENAMES_(T)>
+struct tuple_size<GTEST_4_TUPLE_(T)> { static const int value = 4; };
+
+template <GTEST_5_TYPENAMES_(T)>
+struct tuple_size<GTEST_5_TUPLE_(T)> { static const int value = 5; };
+
+template <GTEST_6_TYPENAMES_(T)>
+struct tuple_size<GTEST_6_TUPLE_(T)> { static const int value = 6; };
+
+template <GTEST_7_TYPENAMES_(T)>
+struct tuple_size<GTEST_7_TUPLE_(T)> { static const int value = 7; };
+
+template <GTEST_8_TYPENAMES_(T)>
+struct tuple_size<GTEST_8_TUPLE_(T)> { static const int value = 8; };
+
+template <GTEST_9_TYPENAMES_(T)>
+struct tuple_size<GTEST_9_TUPLE_(T)> { static const int value = 9; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct tuple_size<GTEST_10_TUPLE_(T)> { static const int value = 10; };
+
+template <int k, class Tuple>
+struct tuple_element {
+ typedef typename gtest_internal::TupleElement<
+ k < (tuple_size<Tuple>::value), k, Tuple>::type type;
+};
+
+#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element<k, Tuple >::type
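
The k < tuple_size guard routes a valid index to the specializations above; an out-of-range index selects TupleElement<false, ...>, which is never specialized, so the failure is a clear missing-type error. A small sketch, assuming the header is included:

    // ::std::tr1::tuple<int, char> has tuple_size 2, so:
    typedef ::std::tr1::tuple_element<1, ::std::tr1::tuple<int, char> >::type
        SecondFieldType;  // char
    // tuple_element<5, tuple<int, char> > would make kIndexValid false;
    // TupleElement<false, ...> is incomplete, so using ::type is a
    // compile error that points at the bad index.
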
+
+// 6.1.3.4 Element access.
+
+namespace gtest_internal {
+
+template <>
+class Get<0> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+ Field(Tuple& t) { return t.f0_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+ ConstField(const Tuple& t) { return t.f0_; }
+};
+
+template <>
+class Get<1> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+ Field(Tuple& t) { return t.f1_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+ ConstField(const Tuple& t) { return t.f1_; }
+};
+
+template <>
+class Get<2> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+ Field(Tuple& t) { return t.f2_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+ ConstField(const Tuple& t) { return t.f2_; }
+};
+
+template <>
+class Get<3> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+ Field(Tuple& t) { return t.f3_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+ ConstField(const Tuple& t) { return t.f3_; }
+};
+
+template <>
+class Get<4> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+ Field(Tuple& t) { return t.f4_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+ ConstField(const Tuple& t) { return t.f4_; }
+};
+
+template <>
+class Get<5> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+ Field(Tuple& t) { return t.f5_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+ ConstField(const Tuple& t) { return t.f5_; }
+};
+
+template <>
+class Get<6> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+ Field(Tuple& t) { return t.f6_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+ ConstField(const Tuple& t) { return t.f6_; }
+};
+
+template <>
+class Get<7> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+ Field(Tuple& t) { return t.f7_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+ ConstField(const Tuple& t) { return t.f7_; }
+};
+
+template <>
+class Get<8> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+ Field(Tuple& t) { return t.f8_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+ ConstField(const Tuple& t) { return t.f8_; }
+};
+
+template <>
+class Get<9> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+ Field(Tuple& t) { return t.f9_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+ ConstField(const Tuple& t) { return t.f9_; }
+};
+
+} // namespace gtest_internal
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
+get(GTEST_10_TUPLE_(T)& t) {
+ return gtest_internal::Get<k>::Field(t);
+}
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
+get(const GTEST_10_TUPLE_(T)& t) {
+ return gtest_internal::Get<k>::ConstField(t);
+}
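
Element access then reads as in TR1; a brief sketch, assuming the header is on the include path:

    #include <gtest/internal/gtest-tuple.h>

    void Demo() {
      ::std::tr1::tuple<int, const char*> t(5, "five");
      int n = ::std::tr1::get<0>(t);           // reads f0_
      ::std::tr1::get<0>(t) = n + 1;           // non-const get returns a mutable ref
      const char* s = ::std::tr1::get<1>(t);
      (void)s;
    }
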
+
+// 6.1.3.5 Relational operators
+
+// We only implement == and !=, as we don't have a need for the rest yet.
+
+namespace gtest_internal {
+
+// SameSizeTuplePrefixComparator<k, k>::Eq(t1, t2) returns true if the
+// first k fields of t1 equal the first k fields of t2.
+// SameSizeTuplePrefixComparator(k1, k2) would be a compiler error if
+// k1 != k2.
+template <int kSize1, int kSize2>
+struct SameSizeTuplePrefixComparator;
+
+template <>
+struct SameSizeTuplePrefixComparator<0, 0> {
+ template <class Tuple1, class Tuple2>
+ static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) {
+ return true;
+ }
+};
+
+template <int k>
+struct SameSizeTuplePrefixComparator<k, k> {
+ template <class Tuple1, class Tuple2>
+ static bool Eq(const Tuple1& t1, const Tuple2& t2) {
+ return SameSizeTuplePrefixComparator<k - 1, k - 1>::Eq(t1, t2) &&
+ ::std::tr1::get<k - 1>(t1) == ::std::tr1::get<k - 1>(t2);
+ }
+};
+
+} // namespace gtest_internal
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator==(const GTEST_10_TUPLE_(T)& t,
+ const GTEST_10_TUPLE_(U)& u) {
+ return gtest_internal::SameSizeTuplePrefixComparator<
+ tuple_size<GTEST_10_TUPLE_(T)>::value,
+ tuple_size<GTEST_10_TUPLE_(U)>::value>::Eq(t, u);
+}
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator!=(const GTEST_10_TUPLE_(T)& t,
+ const GTEST_10_TUPLE_(U)& u) { return !(t == u); }
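
Concretely, the recursion unrolls to get<0>(t1) == get<0>(t2) && ... && get<k-1>(t1) == get<k-1>(t2); a couple of hedged examples:

    // Equality is field by field:
    //   make_tuple(1, 2.0) == make_tuple(1, 2.0)   // true
    //   make_tuple(1, 2.0) != make_tuple(1, 9.0)   // true (second fields differ)
    //
    // Comparing tuples of different sizes fails to compile, since only
    // the <k, k> form of SameSizeTuplePrefixComparator is defined.
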
+
+// 6.1.4 Pairs.
+// Unimplemented.
+
+} // namespace tr1
+} // namespace std
+
+#undef GTEST_0_TUPLE_
+#undef GTEST_1_TUPLE_
+#undef GTEST_2_TUPLE_
+#undef GTEST_3_TUPLE_
+#undef GTEST_4_TUPLE_
+#undef GTEST_5_TUPLE_
+#undef GTEST_6_TUPLE_
+#undef GTEST_7_TUPLE_
+#undef GTEST_8_TUPLE_
+#undef GTEST_9_TUPLE_
+#undef GTEST_10_TUPLE_
+
+#undef GTEST_0_TYPENAMES_
+#undef GTEST_1_TYPENAMES_
+#undef GTEST_2_TYPENAMES_
+#undef GTEST_3_TYPENAMES_
+#undef GTEST_4_TYPENAMES_
+#undef GTEST_5_TYPENAMES_
+#undef GTEST_6_TYPENAMES_
+#undef GTEST_7_TYPENAMES_
+#undef GTEST_8_TYPENAMES_
+#undef GTEST_9_TYPENAMES_
+#undef GTEST_10_TYPENAMES_
+
+#undef GTEST_DECLARE_TUPLE_AS_FRIEND_
+#undef GTEST_BY_REF_
+#undef GTEST_ADD_REF_
+#undef GTEST_TUPLE_ELEMENT_
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
diff --git a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-type-util.h b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-type-util.h
index 815da4b..093eee6 100644
--- a/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-type-util.h
+++ b/contrib/llvm/utils/unittest/googletest/include/gtest/internal/gtest-type-util.h
@@ -1,4 +1,6 @@
-// This file was GENERATED by a script. DO NOT EDIT BY HAND!!!
+// This file was GENERATED by command:
+// pump.py gtest-type-util.h.pump
+// DO NOT EDIT BY HAND!!!
// Copyright 2008 Google Inc.
// All Rights Reserved.
@@ -45,13 +47,13 @@
#include <gtest/internal/gtest-port.h>
#include <gtest/internal/gtest-string.h>
-#if defined(GTEST_HAS_TYPED_TEST) || defined(GTEST_HAS_TYPED_TEST_P)
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
-#ifdef __GNUC__
+// #ifdef __GNUC__ is too general here. It is possible to use gcc without using
+// libstdc++ (which is where cxxabi.h comes from).
+#ifdef __GLIBCXX__
#include <cxxabi.h>
-#endif // __GNUC__
-
-#include <typeinfo>
+#endif // __GLIBCXX__
namespace testing {
namespace internal {
@@ -74,7 +76,7 @@ String GetTypeName() {
#if GTEST_HAS_RTTI
const char* const name = typeid(T).name();
-#ifdef __GNUC__
+#ifdef __GLIBCXX__
int status = 0;
// gcc's implementation of typeid(T).name() mangles the type name,
// so we have to demangle it.
@@ -84,7 +86,7 @@ String GetTypeName() {
return name_str;
#else
return name;
-#endif // __GNUC__
+#endif // __GLIBCXX__
#else
return "<type>";
diff --git a/etc/mtree/BSD.include.dist b/etc/mtree/BSD.include.dist
index a138bba..18af5dd 100644
--- a/etc/mtree/BSD.include.dist
+++ b/etc/mtree/BSD.include.dist
@@ -84,7 +84,7 @@
..
..
clang
- 2.0
+ 2.8
..
..
crypto
diff --git a/lib/clang/Makefile b/lib/clang/Makefile
index 5cead23..cced6c1 100644
--- a/lib/clang/Makefile
+++ b/lib/clang/Makefile
@@ -13,6 +13,7 @@ SUBDIR= libclanganalysis \
libclangsema \
\
libllvmanalysis \
+ libllvmasmparser \
libllvmasmprinter \
libllvmbitreader \
libllvmbitwriter \
diff --git a/lib/clang/clang.build.mk b/lib/clang/clang.build.mk
index a42e011..fad4317 100644
--- a/lib/clang/clang.build.mk
+++ b/lib/clang/clang.build.mk
@@ -15,9 +15,7 @@ CFLAGS+= -O1
TARGET_ARCH?= ${MACHINE_ARCH}
# XXX: 8.0, to keep __FreeBSD_cc_version happy
-CFLAGS+=-DLLVM_HOSTTRIPLE=\"${TARGET_ARCH}-undermydesk-freebsd9.0\" \
- -DCLANG_VENDOR=\"FreeBSD\ \" -DSVN_REVISION=\"104832\" \
- -DCLANG_VENDOR_SUFFIX=\"\ 20100615\"
+CFLAGS+=-DLLVM_HOSTTRIPLE=\"${TARGET_ARCH}-undermydesk-freebsd9.0\"
.PATH: ${LLVM_SRCS}/${SRCDIR}
@@ -48,6 +46,26 @@ ${arch:T}Gen${hdr:H:C/$/.inc.h/}: ${LLVM_SRCS}/lib/Target/${arch:H}/${arch:T}.td
. endfor
.endfor
+Attrs.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
+ ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
+ -gen-clang-attr-classes ${.ALLSRC} > ${.TARGET}
+
+AttrList.inc.h: ${CLANG_SRCS}/include/clang/Basic/Attr.td
+ ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
+ -gen-clang-attr-list ${.ALLSRC} > ${.TARGET}
+
+DeclNodes.inc.h: ${CLANG_SRCS}/include/clang/Basic/DeclNodes.td
+ ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
+ -gen-clang-decl-nodes ${.ALLSRC} > ${.TARGET}
+
+StmtNodes.inc.h: ${CLANG_SRCS}/include/clang/Basic/StmtNodes.td
+ ${TBLGEN} -I${CLANG_SRCS}/include/clang/AST \
+ -gen-clang-stmt-nodes ${.ALLSRC} > ${.TARGET}
+
+arm_neon.inc.h: ${CLANG_SRCS}/include/clang/Basic/arm_neon.td
+ ${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
+ -gen-arm-neon-sema ${.ALLSRC} > ${.TARGET}
+
DiagnosticGroups.inc.h: ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td
${TBLGEN} -I${CLANG_SRCS}/include/clang/Basic \
-gen-clang-diag-groups \
@@ -58,22 +76,20 @@ Diagnostic${hdr}Kinds.inc.h: ${CLANG_SRCS}/include/clang/Basic/Diagnostic.td
-gen-clang-diags-defs -clang-component=${hdr} \
${CLANG_SRCS}/include/clang/Basic/Diagnostic.td > ${.TARGET}
.endfor
-CC1AsOptions.inc.h: ${CLANG_SRCS}/include/clang/Driver/CC1AsOptions.td
+Options.inc.h: ${CLANG_SRCS}/include/clang/Driver/Options.td
${TBLGEN} -I${CLANG_SRCS}/include/clang/Driver \
-gen-opt-parser-defs \
- ${CLANG_SRCS}/include/clang/Driver/CC1AsOptions.td > ${.TARGET}
+ ${CLANG_SRCS}/include/clang/Driver/Options.td > ${.TARGET}
+
CC1Options.inc.h: ${CLANG_SRCS}/include/clang/Driver/CC1Options.td
${TBLGEN} -I${CLANG_SRCS}/include/clang/Driver \
-gen-opt-parser-defs \
${CLANG_SRCS}/include/clang/Driver/CC1Options.td > ${.TARGET}
-Options.inc.h: ${CLANG_SRCS}/include/clang/Driver/Options.td
+
+CC1AsOptions.inc.h: ${CLANG_SRCS}/include/clang/Driver/CC1AsOptions.td
${TBLGEN} -I${CLANG_SRCS}/include/clang/Driver \
-gen-opt-parser-defs \
- ${CLANG_SRCS}/include/clang/Driver/Options.td > ${.TARGET}
-StmtNodes.inc.h: ${CLANG_SRCS}/include/clang/AST/StmtNodes.td
- ${TBLGEN} -I${CLANG_SRCS}/include/clang/AST \
- -gen-clang-stmt-nodes \
- ${CLANG_SRCS}/include/clang/AST/StmtNodes.td > ${.TARGET}
+ ${CLANG_SRCS}/include/clang/Driver/CC1AsOptions.td > ${.TARGET}
SRCS+= ${TGHDRS:C/$/.inc.h/}
DPADD+= ${TGHDRS:C/$/.inc.h/}
diff --git a/lib/clang/include/Makefile b/lib/clang/include/Makefile
index db85f80..e8319f8 100644
--- a/lib/clang/include/Makefile
+++ b/lib/clang/include/Makefile
@@ -2,9 +2,14 @@
.PATH: ${.CURDIR}/../../../contrib/llvm/tools/clang/lib/Headers
-INCSDIR=${INCLUDEDIR}/clang/2.0
+INCSDIR=${INCLUDEDIR}/clang/2.8
-INCS= emmintrin.h mm_malloc.h mmintrin.h pmmintrin.h tmmintrin.h xmmintrin.h
+INCS= emmintrin.h \
+ mm_malloc.h \
+ mmintrin.h \
+ pmmintrin.h \
+ tmmintrin.h \
+ xmmintrin.h
.include <bsd.init.mk>
.include <bsd.incs.mk>
diff --git a/lib/clang/include/clang/AST/Attrs.inc b/lib/clang/include/clang/AST/Attrs.inc
new file mode 100644
index 0000000..0eee102
--- /dev/null
+++ b/lib/clang/include/clang/AST/Attrs.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "Attrs.inc.h"
diff --git a/lib/clang/include/clang/AST/DeclNodes.inc b/lib/clang/include/clang/AST/DeclNodes.inc
new file mode 100644
index 0000000..d5b9a90
--- /dev/null
+++ b/lib/clang/include/clang/AST/DeclNodes.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "DeclNodes.inc.h"
diff --git a/lib/clang/include/clang/Basic/AttrList.inc b/lib/clang/include/clang/Basic/AttrList.inc
new file mode 100644
index 0000000..319eb5a
--- /dev/null
+++ b/lib/clang/include/clang/Basic/AttrList.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "AttrList.inc.h"
diff --git a/lib/clang/include/clang/Basic/Version.inc b/lib/clang/include/clang/Basic/Version.inc
new file mode 100644
index 0000000..f8515b7
--- /dev/null
+++ b/lib/clang/include/clang/Basic/Version.inc
@@ -0,0 +1,10 @@
+/* $FreeBSD$ */
+
+#define CLANG_VERSION 2.8
+#define CLANG_VERSION_MAJOR 2
+#define CLANG_VERSION_MINOR 8
+
+#define CLANG_VENDOR "FreeBSD "
+#define CLANG_VENDOR_SUFFIX " 20100720"
+
+#define SVN_REVISION "108428"
diff --git a/lib/clang/include/clang/Basic/arm_neon.inc b/lib/clang/include/clang/Basic/arm_neon.inc
new file mode 100644
index 0000000..7b4c875
--- /dev/null
+++ b/lib/clang/include/clang/Basic/arm_neon.inc
@@ -0,0 +1,2 @@
+/* $FreeBSD$ */
+#include "arm_neon.inc.h"
diff --git a/lib/clang/include/llvm/Config/AsmParsers.def b/lib/clang/include/llvm/Config/AsmParsers.def
index c6e1ffd..0fdc4ff 100644
--- a/lib/clang/include/llvm/Config/AsmParsers.def
+++ b/lib/clang/include/llvm/Config/AsmParsers.def
@@ -1,30 +1,6 @@
/* $FreeBSD$ */
-//===- llvm/Config/AsmParsers.def - LLVM Assembly Parsers -------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file enumerates all of the assembly-language parsers
-// supported by this build of LLVM. Clients of this file should define
-// the LLVM_ASM_PARSER macro to be a function-like macro with a
-// single parameter (the name of the target whose assembly can be
-// generated); including this file will then enumerate all of the
-// targets with assembly parsers.
-//
-// The set of targets supported by LLVM is generated at configuration
-// time, at which point this header is generated. Do not modify this
-// header directly.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_ASM_PARSER
-# error Please define the macro LLVM_ASM_PARSER(TargetName)
-#endif
-
-LLVM_ASM_PARSER(ARM) LLVM_ASM_PARSER(X86)
+LLVM_ASM_PARSER(ARM)
+LLVM_ASM_PARSER(X86)
#undef LLVM_ASM_PARSER
diff --git a/lib/clang/include/llvm/Config/AsmPrinters.def b/lib/clang/include/llvm/Config/AsmPrinters.def
index 76e2377..ab48b4a 100644
--- a/lib/clang/include/llvm/Config/AsmPrinters.def
+++ b/lib/clang/include/llvm/Config/AsmPrinters.def
@@ -1,30 +1,8 @@
/* $FreeBSD$ */
-//===- llvm/Config/AsmPrinters.def - LLVM Assembly Printers -----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file enumerates all of the assembly-language printers
-// supported by this build of LLVM. Clients of this file should define
-// the LLVM_ASM_PRINTER macro to be a function-like macro with a
-// single parameter (the name of the target whose assembly can be
-// generated); including this file will then enumerate all of the
-// targets with assembly printers.
-//
-// The set of targets supported by LLVM is generated at configuration
-// time, at which point this header is generated. Do not modify this
-// header directly.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_ASM_PRINTER
-# error Please define the macro LLVM_ASM_PRINTER(TargetName)
-#endif
-
-LLVM_ASM_PRINTER(Mips) LLVM_ASM_PRINTER(ARM) LLVM_ASM_PRINTER(PowerPC) LLVM_ASM_PRINTER(X86)
+LLVM_ASM_PRINTER(Mips)
+LLVM_ASM_PRINTER(ARM)
+LLVM_ASM_PRINTER(PowerPC)
+LLVM_ASM_PRINTER(X86)
#undef LLVM_ASM_PRINTER
diff --git a/lib/clang/include/llvm/Config/Disassemblers.def b/lib/clang/include/llvm/Config/Disassemblers.def
index bc79e01..9d9093a 100644
--- a/lib/clang/include/llvm/Config/Disassemblers.def
+++ b/lib/clang/include/llvm/Config/Disassemblers.def
@@ -1,30 +1,5 @@
/* $FreeBSD$ */
-//===- llvm/Config/Disassemblers.def - LLVM Assembly Parsers ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file enumerates all of the assembly-language parsers
-// supported by this build of LLVM. Clients of this file should define
-// the LLVM_ASM_PARSER macro to be a function-like macro with a
-// single parameter (the name of the target whose assembly can be
-// generated); including this file will then enumerate all of the
-// targets with assembly parsers.
-//
-// The set of targets supported by LLVM is generated at configuration
-// time, at which point this header is generated. Do not modify this
-// header directly.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_DISASSEMBLER
-# error Please define the macro LLVM_DISASSEMBLER(TargetName)
-#endif
-
-LLVM_DISASSEMBLER(X86)
+LLVM_DISASSEMBLER(X86)
#undef LLVM_DISASSEMBLER
diff --git a/lib/clang/include/llvm/Config/Targets.def b/lib/clang/include/llvm/Config/Targets.def
index 47ae809..e33b41e 100644
--- a/lib/clang/include/llvm/Config/Targets.def
+++ b/lib/clang/include/llvm/Config/Targets.def
@@ -1,29 +1,8 @@
/* $FreeBSD$ */
-/*===- llvm/Config/Targets.def - LLVM Target Architectures ------*- C++ -*-===*\
-|* *|
-|* The LLVM Compiler Infrastructure *|
-|* *|
-|* This file is distributed under the University of Illinois Open Source *|
-|* License. See LICENSE.TXT for details. *|
-|* *|
-|*===----------------------------------------------------------------------===*|
-|* *|
-|* This file enumerates all of the target architectures supported by *|
-|* this build of LLVM. Clients of this file should define the *|
-|* LLVM_TARGET macro to be a function-like macro with a single *|
-|* parameter (the name of the target); including this file will then *|
-|* enumerate all of the targets. *|
-|* *|
-|* The set of targets supported by LLVM is generated at configuration *|
-|* time, at which point this header is generated. Do not modify this *|
-|* header directly. *|
-|* *|
-\*===----------------------------------------------------------------------===*/
-#ifndef LLVM_TARGET
-# error Please define the macro LLVM_TARGET(TargetName)
-#endif
-
-LLVM_TARGET(Mips) LLVM_TARGET(ARM) LLVM_TARGET(PowerPC) LLVM_TARGET(X86)
+LLVM_TARGET(Mips)
+LLVM_TARGET(ARM)
+LLVM_TARGET(PowerPC)
+LLVM_TARGET(X86)
#undef LLVM_TARGET
diff --git a/lib/clang/libclanganalysis/Makefile b/lib/clang/libclanganalysis/Makefile
index c88d2b9..ad64529 100644
--- a/lib/clang/libclanganalysis/Makefile
+++ b/lib/clang/libclanganalysis/Makefile
@@ -3,10 +3,18 @@
LIB= clanganalysis
SRCDIR= tools/clang/lib/Analysis
-SRCS= AnalysisContext.cpp CFG.cpp LiveVariables.cpp \
- PrintfFormatString.cpp ReachableCode.cpp \
+SRCS= AnalysisContext.cpp \
+ CFG.cpp \
+ LiveVariables.cpp \
+ PrintfFormatString.cpp \
+ ReachableCode.cpp \
UninitializedValues.cpp
-TGHDRS= DiagnosticAnalysisKinds DiagnosticCommonKinds StmtNodes
+TGHDRS= AttrList \
+ Attrs \
+ DeclNodes \
+ DiagnosticAnalysisKinds \
+ DiagnosticCommonKinds \
+ StmtNodes
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangast/Makefile b/lib/clang/libclangast/Makefile
index 2061939..06cb000 100644
--- a/lib/clang/libclangast/Makefile
+++ b/lib/clang/libclangast/Makefile
@@ -3,17 +3,49 @@
LIB= clangast
SRCDIR= tools/clang/lib/AST
-SRCS= APValue.cpp ASTConsumer.cpp ASTContext.cpp ASTDiagnostic.cpp \
- ASTImporter.cpp AttrImpl.cpp CXXInheritance.cpp Decl.cpp \
- DeclBase.cpp DeclCXX.cpp DeclFriend.cpp DeclGroup.cpp \
- DeclObjC.cpp DeclPrinter.cpp DeclTemplate.cpp \
- DeclarationName.cpp Expr.cpp ExprCXX.cpp ExprConstant.cpp \
- FullExpr.cpp InheritViz.cpp NestedNameSpecifier.cpp \
- ParentMap.cpp RecordLayout.cpp RecordLayoutBuilder.cpp \
- Stmt.cpp StmtDumper.cpp StmtIterator.cpp StmtPrinter.cpp \
- StmtProfile.cpp StmtViz.cpp TemplateBase.cpp TemplateName.cpp \
- Type.cpp TypeLoc.cpp TypePrinter.cpp
+SRCS= APValue.cpp \
+ ASTConsumer.cpp \
+ ASTContext.cpp \
+ ASTDiagnostic.cpp \
+ ASTImporter.cpp \
+ AttrImpl.cpp \
+ CXXInheritance.cpp \
+ Decl.cpp \
+ DeclBase.cpp \
+ DeclCXX.cpp \
+ DeclFriend.cpp \
+ DeclGroup.cpp \
+ DeclObjC.cpp \
+ DeclPrinter.cpp \
+ DeclTemplate.cpp \
+ DeclarationName.cpp \
+ Expr.cpp \
+ ExprCXX.cpp \
+ ExprClassification.cpp \
+ ExprConstant.cpp \
+ FullExpr.cpp \
+ InheritViz.cpp \
+ NestedNameSpecifier.cpp \
+ ParentMap.cpp \
+ RecordLayout.cpp \
+ RecordLayoutBuilder.cpp \
+ Stmt.cpp \
+ StmtDumper.cpp \
+ StmtIterator.cpp \
+ StmtPrinter.cpp \
+ StmtProfile.cpp \
+ StmtViz.cpp \
+ TemplateBase.cpp \
+ TemplateName.cpp \
+ Type.cpp \
+ TypeLoc.cpp \
+ TypePrinter.cpp
-TGHDRS= DiagnosticASTKinds DiagnosticCommonKinds StmtNodes
+TGHDRS= AttrList \
+ Attrs \
+ DeclNodes \
+ DiagnosticASTKinds \
+ DiagnosticCommonKinds \
+ StmtNodes
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangbasic/Makefile b/lib/clang/libclangbasic/Makefile
index 48e90a2..cd57855 100644
--- a/lib/clang/libclangbasic/Makefile
+++ b/lib/clang/libclangbasic/Makefile
@@ -3,13 +3,29 @@
LIB= clangbasic
SRCDIR= tools/clang/lib/Basic
-SRCS= Builtins.cpp ConvertUTF.c Diagnostic.cpp FileManager.cpp \
- IdentifierTable.cpp SourceLocation.cpp SourceManager.cpp \
- TargetInfo.cpp Targets.cpp TokenKinds.cpp Version.cpp
-
-TGHDRS= DiagnosticASTKinds DiagnosticAnalysisKinds \
- DiagnosticCommonKinds DiagnosticDriverKinds \
- DiagnosticFrontendKinds DiagnosticGroups DiagnosticLexKinds \
- DiagnosticParseKinds DiagnosticSemaKinds
+SRCS= Builtins.cpp \
+ ConvertUTF.c \
+ Diagnostic.cpp \
+ FileManager.cpp \
+ IdentifierTable.cpp \
+ SourceLocation.cpp \
+ SourceManager.cpp \
+ TargetInfo.cpp \
+ Targets.cpp \
+ TokenKinds.cpp \
+ Version.cpp
+
+TGHDRS= AttrList \
+ Attrs \
+ DiagnosticASTKinds \
+ DiagnosticAnalysisKinds \
+ DiagnosticCommonKinds \
+ DiagnosticDriverKinds \
+ DiagnosticFrontendKinds \
+ DiagnosticGroups \
+ DiagnosticLexKinds \
+ DiagnosticParseKinds \
+ DiagnosticSemaKinds \
+ arm_neon
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangchecker/Makefile b/lib/clang/libclangchecker/Makefile
index e052768..0bcecef 100644
--- a/lib/clang/libclangchecker/Makefile
+++ b/lib/clang/libclangchecker/Makefile
@@ -3,36 +3,87 @@
LIB= clangchecker
SRCDIR= tools/clang/lib/Checker
-SRCS= AdjustedReturnValueChecker.cpp AggExprVisitor.cpp \
- ArrayBoundChecker.cpp AttrNonNullChecker.cpp \
- BasicConstraintManager.cpp BasicObjCFoundationChecks.cpp \
- BasicStore.cpp BasicValueFactory.cpp BugReporter.cpp \
- BugReporterVisitors.cpp BuiltinFunctionChecker.cpp \
- CFRefCount.cpp CallAndMessageChecker.cpp CallInliner.cpp \
- CastSizeChecker.cpp CastToStructChecker.cpp \
- CheckDeadStores.cpp CheckObjCDealloc.cpp \
- CheckObjCInstMethSignature.cpp CheckSecuritySyntaxOnly.cpp \
- CheckSizeofPointer.cpp Checker.cpp CocoaConventions.cpp \
- DereferenceChecker.cpp DivZeroChecker.cpp Environment.cpp \
- ExplodedGraph.cpp FixedAddressChecker.cpp FlatStore.cpp \
- GRBlockCounter.cpp GRCXXExprEngine.cpp GRCoreEngine.cpp \
- GRExprEngine.cpp GRExprEngineExperimentalChecks.cpp \
- GRState.cpp LLVMConventionsChecker.cpp MacOSXAPIChecker.cpp \
- MallocChecker.cpp ManagerRegistry.cpp MemRegion.cpp \
- NSAutoreleasePoolChecker.cpp NSErrorChecker.cpp \
- NoReturnFunctionChecker.cpp OSAtomicChecker.cpp \
- ObjCUnusedIVarsChecker.cpp PathDiagnostic.cpp \
- PointerArithChecker.cpp PointerSubChecker.cpp \
- PthreadLockChecker.cpp RangeConstraintManager.cpp \
- RegionStore.cpp ReturnPointerRangeChecker.cpp \
- ReturnStackAddressChecker.cpp ReturnUndefChecker.cpp SVals.cpp \
- SValuator.cpp SimpleConstraintManager.cpp SimpleSValuator.cpp \
- Store.cpp SymbolManager.cpp UndefBranchChecker.cpp \
- UndefCapturedBlockVarChecker.cpp UndefResultChecker.cpp \
+SRCS= AdjustedReturnValueChecker.cpp \
+ AggExprVisitor.cpp \
+ AnalysisConsumer.cpp \
+ ArrayBoundChecker.cpp \
+ AttrNonNullChecker.cpp \
+ BasicConstraintManager.cpp \
+ BasicObjCFoundationChecks.cpp \
+ BasicStore.cpp \
+ BasicValueFactory.cpp \
+ BugReporter.cpp \
+ BugReporterVisitors.cpp \
+ BuiltinFunctionChecker.cpp \
+ CFRefCount.cpp \
+ CStringChecker.cpp \
+ CallAndMessageChecker.cpp \
+ CallInliner.cpp \
+ CastSizeChecker.cpp \
+ CastToStructChecker.cpp \
+ CheckDeadStores.cpp \
+ CheckObjCDealloc.cpp \
+ CheckObjCInstMethSignature.cpp \
+ CheckSecuritySyntaxOnly.cpp \
+ CheckSizeofPointer.cpp \
+ Checker.cpp \
+ CocoaConventions.cpp \
+ DereferenceChecker.cpp \
+ DivZeroChecker.cpp \
+ Environment.cpp \
+ ExplodedGraph.cpp \
+ FixedAddressChecker.cpp \
+ FlatStore.cpp \
+ FrontendActions.cpp \
+ GRBlockCounter.cpp \
+ GRCXXExprEngine.cpp \
+ GRCoreEngine.cpp \
+ GRExprEngine.cpp \
+ GRExprEngineExperimentalChecks.cpp \
+ GRState.cpp \
+ HTMLDiagnostics.cpp \
+ IdempotentOperationChecker.cpp \
+ LLVMConventionsChecker.cpp \
+ MacOSXAPIChecker.cpp \
+ MallocChecker.cpp \
+ ManagerRegistry.cpp \
+ MemRegion.cpp \
+ NSAutoreleasePoolChecker.cpp \
+ NSErrorChecker.cpp \
+ NoReturnFunctionChecker.cpp \
+ OSAtomicChecker.cpp \
+ ObjCUnusedIVarsChecker.cpp \
+ PathDiagnostic.cpp \
+ PlistDiagnostics.cpp \
+ PointerArithChecker.cpp \
+ PointerSubChecker.cpp \
+ PthreadLockChecker.cpp \
+ RangeConstraintManager.cpp \
+ RegionStore.cpp \
+ ReturnPointerRangeChecker.cpp \
+ ReturnUndefChecker.cpp \
+ SVals.cpp \
+ SValuator.cpp \
+ SimpleConstraintManager.cpp \
+ SimpleSValuator.cpp \
+ StackAddrLeakChecker.cpp \
+ StreamChecker.cpp \
+ Store.cpp \
+ SymbolManager.cpp \
+ UndefBranchChecker.cpp \
+ UndefCapturedBlockVarChecker.cpp \
+ UndefResultChecker.cpp \
UndefinedArraySubscriptChecker.cpp \
UndefinedAssignmentChecker.cpp \
- UnixAPIChecker.cpp VLASizeChecker.cpp ValueManager.cpp
+ UnixAPIChecker.cpp \
+ VLASizeChecker.cpp \
+ ValueManager.cpp
-TGHDRS= DiagnosticAnalysisKinds DiagnosticCommonKinds StmtNodes
+TGHDRS=	AttrList \
+	Attrs \
+ DeclNodes \
+ DiagnosticAnalysisKinds \
+ DiagnosticCommonKinds \
+ StmtNodes
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangcodegen/Makefile b/lib/clang/libclangcodegen/Makefile
index 0b9e2f5..82e423b 100644
--- a/lib/clang/libclangcodegen/Makefile
+++ b/lib/clang/libclangcodegen/Makefile
@@ -3,15 +3,48 @@
LIB= clangcodegen
SRCDIR= tools/clang/lib/CodeGen
-SRCS= CGBlocks.cpp CGBuiltin.cpp CGCXX.cpp CGCall.cpp CGClass.cpp \
- CGDebugInfo.cpp CGDecl.cpp CGDeclCXX.cpp CGException.cpp \
- CGExpr.cpp CGExprAgg.cpp CGExprCXX.cpp CGExprComplex.cpp \
- CGExprConstant.cpp CGExprScalar.cpp CGObjC.cpp CGObjCGNU.cpp \
- CGObjCMac.cpp CGRTTI.cpp CGRecordLayoutBuilder.cpp CGStmt.cpp \
- CGTemporaries.cpp CGVTT.cpp CGVTables.cpp CodeGenFunction.cpp \
- CodeGenModule.cpp CodeGenTypes.cpp ItaniumCXXABI.cpp \
- Mangle.cpp ModuleBuilder.cpp TargetInfo.cpp
+SRCS= BackendUtil.cpp \
+ CGBlocks.cpp \
+ CGBuiltin.cpp \
+ CGCXX.cpp \
+ CGCall.cpp \
+ CGClass.cpp \
+ CGDebugInfo.cpp \
+ CGDecl.cpp \
+ CGDeclCXX.cpp \
+ CGException.cpp \
+ CGExpr.cpp \
+ CGExprAgg.cpp \
+ CGExprCXX.cpp \
+ CGExprComplex.cpp \
+ CGExprConstant.cpp \
+ CGExprScalar.cpp \
+ CGObjC.cpp \
+ CGObjCGNU.cpp \
+ CGObjCMac.cpp \
+ CGRTTI.cpp \
+ CGRecordLayoutBuilder.cpp \
+ CGStmt.cpp \
+ CGTemporaries.cpp \
+ CGVTT.cpp \
+ CGVTables.cpp \
+ CodeGenAction.cpp \
+ CodeGenFunction.cpp \
+ CodeGenModule.cpp \
+ CodeGenTypes.cpp \
+ ItaniumCXXABI.cpp \
+ Mangle.cpp \
+ MicrosoftCXXABI.cpp \
+ ModuleBuilder.cpp \
+ TargetInfo.cpp
-TGHDRS= DiagnosticCommonKinds Intrinsics StmtNodes
+TGHDRS=	AttrList \
+	Attrs \
+ DeclNodes \
+ DiagnosticCommonKinds \
+ DiagnosticFrontendKinds \
+ Intrinsics \
+ StmtNodes \
+ arm_neon
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangdriver/Makefile b/lib/clang/libclangdriver/Makefile
index 7f05334..ce9d251 100644
--- a/lib/clang/libclangdriver/Makefile
+++ b/lib/clang/libclangdriver/Makefile
@@ -3,12 +3,29 @@
LIB= clangdriver
SRCDIR= tools/clang/lib/Driver
-SRCS= Action.cpp Arg.cpp ArgList.cpp CC1AsOptions.cpp CC1Options.cpp \
- Compilation.cpp Driver.cpp DriverOptions.cpp HostInfo.cpp \
- Job.cpp OptTable.cpp Option.cpp Phases.cpp Tool.cpp \
- ToolChain.cpp ToolChains.cpp Tools.cpp Types.cpp
+SRCS= Action.cpp \
+ Arg.cpp \
+ ArgList.cpp \
+ CC1AsOptions.cpp \
+ CC1Options.cpp \
+ Compilation.cpp \
+ Driver.cpp \
+ DriverOptions.cpp \
+ HostInfo.cpp \
+ Job.cpp \
+ OptTable.cpp \
+ Option.cpp \
+ Phases.cpp \
+ Tool.cpp \
+ ToolChain.cpp \
+ ToolChains.cpp \
+ Tools.cpp \
+ Types.cpp
-TGHDRS= CC1AsOptions CC1Options DiagnosticCommonKinds \
- DiagnosticDriverKinds Options
+TGHDRS= CC1AsOptions \
+ CC1Options \
+ DiagnosticCommonKinds \
+ DiagnosticDriverKinds \
+ Options
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangfrontend/Makefile b/lib/clang/libclangfrontend/Makefile
index 68c4620..7024a60 100644
--- a/lib/clang/libclangfrontend/Makefile
+++ b/lib/clang/libclangfrontend/Makefile
@@ -3,23 +3,49 @@
LIB= clangfrontend
SRCDIR= tools/clang/lib/Frontend
-SRCS= ASTConsumers.cpp ASTMerge.cpp ASTUnit.cpp AnalysisConsumer.cpp \
- BoostConAction.cpp CacheTokens.cpp CodeGenAction.cpp \
- CompilerInstance.cpp CompilerInvocation.cpp DeclXML.cpp \
- DependencyFile.cpp DiagChecker.cpp DocumentXML.cpp \
- FixItRewriter.cpp FrontendAction.cpp FrontendActions.cpp \
- FrontendOptions.cpp GeneratePCH.cpp HTMLDiagnostics.cpp \
- HTMLPrint.cpp InitHeaderSearch.cpp InitPreprocessor.cpp \
- LangStandards.cpp PCHReader.cpp PCHReaderDecl.cpp \
- PCHReaderStmt.cpp PCHWriter.cpp PCHWriterDecl.cpp \
- PCHWriterStmt.cpp PlistDiagnostics.cpp \
- PrintParserCallbacks.cpp PrintPreprocessedOutput.cpp \
- RewriteMacros.cpp RewriteObjC.cpp RewriteTest.cpp StmtXML.cpp \
- TextDiagnosticBuffer.cpp TextDiagnosticPrinter.cpp TypeXML.cpp \
- VerifyDiagnosticsClient.cpp Warnings.cpp
+SRCS= ASTConsumers.cpp \
+ ASTMerge.cpp \
+ ASTUnit.cpp \
+ BoostConAction.cpp \
+ CacheTokens.cpp \
+ CompilerInstance.cpp \
+ CompilerInvocation.cpp \
+ DeclXML.cpp \
+ DependencyFile.cpp \
+ DiagChecker.cpp \
+ DocumentXML.cpp \
+ FrontendAction.cpp \
+ FrontendActions.cpp \
+ FrontendOptions.cpp \
+ GeneratePCH.cpp \
+ InitHeaderSearch.cpp \
+ InitPreprocessor.cpp \
+ LangStandards.cpp \
+ PCHReader.cpp \
+ PCHReaderDecl.cpp \
+ PCHReaderStmt.cpp \
+ PCHWriter.cpp \
+ PCHWriterDecl.cpp \
+ PCHWriterStmt.cpp \
+ PrintParserCallbacks.cpp \
+ PrintPreprocessedOutput.cpp \
+ StmtXML.cpp \
+ TextDiagnosticBuffer.cpp \
+ TextDiagnosticPrinter.cpp \
+ TypeXML.cpp \
+ VerifyDiagnosticsClient.cpp \
+ Warnings.cpp
-TGHDRS= CC1Options DiagnosticASTKinds DiagnosticCommonKinds \
- DiagnosticDriverKinds DiagnosticFrontendKinds \
- DiagnosticLexKinds DiagnosticSemaKinds StmtNodes
+TGHDRS= AttrList \
+ Attrs \
+ CC1Options \
+ DeclNodes \
+ DiagnosticASTKinds \
+ DiagnosticCommonKinds \
+ DiagnosticDriverKinds \
+ DiagnosticFrontendKinds \
+ DiagnosticLexKinds \
+ DiagnosticSemaKinds \
+ StmtNodes
.include "../clang.lib.mk"
diff --git a/lib/clang/libclanglex/Makefile b/lib/clang/libclanglex/Makefile
index fc9f9b2..989be0d 100644
--- a/lib/clang/libclanglex/Makefile
+++ b/lib/clang/libclanglex/Makefile
@@ -3,13 +3,27 @@
LIB= clanglex
SRCDIR= tools/clang/lib/Lex
-SRCS= HeaderMap.cpp HeaderSearch.cpp Lexer.cpp LiteralSupport.cpp \
- MacroArgs.cpp MacroInfo.cpp PPCaching.cpp PPDirectives.cpp \
- PPExpressions.cpp PPLexerChange.cpp PPMacroExpansion.cpp \
- PTHLexer.cpp Pragma.cpp PreprocessingRecord.cpp \
- Preprocessor.cpp PreprocessorLexer.cpp ScratchBuffer.cpp \
- TokenConcatenation.cpp TokenLexer.cpp
-
-TGHDRS= DiagnosticCommonKinds DiagnosticLexKinds
+SRCS= HeaderMap.cpp \
+ HeaderSearch.cpp \
+ Lexer.cpp \
+ LiteralSupport.cpp \
+ MacroArgs.cpp \
+ MacroInfo.cpp \
+ PPCaching.cpp \
+ PPDirectives.cpp \
+ PPExpressions.cpp \
+ PPLexerChange.cpp \
+ PPMacroExpansion.cpp \
+ PTHLexer.cpp \
+ Pragma.cpp \
+ PreprocessingRecord.cpp \
+ Preprocessor.cpp \
+ PreprocessorLexer.cpp \
+ ScratchBuffer.cpp \
+ TokenConcatenation.cpp \
+ TokenLexer.cpp
+
+TGHDRS= DiagnosticCommonKinds \
+ DiagnosticLexKinds
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangparse/Makefile b/lib/clang/libclangparse/Makefile
index a755c65..0877749 100644
--- a/lib/clang/libclangparse/Makefile
+++ b/lib/clang/libclangparse/Makefile
@@ -3,12 +3,23 @@
LIB= clangparse
SRCDIR= tools/clang/lib/Parse
-SRCS= AttributeList.cpp DeclSpec.cpp MinimalAction.cpp \
- ParseCXXInlineMethods.cpp ParseDecl.cpp ParseDeclCXX.cpp \
- ParseExpr.cpp ParseExprCXX.cpp ParseInit.cpp ParseObjc.cpp \
- ParsePragma.cpp ParseStmt.cpp ParseTemplate.cpp \
- ParseTentative.cpp Parser.cpp
+SRCS= AttributeList.cpp \
+ DeclSpec.cpp \
+ MinimalAction.cpp \
+ ParseCXXInlineMethods.cpp \
+ ParseDecl.cpp \
+ ParseDeclCXX.cpp \
+ ParseExpr.cpp \
+ ParseExprCXX.cpp \
+ ParseInit.cpp \
+ ParseObjc.cpp \
+ ParsePragma.cpp \
+ ParseStmt.cpp \
+ ParseTemplate.cpp \
+ ParseTentative.cpp \
+ Parser.cpp
-TGHDRS= DiagnosticCommonKinds DiagnosticParseKinds
+TGHDRS= DiagnosticCommonKinds \
+ DiagnosticParseKinds
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangrewrite/Makefile b/lib/clang/libclangrewrite/Makefile
index c564283..496c699 100644
--- a/lib/clang/libclangrewrite/Makefile
+++ b/lib/clang/libclangrewrite/Makefile
@@ -3,9 +3,23 @@
LIB= clangrewrite
SRCDIR= tools/clang/lib/Rewrite
-SRCS= DeltaTree.cpp HTMLRewrite.cpp RewriteRope.cpp Rewriter.cpp \
+SRCS= DeltaTree.cpp \
+ FixItRewriter.cpp \
+ FrontendActions.cpp \
+ HTMLPrint.cpp \
+ HTMLRewrite.cpp \
+ RewriteMacros.cpp \
+ RewriteObjC.cpp \
+ RewriteRope.cpp \
+ RewriteTest.cpp \
+ Rewriter.cpp \
TokenRewriter.cpp
-TGHDRS= DiagnosticCommonKinds StmtNodes
+TGHDRS= AttrList \
+ Attrs \
+ DeclNodes \
+ DiagnosticCommonKinds \
+ DiagnosticFrontendKinds \
+ StmtNodes
.include "../clang.lib.mk"
diff --git a/lib/clang/libclangsema/Makefile b/lib/clang/libclangsema/Makefile
index 9f69dc1..865c7e7 100644
--- a/lib/clang/libclangsema/Makefile
+++ b/lib/clang/libclangsema/Makefile
@@ -3,21 +3,46 @@
LIB= clangsema
SRCDIR= tools/clang/lib/Sema
-SRCS= AnalysisBasedWarnings.cpp CodeCompleteConsumer.cpp \
- IdentifierResolver.cpp IdentifierResolver.cpp \
- JumpDiagnostics.cpp ParseAST.cpp Sema.cpp SemaAccess.cpp \
- SemaAttr.cpp SemaCXXCast.cpp SemaCXXScopeSpec.cpp \
- SemaChecking.cpp SemaCodeComplete.cpp SemaDecl.cpp \
- SemaDeclAttr.cpp SemaDeclCXX.cpp SemaDeclObjC.cpp \
- SemaExceptionSpec.cpp SemaExpr.cpp SemaExprCXX.cpp \
- SemaExprObjC.cpp SemaInit.cpp SemaLookup.cpp \
- SemaObjCProperty.cpp SemaOverload.cpp SemaStmt.cpp \
- SemaTemplate.cpp SemaTemplateDeduction.cpp \
- SemaTemplateInstantiate.cpp SemaTemplateInstantiate.cpp \
- SemaTemplateInstantiateDecl.cpp SemaType.cpp \
+SRCS= AnalysisBasedWarnings.cpp \
+ CodeCompleteConsumer.cpp \
+	IdentifierResolver.cpp \
+ JumpDiagnostics.cpp \
+ ParseAST.cpp \
+ Sema.cpp \
+ SemaAccess.cpp \
+ SemaAttr.cpp \
+ SemaCXXCast.cpp \
+ SemaCXXScopeSpec.cpp \
+ SemaChecking.cpp \
+ SemaCodeComplete.cpp \
+ SemaDecl.cpp \
+ SemaDeclAttr.cpp \
+ SemaDeclCXX.cpp \
+ SemaDeclObjC.cpp \
+ SemaExceptionSpec.cpp \
+ SemaExpr.cpp \
+ SemaExprCXX.cpp \
+ SemaExprObjC.cpp \
+ SemaInit.cpp \
+ SemaLookup.cpp \
+ SemaObjCProperty.cpp \
+ SemaOverload.cpp \
+ SemaStmt.cpp \
+ SemaTemplate.cpp \
+ SemaTemplateDeduction.cpp \
+	SemaTemplateInstantiate.cpp \
+ SemaTemplateInstantiateDecl.cpp \
+ SemaType.cpp \
TargetAttributesSema.cpp
-TGHDRS= DiagnosticASTKinds DiagnosticCommonKinds DiagnosticParseKinds \
- DiagnosticSemaKinds StmtNodes
+TGHDRS= AttrList \
+ Attrs \
+ DeclNodes \
+ DiagnosticASTKinds \
+ DiagnosticCommonKinds \
+ DiagnosticParseKinds \
+ DiagnosticSemaKinds \
+ StmtNodes \
+ arm_neon
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmanalysis/Makefile b/lib/clang/libllvmanalysis/Makefile
index 1297cb8..0edabfd 100644
--- a/lib/clang/libllvmanalysis/Makefile
+++ b/lib/clang/libllvmanalysis/Makefile
@@ -3,22 +3,51 @@
LIB= llvmanalysis
SRCDIR= lib/Analysis
-SRCS= AliasAnalysis.cpp AliasAnalysisCounter.cpp \
- AliasAnalysisEvaluator.cpp AliasDebugger.cpp \
- AliasSetTracker.cpp Analysis.cpp BasicAliasAnalysis.cpp \
- CFGPrinter.cpp CaptureTracking.cpp ConstantFolding.cpp \
- DbgInfoPrinter.cpp DebugInfo.cpp IVUsers.cpp InlineCost.cpp \
- InstCount.cpp InstructionSimplify.cpp Interval.cpp \
- IntervalPartition.cpp LazyValueInfo.cpp \
- LibCallAliasAnalysis.cpp Lint.cpp LiveValues.cpp \
- LoopDependenceAnalysis.cpp LoopInfo.cpp LoopPass.cpp \
- MemoryBuiltins.cpp MemoryDependenceAnalysis.cpp \
- PHITransAddr.cpp PointerTracking.cpp PostDominators.cpp \
- ProfileEstimatorPass.cpp ProfileInfo.cpp ProfileInfoLoader.cpp \
- ProfileInfoLoaderPass.cpp ProfileVerifierPass.cpp \
- ScalarEvolution.cpp ScalarEvolutionAliasAnalysis.cpp \
- ScalarEvolutionExpander.cpp ScalarEvolutionNormalization.cpp \
- SparsePropagation.cpp ValueTracking.cpp
+SRCS= AliasAnalysis.cpp \
+ AliasAnalysisCounter.cpp \
+ AliasAnalysisEvaluator.cpp \
+ AliasDebugger.cpp \
+ AliasSetTracker.cpp \
+ Analysis.cpp \
+ BasicAliasAnalysis.cpp \
+ CFGPrinter.cpp \
+ CaptureTracking.cpp \
+ ConstantFolding.cpp \
+ DbgInfoPrinter.cpp \
+ DebugInfo.cpp \
+ IVUsers.cpp \
+ InlineCost.cpp \
+ InstCount.cpp \
+ InstructionSimplify.cpp \
+ Interval.cpp \
+ IntervalPartition.cpp \
+ LazyValueInfo.cpp \
+ LibCallAliasAnalysis.cpp \
+ LibCallSemantics.cpp \
+ Lint.cpp \
+ LiveValues.cpp \
+ Loads.cpp \
+ LoopDependenceAnalysis.cpp \
+ LoopInfo.cpp \
+ LoopPass.cpp \
+ MemoryBuiltins.cpp \
+ MemoryDependenceAnalysis.cpp \
+ ModuleDebugInfoPrinter.cpp \
+ PHITransAddr.cpp \
+ PointerTracking.cpp \
+ PostDominators.cpp \
+ ProfileEstimatorPass.cpp \
+ ProfileInfo.cpp \
+ ProfileInfoLoader.cpp \
+ ProfileInfoLoaderPass.cpp \
+ ProfileVerifierPass.cpp \
+ ScalarEvolution.cpp \
+ ScalarEvolutionAliasAnalysis.cpp \
+ ScalarEvolutionExpander.cpp \
+ ScalarEvolutionNormalization.cpp \
+ SparsePropagation.cpp \
+ Trace.cpp \
+ ValueTracking.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmarmasmparser/Makefile b/lib/clang/libllvmarmasmparser/Makefile
index 205ff38..272d955 100644
--- a/lib/clang/libllvmarmasmparser/Makefile
+++ b/lib/clang/libllvmarmasmparser/Makefile
@@ -4,8 +4,11 @@ LIB= llvmarmasmparser
SRCDIR= lib/Target/ARM/AsmParser
INCDIR= lib/Target/ARM
-SRCS= ARMAsmParser.cpp ARMAsmLexer.cpp
+SRCS=	ARMAsmLexer.cpp \
+	ARMAsmParser.cpp
-TGHDRS= ARMGenRegisterInfo.h ARMGenRegisterNames ARMGenInstrNames
+TGHDRS= ARMGenInstrNames \
+ ARMGenRegisterInfo.h \
+ ARMGenRegisterNames
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmarmasmprinter/Makefile b/lib/clang/libllvmarmasmprinter/Makefile
index b986b88..1566ea5 100644
--- a/lib/clang/libllvmarmasmprinter/Makefile
+++ b/lib/clang/libllvmarmasmprinter/Makefile
@@ -4,9 +4,13 @@ LIB= llvmarmasmprinter
SRCDIR= lib/Target/ARM/AsmPrinter
INCDIR= lib/Target/ARM
-SRCS= ARMAsmPrinter.cpp ARMInstPrinter.cpp ARMMCInstLower.cpp
+SRCS= ARMAsmPrinter.cpp \
+ ARMInstPrinter.cpp \
+ ARMMCInstLower.cpp
-TGHDRS= ARMGenAsmWriter ARMGenInstrNames ARMGenRegisterInfo.h \
+TGHDRS= ARMGenAsmWriter \
+ ARMGenInstrNames \
+ ARMGenRegisterInfo.h \
ARMGenRegisterNames
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmarmcodegen/Makefile b/lib/clang/libllvmarmcodegen/Makefile
index 2e09d4b..de34461 100644
--- a/lib/clang/libllvmarmcodegen/Makefile
+++ b/lib/clang/libllvmarmcodegen/Makefile
@@ -3,21 +3,42 @@
LIB= llvmarmcodegen
SRCDIR= lib/Target/ARM
-SRCS= ARMBaseInstrInfo.cpp ARMBaseRegisterInfo.cpp \
- ARMCodeEmitter.cpp ARMConstantIslandPass.cpp \
- ARMConstantPoolValue.cpp ARMExpandPseudoInsts.cpp \
- ARMISelDAGToDAG.cpp ARMISelLowering.cpp ARMInstrInfo.cpp \
- ARMJITInfo.cpp ARMLoadStoreOptimizer.cpp \
- ARMLoadStoreOptimizer.cpp ARMMCAsmInfo.cpp ARMRegisterInfo.cpp \
- ARMSelectionDAGInfo.cpp ARMSubtarget.cpp ARMTargetMachine.cpp \
- ARMTargetObjectFile.cpp NEONMoveFix.cpp NEONPreAllocPass.cpp \
- Thumb1InstrInfo.cpp Thumb1RegisterInfo.cpp \
- Thumb2ITBlockPass.cpp Thumb2InstrInfo.cpp \
- Thumb2RegisterInfo.cpp Thumb2SizeReduction.cpp
+SRCS= ARMBaseInstrInfo.cpp \
+ ARMBaseRegisterInfo.cpp \
+ ARMCodeEmitter.cpp \
+ ARMConstantIslandPass.cpp \
+ ARMConstantPoolValue.cpp \
+ ARMExpandPseudoInsts.cpp \
+ ARMISelDAGToDAG.cpp \
+ ARMISelLowering.cpp \
+ ARMInstrInfo.cpp \
+ ARMJITInfo.cpp \
+	ARMLoadStoreOptimizer.cpp \
+ ARMMCAsmInfo.cpp \
+ ARMRegisterInfo.cpp \
+ ARMSelectionDAGInfo.cpp \
+ ARMSubtarget.cpp \
+ ARMTargetMachine.cpp \
+ ARMTargetObjectFile.cpp \
+ NEONMoveFix.cpp \
+ NEONPreAllocPass.cpp \
+ Thumb1InstrInfo.cpp \
+ Thumb1RegisterInfo.cpp \
+ Thumb2HazardRecognizer.cpp \
+ Thumb2ITBlockPass.cpp \
+ Thumb2InstrInfo.cpp \
+ Thumb2RegisterInfo.cpp \
+ Thumb2SizeReduction.cpp
-TGHDRS= ARMGenCallingConv ARMGenCodeEmitter ARMGenDAGISel \
- ARMGenInstrInfo ARMGenInstrNames ARMGenRegisterInfo.h \
- ARMGenRegisterInfo ARMGenRegisterNames ARMGenSubtarget \
+TGHDRS= ARMGenCallingConv \
+ ARMGenCodeEmitter \
+ ARMGenDAGISel \
+ ARMGenInstrInfo \
+ ARMGenInstrNames \
+ ARMGenRegisterInfo.h \
+ ARMGenRegisterInfo \
+ ARMGenRegisterNames \
+ ARMGenSubtarget \
Intrinsics
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmarminfo/Makefile b/lib/clang/libllvmarminfo/Makefile
index d087bbb..440b244 100644
--- a/lib/clang/libllvmarminfo/Makefile
+++ b/lib/clang/libllvmarminfo/Makefile
@@ -6,6 +6,7 @@ SRCDIR= lib/Target/ARM/TargetInfo/
INCDIR= lib/Target/ARM
SRCS= ARMTargetInfo.cpp
-TGHDRS= ARMGenRegisterNames ARMGenInstrNames
+TGHDRS= ARMGenInstrNames \
+ ARMGenRegisterNames
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmasmparser/Makefile b/lib/clang/libllvmasmparser/Makefile
new file mode 100644
index 0000000..8ceba69
--- /dev/null
+++ b/lib/clang/libllvmasmparser/Makefile
@@ -0,0 +1,10 @@
+# $FreeBSD$
+
+LIB= llvmasmparser
+
+SRCDIR= lib/AsmParser
+SRCS= LLLexer.cpp \
+ LLParser.cpp \
+ Parser.cpp
+
+.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmasmprinter/Makefile b/lib/clang/libllvmasmprinter/Makefile
index 70f13f6..37cceec 100644
--- a/lib/clang/libllvmasmprinter/Makefile
+++ b/lib/clang/libllvmasmprinter/Makefile
@@ -3,7 +3,12 @@
LIB= llvmasmprinter
SRCDIR= lib/CodeGen/AsmPrinter
-SRCS= AsmPrinter.cpp AsmPrinterDwarf.cpp AsmPrinterInlineAsm.cpp \
- DIE.cpp DwarfDebug.cpp DwarfException.cpp OcamlGCPrinter.cpp
+SRCS= AsmPrinter.cpp \
+ AsmPrinterDwarf.cpp \
+ AsmPrinterInlineAsm.cpp \
+ DIE.cpp \
+ DwarfDebug.cpp \
+ DwarfException.cpp \
+ OcamlGCPrinter.cpp
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmbitreader/Makefile b/lib/clang/libllvmbitreader/Makefile
index b955f0c..6162801 100644
--- a/lib/clang/libllvmbitreader/Makefile
+++ b/lib/clang/libllvmbitreader/Makefile
@@ -3,7 +3,8 @@
LIB= llvmbitreader
SRCDIR= lib/Bitcode/Reader
-SRCS= BitcodeReader.cpp
+SRCS= BitReader.cpp \
+ BitcodeReader.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmbitwriter/Makefile b/lib/clang/libllvmbitwriter/Makefile
index fbfa453..5e64bdb 100644
--- a/lib/clang/libllvmbitwriter/Makefile
+++ b/lib/clang/libllvmbitwriter/Makefile
@@ -3,7 +3,9 @@
LIB= llvmbitwriter
SRCDIR= lib/Bitcode/Writer
-SRCS= BitcodeWriter.cpp BitcodeWriterPass.cpp \
+SRCS= BitWriter.cpp \
+ BitcodeWriter.cpp \
+ BitcodeWriterPass.cpp \
ValueEnumerator.cpp
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmcodegen/Makefile b/lib/clang/libllvmcodegen/Makefile
index e233226..caed836 100644
--- a/lib/clang/libllvmcodegen/Makefile
+++ b/lib/clang/libllvmcodegen/Makefile
@@ -3,36 +3,84 @@
LIB= llvmcodegen
SRCDIR= lib/CodeGen
-SRCS= AggressiveAntiDepBreaker.cpp Analysis.cpp BranchFolding.cpp \
- CalcSpillWeights.cpp CodePlacementOpt.cpp \
- CriticalAntiDepBreaker.cpp DeadMachineInstructionElim.cpp \
- DwarfEHPrepare.cpp ELFCodeEmitter.cpp ELFWriter.cpp \
- ExactHazardRecognizer.cpp GCMetadata.cpp GCStrategy.cpp \
- IfConversion.cpp IntrinsicLowering.cpp LLVMTargetMachine.cpp \
- LatencyPriorityQueue.cpp LiveInterval.cpp \
- LiveIntervalAnalysis.cpp LiveStackAnalysis.cpp \
- LiveVariables.cpp LowerSubregs.cpp MachineBasicBlock.cpp \
- MachineCSE.cpp MachineDominators.cpp MachineFunction.cpp \
- MachineFunctionAnalysis.cpp MachineFunctionPass.cpp \
- MachineFunctionPrinterPass.cpp MachineInstr.cpp \
- MachineLICM.cpp MachineLoopInfo.cpp MachineModuleInfo.cpp \
- MachineModuleInfoImpls.cpp MachinePassRegistry.cpp \
- MachineRegisterInfo.cpp MachineSSAUpdater.cpp MachineSink.cpp \
- MachineVerifier.cpp ObjectCodeEmitter.cpp OcamlGC.cpp \
- OptimizeExts.cpp OptimizePHIs.cpp PHIElimination.cpp \
- Passes.cpp PostRASchedulerList.cpp PreAllocSplitting.cpp \
- ProcessImplicitDefs.cpp PrologEpilogInserter.cpp \
- PseudoSourceValue.cpp RegAllocFast.cpp RegAllocLinearScan.cpp \
- RegAllocLocal.cpp RegAllocPBQP.cpp RegisterCoalescer.cpp \
- RegisterScavenging.cpp ScheduleDAG.cpp ScheduleDAGEmit.cpp \
- ScheduleDAGInstrs.cpp ScheduleDAGPrinter.cpp ShadowStackGC.cpp \
- ShrinkWrapping.cpp SimpleRegisterCoalescing.cpp \
- SjLjEHPrepare.cpp SlotIndexes.cpp Spiller.cpp \
- StackProtector.cpp StackSlotColoring.cpp \
- StrongPHIElimination.cpp TailDuplication.cpp \
- TargetInstrInfoImpl.cpp TargetLoweringObjectFileImpl.cpp \
- TwoAddressInstructionPass.cpp UnreachableBlockElim.cpp \
- VirtRegMap.cpp VirtRegRewriter.cpp
+SRCS= AggressiveAntiDepBreaker.cpp \
+ Analysis.cpp \
+ BranchFolding.cpp \
+ CalcSpillWeights.cpp \
+ CallingConvLower.cpp \
+ CodePlacementOpt.cpp \
+ CriticalAntiDepBreaker.cpp \
+ DeadMachineInstructionElim.cpp \
+ DwarfEHPrepare.cpp \
+ ELFCodeEmitter.cpp \
+ ELFWriter.cpp \
+ GCMetadata.cpp \
+ GCMetadataPrinter.cpp \
+ GCStrategy.cpp \
+ IfConversion.cpp \
+ InlineSpiller.cpp \
+ IntrinsicLowering.cpp \
+ LLVMTargetMachine.cpp \
+ LatencyPriorityQueue.cpp \
+ LiveInterval.cpp \
+ LiveIntervalAnalysis.cpp \
+ LiveStackAnalysis.cpp \
+ LiveVariables.cpp \
+ LowerSubregs.cpp \
+ MachineBasicBlock.cpp \
+ MachineCSE.cpp \
+ MachineDominators.cpp \
+ MachineFunction.cpp \
+ MachineFunctionAnalysis.cpp \
+ MachineFunctionPass.cpp \
+ MachineFunctionPrinterPass.cpp \
+ MachineInstr.cpp \
+ MachineLICM.cpp \
+ MachineLoopInfo.cpp \
+ MachineModuleInfo.cpp \
+ MachineModuleInfoImpls.cpp \
+ MachinePassRegistry.cpp \
+ MachineRegisterInfo.cpp \
+ MachineSSAUpdater.cpp \
+ MachineSink.cpp \
+ MachineVerifier.cpp \
+ ObjectCodeEmitter.cpp \
+ OcamlGC.cpp \
+ OptimizeExts.cpp \
+ OptimizePHIs.cpp \
+ PHIElimination.cpp \
+ Passes.cpp \
+ PostRAHazardRecognizer.cpp \
+ PostRASchedulerList.cpp \
+ PreAllocSplitting.cpp \
+ ProcessImplicitDefs.cpp \
+ PrologEpilogInserter.cpp \
+ PseudoSourceValue.cpp \
+ RegAllocFast.cpp \
+ RegAllocLinearScan.cpp \
+ RegAllocPBQP.cpp \
+ RegisterCoalescer.cpp \
+ RegisterScavenging.cpp \
+ ScheduleDAG.cpp \
+ ScheduleDAGEmit.cpp \
+ ScheduleDAGInstrs.cpp \
+ ScheduleDAGPrinter.cpp \
+ ShadowStackGC.cpp \
+ ShrinkWrapping.cpp \
+ SimpleRegisterCoalescing.cpp \
+ SjLjEHPrepare.cpp \
+ SlotIndexes.cpp \
+ Spiller.cpp \
+ StackProtector.cpp \
+ StackSlotColoring.cpp \
+ StrongPHIElimination.cpp \
+ TailDuplication.cpp \
+ TargetInstrInfoImpl.cpp \
+ TargetLoweringObjectFileImpl.cpp \
+ TwoAddressInstructionPass.cpp \
+ UnreachableBlockElim.cpp \
+ VirtRegMap.cpp \
+ VirtRegRewriter.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmcore/Makefile b/lib/clang/libllvmcore/Makefile
index c8de7bb..1215f4e 100644
--- a/lib/clang/libllvmcore/Makefile
+++ b/lib/clang/libllvmcore/Makefile
@@ -3,15 +3,38 @@
LIB= llvmcore
SRCDIR= lib/VMCore
-SRCS= AsmWriter.cpp Attributes.cpp AutoUpgrade.cpp BasicBlock.cpp \
- ConstantFold.cpp Constants.cpp Core.cpp DebugLoc.cpp \
- Dominators.cpp Function.cpp Globals.cpp IRBuilder.cpp \
- InlineAsm.cpp Instruction.cpp Instructions.cpp \
- IntrinsicInst.cpp LLVMContext.cpp LLVMContextImpl.cpp \
- LeakDetector.cpp Metadata.cpp Module.cpp Pass.cpp \
- PassManager.cpp PrintModulePass.cpp Type.cpp \
- TypeSymbolTable.cpp Use.cpp Value.cpp ValueSymbolTable.cpp \
- ValueTypes.cpp Verifier.cpp
+SRCS= AsmWriter.cpp \
+ Attributes.cpp \
+ AutoUpgrade.cpp \
+ BasicBlock.cpp \
+ ConstantFold.cpp \
+ Constants.cpp \
+ Core.cpp \
+ DebugLoc.cpp \
+ Dominators.cpp \
+ Function.cpp \
+ GVMaterializer.cpp \
+ Globals.cpp \
+ IRBuilder.cpp \
+ InlineAsm.cpp \
+ Instruction.cpp \
+ Instructions.cpp \
+ IntrinsicInst.cpp \
+ LLVMContext.cpp \
+ LLVMContextImpl.cpp \
+ LeakDetector.cpp \
+ Metadata.cpp \
+ Module.cpp \
+ Pass.cpp \
+ PassManager.cpp \
+ PrintModulePass.cpp \
+ Type.cpp \
+ TypeSymbolTable.cpp \
+ Use.cpp \
+ Value.cpp \
+ ValueSymbolTable.cpp \
+ ValueTypes.cpp \
+ Verifier.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvminstcombine/Makefile b/lib/clang/libllvminstcombine/Makefile
index 48ccc05..9f3a602 100644
--- a/lib/clang/libllvminstcombine/Makefile
+++ b/lib/clang/libllvminstcombine/Makefile
@@ -3,12 +3,18 @@
LIB= llvminstcombine
SRCDIR= lib/Transforms/InstCombine
-SRCS= InstCombineAddSub.cpp InstCombineAndOrXor.cpp \
- InstCombineCalls.cpp InstCombineCasts.cpp \
- InstCombineCompares.cpp InstCombineLoadStoreAlloca.cpp \
- InstCombineMulDivRem.cpp InstCombinePHI.cpp \
- InstCombineSelect.cpp InstCombineShifts.cpp \
- InstCombineSimplifyDemanded.cpp InstCombineVectorOps.cpp \
+SRCS= InstCombineAddSub.cpp \
+ InstCombineAndOrXor.cpp \
+ InstCombineCalls.cpp \
+ InstCombineCasts.cpp \
+ InstCombineCompares.cpp \
+ InstCombineLoadStoreAlloca.cpp \
+ InstCombineMulDivRem.cpp \
+ InstCombinePHI.cpp \
+ InstCombineSelect.cpp \
+ InstCombineShifts.cpp \
+ InstCombineSimplifyDemanded.cpp \
+ InstCombineVectorOps.cpp \
InstructionCombining.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmipa/Makefile b/lib/clang/libllvmipa/Makefile
index fa2a1f4..fc241df 100644
--- a/lib/clang/libllvmipa/Makefile
+++ b/lib/clang/libllvmipa/Makefile
@@ -3,7 +3,9 @@
LIB= llvmipa
SRCDIR= lib/Analysis/IPA
-SRCS= CallGraph.cpp CallGraphSCCPass.cpp FindUsedTypes.cpp \
+SRCS= CallGraph.cpp \
+ CallGraphSCCPass.cpp \
+ FindUsedTypes.cpp \
GlobalsModRef.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmipo/Makefile b/lib/clang/libllvmipo/Makefile
index defcec8..24dc9ba 100644
--- a/lib/clang/libllvmipo/Makefile
+++ b/lib/clang/libllvmipo/Makefile
@@ -3,13 +3,28 @@
LIB= llvmipo
SRCDIR= lib/Transforms/IPO
-SRCS= ArgumentPromotion.cpp ConstantMerge.cpp \
- DeadArgumentElimination.cpp DeadTypeElimination.cpp \
- ExtractGV.cpp FunctionAttrs.cpp GlobalDCE.cpp GlobalOpt.cpp \
- IPConstantPropagation.cpp InlineAlways.cpp InlineSimple.cpp \
- Inliner.cpp Internalize.cpp LoopExtractor.cpp LowerSetJmp.cpp \
- MergeFunctions.cpp PartialSpecialization.cpp PruneEH.cpp \
- StripDeadPrototypes.cpp StripSymbols.cpp StructRetPromotion.cpp
+SRCS= ArgumentPromotion.cpp \
+ ConstantMerge.cpp \
+ DeadArgumentElimination.cpp \
+ DeadTypeElimination.cpp \
+ ExtractGV.cpp \
+ FunctionAttrs.cpp \
+ GlobalDCE.cpp \
+ GlobalOpt.cpp \
+ IPConstantPropagation.cpp \
+ IPO.cpp \
+ InlineAlways.cpp \
+ InlineSimple.cpp \
+ Inliner.cpp \
+ Internalize.cpp \
+ LoopExtractor.cpp \
+ LowerSetJmp.cpp \
+ MergeFunctions.cpp \
+ PartialSpecialization.cpp \
+ PruneEH.cpp \
+ StripDeadPrototypes.cpp \
+ StripSymbols.cpp \
+ StructRetPromotion.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmmc/Makefile b/lib/clang/libllvmmc/Makefile
index 84d29c8..92ce186 100644
--- a/lib/clang/libllvmmc/Makefile
+++ b/lib/clang/libllvmmc/Makefile
@@ -3,12 +3,33 @@
LIB= llvmmc
SRCDIR= lib/MC
-SRCS= MCAsmInfo.cpp MCAsmInfoCOFF.cpp MCAsmInfoDarwin.cpp \
- MCAsmStreamer.cpp MCAssembler.cpp MCCodeEmitter.cpp \
- MCContext.cpp MCExpr.cpp MCInst.cpp MCInstPrinter.cpp \
- MCLoggingStreamer.cpp MCMachOStreamer.cpp MCNullStreamer.cpp \
- MCObjectWriter.cpp MCSection.cpp MCSectionCOFF.cpp \
- MCSectionELF.cpp MCSectionMachO.cpp MCStreamer.cpp \
- MCSymbol.cpp MachObjectWriter.cpp TargetAsmBackend.cpp
+SRCS= MCAsmInfo.cpp \
+ MCAsmInfoCOFF.cpp \
+ MCAsmInfoDarwin.cpp \
+ MCAsmStreamer.cpp \
+ MCAssembler.cpp \
+ MCCodeEmitter.cpp \
+ MCContext.cpp \
+ MCDisassembler.cpp \
+ MCExpr.cpp \
+ MCInst.cpp \
+ MCInstPrinter.cpp \
+ MCLabel.cpp \
+ MCLoggingStreamer.cpp \
+ MCMachOStreamer.cpp \
+ MCNullStreamer.cpp \
+ MCObjectStreamer.cpp \
+ MCObjectWriter.cpp \
+ MCSection.cpp \
+ MCSectionCOFF.cpp \
+ MCSectionELF.cpp \
+ MCSectionMachO.cpp \
+ MCStreamer.cpp \
+ MCSymbol.cpp \
+ MCValue.cpp \
+ MachObjectWriter.cpp \
+ TargetAsmBackend.cpp \
+ WinCOFFObjectWriter.cpp \
+ WinCOFFStreamer.cpp
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmmcparser/Makefile b/lib/clang/libllvmmcparser/Makefile
index df93925..9354af5 100644
--- a/lib/clang/libllvmmcparser/Makefile
+++ b/lib/clang/libllvmmcparser/Makefile
@@ -3,7 +3,13 @@
LIB= llvmmcparser
SRCDIR= lib/MC/MCParser
-SRCS= AsmLexer.cpp AsmParser.cpp MCAsmLexer.cpp MCAsmParser.cpp \
+SRCS= AsmLexer.cpp \
+ AsmParser.cpp \
+ DarwinAsmParser.cpp \
+ ELFAsmParser.cpp \
+ MCAsmLexer.cpp \
+ MCAsmParser.cpp \
+ MCAsmParserExtension.cpp \
TargetAsmParser.cpp
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmmipsasmprinter/Makefile b/lib/clang/libllvmmipsasmprinter/Makefile
index 2ee9c8e..6c21785 100644
--- a/lib/clang/libllvmmipsasmprinter/Makefile
+++ b/lib/clang/libllvmmipsasmprinter/Makefile
@@ -6,7 +6,9 @@ SRCDIR= lib/Target/Mips/AsmPrinter
INCDIR= lib/Target/Mips
SRCS= MipsAsmPrinter.cpp
-TGHDRS= MipsGenAsmWriter MipsGenInstrNames MipsGenRegisterInfo.h \
+TGHDRS= MipsGenAsmWriter \
+ MipsGenInstrNames \
+ MipsGenRegisterInfo.h \
MipsGenRegisterNames
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmmipscodegen/Makefile b/lib/clang/libllvmmipscodegen/Makefile
index 8d16aa9..d9b280f 100644
--- a/lib/clang/libllvmmipscodegen/Makefile
+++ b/lib/clang/libllvmmipscodegen/Makefile
@@ -3,13 +3,26 @@
LIB= llvmmipscodegen
SRCDIR= lib/Target/Mips
-SRCS= MipsDelaySlotFiller.cpp MipsISelDAGToDAG.cpp \
- MipsISelLowering.cpp MipsInstrInfo.cpp MipsMCAsmInfo.cpp \
- MipsRegisterInfo.cpp MipsSelectionDAGInfo.cpp MipsSubtarget.cpp \
- MipsTargetMachine.cpp MipsTargetObjectFile.cpp
-
-TGHDRS= Intrinsics MipsGenAsmWriter MipsGenCallingConv MipsGenDAGISel \
- MipsGenInstrInfo MipsGenInstrNames MipsGenRegisterInfo.h \
- MipsGenRegisterInfo MipsGenRegisterNames MipsGenSubtarget
+SRCS= MipsDelaySlotFiller.cpp \
+ MipsISelDAGToDAG.cpp \
+ MipsISelLowering.cpp \
+ MipsInstrInfo.cpp \
+ MipsMCAsmInfo.cpp \
+ MipsRegisterInfo.cpp \
+ MipsSelectionDAGInfo.cpp \
+ MipsSubtarget.cpp \
+ MipsTargetMachine.cpp \
+ MipsTargetObjectFile.cpp
+
+TGHDRS= Intrinsics \
+ MipsGenAsmWriter \
+ MipsGenCallingConv \
+ MipsGenDAGISel \
+ MipsGenInstrInfo \
+ MipsGenInstrNames \
+ MipsGenRegisterInfo.h \
+ MipsGenRegisterInfo \
+ MipsGenRegisterNames \
+ MipsGenSubtarget
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmmipsinfo/Makefile b/lib/clang/libllvmmipsinfo/Makefile
index 7a8a5eb..127c30c 100644
--- a/lib/clang/libllvmmipsinfo/Makefile
+++ b/lib/clang/libllvmmipsinfo/Makefile
@@ -6,6 +6,7 @@ SRCDIR= lib/Target/Mips/TargetInfo/
INCDIR= lib/Target/Mips
SRCS= MipsTargetInfo.cpp
-TGHDRS= MipsGenRegisterNames MipsGenInstrNames
+TGHDRS=	MipsGenInstrNames \
+	MipsGenRegisterNames
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmpowerpcasmprinter/Makefile b/lib/clang/libllvmpowerpcasmprinter/Makefile
index 8b8eb2a..15559b3 100644
--- a/lib/clang/libllvmpowerpcasmprinter/Makefile
+++ b/lib/clang/libllvmpowerpcasmprinter/Makefile
@@ -6,7 +6,9 @@ SRCDIR= lib/Target/PowerPC/AsmPrinter
INCDIR= lib/Target/PowerPC
SRCS= PPCAsmPrinter.cpp
-TGHDRS= PPCGenAsmWriter PPCGenInstrNames PPCGenRegisterInfo.h \
+TGHDRS= PPCGenAsmWriter \
+ PPCGenInstrNames \
+ PPCGenRegisterInfo.h \
PPCGenRegisterNames
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmpowerpccodegen/Makefile b/lib/clang/libllvmpowerpccodegen/Makefile
index 5248681..4c66f82 100644
--- a/lib/clang/libllvmpowerpccodegen/Makefile
+++ b/lib/clang/libllvmpowerpccodegen/Makefile
@@ -3,14 +3,29 @@
LIB= llvmpowerpccodegen
SRCDIR= lib/Target/PowerPC
-SRCS= PPCBranchSelector.cpp PPCCodeEmitter.cpp \
- PPCHazardRecognizers.cpp PPCISelDAGToDAG.cpp \
- PPCISelLowering.cpp PPCInstrInfo.cpp PPCJITInfo.cpp \
- PPCMCAsmInfo.cpp PPCPredicates.cpp PPCRegisterInfo.cpp \
- PPCSelectionDAGInfo.cpp PPCSubtarget.cpp PPCTargetMachine.cpp
+SRCS= PPCBranchSelector.cpp \
+ PPCCodeEmitter.cpp \
+ PPCHazardRecognizers.cpp \
+ PPCISelDAGToDAG.cpp \
+ PPCISelLowering.cpp \
+ PPCInstrInfo.cpp \
+ PPCJITInfo.cpp \
+ PPCMCAsmInfo.cpp \
+ PPCPredicates.cpp \
+ PPCRegisterInfo.cpp \
+ PPCSelectionDAGInfo.cpp \
+ PPCSubtarget.cpp \
+ PPCTargetMachine.cpp
-TGHDRS= Intrinsics PPCGenCallingConv PPCGenCodeEmitter PPCGenDAGISel \
- PPCGenInstrInfo PPCGenInstrNames PPCGenRegisterInfo.h \
- PPCGenRegisterInfo PPCGenRegisterNames PPCGenSubtarget
+TGHDRS= Intrinsics \
+ PPCGenCallingConv \
+ PPCGenCodeEmitter \
+ PPCGenDAGISel \
+ PPCGenInstrInfo \
+ PPCGenInstrNames \
+ PPCGenRegisterInfo.h \
+ PPCGenRegisterInfo \
+ PPCGenRegisterNames \
+ PPCGenSubtarget
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmpowerpcinfo/Makefile b/lib/clang/libllvmpowerpcinfo/Makefile
index 19ceb45..08c91e0 100644
--- a/lib/clang/libllvmpowerpcinfo/Makefile
+++ b/lib/clang/libllvmpowerpcinfo/Makefile
@@ -6,6 +6,7 @@ SRCDIR= lib/Target/PowerPC/TargetInfo/
INCDIR= lib/Target/PowerPC
SRCS= PowerPCTargetInfo.cpp
-TGHDRS= PPCGenRegisterNames PPCGenInstrNames
+TGHDRS=	PPCGenInstrNames \
+	PPCGenRegisterNames
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmscalaropts/Makefile b/lib/clang/libllvmscalaropts/Makefile
index 5d181cb..aee5d3e 100644
--- a/lib/clang/libllvmscalaropts/Makefile
+++ b/lib/clang/libllvmscalaropts/Makefile
@@ -3,15 +3,35 @@
LIB= llvmscalaropts
SRCDIR= lib/Transforms/Scalar
-SRCS= ADCE.cpp BasicBlockPlacement.cpp CodeGenPrepare.cpp \
- ConstantProp.cpp DCE.cpp DeadStoreElimination.cpp \
- GEPSplitter.cpp GVN.cpp IndVarSimplify.cpp JumpThreading.cpp \
- LICM.cpp LoopDeletion.cpp LoopIndexSplit.cpp LoopRotation.cpp \
- LoopStrengthReduce.cpp LoopUnrollPass.cpp LoopUnswitch.cpp \
- MemCpyOptimizer.cpp Reassociate.cpp Reg2Mem.cpp SCCP.cpp \
- ScalarReplAggregates.cpp SimplifyCFGPass.cpp \
- SimplifyHalfPowrLibCalls.cpp SimplifyLibCalls.cpp \
- TailDuplication.cpp TailRecursionElimination.cpp
+SRCS= ADCE.cpp \
+ BasicBlockPlacement.cpp \
+ CodeGenPrepare.cpp \
+ ConstantProp.cpp \
+ DCE.cpp \
+ DeadStoreElimination.cpp \
+ GEPSplitter.cpp \
+ GVN.cpp \
+ IndVarSimplify.cpp \
+ JumpThreading.cpp \
+ LICM.cpp \
+ LoopDeletion.cpp \
+ LoopIndexSplit.cpp \
+ LoopRotation.cpp \
+ LoopStrengthReduce.cpp \
+ LoopUnrollPass.cpp \
+ LoopUnswitch.cpp \
+ MemCpyOptimizer.cpp \
+ Reassociate.cpp \
+ Reg2Mem.cpp \
+ SCCP.cpp \
+ Scalar.cpp \
+ ScalarReplAggregates.cpp \
+	SimplifyCFGPass.cpp \
+	SimplifyHalfPowrLibCalls.cpp \
+	SimplifyLibCalls.cpp \
+	Sink.cpp \
+ TailDuplication.cpp \
+ TailRecursionElimination.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmselectiondag/Makefile b/lib/clang/libllvmselectiondag/Makefile
index ad9e5dc..6c508c1 100644
--- a/lib/clang/libllvmselectiondag/Makefile
+++ b/lib/clang/libllvmselectiondag/Makefile
@@ -3,13 +3,26 @@
LIB= llvmselectiondag
SRCDIR= lib/CodeGen/SelectionDAG
-SRCS= CallingConvLower.cpp DAGCombiner.cpp FastISel.cpp \
- FunctionLoweringInfo.cpp InstrEmitter.cpp LegalizeDAG.cpp \
- LegalizeFloatTypes.cpp LegalizeIntegerTypes.cpp LegalizeTypes.cpp \
- LegalizeTypesGeneric.cpp LegalizeVectorOps.cpp LegalizeVectorTypes.cpp \
- ScheduleDAGFast.cpp ScheduleDAGList.cpp ScheduleDAGRRList.cpp \
- ScheduleDAGSDNodes.cpp SelectionDAG.cpp SelectionDAGBuilder.cpp \
- SelectionDAGISel.cpp SelectionDAGPrinter.cpp TargetLowering.cpp \
+SRCS= DAGCombiner.cpp \
+ FastISel.cpp \
+ FunctionLoweringInfo.cpp \
+ InstrEmitter.cpp \
+ LegalizeDAG.cpp \
+ LegalizeFloatTypes.cpp \
+ LegalizeIntegerTypes.cpp \
+ LegalizeTypes.cpp \
+ LegalizeTypesGeneric.cpp \
+ LegalizeVectorOps.cpp \
+ LegalizeVectorTypes.cpp \
+ ScheduleDAGFast.cpp \
+ ScheduleDAGList.cpp \
+ ScheduleDAGRRList.cpp \
+ ScheduleDAGSDNodes.cpp \
+ SelectionDAG.cpp \
+ SelectionDAGBuilder.cpp \
+ SelectionDAGISel.cpp \
+ SelectionDAGPrinter.cpp \
+ TargetLowering.cpp \
TargetSelectionDAGInfo.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmsupport/Makefile b/lib/clang/libllvmsupport/Makefile
index ee3ec76..3a96db2 100644
--- a/lib/clang/libllvmsupport/Makefile
+++ b/lib/clang/libllvmsupport/Makefile
@@ -3,16 +3,50 @@
LIB= llvmsupport
SRCDIR= lib/Support
-SRCS= APFloat.cpp APInt.cpp APSInt.cpp Allocator.cpp CommandLine.cpp \
- ConstantRange.cpp Debug.cpp DeltaAlgorithm.cpp Dwarf.cpp \
- ErrorHandling.cpp FileUtilities.cpp FoldingSet.cpp \
- FormattedStream.cpp GraphWriter.cpp ManagedStatic.cpp \
- MemoryBuffer.cpp PluginLoader.cpp PrettyStackTrace.cpp \
- Regex.cpp SlowOperationInformer.cpp SmallPtrSet.cpp \
- SmallVector.cpp SourceMgr.cpp Statistic.cpp StringExtras.cpp \
- StringMap.cpp StringPool.cpp StringRef.cpp TargetRegistry.cpp \
- Timer.cpp Triple.cpp Twine.cpp circular_raw_ostream.cpp \
- raw_os_ostream.cpp raw_ostream.cpp regcomp.c regerror.c \
- regexec.c regfree.c regstrlcpy.c
+SRCS= APFloat.cpp \
+ APInt.cpp \
+ APSInt.cpp \
+ Allocator.cpp \
+ CommandLine.cpp \
+ ConstantRange.cpp \
+ DAGDeltaAlgorithm.cpp \
+ Debug.cpp \
+ DeltaAlgorithm.cpp \
+ Dwarf.cpp \
+ ErrorHandling.cpp \
+ FileUtilities.cpp \
+ FoldingSet.cpp \
+ FormattedStream.cpp \
+ GraphWriter.cpp \
+ IsInf.cpp \
+ IsNAN.cpp \
+ ManagedStatic.cpp \
+ MemoryBuffer.cpp \
+ MemoryObject.cpp \
+ PluginLoader.cpp \
+ PrettyStackTrace.cpp \
+ Regex.cpp \
+ SlowOperationInformer.cpp \
+ SmallPtrSet.cpp \
+ SmallVector.cpp \
+ SourceMgr.cpp \
+ Statistic.cpp \
+ StringExtras.cpp \
+ StringMap.cpp \
+ StringPool.cpp \
+ StringRef.cpp \
+ SystemUtils.cpp \
+ TargetRegistry.cpp \
+ Timer.cpp \
+ Triple.cpp \
+ Twine.cpp \
+ circular_raw_ostream.cpp \
+ raw_os_ostream.cpp \
+ raw_ostream.cpp \
+ regcomp.c \
+ regerror.c \
+ regexec.c \
+ regfree.c \
+ regstrlcpy.c
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmsystem/Makefile b/lib/clang/libllvmsystem/Makefile
index 6274a4d..ae8cdc9 100644
--- a/lib/clang/libllvmsystem/Makefile
+++ b/lib/clang/libllvmsystem/Makefile
@@ -3,9 +3,24 @@
LIB= llvmsystem
SRCDIR= lib/System
-SRCS= Atomic.cpp DynamicLibrary.cpp Errno.cpp Host.cpp Memory.cpp \
- Mutex.cpp Path.cpp Process.cpp Program.cpp RWMutex.cpp \
- SearchForAddressOfSpecialSymbol.cpp Signals.cpp \
- ThreadLocal.cpp Threading.cpp TimeValue.cpp Valgrind.cpp
+SRCS= Alarm.cpp \
+ Atomic.cpp \
+ Disassembler.cpp \
+ DynamicLibrary.cpp \
+ Errno.cpp \
+ Host.cpp \
+ IncludeFile.cpp \
+ Memory.cpp \
+ Mutex.cpp \
+ Path.cpp \
+ Process.cpp \
+ Program.cpp \
+ RWMutex.cpp \
+ SearchForAddressOfSpecialSymbol.cpp \
+ Signals.cpp \
+ ThreadLocal.cpp \
+ Threading.cpp \
+ TimeValue.cpp \
+ Valgrind.cpp
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmtarget/Makefile b/lib/clang/libllvmtarget/Makefile
index d468331..de00bc6 100644
--- a/lib/clang/libllvmtarget/Makefile
+++ b/lib/clang/libllvmtarget/Makefile
@@ -3,9 +3,18 @@
LIB= llvmtarget
SRCDIR= lib/Target
-SRCS= Mangler.cpp SubtargetFeature.cpp Target.cpp TargetAsmLexer.cpp \
- TargetData.cpp TargetELFWriterInfo.cpp TargetFrameInfo.cpp \
- TargetInstrInfo.cpp TargetLoweringObjectFile.cpp \
- TargetMachine.cpp TargetRegisterInfo.cpp TargetSubtarget.cpp
+SRCS= Mangler.cpp \
+ SubtargetFeature.cpp \
+ Target.cpp \
+ TargetAsmLexer.cpp \
+ TargetData.cpp \
+ TargetELFWriterInfo.cpp \
+ TargetFrameInfo.cpp \
+ TargetInstrInfo.cpp \
+ TargetIntrinsicInfo.cpp \
+ TargetLoweringObjectFile.cpp \
+ TargetMachine.cpp \
+ TargetRegisterInfo.cpp \
+ TargetSubtarget.cpp
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmtransformutils/Makefile b/lib/clang/libllvmtransformutils/Makefile
index 7e5e052..b4bdbec 100644
--- a/lib/clang/libllvmtransformutils/Makefile
+++ b/lib/clang/libllvmtransformutils/Makefile
@@ -3,14 +3,31 @@
LIB= llvmtransformutils
SRCDIR= lib/Transforms/Utils
-SRCS= AddrModeMatcher.cpp BasicBlockUtils.cpp BasicInliner.cpp \
- BreakCriticalEdges.cpp BuildLibCalls.cpp CloneFunction.cpp \
- CloneLoop.cpp CloneModule.cpp CodeExtractor.cpp \
- DemoteRegToStack.cpp InlineFunction.cpp InstructionNamer.cpp \
- LCSSA.cpp Local.cpp LoopSimplify.cpp LoopUnroll.cpp \
- LowerInvoke.cpp LowerSwitch.cpp Mem2Reg.cpp \
- PromoteMemoryToRegister.cpp SSAUpdater.cpp SSI.cpp \
- SimplifyCFG.cpp UnifyFunctionExitNodes.cpp ValueMapper.cpp
+SRCS= AddrModeMatcher.cpp \
+ BasicBlockUtils.cpp \
+ BasicInliner.cpp \
+ BreakCriticalEdges.cpp \
+ BuildLibCalls.cpp \
+ CloneFunction.cpp \
+ CloneLoop.cpp \
+ CloneModule.cpp \
+ CodeExtractor.cpp \
+ DemoteRegToStack.cpp \
+ InlineFunction.cpp \
+ InstructionNamer.cpp \
+ LCSSA.cpp \
+ Local.cpp \
+ LoopSimplify.cpp \
+ LoopUnroll.cpp \
+ LowerInvoke.cpp \
+ LowerSwitch.cpp \
+ Mem2Reg.cpp \
+ PromoteMemoryToRegister.cpp \
+ SSAUpdater.cpp \
+ SSI.cpp \
+ SimplifyCFG.cpp \
+ UnifyFunctionExitNodes.cpp \
+ ValueMapper.cpp
TGHDRS= Intrinsics
diff --git a/lib/clang/libllvmx86asmparser/Makefile b/lib/clang/libllvmx86asmparser/Makefile
index 717e75f..49f4b79 100644
--- a/lib/clang/libllvmx86asmparser/Makefile
+++ b/lib/clang/libllvmx86asmparser/Makefile
@@ -4,8 +4,11 @@ LIB= llvmx86asmparser
SRCDIR= lib/Target/X86/AsmParser
INCDIR= lib/Target/X86
-SRCS= X86AsmParser.cpp X86AsmLexer.cpp
+SRCS= X86AsmLexer.cpp \
+ X86AsmParser.cpp
-TGHDRS= X86GenRegisterNames X86GenInstrNames X86GenAsmMatcher
+TGHDRS= X86GenAsmMatcher \
+ X86GenInstrNames \
+ X86GenRegisterNames
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmx86asmprinter/Makefile b/lib/clang/libllvmx86asmprinter/Makefile
index ba4e23f..9a21b5e 100644
--- a/lib/clang/libllvmx86asmprinter/Makefile
+++ b/lib/clang/libllvmx86asmprinter/Makefile
@@ -4,10 +4,16 @@ LIB= llvmx86asmprinter
SRCDIR= lib/Target/X86/AsmPrinter
INCDIR= lib/Target/X86
-SRCS= X86ATTInstPrinter.cpp X86AsmPrinter.cpp \
- X86IntelInstPrinter.cpp X86MCInstLower.cpp
+SRCS= X86ATTInstPrinter.cpp \
+ X86AsmPrinter.cpp \
+ X86IntelInstPrinter.cpp \
+ X86MCInstLower.cpp
-TGHDRS= X86GenAsmWriter1 X86GenAsmWriter X86GenInstrInfo \
- X86GenInstrNames X86GenRegisterInfo.h X86GenRegisterNames
+TGHDRS= X86GenAsmWriter1 \
+ X86GenAsmWriter \
+ X86GenInstrInfo \
+ X86GenInstrNames \
+ X86GenRegisterInfo.h \
+ X86GenRegisterNames
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmx86codegen/Makefile b/lib/clang/libllvmx86codegen/Makefile
index 1c77bb8..df44a2d 100644
--- a/lib/clang/libllvmx86codegen/Makefile
+++ b/lib/clang/libllvmx86codegen/Makefile
@@ -3,17 +3,35 @@
LIB= llvmx86codegen
SRCDIR= lib/Target/X86
-SRCS= SSEDomainFix.cpp X86AsmBackend.cpp \
- X86COFFMachineModuleInfo.cpp X86CodeEmitter.cpp \
- X86ELFWriterInfo.cpp X86FastISel.cpp X86FloatingPoint.cpp \
- X86FloatingPointRegKill.cpp X86ISelDAGToDAG.cpp \
- X86ISelLowering.cpp X86InstrInfo.cpp X86JITInfo.cpp \
- X86MCAsmInfo.cpp X86MCCodeEmitter.cpp X86RegisterInfo.cpp \
- X86SelectionDAGInfo.cpp X86Subtarget.cpp X86TargetMachine.cpp \
+SRCS= SSEDomainFix.cpp \
+ X86AsmBackend.cpp \
+ X86COFFMachineModuleInfo.cpp \
+ X86CodeEmitter.cpp \
+ X86ELFWriterInfo.cpp \
+ X86FastISel.cpp \
+ X86FloatingPoint.cpp \
+ X86FloatingPointRegKill.cpp \
+ X86ISelDAGToDAG.cpp \
+ X86ISelLowering.cpp \
+ X86InstrInfo.cpp \
+ X86JITInfo.cpp \
+ X86MCAsmInfo.cpp \
+ X86MCCodeEmitter.cpp \
+ X86RegisterInfo.cpp \
+ X86SelectionDAGInfo.cpp \
+ X86Subtarget.cpp \
+ X86TargetMachine.cpp \
X86TargetObjectFile.cpp
-TGHDRS= Intrinsics X86GenCallingConv X86GenDAGISel X86GenFastISel \
- X86GenInstrInfo X86GenInstrNames X86GenRegisterInfo.h \
- X86GenRegisterInfo X86GenRegisterNames X86GenSubtarget
+TGHDRS= Intrinsics \
+ X86GenCallingConv \
+ X86GenDAGISel \
+ X86GenFastISel \
+ X86GenInstrInfo \
+ X86GenInstrNames \
+ X86GenRegisterInfo.h \
+ X86GenRegisterInfo \
+ X86GenRegisterNames \
+ X86GenSubtarget
.include "../clang.lib.mk"
diff --git a/lib/clang/libllvmx86info/Makefile b/lib/clang/libllvmx86info/Makefile
index b7aa9ab..6670a24 100644
--- a/lib/clang/libllvmx86info/Makefile
+++ b/lib/clang/libllvmx86info/Makefile
@@ -6,6 +6,7 @@ SRCDIR= lib/Target/X86/TargetInfo/
INCDIR= lib/Target/X86
SRCS= X86TargetInfo.cpp
-TGHDRS= X86GenRegisterNames X86GenInstrNames
+TGHDRS= X86GenInstrNames \
+ X86GenRegisterNames
.include "../clang.lib.mk"
diff --git a/usr.bin/clang/clang/Makefile b/usr.bin/clang/clang/Makefile
index d1c2254..670b57b 100644
--- a/usr.bin/clang/clang/Makefile
+++ b/usr.bin/clang/clang/Makefile
@@ -5,24 +5,63 @@
PROG_CXX=clang
SRCDIR= tools/clang/tools/driver
-SRCS= cc1_main.cpp cc1as_main.cpp driver.cpp
+SRCS= cc1_main.cpp \
+ cc1as_main.cpp \
+ driver.cpp
MAN=
LINKS= ${BINDIR}/clang ${BINDIR}/clang++
-TGHDRS= CC1AsOptions CC1Options DiagnosticCommonKinds \
- DiagnosticDriverKinds DiagnosticFrontendKinds \
- DiagnosticLexKinds DiagnosticSemaKinds Options
-LIBDEPS=clangfrontend clangdriver clangcodegen clangsema clangchecker \
- clanganalysis clangrewrite clangast clangparse clanglex clangbasic \
- \
- llvminstcombine llvmipo llvmbitwriter llvmbitreader \
- llvmpowerpccodegen llvmpowerpcasmprinter llvmpowerpcinfo \
- llvmx86asmparser llvmx86asmprinter llvmx86codegen llvmx86info \
- llvmmipsasmprinter llvmmipscodegen llvmmipsinfo \
- llvmarmasmparser llvmarmasmprinter llvmarmcodegen \
- llvmselectiondag llvmasmprinter llvmcodegen llvmscalaropts \
- llvmtransformutils llvmmc llvmmcparser llvmipa llvmanalysis \
- llvmtarget llvmmc llvmcore llvmarminfo llvmsupport llvmsystem
+TGHDRS= CC1AsOptions \
+ CC1Options \
+ DiagnosticCommonKinds \
+ DiagnosticDriverKinds \
+ DiagnosticFrontendKinds \
+ DiagnosticLexKinds \
+ DiagnosticSemaKinds \
+ Options
+LIBDEPS=clangfrontend \
+ clangdriver \
+ clangcodegen \
+ clangsema \
+ clangchecker \
+ clanganalysis \
+ clangrewrite \
+ clangast \
+ clangparse \
+ clanglex \
+ clangbasic \
+ llvminstcombine \
+ llvmipo \
+ llvmbitwriter \
+ llvmbitreader \
+ llvmpowerpccodegen \
+ llvmpowerpcasmprinter \
+ llvmpowerpcinfo \
+ llvmx86asmparser \
+ llvmx86asmprinter \
+ llvmx86codegen \
+ llvmx86info \
+ llvmmipsasmprinter \
+ llvmmipscodegen \
+ llvmmipsinfo \
+ llvmarmasmparser \
+ llvmarmasmprinter \
+ llvmarmcodegen \
+ llvmasmparser \
+ llvmselectiondag \
+ llvmasmprinter \
+ llvmcodegen \
+ llvmscalaropts \
+ llvmtransformutils \
+ llvmmc \
+ llvmmcparser \
+ llvmipa \
+ llvmanalysis \
+ llvmtarget \
+ llvmcore \
+ llvmarminfo \
+ llvmsupport \
+ llvmsystem
.include "../clang.prog.mk"
diff --git a/usr.bin/clang/tblgen/Makefile b/usr.bin/clang/tblgen/Makefile
index ea338bf..9cc6878 100644
--- a/usr.bin/clang/tblgen/Makefile
+++ b/usr.bin/clang/tblgen/Makefile
@@ -3,19 +3,42 @@
PROG_CXX=tblgen
SRCDIR= utils/TableGen
-SRCS= ARMDecoderEmitter.cpp AsmMatcherEmitter.cpp \
- AsmWriterEmitter.cpp AsmWriterInst.cpp CallingConvEmitter.cpp \
- ClangASTNodesEmitter.cpp ClangDiagnosticsEmitter.cpp \
- CodeEmitterGen.cpp CodeGenDAGPatterns.cpp \
- CodeGenInstruction.cpp CodeGenTarget.cpp DAGISelEmitter.cpp \
- DAGISelMatcher.cpp DAGISelMatcherEmitter.cpp \
- DAGISelMatcherGen.cpp DAGISelMatcherOpt.cpp \
- DisassemblerEmitter.cpp EDEmitter.cpp FastISelEmitter.cpp \
- InstrEnumEmitter.cpp InstrInfoEmitter.cpp IntrinsicEmitter.cpp \
- LLVMCConfigurationEmitter.cpp OptParserEmitter.cpp Record.cpp \
- RegisterInfoEmitter.cpp SubtargetEmitter.cpp TGLexer.cpp \
- TGParser.cpp TGValueTypes.cpp TableGen.cpp TableGenBackend.cpp \
- X86DisassemblerTables.cpp X86RecognizableInstr.cpp
+SRCS= ARMDecoderEmitter.cpp \
+ AsmMatcherEmitter.cpp \
+ AsmWriterEmitter.cpp \
+ AsmWriterInst.cpp \
+ CallingConvEmitter.cpp \
+ ClangASTNodesEmitter.cpp \
+ ClangAttrEmitter.cpp \
+ ClangDiagnosticsEmitter.cpp \
+ CodeEmitterGen.cpp \
+ CodeGenDAGPatterns.cpp \
+ CodeGenInstruction.cpp \
+ CodeGenTarget.cpp \
+ DAGISelEmitter.cpp \
+ DAGISelMatcher.cpp \
+ DAGISelMatcherEmitter.cpp \
+ DAGISelMatcherGen.cpp \
+ DAGISelMatcherOpt.cpp \
+ DisassemblerEmitter.cpp \
+ EDEmitter.cpp \
+ FastISelEmitter.cpp \
+ InstrEnumEmitter.cpp \
+ InstrInfoEmitter.cpp \
+ IntrinsicEmitter.cpp \
+ LLVMCConfigurationEmitter.cpp \
+ NeonEmitter.cpp \
+ OptParserEmitter.cpp \
+ Record.cpp \
+ RegisterInfoEmitter.cpp \
+ SubtargetEmitter.cpp \
+ TGLexer.cpp \
+ TGParser.cpp \
+ TGValueTypes.cpp \
+ TableGen.cpp \
+ TableGenBackend.cpp \
+ X86DisassemblerTables.cpp \
+ X86RecognizableInstr.cpp
MAN=
LIBDEPS=llvmsupport llvmsystem